Samples Python:

* Unused variables
* Bad indentation
pull/9460/head
tribta 7 years ago
parent 9bb17caa24
commit 7555ab194f
  1. samples/python/asift.py (2 lines changed)
  2. samples/python/browse.py (4 lines changed)
  3. samples/python/camshift.py (4 lines changed)
  4. samples/python/common.py (4 lines changed)
  5. samples/python/demo.py (2 lines changed)
  6. samples/python/digits.py (2 lines changed)
  7. samples/python/digits_video.py (4 lines changed)
  8. samples/python/find_obj.py (15 lines changed)
  9. samples/python/gaussian_mix.py (4 lines changed)
  10. samples/python/lappyr.py (2 lines changed)
  11. samples/python/letter_recog.py (15 lines changed)
  12. samples/python/lk_homography.py (6 lines changed)
  13. samples/python/lk_track.py (6 lines changed)
  14. samples/python/mosse.py (2 lines changed)
  15. samples/python/mouse_and_match.py (2 lines changed)
  16. samples/python/opt_flow.py (2 lines changed)
  17. samples/python/plane_ar.py (2 lines changed)
  18. samples/python/squares.py (4 lines changed)
  19. samples/python/texture_flow.py (4 lines changed)
  20. samples/python/turing.py (2 lines changed)
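
Most of the "unused variables" changes below follow one pattern: a value that must be unpacked but is never read is renamed with a leading underscore, the usual Python convention for an intentionally ignored name (it also silences linter warnings about unused variables). A minimal before/after sketch of that pattern, using an illustrative capture loop rather than any specific sample:

    import cv2

    cap = cv2.VideoCapture(0)

    # Before: 'ret' is assigned but never used, so static checkers flag it.
    ret, frame = cap.read()

    # After: the underscore prefix marks the value as deliberately ignored.
    _ret, frame = cap.read()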

samples/python/asift.py
@@ -155,7 +155,7 @@ if __name__ == '__main__':
             H, status = None, None
             print('%d matches found, not enough for homography estimation' % len(p1))
-        vis = explore_match(win, img1, img2, kp_pairs, None, H)
+        explore_match(win, img1, img2, kp_pairs, None, H)
     match_and_draw('affine find_obj')

samples/python/browse.py
@@ -53,8 +53,8 @@ if __name__ == '__main__':
         small = cv2.pyrDown(small)
     def onmouse(event, x, y, flags, param):
-        h, w = img.shape[:2]
-        h1, w1 = small.shape[:2]
+        h, _w = img.shape[:2]
+        h1, _w1 = small.shape[:2]
         x, y = 1.0*x*h/h1, 1.0*y*h/h1
         zoom = cv2.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
         cv2.imshow('zoom', zoom)

samples/python/camshift.py
@@ -41,7 +41,7 @@ from video import presets
 class App(object):
     def __init__(self, video_src):
         self.cam = video.create_capture(video_src, presets['cube'])
-        ret, self.frame = self.cam.read()
+        _ret, self.frame = self.cam.read()
         cv2.namedWindow('camshift')
         cv2.setMouseCallback('camshift', self.onmouse)

@@ -76,7 +76,7 @@ class App(object):
     def run(self):
         while True:
-            ret, self.frame = self.cam.read()
+            _ret, self.frame = self.cam.read()
             vis = self.frame.copy()
             hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
             mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

samples/python/common.py
@@ -233,5 +233,5 @@ def mdot(*args):
 def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
     for kp in keypoints:
-            x, y = kp.pt
-            cv2.circle(vis, (int(x), int(y)), 2, color)
+        x, y = kp.pt
+        cv2.circle(vis, (int(x), int(y)), 2, color)

samples/python/demo.py
@@ -99,7 +99,7 @@ class App:
         run_btn = tk.Button(right, command=self.on_run, text='Run', width=8)
         self.text = text = ScrolledText(right, font=('arial', 12, 'normal'), width = 30, wrap='word')
-        self.linker = linker = LinkManager(text, self.on_link)
+        self.linker = _linker = LinkManager(text, self.on_link)
         self.text.tag_config("header1", font=('arial', 14, 'bold'))
         self.text.tag_config("header2", font=('arial', 12, 'bold'))
         text.config(state='disabled')

samples/python/digits.py
@@ -84,7 +84,7 @@ class KNearest(StatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
     def predict(self, samples):
-        retval, results, neigh_resp, dists = self.model.findNearest(samples, self.k)
+        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
         return results.ravel()
 class SVM(StatModel):

samples/python/digits_video.py
@@ -35,7 +35,7 @@ def main():
     model.load_(classifier_fn) #Known bug: https://github.com/opencv/opencv/issues/4969
     while True:
-        ret, frame = cap.read()
+        _ret, frame = cap.read()
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

@@ -59,12 +59,12 @@ def main():
             cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
             bin_roi = bin[y:,x:][:h,:w]
-            gray_roi = gray[y:,x:][:h,:w]
             m = bin_roi != 0
             if not 0.1 < m.mean() < 0.4:
                 continue
             '''
+            gray_roi = gray[y:,x:][:h,:w]
             v_in, v_out = gray_roi[m], gray_roi[~m]
             if v_out.std() > 10.0:
                 continue

samples/python/find_obj.py
@@ -92,7 +92,6 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
     green = (0, 255, 0)
     red = (0, 0, 255)
-    white = (255, 255, 255)
     kp_color = (51, 103, 236)
     for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
         if inlier:

@@ -123,12 +122,12 @@ explore_match(win, img1, img2, kp_pairs, status = None, H = None):
             idxs = np.where(m)[0]
             kp1s, kp2s = [], []
             for i in idxs:
-                 (x1, y1), (x2, y2) = p1[i], p2[i]
-                 col = (red, green)[status[i]]
-                 cv2.line(cur_vis, (x1, y1), (x2, y2), col)
-                 kp1, kp2 = kp_pairs[i]
-                 kp1s.append(kp1)
-                 kp2s.append(kp2)
+                (x1, y1), (x2, y2) = p1[i], p2[i]
+                col = (red, green)[status[i]]
+                cv2.line(cur_vis, (x1, y1), (x2, y2), col)
+                kp1, kp2 = kp_pairs[i]
+                kp1s.append(kp1)
+                kp2s.append(kp2)
             cur_vis = cv2.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color)
             cur_vis[:,w1:] = cv2.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color)

@@ -183,7 +182,7 @@ if __name__ == '__main__':
             H, status = None, None
             print('%d matches found, not enough for homography estimation' % len(p1))
-        vis = explore_match(win, img1, img2, kp_pairs, status, H)
+        _vis = explore_match(win, img1, img2, kp_pairs, status, H)
     match_and_draw('find_obj')
     cv2.waitKey()

samples/python/gaussian_mix.py
@@ -15,7 +15,7 @@ import cv2
 def make_gaussians(cluster_n, img_size):
     points = []
     ref_distrs = []
-    for i in xrange(cluster_n):
+    for _i in xrange(cluster_n):
         mean = (0.1 + 0.8*random.rand(2)) * img_size
         a = (random.rand(2, 2)-0.5)*img_size*0.1
         cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)

@@ -28,7 +28,7 @@ def make_gaussians(cluster_n, img_size):
 def draw_gaussain(img, mean, cov, color):
     x, y = np.int32(mean)
-    w, u, vt = cv2.SVDecomp(cov)
+    w, u, _vt = cv2.SVDecomp(cov)
     ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
     s1, s2 = np.sqrt(w)*3.0
     cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA)

samples/python/lappyr.py
@@ -28,7 +28,7 @@ from common import nothing, getsize
 def build_lappyr(img, leveln=6, dtype=np.int16):
     img = dtype(img)
     levels = []
-    for i in xrange(leveln-1):
+    for _i in xrange(leveln-1):
         next_img = cv2.pyrDown(img)
         img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
         levels.append(img-img1)

samples/python/letter_recog.py
@@ -64,12 +64,11 @@ class RTrees(LetterStatModel):
         self.model = cv2.ml.RTrees_create()
     def train(self, samples, responses):
-        sample_n, var_n = samples.shape
         self.model.setMaxDepth(20)
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))
     def predict(self, samples):
-        ret, resp = self.model.predict(samples)
+        _ret, resp = self.model.predict(samples)
         return resp.ravel()

@@ -81,7 +80,7 @@ class KNearest(LetterStatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
     def predict(self, samples):
-        retval, results, neigh_resp, dists = self.model.findNearest(samples, k = 10)
+        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
         return results.ravel()

@@ -90,7 +89,7 @@ class Boost(LetterStatModel):
         self.model = cv2.ml.Boost_create()
     def train(self, samples, responses):
-        sample_n, var_n = samples.shape
+        _sample_n, var_n = samples.shape
         new_samples = self.unroll_samples(samples)
         new_responses = self.unroll_responses(responses)
         var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8)

@@ -101,7 +100,7 @@ class Boost(LetterStatModel):
     def predict(self, samples):
         new_samples = self.unroll_samples(samples)
-        ret, resp = self.model.predict(new_samples)
+        _ret, resp = self.model.predict(new_samples)
         return resp.ravel().reshape(-1, self.class_n).argmax(1)

@@ -118,7 +117,7 @@ class SVM(LetterStatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))
     def predict(self, samples):
-        ret, resp = self.model.predict(samples)
+        _ret, resp = self.model.predict(samples)
         return resp.ravel()

@@ -127,7 +126,7 @@ class MLP(LetterStatModel):
         self.model = cv2.ml.ANN_MLP_create()
     def train(self, samples, responses):
-        sample_n, var_n = samples.shape
+        _sample_n, var_n = samples.shape
         new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
         layer_sizes = np.int32([var_n, 100, 100, self.class_n])

@@ -141,7 +140,7 @@ class MLP(LetterStatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses))
     def predict(self, samples):
-        ret, resp = self.model.predict(samples)
+        _ret, resp = self.model.predict(samples)
         return resp.argmax(-1)

samples/python/lk_homography.py
@@ -39,8 +39,8 @@ feature_params = dict( maxCorners = 1000,
                        blockSize = 19 )
 def checkedTrace(img0, img1, p0, back_threshold = 1.0):
-    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
-    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
+    p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
+    p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
     d = abs(p0-p0r).reshape(-1, 2).max(-1)
     status = d < back_threshold
     return p1, status

@@ -56,7 +56,7 @@ class App:
     def run(self):
         while True:
-            ret, frame = self.cam.read()
+            _ret, frame = self.cam.read()
             frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
             vis = frame.copy()
             if self.p0 is not None:
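
For context (not part of the commit): checkedTrace above is the forward-backward consistency test used by these tracking samples. Points are tracked from img0 to img1 and back again, and a track is kept only if it returns to within back_threshold pixels of its starting position. A small self-contained sketch of just that filtering step, with a made-up helper name and toy point arrays:

    import numpy as np

    def forward_backward_filter(p0, p0r, back_threshold=1.0):
        # p0: original points; p0r: the same points after tracking forward and back.
        # Both have shape (N, 1, 2), as returned by cv2.calcOpticalFlowPyrLK.
        d = np.abs(p0 - p0r).reshape(-1, 2).max(-1)  # per-point round-trip error
        return d < back_threshold                    # boolean mask of reliable tracks

    # The second point drifts by 3 px on the round trip and is rejected.
    p0  = np.float32([[[10, 10]], [[50, 50]]])
    p0r = np.float32([[[10.2, 10.1]], [[53, 50]]])
    print(forward_backward_filter(p0, p0r))         # [ True False]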

samples/python/lk_track.py
@@ -46,15 +46,15 @@ class App:
     def run(self):
         while True:
-            ret, frame = self.cam.read()
+            _ret, frame = self.cam.read()
             frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
             vis = frame.copy()
             if len(self.tracks) > 0:
                 img0, img1 = self.prev_gray, frame_gray
                 p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
-                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
-                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
+                p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
+                p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                 d = abs(p0-p0r).reshape(-1, 2).max(-1)
                 good = d < 1
                 new_tracks = []

samples/python/mosse.py
@@ -73,7 +73,7 @@ class MOSSE:
         self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
         self.H1 = np.zeros_like(self.G)
         self.H2 = np.zeros_like(self.G)
-        for i in xrange(128):
+        for _i in xrange(128):
             a = self.preprocess(rnd_warp(img))
             A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
             self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)

samples/python/mouse_and_match.py
@@ -38,7 +38,7 @@ def onmouse(event, x, y, flags, param):
             patch = gray[sel[1]:sel[3],sel[0]:sel[2]]
             result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED)
             result = np.abs(result)**3
-            val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)
+            _val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)
             result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U)
             cv2.imshow("result", result8)
         drag_start = None

samples/python/opt_flow.py
@@ -29,7 +29,7 @@ def draw_flow(img, flow, step=16):
     lines = np.int32(lines + 0.5)
     vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
     cv2.polylines(vis, lines, 0, (0, 255, 0))
-    for (x1, y1), (x2, y2) in lines:
+    for (x1, y1), (_x2, _y2) in lines:
         cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
     return vis

samples/python/plane_ar.py
@@ -92,7 +92,7 @@ class App:
                         [0, fx*w, 0.5*(h-1)],
                         [0.0,0.0, 1.0]])
         dist_coef = np.zeros(4)
-        ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)
+        _ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)
         verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
         verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
         for i, j in ar_edges:

samples/python/squares.py
@@ -30,8 +30,8 @@ def find_squares(img):
                 bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                 bin = cv2.dilate(bin, None)
             else:
-                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
-            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
+            bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
             for cnt in contours:
                 cnt_len = cv2.arcLength(cnt, True)
                 cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
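
An aside on the cv2.findContours call above (not part of this commit): the three-value unpacking matches the OpenCV 3.x API, where findContours returns (image, contours, hierarchy); OpenCV 2.4 and 4.x return only (contours, hierarchy). If the sample ever needs to run against another release, a version-tolerant way to pull out just the contours could look like this hypothetical helper:

    import cv2

    def contours_only(binary_img):
        # findContours returns 3 values in OpenCV 3.x and 2 values in 2.4/4.x;
        # in every case the contour list is the second-to-last element.
        res = cv2.findContours(binary_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        return res[-2]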

samples/python/texture_flow.py
@@ -40,8 +40,8 @@ if __name__ == '__main__':
     d = 12
     points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
     for x, y in np.int32(points):
-         vx, vy = np.int32(flow[y, x]*d)
-         cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
+        vx, vy = np.int32(flow[y, x]*d)
+        cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
     cv2.imshow('input', img)
     cv2.imshow('flow', vis)
     cv2.waitKey()

samples/python/turing.py
@@ -45,7 +45,7 @@ if __name__ == '__main__':
     def process_scale(a_lods, lod):
         d = a_lods[lod] - cv2.pyrUp(a_lods[lod+1])
-        for i in xrange(lod):
+        for _i in xrange(lod):
             d = cv2.pyrUp(d)
         v = cv2.GaussianBlur(d*d, (3, 3), 0)
         return np.sign(d), v
