
Searched refs:cv2 (Results 1 – 25 of 136) sorted by relevance


/external/opencv3/samples/python2/
hist.py
18 import cv2
30 hist_item = cv2.calcHist([im],[ch],None,[256],[0,256])
31 cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
34 cv2.polylines(h,[pts],False,col)
43 im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
44 hist_item = cv2.calcHist([im],[0],None,[256],[0,256])
45 cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
48 cv2.line(h,(x,0),(x,y),(255,255,255))
63 im = cv2.imread(fname)
69 gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
[all …]
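The hist.py hits above compute a 256-bin histogram, rescale it into 0..255, and draw it as vertical lines. A minimal standalone sketch of that step, assuming a placeholder image path:

    import numpy as np
    import cv2

    im = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path

    # 256-bin intensity histogram, rescaled into 0..255 so it fits a fixed-height canvas.
    hist = cv2.calcHist([im], [0], None, [256], [0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)

    canvas = np.zeros((300, 256, 3), np.uint8)
    for x, y in enumerate(hist.flatten()):
        # draw each bin from the bottom edge upwards
        cv2.line(canvas, (x, 299), (x, 299 - int(y)), (255, 255, 255))

    cv2.imshow('histogram', canvas)
    cv2.waitKey(0)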
coherence.py
13 import cv2
21 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
22 eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
26 gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
27 gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
28 gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
32 ero = cv2.erode(img, None)
33 dil = cv2.dilate(img, None)
48 src = cv2.imread(fn)
54 sigma = cv2.getTrackbarPos('sigma', 'control')*2+1
[all …]
mouse_and_match.py
12 import cv2
27 if event == cv2.EVENT_LBUTTONDOWN:
30 elif event == cv2.EVENT_LBUTTONUP:
33 result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED)
35 val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)
36 result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U)
37 cv2.imshow("result", result8)
41 if flags & cv2.EVENT_FLAG_LBUTTON:
45 img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
46 cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)
[all …]
contours.py
13 import cv2
28 cv2.line(img, (x1, y1), (x2, y2), white)
30 cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
31 cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
32 cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
33 cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
34 cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
35 cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
36 cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
37 cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
[all …]
deconvolution.py
34 import cv2
42 img_pad = cv2.copyMakeBorder(img, d, d, d, d, cv2.BORDER_WRAP)
43 img_blur = cv2.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d,d:-d]
55 kern = cv2.warpAffine(kern, A, (sz, sz), flags=cv2.INTER_CUBIC)
60 cv2.circle(kern, (sz, sz), d, 255, -1, cv2.LINE_AA, shift=1)
77 img = cv2.imread(fn, 0)
83 cv2.imshow('input', img)
86 IMG = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
91 ang = np.deg2rad( cv2.getTrackbarPos('angle', win) )
92 d = cv2.getTrackbarPos('d', win)
[all …]
mosse.py
25 import cv2
39 return cv2.warpAffine(a, T, (w, h), borderMode = cv2.BORDER_REFLECT)
53 w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
57 img = cv2.getRectSubPix(frame, (w, h), (x, y))
59 self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
62 g = cv2.GaussianBlur(g, (-1, -1), 2.0)
65 self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
70 A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
71 self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
72 self.H2 += cv2.mulSpectrums( A, A, 0, conjB=True)
[all …]
grabcut.py
31 import cv2
58 if event == cv2.EVENT_RBUTTONDOWN:
62 elif event == cv2.EVENT_MOUSEMOVE:
65 cv2.rectangle(img,(ix,iy),(x,y),BLUE,2)
69 elif event == cv2.EVENT_RBUTTONUP:
72 cv2.rectangle(img,(ix,iy),(x,y),BLUE,2)
79 if event == cv2.EVENT_LBUTTONDOWN:
84 cv2.circle(img,(x,y),thickness,value['color'],-1)
85 cv2.circle(mask,(x,y),thickness,value['val'],-1)
87 elif event == cv2.EVENT_MOUSEMOVE:
[all …]
find_obj.py
18 import cv2
28 detector = cv2.xfeatures2d.SIFT_create()
29 norm = cv2.NORM_L2
31 detector = cv2.xfeatures2d.SURF_create(800)
32 norm = cv2.NORM_L2
34 detector = cv2.ORB_create(400)
35 norm = cv2.NORM_HAMMING
37 detector = cv2.AKAZE_create()
38 norm = cv2.NORM_HAMMING
40 detector = cv2.BRISK_create()
[all …]
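find_obj.py builds a detector/norm pair per feature type; SIFT and SURF live in the xfeatures2d contrib module, while ORB, AKAZE and BRISK ship with the main build. A minimal sketch of the ORB + Hamming-norm path, with placeholder image names:

    import cv2

    img1 = cv2.imread('box.png', 0)           # placeholder query image
    img2 = cv2.imread('box_in_scene.png', 0)  # placeholder scene image

    # ORB needs no contrib module and pairs with a Hamming-distance matcher.
    detector = cv2.ORB_create(400)
    kp1, des1 = detector.detectAndCompute(img1, None)
    kp2, des2 = detector.detectAndCompute(img2, None)

    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = matcher.knnMatch(des1, des2, k=2)

    # Ratio test: keep a match only if it is clearly better than the runner-up.
    good = [m[0] for m in matches if len(m) == 2 and m[0].distance < 0.75 * m[1].distance]
    print('good matches:', len(good))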
squares.py
10 import cv2
18 img = cv2.GaussianBlur(img, (5, 5), 0)
20 for gray in cv2.split(img):
23 bin = cv2.Canny(gray, 0, 50, apertureSize=5)
24 bin = cv2.dilate(bin, None)
26 retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
27 bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
29 cnt_len = cv2.arcLength(cnt, True)
30 cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
31 if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
[all …]
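Condensing the squares.py hits into a runnable fragment (the input file name is a placeholder; note that OpenCV 4 returns two values from findContours instead of the three shown here):

    import cv2

    img = cv2.imread('shapes.png')            # placeholder input
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    retval, bw = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)

    # OpenCV 3 API: (image, contours, hierarchy); drop the first value on OpenCV 4.
    bw, contours, hierarchy = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    squares = []
    for cnt in contours:
        cnt_len = cv2.arcLength(cnt, True)
        cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
        # keep convex quadrilaterals with a non-trivial area
        if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
            squares.append(cnt.reshape(-1, 2))

    cv2.drawContours(img, squares, -1, (0, 255, 0), 3)
    cv2.imshow('squares', img)
    cv2.waitKey(0)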
camshift.py
26 import cv2
36 cv2.namedWindow('camshift')
37 cv2.setMouseCallback('camshift', self.onmouse)
46 if event == cv2.EVENT_LBUTTONDOWN:
50 if flags & cv2.EVENT_FLAG_LBUTTON:
69 cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -…
70 img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
71 cv2.imshow('hist', img)
77 hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
78 mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
[all …]
digits_video.py
4 import cv2
33 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
36 … bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 31, 10)
37 bin = cv2.medianBlur(bin, 3)
38 _, contours, heirs = cv2.findContours( bin.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
48 x, y, w, h = cv2.boundingRect(cnt)
53 cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
70 m = cv2.moments(bin_roi)
77 … bin_norm = cv2.warpAffine(bin_roi, A, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
84 … cv2.putText(frame, '%d'%digit, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
[all …]
houghcircles.py
9 import cv2
20 src = cv2.imread(fn, 1)
21 img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
22 img = cv2.medianBlur(img, 5)
25 circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
28 cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.LINE_…
29 cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA) # draw cent…
31 cv2.imshow("source", src)
32 cv2.imshow("detected circles", cimg)
33 cv2.waitKey(0)
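For reference, the positional arguments in the HoughCircles call above map onto dp=1, minDist=10, param1 (Canny high threshold)=100, param2 (accumulator threshold)=30, minRadius=1, maxRadius=30. The same call spelled out with keyword arguments, as a sketch with a placeholder input:

    import cv2

    img = cv2.imread('coins.png', cv2.IMREAD_GRAYSCALE)   # placeholder input
    img = cv2.medianBlur(img, 5)
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=10,
                               param1=100, param2=30, minRadius=1, maxRadius=30)
    if circles is not None:
        for c in circles[0]:
            x, y, r = int(c[0]), int(c[1]), int(c[2])
            cv2.circle(cimg, (x, y), r, (0, 0, 255), 3, cv2.LINE_AA)   # circle outline
            cv2.circle(cimg, (x, y), 2, (0, 255, 0), 3, cv2.LINE_AA)   # circle center

    cv2.imshow('detected circles', cimg)
    cv2.waitKey(0)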
floodfill.py
18 import cv2
28 img = cv2.imread(fn, True)
41 cv2.imshow('floodfill', img)
45 lo = cv2.getTrackbarPos('lo', 'floodfill')
46 hi = cv2.getTrackbarPos('hi', 'floodfill')
49 flags |= cv2.FLOODFILL_FIXED_RANGE
50 cv2.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
51 cv2.circle(flooded, seed_pt, 2, (0, 0, 255), -1)
52 cv2.imshow('floodfill', flooded)
56 if flags & cv2.EVENT_FLAG_LBUTTON:
[all …]
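A condensed, non-interactive version of the floodFill call those hits come from (the input path and seed point are placeholders; the mask must be two pixels larger than the image in each dimension):

    import numpy as np
    import cv2

    img = cv2.imread('fruits.jpg')             # placeholder input
    h, w = img.shape[:2]

    mask = np.zeros((h + 2, w + 2), np.uint8)  # floodFill requires this extra border
    seed_pt = (w // 2, h // 2)
    lo, hi = 20, 20                            # tolerances below/above the seed colour

    flags = 4 | cv2.FLOODFILL_FIXED_RANGE      # 4-connectivity, compare against the seed pixel
    cv2.floodFill(img, mask, seed_pt, (255, 255, 255), (lo,) * 3, (hi,) * 3, flags)
    cv2.circle(img, seed_pt, 2, (0, 0, 255), -1)

    cv2.imshow('floodfill', img)
    cv2.waitKey(0)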
opt_flow.py
4 import cv2
22 vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
23 cv2.polylines(vis, lines, 0, (0, 255, 0))
25 cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
37 bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
45 res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
58 prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
65 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
66 flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
69 cv2.imshow('flow', draw_flow(gray, flow))
[all …]
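The dense optical-flow hits reduce to one calcOpticalFlowFarneback call plus an HSV visualisation; a sketch over two placeholder frames:

    import numpy as np
    import cv2

    prev = cv2.imread('frame0.png', cv2.IMREAD_GRAYSCALE)   # placeholder frames
    curr = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)

    # pyr_scale=0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0
    flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    # Hue encodes flow direction, value encodes flow magnitude.
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros(prev.shape + (3,), np.uint8)
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    cv2.imshow('flow', vis)
    cv2.waitKey(0)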
dft.py
3 import cv2
57 im = cv2.imread(sys.argv[1])
59 im = cv2.imread('../data/baboon.jpg')
63 im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
69 dft_M = cv2.getOptimalDFTSize(w)
70 dft_N = cv2.getOptimalDFTSize(h)
78 cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)
80 cv2.imshow("win", im)
83 image_Re, image_Im = cv2.split(dft_A)
86 magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)
[all …]
morphology.py
16 import cv2
31 img = cv2.imread(fn)
37 cv2.imshow('original', img)
45 sz = cv2.getTrackbarPos('op/size', 'morphology')
46 iters = cv2.getTrackbarPos('iters', 'morphology')
58 st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
59 res = cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)
65 cv2.imshow('morphology', res)
67 cv2.namedWindow('morphology')
68 cv2.createTrackbar('op/size', 'morphology', 12, 20, update)
[all …]
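The morphology.py hits build a structuring element by name and feed it to morphologyEx; a minimal non-interactive sketch (the file name is a placeholder):

    import cv2

    img = cv2.imread('baboon.jpg')             # placeholder input

    # 5x5 elliptical kernel; MORPH_RECT and MORPH_CROSS are the other built-in shapes.
    st = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    # morphologyEx selects the operation (erode/dilate/open/close/gradient/...) via 'op'.
    opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, st, iterations=1)
    closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, st, iterations=1)

    cv2.imshow('open', opened)
    cv2.imshow('close', closed)
    cv2.waitKey(0)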
color_histogram.py
4 import cv2
20 hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
21 cv2.imshow('hsv_map', hsv_map)
23 cv2.namedWindow('hist', 0)
28 cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
38 cv2.imshow('camera', frame)
40 small = cv2.pyrDown(frame)
42 hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
45 h = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
50 cv2.imshow('hist', vis)
[all …]
houghlines.py
7 import cv2
17 src = cv2.imread(fn)
18 dst = cv2.Canny(src, 50, 200)
19 cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
22 lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)
25 cv2.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3,…
28 lines = cv2.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0)
38 cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
41 cv2.imshow("source", src)
42 cv2.imshow("detected lines", cdst)
[all …]
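The probabilistic branch of houghlines.py reduces to Canny followed by HoughLinesP; a sketch with keyword arguments spelling out the positional values above (rho=1 px, theta=one degree, threshold=40, minLineLength=50, maxLineGap=10), using a placeholder input:

    import math
    import cv2

    src = cv2.imread('building.jpg')            # placeholder input
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 200)
    vis = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)

    lines = cv2.HoughLinesP(edges, 1, math.pi / 180.0, 40,
                            minLineLength=50, maxLineGap=10)
    if lines is not None:
        for l in lines[:, 0]:
            cv2.line(vis, (int(l[0]), int(l[1])), (int(l[2]), int(l[3])),
                     (0, 0, 255), 3, cv2.LINE_AA)

    cv2.imshow('detected lines', vis)
    cv2.waitKey(0)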
edge.py
13 import cv2
33 cv2.namedWindow('edge')
34 cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
35 cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
40 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
41 thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
42 thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
43 edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
47 cv2.imshow('edge', vis)
48 ch = cv2.waitKey(5) & 0xFF
[all …]
fitline.py
26 import cv2
49 noise = cv2.getTrackbarPos('noise', 'fit line')
50 n = cv2.getTrackbarPos('point n', 'fit line')
51 r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0
56 cv2.line(img, toint(p0), toint(p1), (0, 255, 0))
63 cv2.circle(img, toint(p), 2, (255, 255, 255), -1)
65 cv2.circle(img, toint(p), 2, (64, 64, 255), -1)
66 func = getattr(cv2, cur_func_name)
67 vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
68 cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))
[all …]
lk_homography.py
24 import cv2
30 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
38 p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
39 p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
56 frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
68 … H, status = cv2.findHomography(self.p0, self.p1, (0, cv2.RANSAC)[self.use_ransac], 10.0)
70 overlay = cv2.warpPerspective(self.frame0, H, (w, h))
71 vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)
75 cv2.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
76 cv2.circle(vis, (x1, y1), 2, (red, green)[good], -1)
[all …]
/external/opencv3/doc/py_tutorials/py_imgproc/py_template_matching/
py_template_matching.markdown
9 - You will see these functions : **cv2.matchTemplate()**, **cv2.minMaxLoc()**
15 image. OpenCV comes with a function **cv2.matchTemplate()** for this purpose. It simply slides the
22 of (W-w+1, H-h+1). Once you got the result, you can use **cv2.minMaxLoc()** function to find where
26 @note If you are using cv2.TM_SQDIFF as comparison method, minimum value gives the best match.
37 import cv2
41 img = cv2.imread('messi5.jpg',0)
43 template = cv2.imread('template.jpg',0)
47 methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
48 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
55 res = cv2.matchTemplate(img,template,method)
[all …]
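Condensed from the tutorial code above, the core matchTemplate/minMaxLoc step for a single method looks like this (TM_CCOEFF_NORMED shown; with the TM_SQDIFF variants you would take min_loc instead):

    import cv2

    img = cv2.imread('messi5.jpg', 0)
    template = cv2.imread('template.jpg', 0)
    h, w = template.shape

    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # res has shape (H-h+1, W-w+1); max_loc is the top-left corner of the best match.
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)

    cv2.imshow('match', img)
    cv2.waitKey(0)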
/external/opencv3/doc/py_tutorials/py_imgproc/py_geometric_transformations/
py_geometric_transformations.markdown
9 - You will see these functions: **cv2.getPerspectiveTransform**
14 OpenCV provides two transformation functions, **cv2.warpAffine** and **cv2.warpPerspective**, with
15 which you can have all kinds of transformations. **cv2.warpAffine** takes a 2x3 transformation
16 matrix while **cv2.warpPerspective** takes a 3x3 transformation matrix as input.
20 Scaling is just resizing of the image. OpenCV comes with a function **cv2.resize()** for this
22 Different interpolation methods are used. Preferable interpolation methods are **cv2.INTER_AREA**
23 for shrinking and **cv2.INTER_CUBIC** (slow) & **cv2.INTER_LINEAR** for zooming. By default,
24 interpolation method used is **cv2.INTER_LINEAR** for all resizing purposes. You can resize an
27 import cv2
30 img = cv2.imread('messi5.jpg')
[all …]
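A short sketch of the two calls this tutorial introduces: resizing with an explicit interpolation flag, and applying a 2x3 affine matrix to warpAffine (the translation matrix here is an illustrative choice, not necessarily the tutorial's exact example):

    import numpy as np
    import cv2

    img = cv2.imread('messi5.jpg')
    rows, cols = img.shape[:2]

    # Scaling: INTER_AREA suits shrinking, INTER_CUBIC / INTER_LINEAR suit zooming.
    res = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)

    # warpAffine takes a 2x3 matrix; this one translates by (100, 50) pixels.
    M = np.float32([[1, 0, 100], [0, 1, 50]])
    dst = cv2.warpAffine(img, M, (cols, rows))

    cv2.imshow('scaled', res)
    cv2.imshow('shifted', dst)
    cv2.waitKey(0)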
/external/opencv3/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/
py_histogram_backprojection.markdown
36 import cv2
41 roi = cv2.imread('rose_red.png')
42 hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
45 target = cv2.imread('rose.png')
46 hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
49 M = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
50 I = cv2.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )
57 h,s,v = cv2.split(hsvt)
64 disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
65 cv2.filter2D(B,-1,disc,B)
[all …]
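The lines above implement the ratio histogram by hand with numpy; OpenCV also exposes the same operation as cv2.calcBackProject. A compact sketch using the tutorial's file names:

    import cv2

    roi = cv2.imread('rose_red.png')           # object to look for
    target = cv2.imread('rose.png')            # image to search in

    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)

    # 2D hue/saturation histogram of the object, normalised into 0..255.
    roihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)

    # Back-project onto the target, then smooth with a disc-shaped kernel.
    dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    cv2.filter2D(dst, -1, disc, dst)

    ret, thresh = cv2.threshold(dst, 50, 255, cv2.THRESH_BINARY)
    result = cv2.bitwise_and(target, target, mask=thresh)

    cv2.imshow('backprojection', result)
    cv2.waitKey(0)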
/external/opencv3/doc/py_tutorials/py_imgproc/py_thresholding/
py_thresholding.markdown
9 - You will learn these functions : **cv2.threshold**, **cv2.adaptiveThreshold** etc.
16 used is **cv2.threshold**. First argument is the source image, which **should be a grayscale
22 - cv2.THRESH_BINARY
23 - cv2.THRESH_BINARY_INV
24 - cv2.THRESH_TRUNC
25 - cv2.THRESH_TOZERO
26 - cv2.THRESH_TOZERO_INV
35 import cv2
39 img = cv2.imread('gradient.png',0)
40 ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
[all …]
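A short sketch exercising the five global flags listed above plus adaptiveThreshold (the block size and constant here are illustrative values, not necessarily the tutorial's):

    import cv2

    img = cv2.imread('gradient.png', 0)        # grayscale input, as the tutorial requires

    ret, thresh1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
    ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
    ret, thresh4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)
    ret, thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)

    # adaptiveThreshold picks a per-pixel threshold from the local neighbourhood mean.
    adaptive = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, 11, 2)

    cv2.imshow('binary', thresh1)
    cv2.imshow('adaptive', adaptive)
    cv2.waitKey(0)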
