Merged the trunk r8467:8507 (inclusive) (big bunch of documentation fixes)

pull/13383/head
Andrey Kamaev 13 years ago
parent 052d2dc23a
commit 81a5988015
  1. BIN  3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so
  2. BIN  3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so
  3. BIN  3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so
  4. BIN  3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so
  5. BIN  3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so
  6. BIN  3rdparty/lib/armeabi/libnative_camera_r2.2.0.so
  7. BIN  3rdparty/lib/armeabi/libnative_camera_r2.3.3.so
  8. BIN  3rdparty/lib/armeabi/libnative_camera_r3.0.1.so
  9. BIN  3rdparty/lib/armeabi/libnative_camera_r4.0.0.so
  10. BIN  3rdparty/lib/armeabi/libnative_camera_r4.0.3.so
  11. BIN  3rdparty/lib/x86/libnative_camera_r2.3.3.so
  12. BIN  3rdparty/lib/x86/libnative_camera_r3.0.1.so
  13. BIN  3rdparty/lib/x86/libnative_camera_r4.0.3.so
  14. 497  doc/check_docs2.py
  15. 6  doc/conf.py
  16. 99  doc/ocv.py
  17. 2  modules/androidcamera/camera_wrapper/camera_wrapper.cpp
  18. 232  modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
  19. 77  modules/calib3d/include/opencv2/calib3d/calib3d.hpp
  20. 5  modules/contrib/doc/contrib.rst
  21. 117  modules/contrib/doc/stereo.rst
  22. 244  modules/core/doc/basic_structures.rst
  23. 10  modules/core/doc/clustering.rst
  24. 191  modules/core/doc/drawing_functions.rst
  25. 133  modules/core/doc/dynamic_structures.rst
  26. 1085  modules/core/doc/old_basic_structures.rst
  27. 458  modules/core/doc/old_xml_yaml_persistence.rst
  28. 572  modules/core/doc/operations_on_arrays.rst
  29. 37  modules/core/doc/utility_and_system_functions_and_macros.rst
  30. 64  modules/core/doc/xml_yaml_persistence.rst
  31. 810  modules/core/include/opencv2/core/core.hpp
  32. 2  modules/core/include/opencv2/core/types_c.h
  33. 98  modules/core/src/drawing.cpp
  34. 25  modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst
  35. 36  modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst
  36. 72  modules/features2d/doc/common_interfaces_of_feature_detectors.rst
  37. 16  modules/features2d/doc/common_interfaces_of_generic_descriptor_matchers.rst
  38. 10  modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst
  39. 30  modules/features2d/doc/feature_detection_and_description.rst
  40. 4  modules/features2d/doc/object_categorization.rst
  41. 138  modules/features2d/include/opencv2/features2d/features2d.hpp
  42. 2  modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst
  43. 2  modules/gpu/doc/data_structures.rst
  44. 8  modules/gpu/doc/feature_detection_and_description.rst
  45. 49  modules/gpu/doc/image_filtering.rst
  46. 46  modules/gpu/doc/image_processing.rst
  47. 11  modules/gpu/doc/initalization_and_information.rst
  48. 2  modules/gpu/doc/introduction.rst
  49. 4  modules/gpu/doc/object_detection.rst
  50. 16  modules/gpu/doc/operations_on_matrices.rst
  51. 120  modules/gpu/doc/per_element_operations.rst
  52. 22  modules/gpu/include/opencv2/gpu/gpu.hpp
  53. 92  modules/highgui/doc/qt_new_functions.rst
  54. 80  modules/highgui/doc/reading_and_writing_images_and_video.rst
  55. 54  modules/highgui/doc/user_interface.rst
  56. 495  modules/highgui/src/cap_dshow.cpp
  57. 2  modules/highgui/test/test_video_io.cpp
  58. 128  modules/imgproc/doc/feature_detection.rst
  59. 349  modules/imgproc/doc/filtering.rst
  60. 91  modules/imgproc/doc/geometric_transformations.rst
  61. 311  modules/imgproc/doc/histograms.rst
  62. 91  modules/imgproc/doc/miscellaneous_transformations.rst
  63. 25  modules/imgproc/doc/motion_analysis_and_object_tracking.rst
  64. 4  modules/imgproc/doc/object_detection.rst
  65. 226  modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst
  66. 236  modules/imgproc/include/opencv2/imgproc/imgproc.hpp
  67. 4  modules/imgproc/include/opencv2/imgproc/imgproc_c.h
  68. 34  modules/imgproc/include/opencv2/imgproc/types_c.h
  69. 2  modules/imgproc/src/_geom.h
  70. 30  modules/imgproc/src/approx.cpp
  71. 104  modules/imgproc/src/contours.cpp
  72. 5  modules/java/gen_java.py
  73. 97  modules/java/rst_parser.py
  74. 2  modules/legacy/doc/common_interfaces_of_descriptor_extractors.rst
  75. 4  modules/legacy/doc/common_interfaces_of_generic_descriptor_matchers.rst
  76. 32  modules/legacy/doc/expectation_maximization.rst
  77. 65  modules/legacy/doc/feature_detection_and_description.rst
  78. 95  modules/legacy/doc/histograms.rst
  79. 1  modules/legacy/doc/legacy.rst
  80. 52  modules/legacy/doc/motion_analysis.rst
  81. 38  modules/legacy/doc/planar_subdivisions.rst
  82. 644  modules/legacy/include/opencv2/legacy/legacy.hpp
  83. 42  modules/ml/doc/boosting.rst
  84. 98  modules/ml/doc/decision_trees.rst
  85. 4  modules/ml/doc/ertrees.rst
  86. 40  modules/ml/doc/expectation_maximization.rst
  87. 4  modules/ml/doc/gradient_boosted_trees.rst
  88. 2  modules/ml/doc/k_nearest_neighbors.rst
  89. 2  modules/ml/doc/mldata.rst
  90. 56  modules/ml/doc/neural_networks.rst
  91. 6  modules/ml/doc/normal_bayes_classifier.rst
  92. 16  modules/ml/doc/random_trees.rst
  93. 2  modules/ml/doc/statistical_models.rst
  94. 32  modules/ml/doc/support_vector_machines.rst
  95. 19  modules/ml/include/opencv2/ml/ml.hpp
  96. 94  modules/nonfree/doc/feature_detection.rst
  97. 22  modules/nonfree/include/opencv2/nonfree/features2d.hpp
  98. 14  modules/objdetect/doc/cascade_classification.rst
  99. 244  modules/objdetect/doc/latent_svm.rst
  100. 96  modules/objdetect/include/opencv2/objdetect/objdetect.hpp
  101. Some files were not shown because too many files have changed in this diff.

Binary files not shown.

@ -0,0 +1,497 @@
import os, sys, glob, re
sys.path.append("../modules/python/src2/")
sys.path.append("../modules/java/")
import hdr_parser as hp
import rst_parser as rp
rp.show_warnings = False
rp.show_errors = False
allmodules = rp.allmodules
DOCUMENTED_MARKER = "verified"
ERROR_001_NOTACLASS = 1
ERROR_002_NOTASTRUCT = 2
ERROR_003_INCORRECTBASE = 3
ERROR_004_MISSEDNAMESPACE = 4
ERROR_005_MISSINGPYFUNC = 5
ERROR_006_INVALIDPYOLDDOC = 6
ERROR_007_INVALIDPYDOC = 7
ERROR_008_CFUNCISNOTGLOBAL = 8
ERROR_009_OVERLOADNOTFOUND = 9
ERROR_010_UNKNOWNCLASS = 10
ERROR_011_UNKNOWNFUNC = 11
do_python_crosscheck = True
errors_disabled = [ERROR_004_MISSEDNAMESPACE]
doc_signatures_whitelist = [
# templates
"Matx", "Vec", "SparseMat_", "Scalar_", "Mat_", "Ptr", "Size_", "Point_", "Rect_", "Point3_",
"DataType", "detail::RotationWarperBase", "flann::Index_", "CalonderDescriptorExtractor",
"gpu::DevMem2D_", "gpu::PtrStep_", "gpu::PtrElemStep_",
# black boxes
"CvArr", "CvFileStorage",
# other
"InputArray", "OutputArray",
]
defines = ["cvGraphEdgeIdx", "cvFree", "CV_Assert", "cvSqrt", "cvGetGraphVtx", "cvGraphVtxIdx",
"cvCaptureFromFile", "cvCaptureFromCAM", "cvCalcBackProjectPatch", "cvCalcBackProject",
"cvGetHistValue_1D", "cvGetHistValue_2D", "cvGetHistValue_3D", "cvGetHistValue_nD",
"cvQueryHistValue_1D", "cvQueryHistValue_2D", "cvQueryHistValue_3D", "cvQueryHistValue_nD",
# not a real function but behaves as function
"Mat::size",
# ugly "virtual" functions from ml module
"CvStatModel::train", "CvStatModel::predict",
# TODO:
"cvExtractSURF"
]
synonims = {
"StarDetector" : ["StarFeatureDetector"],
"MSER" : ["MserFeatureDetector"],
"GFTTDetector" : ["GoodFeaturesToTrackDetector"],
"cvCaptureFromFile" : ["cvCreateFileCapture"],
"cvCaptureFromCAM" : ["cvCreateCameraCapture"],
"cvCalcArrBackProjectPatch" : ["cvCalcBackProjectPatch"],
"cvCalcArrBackProject" : ["cvCalcBackProject"],
"InputArray" : ["_InputArray"],
"OutputArray" : ["_OutputArray"],
}
if do_python_crosscheck:
    try:
        import cv2
    except ImportError:
        print "Could not load cv2"
        do_python_crosscheck = False

def get_cv2_object(name):
    if name.startswith("cv2."):
        name = name[4:]
    if name.startswith("cv."):
        name = name[3:]
    if name == "Algorithm":
        return cv2.Algorithm__create("Feature2D.ORB"), name
    elif name == "FeatureDetector":
        return cv2.FeatureDetector_create("ORB"), name
    elif name == "DescriptorExtractor":
        return cv2.DescriptorExtractor_create("ORB"), name
    elif name == "BackgroundSubtractor":
        return cv2.BackgroundSubtractorMOG(), name
    elif name == "StatModel":
        return cv2.KNearest(), name
    else:
        return getattr(cv2, name)(), name
def compareSignatures(f, s):
    # function names
    if f[0] != s[0]:
        return False, "name mismatch"
    # return type
    stype = (s[1] or "void")
    ftype = f[1]
    stype = re.sub(r"\b(cv|std)::", "", stype)
    if ftype:
        ftype = re.sub(r"\b(cv|std)::", "", ftype)
    if ftype and ftype != stype:
        return False, "return type mismatch"
    if ("/C" in f[2]) ^ ("/C" in s[2]):
        return False, "const qualifier mismatch"
    if ("/S" in f[2]) ^ ("/S" in s[2]):
        return False, "static qualifier mismatch"
    if ("/V" in f[2]) ^ ("/V" in s[2]):
        return False, "virtual qualifier mismatch"
    if ("/A" in f[2]) ^ ("/A" in s[2]):
        return False, "abstract qualifier mismatch"
    if len(f[3]) != len(s[3]):
        return False, "different number of arguments"
    for idx, arg in enumerate(zip(f[3], s[3])):
        farg = arg[0]
        sarg = arg[1]
        ftype = re.sub(r"\b(cv|std)::", "", (farg[0] or ""))
        stype = re.sub(r"\b(cv|std)::", "", (sarg[0] or ""))
        if ftype != stype:
            return False, "type of argument #" + str(idx+1) + " mismatch"
        fname = farg[1] or "arg" + str(idx)
        sname = sarg[1] or "arg" + str(idx)
        if fname != sname:
            return False, "name of argument #" + str(idx+1) + " mismatch"
        fdef = re.sub(r"\b(cv|std)::", "", (farg[2] or ""))
        sdef = re.sub(r"\b(cv|std)::", "", (sarg[2] or ""))
        if fdef != sdef:
            return False, "default value of argument #" + str(idx+1) + " mismatch"
    return True, "match"
def formatSignature(s):
    _str = ""
    if "/V" in s[2]:
        _str += "virtual "
    if "/S" in s[2]:
        _str += "static "
    if s[1]:
        _str += s[1] + " "
    else:
        if not bool(re.match(r"(\w+\.)*(?P<cls>\w+)\.(?P=cls)", s[0])):
            _str += "void "
    if s[0].startswith("cv."):
        _str += s[0][3:].replace(".", "::")
    else:
        _str += s[0].replace(".", "::")
    if len(s[3]) == 0:
        _str += "()"
    else:
        _str += "( "
        for idx, arg in enumerate(s[3]):
            if idx > 0:
                _str += ", "
            argtype = re.sub(r"\bcv::", "", arg[0])
            bidx = argtype.find('[')
            if bidx < 0:
                _str += argtype + " "
            else:
                _str += argtype[:bidx]
            if arg[1]:
                _str += arg[1]
            else:
                _str += "arg" + str(idx)
            if bidx >= 0:
                _str += argtype[bidx:]
            if arg[2]:
                _str += "=" + re.sub(r"\bcv::", "", arg[2])
        _str += " )"
    if "/C" in s[2]:
        _str += " const"
    if "/A" in s[2]:
        _str += " = 0"
    return _str
def logerror(code, message, doc = None):
    if code in errors_disabled:
        return
    if doc:
        print doc["file"] + ":" + str(doc["line"]),
    print "error %03d: %s" % (code, message)
    #print
def process_module(module, path):
    hppparser = hp.CppHeaderParser()
    rstparser = rp.RstParser(hppparser)

    rstparser.parse(module, path)
    rst = rstparser.definitions

    hdrlist = glob.glob(os.path.join(path, "include", "opencv2", module, "*.h*"))
    hdrlist.extend(glob.glob(os.path.join(path, "include", "opencv2", module, "detail", "*.h*")))

    if module == "gpu":
        hdrlist.append(os.path.join(path, "..", "core", "include", "opencv2", "core", "devmem2d.hpp"))
        hdrlist.append(os.path.join(path, "..", "core", "include", "opencv2", "core", "gpumat.hpp"))

    decls = []
    for hname in hdrlist:
        if not "ts_gtest.h" in hname:
            decls += hppparser.parse(hname, wmode=False)

    funcs = []
    # there is no real need to hardcode all the namespaces; normally they are all collected automatically
    namespaces = ['cv', 'cv.gpu', 'cvflann', 'cvflann.anyimpl', 'cvflann.lsh', 'cv.flann', 'cv.linemod', 'cv.detail', 'cvtest', 'perf', 'cv.videostab']
    classes = []
    structs = []

    # collect namespaces and classes/structs
    for decl in decls:
        if decl[0].startswith("const"):
            pass
        elif decl[0].startswith("class") or decl[0].startswith("struct"):
            if decl[0][0] == 'c':
                classes.append(decl)
            else:
                structs.append(decl)
            dotIdx = decl[0].rfind('.')
            if dotIdx > 0:
                namespace = decl[0][decl[0].find(' ')+1:dotIdx]
                if not [c for c in classes if c[0].endswith(namespace)] and not [s for s in structs if s[0].endswith(namespace)]:
                    if namespace not in namespaces:
                        namespaces.append(namespace)
        else:
            funcs.append(decl)
    clsnamespaces = []
    # process classes
    for cl in classes:
        name = cl[0][cl[0].find(' ')+1:]
        if name.find('.') < 0 and not name.startswith("Cv"):
            logerror(ERROR_004_MISSEDNAMESPACE, "class " + name + " from opencv_" + module + " is placed in global namespace but violates C-style naming convention")
        clsnamespaces.append(name)
        if do_python_crosscheck and not name.startswith("cv.") and name.startswith("Cv"):
            clsnamespaces.append("cv." + name[2:])
        if name.startswith("cv."):
            name = name[3:]
        name = name.replace(".", "::")
        sns = synonims.get(name, [])
        sns.append(name)
        for name in sns:
            doc = rst.get(name)
            if not doc:
                #TODO: class is not documented
                continue
            doc[DOCUMENTED_MARKER] = True
            # verify class marker
            if not doc.get("isclass"):
                logerror(ERROR_001_NOTACLASS, "class " + name + " is not marked as \"class\" in documentation", doc)
            else:
                # verify base
                signature = doc.get("class", "")
                signature = signature.replace(" public ", " ")
                namespaceIdx = signature.rfind("::")

                signature = ("class " + signature).strip()
                hdrsignature = ("class " + name + " " + cl[1]).replace(".", "::").replace("cv::","").strip()
                if signature != hdrsignature:
                    logerror(ERROR_003_INCORRECTBASE, "invalid base class documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc)
    # process structs
    for st in structs:
        name = st[0][st[0].find(' ')+1:]
        if name.find('.') < 0 and not name.startswith("Cv"):
            logerror(ERROR_004_MISSEDNAMESPACE, "struct " + name + " from opencv_" + module + " is placed in global namespace but violates C-style naming convention")
        clsnamespaces.append(name)
        if name.startswith("cv."):
            name = name[3:]
        name = name.replace(".", "::")
        doc = rst.get(name)
        if not doc:
            #TODO: struct is not documented
            continue
        doc[DOCUMENTED_MARKER] = True
        # verify struct marker
        if not doc.get("isstruct"):
            logerror(ERROR_002_NOTASTRUCT, "struct " + name + " is not marked as \"struct\" in documentation", doc)
        else:
            # verify base
            signature = doc.get("class", "")
            signature = signature.replace(", public ", " ").replace(" public ", " ")
            signature = signature.replace(", protected ", " ").replace(" protected ", " ")
            signature = signature.replace(", private ", " ").replace(" private ", " ")
            signature = ("struct " + signature).strip()
            hdrsignature = (st[0] + " " + st[1]).replace("struct cv.", "struct ").replace(".", "::").strip()
            if signature != hdrsignature:
                logerror(ERROR_003_INCORRECTBASE, "invalid base struct documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc)
                print st, doc
    # process functions and methods
    flookup = {}
    for fn in funcs:
        name = fn[0]
        parent = None
        namespace = None
        for cl in clsnamespaces:
            if name.startswith(cl + "."):
                if cl.startswith(parent or ""):
                    parent = cl
        if parent:
            name = name[len(parent) + 1:]
            for nm in namespaces:
                if parent.startswith(nm + "."):
                    if nm.startswith(namespace or ""):
                        namespace = nm
            if namespace:
                parent = parent[len(namespace) + 1:]
        else:
            for nm in namespaces:
                if name.startswith(nm + "."):
                    if nm.startswith(namespace or ""):
                        namespace = nm
            if namespace:
                name = name[len(namespace) + 1:]
        #print namespace, parent, name, fn[0]
        if not namespace and not parent and not name.startswith("cv") and not name.startswith("CV_"):
            logerror(ERROR_004_MISSEDNAMESPACE, "function " + name + " from opencv_" + module + " is placed in global namespace but violates C-style naming convention")
        else:
            fdescr = (namespace, parent, name, fn)
            flookup_entry = flookup.get(fn[0], [])
            flookup_entry.append(fdescr)
            flookup[fn[0]] = flookup_entry
    if do_python_crosscheck:
        for name, doc in rst.iteritems():
            decls = doc.get("decls")
            if not decls:
                continue
            for signature in decls:
                if signature[0] == "Python1":
                    pname = signature[1][:signature[1].find('(')]
                    try:
                        fn = getattr(cv2.cv, pname[3:])
                        docstr = "cv." + fn.__doc__
                    except AttributeError:
                        logerror(ERROR_005_MISSINGPYFUNC, "could not load documented function: cv2." + pname, doc)
                        continue
                    docstring = docstr
                    sign = signature[1]
                    signature.append(DOCUMENTED_MARKER)
                    # convert old signature to pydoc style
                    if docstring.endswith("*"):
                        docstring = docstring[:-1]
                    s = None
                    while s != sign:
                        s = sign
                        sign = re.sub(r"^(.*\(.*)\(.*?\)(.*\) *->)", "\\1_\\2", sign)
                    s = None
                    while s != sign:
                        s = sign
                        sign = re.sub(r"\s*,\s*([^,]+)\s*=\s*[^,]+\s*(( \[.*\])?)\)", " [, \\1\\2])", sign)
                    sign = re.sub(r"\(\s*([^,]+)\s*=\s*[^,]+\s*(( \[.*\])?)\)", "([\\1\\2])", sign)
                    sign = re.sub(r"\)\s*->\s*", ") -> ", sign)
                    sign = sign.replace("-> convexHull", "-> CvSeq")
                    sign = sign.replace("-> lines", "-> CvSeq")
                    sign = sign.replace("-> boundingRects", "-> CvSeq")
                    sign = sign.replace("-> contours", "-> CvSeq")
                    sign = sign.replace("-> retval", "-> int")
                    sign = sign.replace("-> detectedObjects", "-> CvSeqOfCvAvgComp")

                    def retvalRplace(match):
                        m = match.group(1)
                        m = m.replace("CvScalar", "scalar")
                        m = m.replace("CvMemStorage", "memstorage")
                        m = m.replace("ROIplImage", "image")
                        m = m.replace("IplImage", "image")
                        m = m.replace("ROCvMat", "mat")
                        m = m.replace("CvMat", "mat")
                        m = m.replace("double", "float")
                        m = m.replace("CvSubdiv2DPoint", "point")
                        m = m.replace("CvBox2D", "Box2D")
                        m = m.replace("IplConvKernel", "kernel")
                        m = m.replace("CvHistogram", "hist")
                        m = m.replace("CvSize", "width,height")
                        m = m.replace("cvmatnd", "matND")
                        m = m.replace("CvSeqOfCvConvexityDefect", "convexityDefects")
                        mm = m.split(',')
                        if len(mm) > 1:
                            return "(" + ", ".join(mm) + ")"
                        else:
                            return m

                    docstring = re.sub(r"(?<=-> )(.*)$", retvalRplace, docstring)
                    docstring = docstring.replace("( [, ", "([")

                    if sign != docstring:
                        logerror(ERROR_006_INVALIDPYOLDDOC, "old-style documentation differs from pydoc\npydoc: " + docstring + "\nfixup: " + sign + "\ncvdoc: " + signature[1], doc)
                elif signature[0] == "Python2":
                    pname = signature[1][4:signature[1].find('(')]
                    cvname = "cv." + pname
                    parent = None
                    for cl in clsnamespaces:
                        if cvname.startswith(cl + "."):
                            if cl.startswith(parent or ""):
                                parent = cl
                    try:
                        if parent:
                            instance, clsname = get_cv2_object(parent)
                            fn = getattr(instance, cvname[len(parent)+1:])
                            docstr = fn.__doc__
                            docprefix = "cv2." + clsname + "."
                        else:
                            fn = getattr(cv2, pname)
                            docstr = fn.__doc__
                            docprefix = "cv2."
                    except AttributeError:
                        if parent:
                            logerror(ERROR_005_MISSINGPYFUNC, "could not load documented member of " + parent + " class: cv2." + pname, doc)
                        else:
                            logerror(ERROR_005_MISSINGPYFUNC, "could not load documented function cv2." + pname, doc)
                        signature.append(DOCUMENTED_MARKER) # stop subsequent errors
                        continue
                    docstrings = [docprefix + s.replace("([, ", "([") for s in docstr.split(" or ")]
                    if not signature[1] in docstrings:
                        pydocs = "\npydoc: ".join(docstrings)
                        logerror(ERROR_007_INVALIDPYDOC, "documentation differs from pydoc\npydoc: " + pydocs + "\ncvdoc: " + signature[1], doc)
                    signature.append(DOCUMENTED_MARKER)
    # verify C/C++ signatures
    for name, doc in rst.iteritems():
        decls = doc.get("decls")
        if not decls:
            continue
        for signature in decls:
            if signature[0] == "C" or signature[0] == "C++":
                if "template" in (signature[2][1] or ""):
                    # TODO find a way to validate templates
                    signature.append(DOCUMENTED_MARKER)
                    continue
                fd = flookup.get(signature[2][0])
                if not fd:
                    if signature[2][0].startswith("cv."):
                        fd = flookup.get(signature[2][0][3:])
                    if not fd:
                        continue
                    else:
                        signature[2][0] = signature[2][0][3:]
                if signature[0] == "C":
                    ffd = [f for f in fd if not f[0] and not f[1]] # filter out C++ stuff
                    if not ffd:
                        if fd[0][1]:
                            logerror(ERROR_008_CFUNCISNOTGLOBAL, "function " + fd[0][2] + " is documented as C function but is actually member of " + fd[0][1] + " class", doc)
                        elif fd[0][0]:
                            logerror(ERROR_008_CFUNCISNOTGLOBAL, "function " + fd[0][2] + " is documented as C function but is actually placed in " + fd[0][0] + " namespace", doc)
                    fd = ffd
                error = None
                for f in fd:
                    match, error = compareSignatures(signature[2], f[3])
                    if match:
                        signature.append(DOCUMENTED_MARKER)
                        break
                if signature[-1] != DOCUMENTED_MARKER:
                    candidates = "\n\t".join([formatSignature(f[3]) for f in fd])
                    logerror(ERROR_009_OVERLOADNOTFOUND, signature[0] + " function " + signature[2][0].replace(".","::") + " is documented but misses in headers (" + error + ").\nDocumented as:\n\t" + signature[1] + "\nCandidates are:\n\t" + candidates, doc)
                    signature.append(DOCUMENTED_MARKER) # to stop subsequent error on this function
    # verify that all signatures were found in the library headers
    for name, doc in rst.iteritems():
        # if doc.get(DOCUMENTED_MARKER, False):
        #     continue # this class/struct was found
        if not doc.get(DOCUMENTED_MARKER, False) and (doc.get("isclass", False) or doc.get("isstruct", False)):
            if name in doc_signatures_whitelist:
                continue
            logerror(ERROR_010_UNKNOWNCLASS, "class/struct " + name + " is mentioned in documentation but is not found in OpenCV headers", doc)
        for d in doc.get("decls", []):
            if d[-1] != DOCUMENTED_MARKER:
                if d[0] == "C" or d[0] == "C++" or (do_python_crosscheck and d[0].startswith("Python")):
                    if d[0][0] == 'C':
                        sname = d[2][0][3:].replace(".", "::")
                        if sname in defines:
                            #TODO: need to find a way to verify #define's
                            continue
                    else:
                        sname = d[1][:d[1].find("(")]
                    prefixes = [x for x in doc_signatures_whitelist if sname.startswith(x)]
                    if prefixes:
                        # TODO: member of template class
                        continue
                    logerror(ERROR_011_UNKNOWNFUNC, d[0] + " function " + sname + " is documented but is not found in OpenCV headers. It is documented as:\n\t" + d[1], doc)
# end of process_module
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print "Usage:\n", os.path.basename(sys.argv[0]), " <module path>"
        exit(0)

    modules = sys.argv[1:]
    if modules[0] == "all":
        modules = allmodules

    for module in modules:
        selfpath = os.path.dirname(os.path.abspath(sys.argv[0]))
        module_path = os.path.join(selfpath, "..", "modules", module)

        if not os.path.isdir(module_path):
            print "Module \"" + module + "\" could not be found."
            exit(1)

        process_module(module, module_path)
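
For reference, a minimal invocation sketch (not part of the diff), assuming the script is run from the opencv/doc directory where it lives; module names are joined with ../modules, and "all" sweeps every module known to rst_parser::

    import subprocess

    # check a single module, then everything rst_parser knows about
    subprocess.check_call(["python", "check_docs2.py", "core"])
    subprocess.check_call(["python", "check_docs2.py", "all"])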

@ -231,9 +231,9 @@ latex_documents = [
    ('modules/refman', 'opencv2refman.tex', u'The OpenCV Reference Manual',
     u'', 'manual'),
    ('doc/user_guide/user_guide', 'opencv_user.tex', u'The OpenCV User Guide',
     u'', 'manual'),
    ('doc/tutorials/tutorials', 'opencv_tutorials.tex', u'The OpenCV Tutorials',
     u'', 'manual'),
]
preamble ="""
@ -303,7 +303,7 @@ extlinks = {
'calib3d' : ('http://opencv.itseez.com/trunk/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#%s', None ),
'feature2d' : ('http://opencv.itseez.com/trunk/modules/imgproc/doc/feature_detection.html#%s', None ),
'imgproc_geometric' : ('http://opencv.itseez.com/trunk/modules/imgproc/doc/geometric_transformations.html#%s', None ),
'opencv_group' : ('http://tech.groups.yahoo.com/group/OpenCV/%s', None),
'cvt_color': ('http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),

@ -25,7 +25,7 @@ from sphinx.util.nodes import make_refnode
from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, GroupedField, TypedField
########################### Python Part ###########################
# REs for Python signatures
py_sig_re = re.compile(
@ -292,9 +292,10 @@ class OCVPyXRefRole(XRefRole):
return title, target
########################### C/C++/Java Part ###########################
_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*)\b')
_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*\b)')
_argument_name_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*\b(?:\[\d*\])?|\.\.\.)')
_whitespace_re = re.compile(r'\s+(?u)')
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
@ -303,10 +304,10 @@ _operator_re = re.compile(r'''(?x)
\[\s*\]
| \(\s*\)
| (<<|>>)=?
| \+\+ | -- | ->\*?
| [!<>=/*%+|&^-]=?
| \+\+ | --
| ~ | && | \| | \|\|
| ->\*? | \,
| \,
''')
_id_shortwords = {
@ -561,7 +562,7 @@ class ConstDefExpr(WrappingDefExpr):
    def __unicode__(self):
        return (self.prefix and u'const %s' or u'%s const') % self.typename

class ConstTemplateDefExpr(WrappingDefExpr):
    def __init__(self, typename, prefix=False):
@ -667,13 +668,14 @@ class MemberObjDefExpr(NamedDefExpr):
class FuncDefExpr(NamedDefExpr):
    def __init__(self, name, visibility, static, explicit, rv,
                 signature, const, pure_virtual):
                 signature, const, pure_virtual, virtual):
        NamedDefExpr.__init__(self, name, visibility, static)
        self.rv = rv
        self.signature = signature
        self.explicit = explicit
        self.const = const
        self.pure_virtual = pure_virtual
        self.virtual = virtual
    def get_id(self):
        return u'%s%s%s' % (
@ -682,11 +684,13 @@ class FuncDefExpr(NamedDefExpr):
            u'.'.join(x.get_id() for x in self.signature) or u'',
            self.const and u'C' or u''
        )

    def __unicode__(self):
        buf = self.get_modifiers()
        if self.explicit:
            buf.append(u'explicit')
        if self.virtual:
            buf.append(u'virtual')
        if self.rv is not None:
            buf.append(unicode(self.rv))
        buf.append(u'%s(%s)' % (self.name, u', '.join(
@ -700,8 +704,9 @@ class FuncDefExpr(NamedDefExpr):
class ClassDefExpr(NamedDefExpr):
    def __init__(self, name, visibility, static):
    def __init__(self, name, visibility, static, parents = None):
        NamedDefExpr.__init__(self, name, visibility, static)
        self.parents = parents

    def get_id(self):
        return self.name.get_id()
@ -788,7 +793,6 @@ class DefinitionParser(object):
        if self.match(_operator_re):
            return NameDefExpr('operator' +
                               _whitespace_re.sub('', self.matched_text))
        # new/delete operator?
        for allocop in 'new', 'delete':
            if not self.skip_word(allocop):
@ -807,7 +811,7 @@ class DefinitionParser(object):
            return CastOpDefExpr(type)

    def _parse_name(self):
        if not self.match(_identifier_re):
        if not self.match(_argument_name_re):
            self.fail('expected name')
        identifier = self.matched_text
@ -981,7 +985,7 @@ class DefinitionParser(object):
            elif paren_stack_depth == 0:
                break
        self.pos = idx+1
        rv = self.definition[rv_start:idx]
        self.pos = idx
        return rv
@ -1004,8 +1008,13 @@ class DefinitionParser(object):
            self.skip_ws()
            argtype = self._parse_type()
            argname = default = None
            self.skip_ws()
            if unicode(argtype) == u"...":
                if not self.skip_string(')'):
                    self.fail("var arg must be the last argument")
                args.append(ArgumentDefExpr(None, argtype, None))
                break
            argname = default = None
            if self.skip_string('='):
                self.pos += 1
                default = self._parse_default_expr()
@ -1072,6 +1081,11 @@ class DefinitionParser(object):
            self.skip_ws()
        else:
            explicit = False
        if self.skip_word('virtual'):
            virtual = True
            self.skip_ws()
        else:
            virtual = False
        rv = self._parse_type()
        self.skip_ws()
        # some things just don't have return values
@ -1080,12 +1094,27 @@ class DefinitionParser(object):
            rv = None
        else:
            name = self._parse_type()
        return FuncDefExpr(name, visibility, static, explicit, rv,
                           *self._parse_signature())
        return FuncDefExpr(name, visibility, static, explicit, rv,
                           *self._parse_signature(), virtual = virtual)

    def parse_class(self):
        visibility, static = self._parse_visibility_static()
        return ClassDefExpr(self._parse_type(), visibility, static)
        typename = self._parse_type()
        parent = None
        self.skip_ws()
        parents = []
        if self.skip_string(':'):
            while not self.eof:
                self.skip_ws()
                classname_pos = self.pos
                pvisibility, pstatic = self._parse_visibility_static()
                if pstatic:
                    self.fail('unexpected static keyword, got %r' %
                              self.definition[classname_pos:])
                parents.append(ClassDefExpr(self._parse_type(), pvisibility, pstatic))
                if not self.skip_string(','):
                    break
        return ClassDefExpr(typename, visibility, static, parents)
    def read_rest(self):
        rv = self.definition[self.pos:]
@ -1138,7 +1167,7 @@ class OCVObject(ObjectDescription):
        lname = self.__class__.langname
        node += nodes.strong(lname + ":", lname + ":")
        node += addnodes.desc_name(" ", " ")

        if obj.visibility != 'public':
            node += addnodes.desc_annotation(obj.visibility,
                                             obj.visibility)
@ -1212,8 +1241,8 @@ class OCVClassObject(OCVObject):
    object_annotation = "class "
    object_long_name = "class"

    def attach_modifiers(self, node, obj):
        if obj.visibility != 'public':
    def attach_modifiers(self, node, obj, skip_visibility = 'public'):
        if obj.visibility != skip_visibility:
            node += addnodes.desc_annotation(obj.visibility,
                                             obj.visibility)
            node += nodes.Text(' ')
@ -1231,6 +1260,15 @@ class OCVClassObject(OCVObject):
        self.attach_modifiers(signode, cls)
        signode += addnodes.desc_annotation(self.__class__.object_annotation, self.__class__.object_annotation)
        self.attach_name(signode, cls.name)
        first_parent = True
        for p in cls.parents:
            if first_parent:
                signode += nodes.Text(' : ')
                first_parent = False
            else:
                signode += nodes.Text(', ')
            self.attach_modifiers(signode, p, None)
            self.attach_name(signode, p.name)
class OCVStructObject(OCVClassObject):
    object_annotation = "struct "
@ -1263,6 +1301,9 @@ class OCVMemberObject(OCVObject):
        return ''

    def parse_definition(self, parser):
        parent_class = self.env.temp_data.get('ocv:parent')
        if parent_class is None:
            parser.fail("missing parent structure/class")
        return parser.parse_member_object()

    def describe_signature(self, signode, obj):
@ -1298,7 +1339,12 @@ class OCVFunctionObject(OCVObject):
            self.attach_type(param, arg.type)
            param += nodes.Text(u' ')
            #param += nodes.emphasis(unicode(arg.name), unicode(arg.name))
            param += nodes.strong(unicode(arg.name), unicode(arg.name))
            sbrIdx = unicode(arg.name).find("[")
            if sbrIdx < 0:
                param += nodes.strong(unicode(arg.name), unicode(arg.name))
            else:
                param += nodes.strong(unicode(arg.name)[:sbrIdx], unicode(arg.name)[:sbrIdx])
                param += nodes.Text(unicode(arg.name)[sbrIdx:])
            if arg.default is not None:
                def_ = u'=' + unicode(arg.default)
                #param += nodes.emphasis(def_, def_)
@ -1325,6 +1371,9 @@ class OCVFunctionObject(OCVObject):
        if func.explicit:
            signode += addnodes.desc_annotation('explicit', 'explicit')
            signode += nodes.Text(' ')
        if func.virtual:
            signode += addnodes.desc_annotation('virtual', 'virtual')
            signode += nodes.Text(' ')
        # return value is None for things with a reverse return value
        # such as casting operator definitions or constructors
        # and destructors.
@ -1380,7 +1429,7 @@ class OCVXRefRole(XRefRole):
class OCVCFunctionObject(OCVFunctionObject):
    langname = "C"

class OCVJavaFunctionObject(OCVFunctionObject):
    langname = "Java"
@ -1430,7 +1479,7 @@ class OCVDomain(Domain):
    initial_data = {
        'objects': {},  # fullname -> docname, objtype
    }

    def __init__(self, env):
        Domain.__init__(self, env)
        self.data['objects2'] = {}
@ -1496,14 +1545,14 @@ class OCVDomain(Domain):
    def get_objects(self):
        for refname, (docname, type, theid) in self.data['objects'].iteritems():
            yield (refname, refname, type, docname, refname, 1)

    def get_type_name(self, type, primary=False):
        """
        Return full name for given ObjType.
        """
        if primary:
            return type.lname

        return {
            'class': _('C++ class'),
            'struct': _('C/C++ struct'),
@ -1516,6 +1565,6 @@ class OCVDomain(Domain):
            'type': _('C/C++ type'),
            'namespace': _('C++ namespace'),
        }.get(type.lname, _('%s %s') % (self.label, type.lname))
def setup(app):
    app.add_domain(OCVDomain)
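
A small sketch (not part of the diff) of what the extended parser now accepts, assuming doc/ocv.py is importable; ``parse_class`` is shown in the hunk above, and the base-class list lands in ``ClassDefExpr.parents``::

    from ocv import DefinitionParser

    # base-class lists after ":" are now parsed instead of being rejected
    cls = DefinitionParser(u"GFTTDetector : public FeatureDetector").parse_class()
    print unicode(cls.name), [unicode(p.name) for p in cls.parents]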

@ -199,7 +199,7 @@ protected:
    int is_supported(const char* supp_modes_key, const char* mode)
    {
        const char* supported_modes = params.get(supp_modes_key);
        return strstr(supported_modes, mode) > 0;
        return (supported_modes && mode && (strstr(supported_modes, mode) > 0));
    }
float getFocusDistance(int focus_distance_type)

@ -111,11 +111,11 @@ calibrateCamera
---------------
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON))
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) )
.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize[, cameraMatrix[, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
.. ocv:cfunction:: double cvCalibrateCamera2(const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* pointCounts, CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs, CvMat* rvecs=NULL, CvMat* tvecs=NULL, int flags=0, CvTermCriteria term_crit = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )
.. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* object_points, const CvMat* image_points, const CvMat* point_counts, CvSize image_size, CvMat* camera_matrix, CvMat* distortion_coeffs, CvMat* rotation_vectors=NULL, CvMat* translation_vectors=NULL, int flags=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )
.. ocv:pyoldfunction:: cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags=0)-> None
@ -127,7 +127,7 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
In the old interface all the vectors of object points from different views are concatenated together.
:param pointCounts: In the old interface this is a vector of integers, containing as many elements, as the number of views of the calibration pattern. Each element is the number of points in each view. Usually, all the elements are the same and equal to the number of feature points on the calibration pattern.
:param point_counts: In the old interface this is a vector of integers, containing as many elements, as the number of views of the calibration pattern. Each element is the number of points in each view. Usually, all the elements are the same and equal to the number of feature points on the calibration pattern.
:param imageSize: Size of the image used only to initialize the intrinsic camera matrix.
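
A hedged usage sketch (not part of this commit) matching the ``cv2.calibrateCamera`` signature above; the views are synthesized with ``cv2.projectPoints`` from an assumed 9x6 planar pattern, so the example is self-contained::

    import numpy as np
    import cv2

    objp = np.zeros((9*6, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    K = np.float64([[800, 0, 320], [0, 800, 240], [0, 0, 1]])

    object_points, image_points = [], []
    for i, rx in enumerate((0.1, -0.3, 0.4)):          # three differently tilted views
        imgp, _ = cv2.projectPoints(objp, np.float64([rx, 0.3 - i*0.3, 0.0]),
                                    np.float64([-4., -3., 12.]), K, np.zeros(5))
        object_points.append(objp)
        image_points.append(imgp.reshape(-1, 2).astype(np.float32))

    rms, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
        object_points, image_points, (640, 480))
    print "RMS reprojection error:", rms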
@ -268,7 +268,7 @@ For points in an image of a stereo pair, computes the corresponding epilines in
.. ocv:function:: void computeCorrespondEpilines( InputArray points, int whichImage, InputArray F, OutputArray lines )
.. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int whichImage, const CvMat* F, CvMat* lines)
.. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int which_image, const CvMat* fundamental_matrix, CvMat* correspondent_lines )
.. ocv:pyoldfunction:: cv.ComputeCorrespondEpilines(points, whichImage, F, lines) -> None
@ -344,10 +344,8 @@ Converts points to/from homogeneous coordinates.
.. ocv:function:: void convertPointsHomogeneous( InputArray src, OutputArray dst )
.. ocv:pyfunction:: cv2.convertPointsHomogeneous(src[, dst]) -> dst
.. ocv:cfunction:: void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous( src, dst ) -> None
.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous(src, dst) -> None
:param src: Input array or vector of 2D, 3D, or 4D points.
@ -391,7 +389,7 @@ Decomposes a projection matrix into a rotation matrix and a camera matrix.
.. ocv:pyfunction:: cv2.decomposeProjectionMatrix(projMatrix[, cameraMatrix[, rotMatrix[, transVect[, rotMatrixX[, rotMatrixY[, rotMatrixZ[, eulerAngles]]]]]]]) -> cameraMatrix, rotMatrix, transVect, rotMatrixX, rotMatrixY, rotMatrixZ, eulerAngles
.. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat *projMatrix, CvMat *cameraMatrix, CvMat *rotMatrix, CvMat *transVect, CvMat *rotMatrX=NULL, CvMat *rotMatrY=NULL, CvMat *rotMatrZ=NULL, CvPoint3D64f *eulerAngles=NULL)
.. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat * projMatr, CvMat * calibMatr, CvMat * rotMatr, CvMat * posVect, CvMat * rotMatrX=NULL, CvMat * rotMatrY=NULL, CvMat * rotMatrZ=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.DecomposeProjectionMatrix(projMatrix, cameraMatrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None) -> eulerAngles
@ -428,7 +426,7 @@ Renders the detected chessboard corners.
.. ocv:pyfunction:: cv2.drawChessboardCorners(image, patternSize, corners, patternWasFound) -> None
.. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize patternSize, CvPoint2D32f* corners, int count, int patternWasFound )
.. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, CvPoint2D32f* corners, int count, int pattern_was_found )
.. ocv:pyoldfunction:: cv.DrawChessboardCorners(image, patternSize, corners, patternWasFound)-> None
:param image: Destination image. It must be an 8-bit color image.
@ -447,11 +445,11 @@ findChessboardCorners
-------------------------
Finds the positions of internal corners of the chessboard.
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners
.. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize patternSize, CvPoint2D32f* corners, int* cornerCount=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH )
.. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyoldfunction:: cv.FindChessboardCorners(image, patternSize, flags=CV_CALIB_CB_ADAPTIVE_THRESH) -> corners
:param image: Source chessboard view. It must be an 8-bit grayscale or color image.
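
A hedged sketch (not part of this commit); the input file name is an assumption::

    import cv2

    img = cv2.imread("left01.jpg", 0)        # 8-bit grayscale chessboard view (assumed)
    found, corners = cv2.findChessboardCorners(img, (9, 6))
    if found:
        # refine the detected corners to sub-pixel accuracy before calibration
        cv2.cornerSubPix(img, corners, (11, 11), (-1, -1),
                         (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.01))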
@ -508,7 +506,7 @@ Finds the centers in the grid of circles.
.. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector() )
.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> centers
.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> retval, centers
:param image: Grid view of source circles. It must be an 8-bit grayscale or color image.
@ -551,13 +549,13 @@ solvePnP
------------
Finds an object pose from 3D-2D point correspondences.
.. ocv:function:: void solvePnP( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int flags = CV_ITERATIVE )
.. ocv:function:: bool solvePnP( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int flags=ITERATIVE )
.. ocv:pyfunction:: cv2.solvePnP( objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, flags]]]] ) -> rvec, tvec
.. ocv:pyfunction:: cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, flags]]]]) -> retval, rvec, tvec
.. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* cameraMatrix, const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec, int useExtrinsicGuess=0 )
.. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* object_points, const CvMat* image_points, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* rotation_vector, CvMat* translation_vector, int use_extrinsic_guess=0 )
.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2( objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 )-> None
.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 ) -> None
:param objectPoints: Array of object points in the object coordinate space, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. ``vector<Point3f>`` can be also passed here.
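
A short sketch (not part of this commit) with synthetic data, matching the Python signature above::

    import numpy as np
    import cv2

    # four corners of a 10 cm square and their (assumed) projections
    obj = np.float32([[0, 0, 0], [0.1, 0, 0], [0.1, 0.1, 0], [0, 0.1, 0]])
    img = np.float32([[320, 240], [400, 243], [397, 321], [318, 316]])
    K = np.float64([[800, 0, 320], [0, 800, 240], [0, 0, 1]])

    ok, rvec, tvec = cv2.solvePnP(obj, img, K, None)
    R, _ = cv2.Rodrigues(rvec)      # rotation vector -> 3x3 rotation matrix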
@ -587,7 +585,7 @@ solvePnPRansac
------------------
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
.. ocv:function:: void solvePnPRansac( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = CV_ITERATIVE )
.. ocv:function:: void solvePnPRansac( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = ITERATIVE )
.. ocv:pyfunction:: cv2.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, iterationsCount[, reprojectionError[, minInliersCount[, inliers[, flags]]]]]]]]) -> rvec, tvec, inliers
@ -628,8 +626,8 @@ Calculates a fundamental matrix from the corresponding points in two images.
.. ocv:pyfunction:: cv2.findFundamentalMat(points1, points2[, method[, param1[, param2[, mask]]]]) -> retval, mask
.. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamentalMatrix, int method=CV_FM_RANSAC, double param1=1., double param2=0.99, CvMat* status=NULL)
.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> None
.. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamental_matrix, int method=CV_FM_RANSAC, double param1=3., double param2=0.99, CvMat* status=NULL )
.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> retval
:param points1: Array of ``N`` points from the first image. The point coordinates should be floating-point (single or double precision).
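
A hedged sketch (not part of this commit) using synthetic correspondences::

    import numpy as np
    import cv2

    pts1 = (np.random.rand(50, 2) * 640).astype(np.float32)
    pts2 = pts1 + np.float32([4, 0])     # a pure horizontal shift, for illustration
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC, 3.0, 0.99)
    inliers1 = pts1[mask.ravel() == 1]   # keep only points the robust method accepted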
@ -693,9 +691,9 @@ Finds a perspective transformation between two planes.
.. ocv:pyfunction:: cv2.findHomography(srcPoints, dstPoints[, method[, ransacReprojThreshold[, mask]]]) -> retval, mask
.. ocv:cfunction:: void cvFindHomography( const CvMat* srcPoints, const CvMat* dstPoints, CvMat* H, int method=0, double ransacReprojThreshold=3, CvMat* status=NULL)
.. ocv:cfunction:: int cvFindHomography( const CvMat* src_points, const CvMat* dst_points, CvMat* homography, int method=0, double ransacReprojThreshold=3, CvMat* mask=0 )
.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method, ransacReprojThreshold=3.0, status=None)-> None
.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method=0, ransacReprojThreshold=3.0, status=None) -> None
:param srcPoints: Coordinates of the points in the original plane, a matrix of the type ``CV_32FC2`` or ``vector<Point2f>`` .
@ -717,7 +715,7 @@ Finds a perspective transformation between two planes.
then the point :math:`i` is considered an outlier. If ``srcPoints`` and ``dstPoints`` are measured in pixels, it usually makes sense to set this parameter somewhere in the range of 1 to 10.
:param status: Optional output mask set by a robust method ( ``CV_RANSAC`` or ``CV_LMEDS`` ). Note that the input mask values are ignored.
:param mask: Optional output mask set by a robust method ( ``CV_RANSAC`` or ``CV_LMEDS`` ). Note that the input mask values are ignored.
The functions find and return the perspective transformation :math:`H` between the source and the destination planes:
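
For illustration (not part of this commit), a sketch that recovers a known homography from synthetic correspondences::

    import numpy as np
    import cv2

    H_true = np.float64([[1, 0.02, 5], [-0.01, 1, 10], [0, 0, 1]])
    src = (np.random.rand(100, 1, 2) * 640).astype(np.float32)
    dst = cv2.perspectiveTransform(src, H_true)
    H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 3.0)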
@ -773,13 +771,13 @@ estimateAffine3D
--------------------
Computes an optimal affine transformation between two 3D point sets.
.. ocv:function:: int estimateAffine3D(InputArray srcpt, InputArray dstpt, OutputArray out, OutputArray inliers, double ransacThreshold = 3.0, double confidence = 0.99)
.. ocv:function:: int estimateAffine3D(InputArray src, InputArray dst, OutputArray out, OutputArray inliers, double ransacThreshold = 3, double confidence = 0.99)
.. ocv:pyfunction:: cv2.estimateAffine3D(srcpt, dstpt[, out[, inliers[, ransacThreshold[, confidence]]]]) -> retval, out, inliers
.. ocv:pyfunction:: cv2.estimateAffine3D(src, dst[, out[, inliers[, ransacThreshold[, confidence]]]]) -> retval, out, inliers
:param srcpt: First input 3D point set.
:param src: First input 3D point set.
:param dstpt: Second input 3D point set.
:param dst: Second input 3D point set.
:param out: Output 3D affine transformation matrix :math:`3 \times 4` .
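
A sketch (not part of this commit) with a synthetic rigid motion::

    import numpy as np
    import cv2

    src = np.random.rand(20, 3).astype(np.float32)
    R, _ = cv2.Rodrigues(np.float64([0, 0.2, 0]))
    dst = (src.dot(R.T) + [0.1, 0.0, 0.0]).astype(np.float32)
    retval, out, inliers = cv2.estimateAffine3D(src, dst)   # out is the 3x4 transform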
@ -815,13 +813,13 @@ getOptimalNewCameraMatrix
-----------------------------
Returns the new camera matrix based on the free scaling parameter.
.. ocv:function:: Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, Size imageSize, double alpha, Size newImageSize=Size(), Rect* validPixROI=0, bool centerPrincipalPoint=false)
.. ocv:function:: Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, Size imageSize, double alpha, Size newImgSize=Size(), Rect* validPixROI=0, bool centerPrincipalPoint=false )
.. ocv:pyfunction:: cv2.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha[, newImgSize[, centerPrincipalPoint]]) -> retval, validPixROI
.. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCoeffs, CvSize imageSize, double alpha, CvMat* newCameraMatrix, CvSize newImageSize=cvSize(0, 0), CvRect* validPixROI=0, int centerPrincipalPoint=0)
.. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, const CvMat* dist_coeffs, CvSize image_size, double alpha, CvMat* new_camera_matrix, CvSize new_imag_size=cvSize(0,0), CvRect* valid_pixel_ROI=0, int center_principal_point=0 )
.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0) -> None
.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0, centerPrincipalPoint=0) -> None
:param cameraMatrix: Input camera matrix.
@ -831,9 +829,9 @@ Returns the new camera matrix based on the free scaling parameter.
:param alpha: Free scaling parameter between 0 (when all the pixels in the undistorted image are valid) and 1 (when all the source image pixels are retained in the undistorted image). See :ocv:func:`stereoRectify` for details.
:param newCameraMatrix: Output new camera matrix.
:param new_camera_matrix: Output new camera matrix.
:param newImageSize: Image size after rectification. By default, it is set to ``imageSize`` .
:param new_imag_size: Image size after rectification. By default, it is set to ``imageSize`` .
:param validPixROI: Optional output rectangle that outlines all-good-pixels region in the undistorted image. See ``roi1, roi2`` description in :ocv:func:`stereoRectify` .
@ -854,15 +852,15 @@ Finds an initial camera matrix from 3D-2D point correspondences.
.. ocv:pyfunction:: cv2.initCameraMatrix2D(objectPoints, imagePoints, imageSize[, aspectRatio]) -> retval
.. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* pointCounts, CvSize imageSize, CvMat* cameraMatrix, double aspectRatio=1.)
.. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* object_points, const CvMat* image_points, const CvMat* npoints, CvSize image_size, CvMat* camera_matrix, double aspect_ratio=1. )
.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, aspectRatio=1.) -> None
.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, npoints, imageSize, cameraMatrix, aspectRatio=1.) -> None
:param objectPoints: Vector of vectors of the calibration pattern points in the calibration pattern coordinate space. In the old interface all the per-view vectors are concatenated. See :ocv:func:`calibrateCamera` for details.
:param imagePoints: Vector of vectors of the projections of the calibration pattern points. In the old interface all the per-view vectors are concatenated.
:param pointCounts: The integer vector of point counters for each view.
:param npoints: The integer vector of point counters for each view.
:param imageSize: Image size in pixels used to initialize the principal point.
@ -903,7 +901,8 @@ Projects 3D points to an image plane.
.. ocv:pyfunction:: cv2.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs[, imagePoints[, jacobian[, aspectRatio]]]) -> imagePoints, jacobian
.. ocv:cfunction:: void cvProjectPoints2( const CvMat* objectPoints, const CvMat* rvec, const CvMat* tvec, const CvMat* cameraMatrix, const CvMat* distCoeffs, CvMat* imagePoints, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL )
.. ocv:cfunction:: void cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, const CvMat* translation_vector, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* image_points, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL, double aspect_ratio=0 )
.. ocv:pyoldfunction:: cv.ProjectPoints2(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, dpdrot=None, dpdt=None, dpdf=None, dpdc=None, dpddist=None)-> None
:param objectPoints: Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or ``vector<Point3f>`` ), where N is the number of points in the view.
@ -943,11 +942,11 @@ reprojectImageTo3D
----------------------
Reprojects a disparity image to 3D space.
.. ocv:function:: void reprojectImageTo3D( InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int depth=-1 )
.. ocv:function:: void reprojectImageTo3D( InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int ddepth=-1 )
.. ocv:pyfunction:: cv2.reprojectImageTo3D(disparity, Q[, _3dImage[, handleMissingValues[, ddepth]]]) -> _3dImage
.. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparity, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0)
.. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparityImage, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0 )
.. ocv:pyoldfunction:: cv.ReprojectImageTo3D(disparity, _3dImage, Q, handleMissingValues=0) -> None
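
For illustration (not part of this commit), with a constant synthetic disparity map; in practice ``Q`` comes from ``cv2.stereoRectify``::

    import numpy as np
    import cv2

    disp = np.ones((240, 320), np.float32) * 16          # true disparity values
    Q = np.float32([[1, 0, 0, -160], [0, 1, 0, -120],
                    [0, 0, 0, 800], [0, 0, 10, 0]])      # assumed rectification matrix
    xyz = cv2.reprojectImageTo3D(disp, Q)                # 3-channel (X, Y, Z) image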
@ -978,18 +977,18 @@ RQDecomp3x3
---------------
Computes an RQ decomposition of 3x3 matrices.
.. ocv:function:: Vec3d RQDecomp3x3( InputArray M, OutputArray R, OutputArray Q, OutputArray Qx=noArray(), OutputArray Qy=noArray(), OutputArray Qz=noArray() )
.. ocv:function:: Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, OutputArray Qx=noArray(), OutputArray Qy=noArray(), OutputArray Qz=noArray() )
.. ocv:pyfunction:: cv2.RQDecomp3x3(src[, mtxR[, mtxQ[, Qx[, Qy[, Qz]]]]]) -> retval, mtxR, mtxQ, Qx, Qy, Qz
.. ocv:cfunction:: void cvRQDecomp3x3( const CvMat *M, CvMat *R, CvMat *Q, CvMat *Qx=NULL, CvMat *Qy=NULL, CvMat *Qz=NULL, CvPoint3D64f *eulerAngles=NULL)
.. ocv:cfunction:: void cvRQDecomp3x3( const CvMat * matrixM, CvMat * matrixR, CvMat * matrixQ, CvMat * matrixQx=NULL, CvMat * matrixQy=NULL, CvMat * matrixQz=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.RQDecomp3x3(M, R, Q, Qx=None, Qy=None, Qz=None) -> eulerAngles
:param M: 3x3 input matrix.
:param src: 3x3 input matrix.
:param R: Output 3x3 upper-triangular matrix.
:param mtxR: Output 3x3 upper-triangular matrix.
:param Q: Output 3x3 orthogonal matrix.
:param mtxQ: Output 3x3 orthogonal matrix.
:param Qx: Optional output 3x3 rotation matrix around x-axis.
@ -1083,11 +1082,11 @@ The constructors.
.. ocv:function:: StereoBM::StereoBM()
.. ocv:function:: StereoBM::StereoBM(int preset, int ndisparities=0, int SADWindowSize=21)
.. ocv:pyfunction:: cv2.StereoBM.StereoBM(preset[, ndisparities[, SADWindowSize]]) -> <StereoBM object>
.. ocv:pyfunction:: cv2.StereoBM([preset[, ndisparities[, SADWindowSize]]]) -> <StereoBM object>
.. ocv:cfunction:: CvStereoBMState* cvCreateStereoBMState( int preset=CV_STEREO_BM_BASIC, int ndisparities=0 )
.. ocv:cfunction:: CvStereoBMState* cvCreateStereoBMState( int preset=CV_STEREO_BM_BASIC, int numberOfDisparities=0 )
.. ocv:pyoldfunction:: cv.CreateStereoBMState(preset=CV_STEREO_BM_BASIC, ndisparities=0)-> StereoBMState
.. ocv:pyoldfunction:: cv.CreateStereoBMState(preset=CV_STEREO_BM_BASIC, numberOfDisparities=0)-> CvStereoBMState
:param preset: specifies the whole set of algorithm parameters, one of:
@ -1109,7 +1108,7 @@ StereoBM::operator()
-----------------------
Computes disparity using the BM algorithm for a rectified stereo pair.
.. ocv:function:: void StereoBM::operator()(InputArray left, InputArray right, OutputArray disp, int disptype=CV_16S )
.. ocv:function:: void StereoBM::operator()( InputArray left, InputArray right, OutputArray disparity, int disptype=CV_16S )
.. ocv:pyfunction:: cv2.StereoBM.compute(left, right[, disparity[, disptype]]) -> disparity
@ -1121,7 +1120,7 @@ Computes disparity using the BM algorithm for a rectified stereo pair.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It has the same size as the input images. When ``disptype==CV_16S``, the map is a 16-bit signed single-channel image, containing disparity values scaled by 16. To get the true disparity values from such fixed-point representation, you will need to divide each ``disp`` element by 16. If ``disptype==CV_32F``, the disparity map will already contain the real disparity values on output.
:param disparity: Output disparity map. It has the same size as the input images. When ``disptype==CV_16S``, the map is a 16-bit signed single-channel image, containing disparity values scaled by 16. To get the true disparity values from such fixed-point representation, you will need to divide each ``disp`` element by 16. If ``disptype==CV_32F``, the disparity map will already contain the real disparity values on output.
:param disptype: Type of the output disparity map, ``CV_16S`` (default) or ``CV_32F``.
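
A hedged sketch (not part of this commit); the image names are assumptions, and the pair is assumed to be rectified 8-bit grayscale::

    import numpy as np
    import cv2

    left = cv2.imread("tsukuba_l.png", 0)
    right = cv2.imread("tsukuba_r.png", 0)
    bm = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET, ndisparities=32, SADWindowSize=15)
    disp16 = bm.compute(left, right)            # CV_16S, disparities scaled by 16
    disp = disp16.astype(np.float32) / 16.0     # true disparity values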
@ -1182,7 +1181,7 @@ StereoSGBM::StereoSGBM
.. ocv:function:: StereoSGBM::StereoSGBM( int minDisparity, int numDisparities, int SADWindowSize, int P1=0, int P2=0, int disp12MaxDiff=0, int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, bool fullDP=false)
.. ocv:pyfunction:: cv2.StereoSGBM.StereoSGBM(minDisparity, numDisparities, SADWindowSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, fullDP]]]]]]]]) -> <StereoSGBM object>
.. ocv:pyfunction:: cv2.StereoSGBM([minDisparity, numDisparities, SADWindowSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, fullDP]]]]]]]]]) -> <StereoSGBM object>
Initializes ``StereoSGBM`` and sets parameters to custom values.
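
A hedged sketch (not part of this commit), reusing the assumed rectified pair from the ``StereoBM`` example above::

    import cv2

    left = cv2.imread("tsukuba_l.png", 0)
    right = cv2.imread("tsukuba_r.png", 0)
    sgbm = cv2.StereoSGBM(minDisparity=0, numDisparities=64, SADWindowSize=5,
                          P1=8*5*5, P2=32*5*5)
    disp = sgbm.compute(left, right)            # CV_16S, disparities scaled by 16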
@ -1232,129 +1231,17 @@ The method executes the SGBM algorithm on a rectified stereo pair. See ``stereo_
.. note:: The method is not constant, so you should not use the same ``StereoSGBM`` instance from different threads simultaneously.
StereoVar
----------
.. ocv:class:: StereoVar
Class for computing stereo correspondence using the variational matching algorithm ::
    class StereoVar
    {
        StereoVar();
        StereoVar( int levels, double pyrScale,
                   int nIt, int minDisp, int maxDisp,
                   int poly_n, double poly_sigma, float fi,
                   float lambda, int penalization, int cycle,
                   int flags );
        virtual ~StereoVar();

        virtual void operator()(InputArray left, InputArray right, OutputArray disp);

        int levels;
        double pyrScale;
        int nIt;
        int minDisp;
        int maxDisp;
        int poly_n;
        double poly_sigma;
        float fi;
        float lambda;
        int penalization;
        int cycle;
        int flags;

        ...
    };
The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows:
* The automatic initialization of method's parameters is added.
* The method of Smart Iteration Distribution (SID) is implemented.
* The support of Multi-Level Adaptation Technique (MLAT) is not included.
* The method of dynamic adaptation of method's parameters is not included.
StereoVar::StereoVar
--------------------------
.. ocv:function:: StereoVar::StereoVar()
.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags )
The constructor
:param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if flag USE_AUTO_PARAMS is set.
:param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the number of iterations will be redistributed in such a way, that more iterations will be done on more coarser levels.)
:param minDisp: Minimum possible disparity value. Could be negative in case the left and right input images change places.
:param maxDisp: Maximum possible disparity value.
:param poly_n: Size of the pixel neighbourhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly_n = 3, 5 or 7
:param poly_sigma: Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1 , for poly_n=7 a good value would be poly_sigma=1.5
:param fi: The smoothness parameter, ot the weight coefficient for the smoothness term.
:param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.)
:param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param flags: The operation flags; can be a combination of the following:
* USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation.
* USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase.
* USE_SMART_ID: Use the smart iteration distribution (SID).
* USE_AUTO_PARAMS: Allow the method to initialize the main parameters.
* USE_MEDIAN_FILTERING: Use the median filer of the solution in the post processing phase.
The first constructor initializes ``StereoVar`` with all the default parameters. So, you only have to set ``StereoVar::maxDisp`` and / or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value.
StereoVar::operator ()
-----------------------
.. ocv:function:: void StereoVar::operator()(InputArray left, InputArray right, OutputArray disp)
Computes disparity using the variational algorithm for a rectified stereo pair.
:param left: Left 8-bit single-channel or 3-channel image.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It is a 8-bit signed single-channel image of the same size as the input image.
The method executes the variational algorithm on a rectified stereo pair. See ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method.
**Note**:
The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
stereoCalibrate
-------------------
Calibrates the stereo camera.
.. ocv:function:: double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags=CALIB_FIX_INTRINSIC )
.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize[, cameraMatrix1[, distCoeffs1[, cameraMatrix2[, distCoeffs2[, R[, T[, E[, F[, criteria[, flags]]]]]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
.. ocv:cfunction:: double cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, const CvMat* image_points2, const CvMat* npoints, CvMat* camera_matrix1, CvMat* dist_coeffs1, CvMat* camera_matrix2, CvMat* dist_coeffs2, CvSize image_size, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
.. ocv:pyoldfunction:: cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
:param objectPoints: Vector of vectors of the calibration pattern points.
@ -1439,10 +1326,11 @@ stereoRectify
-----------------
Computes rectification transforms for each head of a calibrated stereo camera.
.. ocv:function:: void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, InputArray cameraMatrix2, InputArray distCoeffs2, Size imageSize, InputArray R, InputArray T, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags=CALIB_ZERO_DISPARITY, double alpha=-1, Size newImageSize=Size(), Rect* validPixROI1=0, Rect* validPixROI2=0 )
.. ocv:cfunction:: void cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, CvSize image_size, const CvMat* R, const CvMat* T, CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, CvMat* Q=0, int flags=CV_CALIB_ZERO_DISPARITY, double alpha=-1, CvSize new_image_size=cvSize(0,0), CvRect* valid_pix_ROI1=0, CvRect* valid_pix_ROI2=0 )
.. ocv:pyoldfunction:: cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0)) -> (roi1, roi2)
:param cameraMatrix1: First camera matrix.
@ -1474,9 +1362,9 @@ Computes rectification transforms for each head of a calibrated stereo camera.
:param newImageSize: New image resolution after rectification. The same size should be passed to :ocv:func:`initUndistortRectifyMap` (see the ``stereo_calib.cpp`` sample in OpenCV samples directory). When (0,0) is passed (default), it is set to the original ``imageSize`` . Setting it to larger value can help you preserve details in the original image, especially when there is a big radial distortion.
:param validPixROI1: Optional output rectangle inside the first rectified image where all the pixels are valid. If ``alpha=0`` , the ROI covers the whole image. Otherwise, it is likely to be smaller (see the picture below).
:param validPixROI2: Optional output rectangle inside the second rectified image where all the pixels are valid. If ``alpha=0`` , the ROI covers the whole image. Otherwise, it is likely to be smaller (see the picture below).
The function computes the rotation matrices for each camera that (virtually) make both camera image planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies the dense stereo correspondence problem. The function takes the matrices computed by
:ocv:func:`stereoCalibrate` as input. As output, it provides two rotation matrices and also two projection matrices in the new coordinates. The function distinguishes the following two cases:
@ -1529,7 +1417,7 @@ Computes a rectification transform for an uncalibrated stereo camera.
.. ocv:pyfunction:: cv2.stereoRectifyUncalibrated(points1, points2, F, imgSize[, H1[, H2[, threshold]]]) -> retval, H1, H2
.. ocv:cfunction:: int cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, const CvMat* F, CvSize img_size, CvMat* H1, CvMat* H2, double threshold=5 )
.. ocv:pyoldfunction:: cv.StereoRectifyUncalibrated(points1, points2, F, imageSize, H1, H2, threshold=5)-> None
@ -1539,7 +1427,7 @@ Computes a rectification transform for an uncalibrated stereo camera.
:param F: Input fundamental matrix. It can be computed from the same set of point pairs using :ocv:func:`findFundamentalMat` .
:param imgSize: Size of the image.
:param H1: Output rectification homography matrix for the first image.

@ -81,7 +81,7 @@ CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
@ -93,7 +93,7 @@ enum
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
@ -117,7 +117,7 @@ CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
@ -203,20 +203,19 @@ CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
@ -330,7 +329,7 @@ typedef struct CvStereoBMState
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
@ -353,13 +352,13 @@ CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
@ -384,11 +383,11 @@ public:
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
@ -427,7 +426,7 @@ CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
//! variant of findHomography for backward compatibility
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method=0, double ransacReprojThreshold=3);
//! Computes RQ decomposition of 3x3 matrix
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx=noArray(),
@ -440,7 +439,7 @@ CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray
OutputArray rotMatrixX=noArray(),
OutputArray rotMatrixY=noArray(),
OutputArray rotMatrixZ=noArray(),
OutputArray eulerAngles=noArray() );
//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
@ -467,14 +466,14 @@ CV_EXPORTS_W void projectPoints( InputArray objectPoints,
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
enum
{
ITERATIVE=CV_ITERATIVE,
EPNP=CV_EPNP,
P3P=CV_P3P
};
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess=false, int flags=ITERATIVE);
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
@ -483,12 +482,12 @@ CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
InputArray distCoeffs,
OutputArray rvec,
OutputArray tvec,
bool useExtrinsicGuess = false,
int iterationsCount = 100,
float reprojectionError = 8.0,
int minInliersCount = 100,
OutputArray inliers = noArray(),
int flags = ITERATIVE);
//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
@ -501,10 +500,9 @@ enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
//! draws the checkerboard pattern (found or partly found) in the image
@ -574,11 +572,10 @@ CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
CV_OUT InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,
OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags=CALIB_FIX_INTRINSIC );
//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
@ -606,7 +603,7 @@ CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distC
OutputArray P1, OutputArray P2, OutputArray P3,
OutputArray Q, double alpha, Size newImgSize,
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
//! returns the optimal new camera matrix
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize=Size(),
@ -614,16 +611,16 @@ CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray
//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
//! for backward compatibility
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
//! the algorithm for finding fundamental matrix
enum
{
FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
@ -642,7 +639,7 @@ CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
double param1=3., double param2=0.99);
//! finds coordinates of epipolar lines corresponding the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );
@ -657,7 +654,7 @@ template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();
/*!
Block Matching Stereo Correspondence Algorithm
The class implements the BM stereo correspondence algorithm by K. Konolige.
*/
class CV_EXPORTS_W StereoBM
@ -683,7 +680,7 @@ public:
/*!
Semi-Global Block Matching Stereo Correspondence Algorithm
The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some of its modifications.
*/
class CV_EXPORTS_W StereoSGBM
@ -693,7 +690,7 @@ public:
//! the default constructor
CV_WRAP StereoSGBM();
//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
int P1=0, int P2=0, int disp12MaxDiff=0,
@ -742,11 +739,11 @@ CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues=false,
int ddepth=-1 );
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold=3, double confidence=0.99);
}
#endif

@ -3,3 +3,8 @@ contrib. Contributed/Experimental Stuff
***************************************
The module contains some recently added functionality that has not been stabilized, or functionality that is considered optional.
.. toctree::
:maxdepth: 2
stereo

@ -0,0 +1,117 @@
Stereo Correspondence
========================================
.. highlight:: cpp
StereoVar
----------
.. ocv:class:: StereoVar
Class for computing stereo correspondence using the variational matching algorithm ::
class StereoVar
{
StereoVar();
StereoVar( int levels, double pyrScale,
int nIt, int minDisp, int maxDisp,
int poly_n, double poly_sigma, float fi,
float lambda, int penalization, int cycle,
int flags);
virtual ~StereoVar();
virtual void operator()(InputArray left, InputArray right, OutputArray disp);
int levels;
double pyrScale;
int nIt;
int minDisp;
int maxDisp;
int poly_n;
double poly_sigma;
float fi;
float lambda;
int penalization;
int cycle;
int flags;
...
};
The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows:
* The automatic initialization of method's parameters is added.
* The method of Smart Iteration Distribution (SID) is implemented.
* The support of Multi-Level Adaptation Technique (MLAT) is not included.
* The method of dynamic adaptation of method's parameters is not included.
StereoVar::StereoVar
--------------------------
.. ocv:function:: StereoVar::StereoVar()
.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags )
The constructor
:param levels: The number of pyramid layers, including the initial image. ``levels=1`` means that no extra layers are created and only the original images are used. This parameter is ignored if the USE_AUTO_PARAMS flag is set.
:param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. ``pyrScale=0.5`` means the classical pyramid, where each next layer is half the size of the previous one. (This parameter is ignored if the USE_AUTO_PARAMS flag is set.)
:param nIt: The number of iterations the algorithm does at each pyramid level. (If the USE_SMART_ID flag is set, the iterations are redistributed so that more of them are done on the coarser levels.)
:param minDisp: Minimum possible disparity value. It can be negative if the left and right input images are swapped.
:param maxDisp: Maximum possible disparity value.
:param poly_n: Size of the pixel neighbourhood used to find polynomial expansion in each pixel. Larger values mean that the image is approximated with smoother surfaces, yielding a more robust algorithm but a more blurred motion field. Typically, ``poly_n`` = 3, 5, or 7.
:param poly_sigma: Standard deviation of the Gaussian used to smooth the derivatives that serve as a basis for the polynomial expansion. For ``poly_n=5``, you can set ``poly_sigma=1.1``; for ``poly_n=7``, a good value is ``poly_sigma=1.5``.
:param fi: The smoothness parameter, or the weight coefficient for the smoothness term.
:param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.)
:param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param flags: The operation flags; can be a combination of the following:
* USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation.
* USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase.
* USE_SMART_ID: Use the smart iteration distribution (SID).
* USE_AUTO_PARAMS: Allow the method to initialize the main parameters.
* USE_MEDIAN_FILTERING: Use the median filter of the solution in the post-processing phase.
The first constructor initializes ``StereoVar`` with all the default parameters, so at minimum you only have to set ``StereoVar::maxDisp`` and/or ``StereoVar::minDisp``. The second constructor enables you to set each parameter to a custom value.
StereoVar::operator ()
-----------------------
.. ocv:function:: void StereoVar::operator()( const Mat& left, const Mat& right, Mat& disp )
Computes disparity using the variational algorithm for a rectified stereo pair.
:param left: Left 8-bit single-channel or 3-channel image.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It is an 8-bit signed single-channel image of the same size as the input images.
The method executes the variational algorithm on a rectified stereo pair. See ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method.
.. note:: The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
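A minimal usage sketch (the file names are placeholders; the pair is assumed to be rectified): ::

    StereoVar var;          // default parameters
    var.minDisp = 0;        // at minimum, set the disparity range
    var.maxDisp = 64;

    Mat left  = imread("left.png");
    Mat right = imread("right.png");
    Mat disp;
    var(left, right, disp); // 8-bit single-channel disparity map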

@ -115,7 +115,7 @@ Rect\_
Template class for 2D rectangles, described by the following parameters:
* Coordinates of the top-left corner. This is a default interpretation of ``Rect_::x`` and ``Rect_::y`` in OpenCV. Though, in your algorithms you may count ``x`` and ``y`` from the bottom-left corner.
* Rectangle width and height.
OpenCV typically assumes that the top and left boundary of the rectangle are inclusive, while the right and bottom boundaries are not. For example, the method ``Rect_::contains`` returns ``true`` if

.. math::

    x \leq pt.x < x+width,
    y \leq pt.y < y+height
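For illustration, a short sketch of this boundary convention: ::

    Rect r(0, 0, 10, 10);
    bool a = r.contains(Point(0, 0));    // true: the top and left boundaries are inclusive
    bool b = r.contains(Point(9, 9));    // true
    bool c = r.contains(Point(10, 10));  // false: the right and bottom boundaries are exclusive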
@ -182,7 +182,7 @@ The class represents rotated (i.e. not up-right) rectangles on a plane. Each rec
:param angle: The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
:param box: The rotated rectangle parameters as the obsolete CvBox2D structure.
.. ocv:function:: void RotatedRect::points( Point2f pts[] ) const
.. ocv:function:: Rect RotatedRect::boundingRect() const
.. ocv:function:: RotatedRect::operator CvBox2D() const
@ -228,7 +228,7 @@ Matx
Template class for small matrices whose type and size are known at compilation time: ::
template<typename _Tp, int m, int n> class Matx {...};
typedef Matx<float, 1, 2> Matx12f;
typedef Matx<double, 1, 2> Matx12d;
...
@ -246,7 +246,7 @@ Template class for small matrices whose type and size are known at compilation t
...
typedef Matx<float, 6, 6> Matx66f;
typedef Matx<double, 6, 6> Matx66d;
If you need a more flexible type, use :ocv:class:`Mat` . The elements of the matrix ``M`` are accessible using the ``M(i,j)`` notation. Most of the common matrix operations (see also
:ref:`MatrixExpressions` ) are available. To do an operation on ``Matx`` that is not implemented, you can easily convert the matrix to
``Mat`` and back. ::
@ -256,7 +256,7 @@ If you need a more flexible type, use :ocv:class:`Mat` . The elements of the mat
7, 8, 9);
cout << sum(Mat(m*m.t())) << endl;
Vec
---
.. ocv:class:: Vec
@ -264,7 +264,7 @@ Vec
Template class for short numerical vectors, a partial case of :ocv:class:`Matx`: ::
template<typename _Tp, int n> class Vec : public Matx<_Tp, n, 1> {...};
typedef Vec<uchar, 2> Vec2b;
typedef Vec<uchar, 3> Vec3b;
typedef Vec<uchar, 4> Vec4b;
@ -286,8 +286,8 @@ Template class for short numerical vectors, a partial case of :ocv:class:`Matx`:
typedef Vec<double, 3> Vec3d;
typedef Vec<double, 4> Vec4d;
typedef Vec<double, 6> Vec6d;
It is possible to convert ``Vec<T,2>`` to/from ``Point_``, ``Vec<T,3>`` to/from ``Point3_`` , and ``Vec<T,4>`` to :ocv:struct:`CvScalar` or :ocv:class:`Scalar_`. Use ``operator[]`` to access the elements of ``Vec``.
All the expected vector operations are also implemented:
@ -309,7 +309,7 @@ Scalar\_
Template class for a 4-element vector derived from Vec. ::
template<typename _Tp> class Scalar_ : public Vec<_Tp, 4> { ... };
typedef Scalar_<double> Scalar;
Being derived from ``Vec<_Tp, 4>`` , ``Scalar_`` and ``Scalar`` can be used just as typical 4-element vectors. In addition, they can be converted to/from ``CvScalar`` . The type ``Scalar`` is widely used in OpenCV to pass pixel values.
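For example, a pixel value can be specified as follows (a short illustrative snippet): ::

    Mat img(240, 320, CV_8UC3, Scalar(255, 0, 0)); // a blue image (BGR channel order)
    img.setTo(Scalar(0, 255, 0));                  // repaint it green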
@ -344,7 +344,7 @@ The static method ``Range::all()`` returns a special variable that means "the wh
}
.. _Ptr:
Ptr
---
@ -491,7 +491,7 @@ So, the data layout in ``Mat`` is fully compatible with ``CvMat``, ``IplImage``,
There are many different ways to create a ``Mat`` object. The most popular options are listed below:
*
Use the ``create(nrows, ncols, type)`` method or the similar ``Mat(nrows, ncols, type[, fillValue])`` constructor. A new array of the specified size and type is allocated. ``type`` has the same meaning as in the ``cvCreateMat`` method.
For example, ``CV_8UC1`` means a 8-bit single-channel array, ``CV_32FC2`` means a 2-channel (complex) floating-point array, and so on.
@ -508,7 +508,7 @@ There are many different ways to create a ``Mat`` object. The most popular optio
As noted in the introduction to this chapter, ``create()`` allocates only a new array when the shape or type of the current array are different from the specified ones.
*
Create a multi-dimensional array:
::
@ -522,11 +522,11 @@ There are many different ways to create a ``Mat`` object. The most popular optio
It passes the number of dimensions =1 to the ``Mat`` constructor but the created array will be 2-dimensional with the number of columns set to 1. So, ``Mat::dims`` is always >= 2 (can also be 0 when the array is empty).
*
Use a copy constructor or assignment operator where there can be an array or expression on the right side (see below). As noted in the introduction, the array assignment is an O(1) operation because it only copies the header and increases the reference counter. The ``Mat::clone()`` method can be used to get a full (deep) copy of the array when you need it.
*
Construct a header for a part of another array. It can be a single row, single column, several rows, several columns, rectangular region in the array (called a *minor* in algebra) or a diagonal. Such operations are also O(1) because the new header references the same data. You can actually modify a part of the array using this feature, for example:
::
@ -568,7 +568,7 @@ There are many different ways to create a ``Mat`` object. The most popular optio
As in case of whole matrices, if you need a deep copy, use the ``clone()`` method of the extracted sub-matrices.
*
Make a header for user-allocated data. It can be useful to do the following:
#.
@ -610,7 +610,7 @@ There are many different ways to create a ``Mat`` object. The most popular optio
..
*
Use MATLAB-style array initializers, ``zeros(), ones(), eye()``, for example:
::
@ -621,7 +621,7 @@ There are many different ways to create a ``Mat`` object. The most popular optio
..
*
Use a comma-separated initializer:
::
@ -701,48 +701,48 @@ This is a list of implemented matrix operations that can be combined in arbitrar
*
Addition, subtraction, negation:
``A+B, A-B, A+s, A-s, s+A, s-A, -A``
*
Scaling:
``A*alpha``
*
Per-element multiplication and division:
``A.mul(B), A/B, alpha/A``
*
Matrix multiplication:
``A*B``
*
Transposition:
``A.t()`` (means ``A``\ :sup:`T`)
*
Matrix inversion and pseudo-inversion, solving linear systems and least-squares problems:
``A.inv([method])`` (~ ``A``\ :sup:`-1`), ``A.inv([method])*B`` (~ ``X: AX=B``)
*
Comparison:
``A cmpop B, A cmpop alpha, alpha cmpop A``, where ``cmpop`` is one of ``>, >=, ==, !=, <=, <``. The result of comparison is an 8-bit single-channel mask whose elements are set to 255 (if the particular element or pair of elements satisfies the condition) or 0.
*
Bitwise logical operations: ``A logicop B, A logicop s, s logicop A, ~A``, where ``logicop`` is one of ``&, |, ^``.
*
Element-wise minimum and maximum:
``min(A, B), min(A, alpha), max(A, B), max(A, alpha)``
*
Element-wise absolute value:
``abs(A)``
*
Cross-product, dot-product:
``A.cross(B)``
``A.dot(B)``
*
Any function of matrix or matrices and scalars that returns a matrix or a scalar, such as ``norm``, ``mean``, ``sum``, ``countNonZero``, ``trace``, ``determinant``, ``repeat``, and others.
@ -761,17 +761,17 @@ Here are examples of matrix expressions:
// compute pseudo-inverse of A, equivalent to A.inv(DECOMP_SVD)
SVD svd(A);
Mat pinvA = svd.vt.t()*Mat::diag(1./svd.w)*svd.u.t();
// compute the new vector of parameters in the Levenberg-Marquardt algorithm
x -= (A.t()*A + lambda*Mat::eye(A.cols,A.cols,A.type())).inv(DECOMP_CHOLESKY)*(A.t()*err);
// sharpen image using "unsharp mask" algorithm
Mat blurred; double sigma = 1, threshold = 5, amount = 1;
GaussianBlur(img, blurred, Size(), sigma, sigma);
Mat lowContrastMask = abs(img - blurred) < threshold;
Mat sharpened = img*(1+amount) + blurred*(-amount);
img.copyTo(sharpened, lowContrastMask);
..
@ -782,43 +782,41 @@ Mat::Mat
Various Mat constructors
.. ocv:function:: Mat::Mat()
.. ocv:function:: Mat::Mat(int rows, int cols, int type)
.. ocv:function:: Mat::Mat(Size size, int type)
.. ocv:function:: Mat::Mat(int rows, int cols, int type, const Scalar& s)
.. ocv:function:: Mat::Mat(Size size, int type, const Scalar& s)
.. ocv:function:: Mat::Mat(const Mat& m)
.. ocv:function:: Mat::Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP)
.. ocv:function:: Mat::Mat(Size size, int type, void* data, size_t step=AUTO_STEP)
.. ocv:function:: Mat::Mat( const Mat& m, const Range& rowRange, const Range& colRange=Range::all() )
.. ocv:function:: Mat::Mat(const Mat& m, const Rect& roi)
.. ocv:function:: Mat::Mat(const CvMat* m, bool copyData=false)
.. ocv:function:: Mat::Mat(const IplImage* img, bool copyData=false)
.. ocv:function:: template<typename T, int n> explicit Mat::Mat(const Vec<T, n>& vec, bool copyData=true)
.. ocv:function:: template<typename T, int m, int n> explicit Mat::Mat(const Matx<T, m, n>& vec, bool copyData=true)
.. ocv:function:: template<typename T> explicit Mat::Mat(const vector<T>& vec, bool copyData=false)
.. ocv:function:: Mat::Mat(const MatExpr& expr)
.. ocv:function:: Mat::Mat(int ndims, const int* sizes, int type)
.. ocv:function:: Mat::Mat(int ndims, const int* sizes, int type, const Scalar& s)
.. ocv:function:: Mat::Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0)
.. ocv:function:: Mat::Mat(const Mat& m, const Range* ranges)
:param ndims: Array dimensionality.
@ -857,8 +855,6 @@ Various Mat constructors
:param ranges: Array of selected ranges of ``m`` along each dimensionality.
:param expr: Matrix expression. See :ref:`MatrixExpressions`.
These are various constructors that form a matrix. As noted in the :ref:`AutomaticAllocation`,
often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with
:ocv:func:`Mat::create` . In the former case, the old content is de-referenced.
@ -879,7 +875,7 @@ Provides matrix assignment operators.
.. ocv:function:: Mat& Mat::operator = (const Mat& m)
.. ocv:function:: Mat& Mat::operator =( const MatExpr& expr )
.. ocv:function:: Mat& Mat::operator = (const Scalar& s)
@ -891,23 +887,13 @@ Provides matrix assignment operators.
These are available assignment operators. Since they all are very different, make sure to read the operator parameters description.
Mat::operator MatExpr
-------------------------
Provides a ``Mat`` -to- ``MatExpr`` cast operator.
.. ocv:function:: Mat::operator MatExpr_<Mat, Mat>() const
The cast operator should not be called explicitly. It is used internally by the
:ref:`MatrixExpressions` engine.
Mat::row
------------
Creates a matrix header for the specified matrix row.
.. ocv:function:: Mat Mat::row(int y) const
:param y: A 0-based row index.
The method makes a new header for the specified matrix row and returns it. This is an O(1) operation, regardless of the matrix size. The underlying data of the new matrix is shared with the original matrix. Here is an example of one of the classical basic matrix processing operations, ``axpy``, used by LU and many other algorithms: ::
@ -940,9 +926,9 @@ Mat::col
------------
Creates a matrix header for the specified matrix column.
.. ocv:function:: Mat Mat::col(int x) const
:param x: A 0-based column index.
The method makes a new header for the specified matrix column and returns it. This is an O(1) operation, regardless of the matrix size. The underlying data of the new matrix is shared with the original matrix. See also the
:ocv:func:`Mat::row` description.
@ -988,11 +974,11 @@ Mat::diag
-------------
Extracts a diagonal from a matrix, or creates a diagonal matrix.
.. ocv:function:: Mat Mat::diag( int d=0 ) const
.. ocv:function:: static Mat Mat::diag( const Mat& d )
:param d: Single-column matrix that forms a diagonal matrix or index of the diagonal, with the following values:
* **d=0** is the main diagonal.
@ -1000,8 +986,6 @@ Extracts a diagonal from a matrix, or creates a diagonal matrix.
* **d<0** is a diagonal from the upper half. For example, ``d=-1`` means the diagonal is set immediately above the main one.
The method makes a new header for the specified matrix diagonal. The new matrix is represented as a single-column matrix. Similarly to
:ocv:func:`Mat::row` and
:ocv:func:`Mat::col` , this is an O(1) operation.
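For illustration, a short sketch of both variants: ::

    Mat A = Mat::eye(4, 4, CV_32F);
    Mat d = A.diag();          // header for the main diagonal of A; no data is copied
    d.setTo(Scalar::all(5));   // this modifies the diagonal of A itself
    Mat D = Mat::diag(d);      // builds a new 4x4 diagonal matrix from the column d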
@ -1075,9 +1059,9 @@ Mat::setTo
--------------
Sets all or some of the array elements to the specified value.
.. ocv:function:: Mat& Mat::setTo( InputArray value, InputArray mask=noArray() )
:param value: Assigned scalar converted to the actual array type.
:param mask: Operation mask of the same size as ``*this``. This is an advanced variant of the ``Mat::operator=(const Scalar& s)`` operator.
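For example, a sketch that resets only the masked elements (the mask here is produced by a comparison, which yields an 8-bit mask): ::

    Mat img(100, 100, CV_8UC1);
    randu(img, Scalar::all(0), Scalar::all(255));
    Mat mask = img > 200;             // 8-bit single-channel mask
    img.setTo(Scalar::all(0), mask);  // zero out only the bright pixels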
@ -1189,7 +1173,7 @@ Returns a zero array of the specified size and type.
.. ocv:function:: static MatExpr Mat::zeros(int rows, int cols, int type)
.. ocv:function:: static MatExpr Mat::zeros(Size size, int type)
.. ocv:function:: static MatExpr Mat::zeros( int ndims, const int* sz, int type )
:param ndims: Array dimensionality.
@ -1197,9 +1181,9 @@ Returns a zero array of the specified size and type.
:param cols: Number of columns.
:param size: Alternative to the matrix size specification ``Size(cols, rows)`` .
:param sz: Array of integers specifying the array shape.
:param type: Created matrix type.
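For example, a matrix can be created or re-initialized this way: ::

    Mat A;
    A = Mat::zeros(3, 3, CV_32F); // a new 3x3 matrix is allocated only if A is not
                                  // already a 3x3 floating-point matrix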
@ -1218,7 +1202,7 @@ Returns an array of all 1's of the specified size and type.
.. ocv:function:: static MatExpr Mat::ones(int rows, int cols, int type)
.. ocv:function:: static MatExpr Mat::ones(Size size, int type)
.. ocv:function:: static MatExpr Mat::ones( int ndims, const int* sz, int type )
:param ndims: Array dimensionality.
@ -1226,9 +1210,9 @@ Returns an array of all 1's of the specified size and type.
:param cols: Number of columns.
:param size: Alternative to the matrix size specification ``Size(cols, rows)`` .
:param sz: Array of integers specifying the array shape.
:param type: Created matrix type.
@ -1252,12 +1236,12 @@ Returns an identity matrix of the specified size and type.
:param cols: Number of columns.
:param size: Alternative matrix size specification as ``Size(cols, rows)`` .
:param type: Created matrix type.
The method returns a Matlab-style identity matrix initializer, similarly to
:ocv:func:`Mat::zeros`. Similarly to
:ocv:func:`Mat::ones`, you can use a scale operation to create a scaled identity matrix efficiently: ::
// make a 4x4 diagonal matrix with 0.1's on the diagonal.
@ -1278,8 +1262,8 @@ Allocates new array data if needed.
:param cols: New number of columns.
:param size: Alternative new matrix size specification: ``Size(cols, rows)``
:param sizes: Array of integers specifying a new array shape.
:param type: New matrix type.
@ -1288,8 +1272,8 @@ This is one of the key ``Mat`` methods. Most new-style OpenCV functions and meth
#.
If the current array shape and the type match the new ones, return immediately. Otherwise, de-reference the previous data by calling
:ocv:func:`Mat::release`.
#.
Initialize the new header.
@ -1367,7 +1351,8 @@ Mat::push_back
Adds elements to the bottom of the matrix.
.. ocv:function:: template<typename T> void Mat::push_back(const T& elem)
.. ocv:function:: void Mat::push_back( const Mat& m )
:param elem: Added element(s).
@ -1415,7 +1400,7 @@ Adjusts a submatrix size and position within the parent matrix.
:param dright: Shift of the right submatrix boundary to the right.
The method is complementary to
:ocv:func:`Mat::locateROI` . The typical use of these functions is to determine the submatrix position within the parent matrix and then shift the position somehow. Typically, it can be required for filtering operations when pixels outside of the ROI should be taken into account. When all the method parameters are positive, the ROI needs to grow in all directions by the specified amount, for example: ::
A.adjustROI(2, 2, 2, 2);
@ -1428,7 +1413,7 @@ In this example, the matrix size is increased by 4 elements in each direction. T
The function is used internally by the OpenCV filtering functions, like
:ocv:func:`filter2D` , morphological operations, and so on.
.. seealso:: :ocv:func:`copyMakeBorder`
Mat::operator()
@ -1439,13 +1424,13 @@ Extracts a rectangular submatrix.
.. ocv:function:: Mat Mat::operator()( const Rect& roi ) const
.. ocv:function:: Mat Mat::operator()( const Range* ranges ) const
:param rowRange: Start and end row of the extracted submatrix. The upper boundary is not included. To select all the rows, use ``Range::all()``.
:param colRange: Start and end column of the extracted submatrix. The upper boundary is not included. To select all the columns, use ``Range::all()``.
:param roi: Extracted submatrix specified as a rectangle.
:param ranges: Array of selected ranges along each array dimension.
@ -1626,7 +1611,7 @@ Mat::step1
--------------
Returns a normalized step.
.. ocv:function:: size_t Mat::step1( int i=0 ) const
The method returns a matrix step divided by
:ocv:func:`Mat::elemSize1()` . It can be useful to quickly access an arbitrary matrix element.
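For illustration, a sketch of raw element access with the normalized step (assuming a single-channel floating-point matrix): ::

    Mat M(10, 10, CV_32F);
    const float* data = (const float*)M.data;
    int i = 2, j = 3;
    // step1() is the row step measured in elements rather than bytes:
    float value = data[i*M.step1() + j];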
@ -1654,15 +1639,15 @@ Mat::ptr
------------
Returns a pointer to the specified matrix row.
.. ocv:function:: uchar* Mat::ptr(int i0=0)
.. ocv:function:: const uchar* Mat::ptr(int i0=0) const
.. ocv:function:: template<typename _Tp> _Tp* Mat::ptr(int i0=0)
.. ocv:function:: template<typename _Tp> const _Tp* Mat::ptr(int i0=0) const
:param i0: A 0-based row index.
The methods return ``uchar*`` or typed pointer to the specified matrix row. See the sample in
:ocv:func:`Mat::isContinuous` to know how to use these methods.
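For illustration, a minimal sketch of row-wise access via the typed variant: ::

    Mat img(480, 640, CV_8UC1);
    for( int i = 0; i < img.rows; i++ )
    {
        uchar* row = img.ptr<uchar>(i);   // pointer to the beginning of row i
        for( int j = 0; j < img.cols; j++ )
            row[j] = saturate_cast<uchar>(i + j);
    }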
@ -1696,7 +1681,7 @@ Returns a reference to the specified array element.
:param j: Index along the dimension 1
:param k: Index along the dimension 2
:param pt: Element position specified as ``Point(j,i)`` .
:param idx: Array of ``Mat::dims`` indices.
@ -1816,23 +1801,24 @@ To use ``Mat_`` for multi-channel images/matrices, pass ``Vec`` as a ``Mat_`` pa
InputArray
----------
.. ocv:class:: InputArray
This is the proxy class for passing read-only input arrays into OpenCV functions. It is defined as ::
typedef const _InputArray& InputArray;
where ``_InputArray`` is a class that can be constructed from ``Mat``, ``Mat_<T>``, ``Matx<T, m, n>``, ``std::vector<T>``, ``std::vector<std::vector<T> >`` or ``std::vector<Mat>``. It can also be constructed from a matrix expression.
Since this is mostly an implementation-level class, and its interface may change in future versions, we do not describe it in detail. There are a few key things, though, that should be kept in mind:
* When you see in the reference manual or in OpenCV source code a function that takes ``InputArray``, it means that you can actually pass ``Mat``, ``Matx``, ``vector<T>`` etc. (see above the complete list).
* Optional input arguments: If some of the input arrays may be empty, pass ``cv::noArray()`` (or simply ``cv::Mat()`` as you probably did before).
* The class is designed solely for passing parameters. That is, normally you *should not* declare class members, local and global variables of this type.
* If you want to design your own function or a class method that can operate on arrays of multiple types, you can use ``InputArray`` (or ``OutputArray``) for the respective parameters. Inside a function you should use the ``_InputArray::getMat()`` method to construct a matrix header for the array (without copying data). ``_InputArray::kind()`` can be used to distinguish ``Mat`` from ``vector<>`` etc., but normally it is not needed.
Here is how you can use a function that takes ``InputArray`` ::
std::vector<Point2f> vec;
@ -1852,12 +1838,12 @@ Here is how such a function can be implemented (for simplicity, we implement a v
// unless _src and/or _m are matrix expressions.
Mat src = _src.getMat(), m = _m.getMat();
CV_Assert( src.type() == CV_32FC2 && m.type() == CV_32F && m.size() == Size(3, 2) );
// [re]create the output array so that it has the proper size and type.
// In case of Mat it calls Mat::create, in case of STL vector it calls vector::resize.
_dst.create(src.size(), src.type());
Mat dst = _dst.getMat();
for( int i = 0; i < src.rows; i++ )
for( int j = 0; j < src.cols; j++ )
{
@ -1874,12 +1860,13 @@ Here is how such a function can be implemented (for simplicity, we implement a v
There is another related type, ``InputArrayOfArrays``, which is currently defined as a synonym for ``InputArray``: ::
typedef InputArray InputArrayOfArrays;
It denotes function arguments that are either vectors of vectors or vectors of matrices. A separate synonym is needed to generate Python/Java etc. wrappers properly. At the function implementation level their use is similar, but ``_InputArray::getMat(idx)`` should be used to get a header for the idx-th component of the outer vector and ``_InputArray::size().area()`` should be used to find the number of components (vectors/matrices) of the outer vector.
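For illustration, a sketch of how such an argument might be processed inside a function (the function name is hypothetical): ::

    void processAll(InputArrayOfArrays arrays)
    {
        int n = (int)arrays.size().area();  // number of components in the outer vector
        for( int i = 0; i < n; i++ )
        {
            Mat a = arrays.getMat(i);       // header for the i-th component; no data is copied
            // ... process a ...
        }
    }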
OutputArray
-----------
.. ocv:class:: OutputArray : public InputArray
This type is very similar to ``InputArray`` except that it is used for input/output and output function parameters. Just like with ``InputArray``, OpenCV users should not care about ``OutputArray``; they just pass ``Mat``, ``vector<T>`` etc. to the functions. The same limitation as for ``InputArray`` applies here too: **do not explicitly create OutputArray instances**.
@ -2298,7 +2285,7 @@ Template sparse n-dimensional array class derived from
SparseMatIterator_<_Tp> end();
SparseMatConstIterator_<_Tp> end() const;
};
``SparseMat_`` is a thin wrapper on top of :ocv:class:`SparseMat` created in the same way as ``Mat_`` .
It simplifies notation of some operations. ::
@ -2310,17 +2297,18 @@ It simplifies notation of some operations. ::
Algorithm
---------
.. ocv:class:: Algorithm
This is a base class for all more or less complex algorithms in OpenCV, especially for classes of algorithms, for which there can be multiple implementations. The examples are stereo correspondence (for which there are algorithms like block matching, semi-global block matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck etc.).
The class provides the following features for all derived classes:
* so-called "virtual constructor". That is, each ``Algorithm`` derivative is registered at program start, and you can get the list of registered algorithms and create an instance of a particular algorithm by its name (see ``Algorithm::create``). If you plan to add your own algorithms, it is a good practice to add a unique prefix to your algorithms to distinguish them from other algorithms.
* setting/retrieving algorithm parameters by name. If you have used the video capturing functionality from the OpenCV highgui module, you are probably familiar with ``cvSetCaptureProperty()``, ``cvGetCaptureProperty()``, ``VideoCapture::set()`` and ``VideoCapture::get()``. ``Algorithm`` provides a similar method where instead of integer IDs you specify the parameter names as text strings. See ``Algorithm::set`` and ``Algorithm::get`` for details.
* reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store all its parameters and then read them back. There is no need to re-implement it each time.
Here is an example of using SIFT in your application via the Algorithm interface: ::
#include "opencv2/opencv.hpp"
@ -2329,9 +2317,9 @@ Here is example of SIFT use in your application via Algorithm interface: ::
...
initModule_nonfree(); // to load SURF/SIFT etc.
Ptr<Feature2D> sift = Algorithm::create<Feature2D>("Feature2D.SIFT");
FileStorage fs("sift_params.xml", FileStorage::READ);
if( fs.isOpened() ) // if we have file with parameters, read them
{
@ -2341,17 +2329,17 @@ Here is example of SIFT use in your application via Algorithm interface: ::
else // else modify the parameters and store them; user can later edit the file to use different parameters
{
sift->set("contrastThreshold", 0.01f); // lower the contrast threshold, compared to the default value
{
WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
sift->write(fs);
}
}
Mat image = imread("myimage.png", 0), descriptors;
vector<KeyPoint> keypoints;
(*sift)(image, noArray(), keypoints, descriptors);
Algorithm::get
--------------
@ -2399,15 +2387,15 @@ Stores algorithm parameters in a file storage
.. ocv:function:: void Algorithm::write(FileStorage& fs) const
:param fs: File storage.
The method stores all the algorithm parameters (in alphabetic order) to the file storage. The method is virtual. If you define your own ``Algorithm`` derivative, you can override the method and store some extra information. However, it's rarely needed. Here are some examples:
* SIFT feature detector (from the nonfree module). The class only stores algorithm parameters and no keypoints or their descriptors. Therefore, it's enough to store the algorithm parameters, which is what ``Algorithm::write()`` does; there is no dedicated ``SIFT::write()``.
* Background subtractor (from the video module). It has the algorithm parameters and also the current background model. However, the background model is not stored. First, it is rather big. Second, if it were stored, it would likely become irrelevant on the next run (because of a shifted camera, changed background, different lighting etc.). Therefore, ``BackgroundSubtractorMOG`` and ``BackgroundSubtractorMOG2`` also rely on the standard ``Algorithm::write()`` to store just the algorithm parameters.
* Expectation Maximization (from the ml module). The algorithm finds a mixture of gaussians that best approximates the user data. In this case, the model may be re-used on the next run to test new data against the trained statistical model. So EM needs to store the model. However, since the model is described by a few parameters that are available as read-only algorithm parameters (i.e. they are available via ``EM::get()``), EM also relies on ``Algorithm::write()`` to store both the EM parameters and the model (represented by the read-only algorithm parameters).
Algorithm::read
---------------
@ -2416,7 +2404,7 @@ Reads algorithm parameters from a file storage
.. ocv:function:: void Algorithm::read(const FileNode& fn)
:param fn: File node of the file storage.
The method reads all the algorithm parameters from the specified node of a file storage. Similarly to ``Algorithm::write()``, if you implement an algorithm that needs to read some extra data and/or re-compute some internal data, you may override the method.
Algorithm::getList
@ -2426,7 +2414,7 @@ Returns the list of registered algorithms
.. ocv:function:: void Algorithm::getList(vector<string>& algorithms)
:param algorithms: The output vector of algorithm names.
This static method returns the list of registered algorithms in alphabetical order. Here is how to use it ::
vector<string> algorithms;
@ -2443,13 +2431,13 @@ Creates algorithm instance by name
.. ocv:function:: template<typename _Tp> Ptr<_Tp> Algorithm::create(const string& name)
:param name: The algorithm name, one of the names returned by ``Algorithm::getList()``.
This static method creates a new instance of the specified algorithm. If there is no such algorithm, the method will silently return a null pointer (which can be checked with the ``Ptr::empty()`` method). Also, you should specify the particular ``Algorithm`` subclass as ``_Tp`` (or simply ``Algorithm`` if you do not know it at that point). ::
Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
.. note:: This is an important note about the seemingly mysterious behavior of ``Algorithm::create()`` when it returns NULL while it should not. The reason is simple: ``Algorithm::create()`` resides in OpenCV's core module, while the algorithms themselves are implemented in other modules. If you create algorithms dynamically, the C++ linker may decide to throw away the modules where the actual algorithms are implemented, since you do not call any functions from those modules directly. To avoid this problem, you need to call ``initModule_<modulename>();`` somewhere at the beginning of the program, before ``Algorithm::create()``. For example, call ``initModule_nonfree()`` in order to use SURF/SIFT, call ``initModule_ml()`` to use expectation maximization, etc.
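A guarded creation might look like the following sketch (``initModule_video()`` and the error handling are illustrative): ::
initModule_video(); // pull in the module that implements MOG2
Ptr<BackgroundSubtractor> bgfg =
    Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
if( bgfg.empty() )
{
    cerr << "MOG2 is not registered; is the video module linked in?" << endl;
    return -1;
}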
Creating Own Algorithms
-----------------------
@ -2460,4 +2448,4 @@ The above methods are usually enough for users. If you want to make your own alg
* Add public virtual method ``AlgorithmInfo* info() const;`` to your class.
* Add constructor function, ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take http://code.opencv.org/svn/opencv/trunk/opencv/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters.
* Add some public function (e.g. ``initModule_<mymodule>()``) that calls ``info()`` of your algorithm, and put it into the same source file as the ``info()`` implementation. This forces the C++ linker to include this object file into the target application. See ``Algorithm::create()`` for details.

@ -7,17 +7,17 @@ kmeans
------
Finds centers of clusters and groups input samples around the clusters.
.. ocv:function:: double kmeans( InputArray samples, int clusterCount, InputOutputArray labels, TermCriteria criteria, int attempts, int flags, OutputArray centers=noArray() )
.. ocv:function:: double kmeans( InputArray data, int K, InputOutputArray bestLabels, TermCriteria criteria, int attempts, int flags, OutputArray centers=noArray() )
.. ocv:pyfunction:: cv2.kmeans(data, K, criteria, attempts, flags[, bestLabels[, centers]]) -> retval, bestLabels, centers
.. ocv:cfunction:: int cvKMeans2(const CvArr* samples, int clusterCount, CvArr* labels, CvTermCriteria criteria, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* centers=0, double* compactness=0)
.. ocv:cfunction:: int cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, CvTermCriteria termcrit, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* _centers=0, double* compactness=0 )
.. ocv:pyoldfunction:: cv.KMeans2(samples, clusterCount, labels, criteria)-> None
.. ocv:pyoldfunction:: cv.KMeans2(samples, nclusters, labels, termcrit, attempts=1, flags=0, centers=None) -> float
:param samples: Floating-point matrix of input samples, one row per sample.
:param clusterCount: Number of clusters to split the set by.
:param cluster_count: Number of clusters to split the set by.
:param labels: Input/output integer array that stores the cluster indices for every sample.
@ -40,7 +40,7 @@ Finds centers of clusters and groups input samples around the clusters.
:param compactness: The returned value that is described below.
The function ``kmeans`` implements a k-means algorithm that finds the
centers of ``clusterCount`` clusters and groups the input samples
centers of ``cluster_count`` clusters and groups the input samples
around the clusters. As an output,
:math:`\texttt{labels}_i` contains a 0-based cluster index for
the sample stored in the :math:`i^{th}` row of the ``samples`` matrix.
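A minimal usage sketch (the sample data and the cluster count are illustrative, not from the original docs): ::
Mat samples(100, 2, CV_32F); // 100 random 2-D points, one per row
randu(samples, Scalar(0), Scalar(100));
Mat labels, centers;
double compactness = kmeans(samples, 3, labels,
    TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
    5, KMEANS_PP_CENTERS, centers); // the best of 5 attempts is returned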

@ -30,11 +30,12 @@ circle
----------
Draws a circle.
.. ocv:function:: void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
.. ocv:pyfunction:: cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvCircle( CvArr* img, CvPoint center, int radius, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvCircle( CvArr* img, CvPoint center, int radius, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image where the circle is drawn.
@ -63,10 +64,11 @@ Clips the line against the image rectangle.
.. ocv:pyfunction:: cv2.clipLine(imgRect, pt1, pt2) -> retval, pt1, pt2
.. ocv:cfunction:: int cvClipLine( CvSize imgSize, CvPoint* pt1, CvPoint* pt2 )
.. ocv:pyoldfunction:: cv.ClipLine(imgSize, pt1, pt2) -> (clippedPt1, clippedPt2)
.. ocv:cfunction:: int cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 )
.. ocv:pyoldfunction:: cv.ClipLine(imgSize, pt1, pt2) -> (point1, point2)
:param imgSize: Image size. The image rectangle is ``Rect(0, 0, imgSize.width, imgSize.height)`` .
:param imgRect: Image rectangle.
@ -88,10 +90,12 @@ Draws a simple or thick elliptic arc or fills an ellipse sector.
.. ocv:pyfunction:: cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:pyfunction:: cv2.ellipse(img, box, color[, thickness[, lineType]]) -> None
.. ocv:cfunction:: void cvEllipse( CvArr* img, CvPoint center, CvSize axes, double angle, double startAngle, double endAngle, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:cfunction:: void cvEllipse( CvArr* img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Ellipse(img, center, axes, angle, start_angle, end_angle, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:cfunction:: void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:cfunction:: void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:pyoldfunction:: cv.EllipseBox(img, box, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
@ -130,19 +134,19 @@ ellipse2Poly
----------------
Approximates an elliptic arc with a polyline.
.. ocv:function:: void ellipse2Poly( Point center, Size axes, int angle, int startAngle, int endAngle, int delta, vector<Point>& pts )
.. ocv:function:: void ellipse2Poly( Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector<Point>& pts )
.. ocv:pyfunction:: cv2.ellipse2Poly(center, axes, angle, arcStart, arcEnd, delta) -> pts
:param center: Center of the arc.
:param axes: Half-sizes of the arc. See the :ocv:func:`ellipse` for details.
:param angle: Rotation angle of the ellipse in degrees. See the :ocv:func:`ellipse` for details.
:param startAngle: Starting angle of the elliptic arc in degrees.
:param arcStart: Starting angle of the elliptic arc in degrees.
:param endAngle: Ending angle of the elliptic arc in degrees.
:param arcEnd: Ending angle of the elliptic arc in degrees.
:param delta: Angle between the subsequent polyline vertices. It defines the approximation accuracy.
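For example (a sketch; the geometry values are arbitrary), the resulting polyline can be rendered with :ocv:func:`polylines`: ::
vector<Point> arc;
ellipse2Poly(Point(200, 200), Size(100, 50), 30, 0, 180, 5, arc);
Mat img(400, 400, CV_8UC3, Scalar::all(0));
const Point* pts = &arc[0];
int npts = (int)arc.size();
polylines(img, &pts, &npts, 1, false, Scalar(0, 255, 0), 2); // open curve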
@ -157,11 +161,12 @@ fillConvexPoly
------------------
Fills a convex polygon.
.. ocv:function:: void fillConvexPoly(Mat& img, const Point* pts, int npts, const Scalar& color, int lineType=8, int shift=0)
.. ocv:pyfunction:: cv2.fillConvexPoly(img, points, color[, lineType[, shift]]) -> None
.. ocv:cfunction:: void cvFillConvexPoly( CvArr* img, CvPoint* pts, int npts, CvScalar color, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.FillConvexPoly(img, pn, color, lineType=8, shift=0)-> None
:param img: Image.
@ -190,7 +195,8 @@ Fills the area bounded by one or more polygons.
.. ocv:pyfunction:: cv2.fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> None
.. ocv:cfunction:: void cvFillPoly( CvArr* img, CvPoint** pts, int* npts, int ncontours, CvScalar color, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, int contours, CvScalar color, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.FillPoly(img, polys, color, lineType=8, shift=0)-> None
:param img: Image.
@ -218,20 +224,21 @@ getTextSize
---------------
Calculates the width and height of a text string.
.. ocv:function:: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine)
.. ocv:pyfunction:: cv2.getTextSize(text, fontFace, fontScale, thickness) -> retval, baseLine
.. ocv:cfunction:: void cvGetTextSize( const char* textString, const CvFont* font, CvSize* textSize, int* baseline )
.. ocv:cfunction:: void cvGetTextSize( const char* text_string, const CvFont* font, CvSize* text_size, int* baseline )
.. ocv:pyoldfunction:: cv.GetTextSize(textString, font)-> (textSize, baseline)
:param text: Input text string.
:param fontFace: Font to use. See the :ocv:func:`putText` for details.
:param fontScale: Font scale. See the :ocv:func:`putText` for details.
:param thickness: Thickness of lines used to render the text. See :ocv:func:`putText` for details.
:param baseLine: Output parameter - y-coordinate of the baseline relative to the bottom-most text point.
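A short sketch of centering a string using the measured size (the text and font settings are arbitrary): ::
string text = "Hello";
int fontFace = FONT_HERSHEY_SIMPLEX, thickness = 2, baseline = 0;
double fontScale = 1.0;
Size textSize = getTextSize(text, fontFace, fontScale, thickness, &baseline);
Mat img(200, 400, CV_8UC3, Scalar::all(0));
// position the text so that it is centered in the image
Point org((img.cols - textSize.width)/2, (img.rows + textSize.height)/2);
putText(img, text, org, fontFace, fontScale, Scalar::all(255), thickness);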
@ -273,51 +280,51 @@ InitFont
--------
Initializes font structure (OpenCV 1.x API).
.. ocv:cfunction:: void cvInitFont( CvFont* font, int fontFace, double hscale, double vscale, double shear=0, int thickness=1, int lineType=8 )
.. ocv:cfunction:: void cvInitFont( CvFont* font, int font_face, double hscale, double vscale, double shear=0, int thickness=1, int line_type=8 )
:param font: Pointer to the font structure initialized by the function
:param fontFace: Font name identifier. Only a subset of Hershey fonts http://sources.isc.org/utils/misc/hershey-font.txt are supported now:
:param font_face: Font name identifier. Only a subset of Hershey fonts http://sources.isc.org/utils/misc/hershey-font.txt are supported now:
* **CV_FONT_HERSHEY_SIMPLEX** normal size sans-serif font
* **CV_FONT_HERSHEY_PLAIN** small size sans-serif font
* **CV_FONT_HERSHEY_DUPLEX** normal size sans-serif font (more complex than ``CV_FONT_HERSHEY_SIMPLEX`` )
* **CV_FONT_HERSHEY_COMPLEX** normal size serif font
* **CV_FONT_HERSHEY_TRIPLEX** normal size serif font (more complex than ``CV_FONT_HERSHEY_COMPLEX`` )
* **CV_FONT_HERSHEY_COMPLEX_SMALL** smaller version of ``CV_FONT_HERSHEY_COMPLEX``
* **CV_FONT_HERSHEY_SCRIPT_SIMPLEX** hand-writing style font
* **CV_FONT_HERSHEY_SCRIPT_COMPLEX** more complex variant of ``CV_FONT_HERSHEY_SCRIPT_SIMPLEX``
The parameter can be composed of one of the values above and an optional ``CV_FONT_ITALIC`` flag, which indicates an italic or oblique font.
:param hscale: Horizontal scale. If equal to ``1.0f`` , the characters have the original width depending on the font type. If equal to ``0.5f`` , the characters are of half the original width.
:param vscale: Vertical scale. If equal to ``1.0f`` , the characters have the original height depending on the font type. If equal to ``0.5f`` , the characters are of half the original height.
:param shear: Approximate tangent of the character slope relative to the vertical line. A zero value means a non-italic font, ``1.0f`` means about a 45 degree slope, etc.
:param thickness: Thickness of the text strokes
:param lineType: Type of the strokes, see :ocv:func:`line` description
:param line_type: Type of the strokes, see :ocv:func:`line` description
The function initializes the font structure that can be passed to text rendering functions.
.. seealso:: :ocv:cfunc:`PutText`
.. _Line:
line
--------
@ -327,7 +334,8 @@ Draws a line segment connecting two points.
.. ocv:pyfunction:: cv2.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Line(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
@ -402,13 +410,14 @@ rectangle
-------------
Draws a simple, thick, or filled up-right rectangle.
.. ocv:function:: void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
.. ocv:function:: void rectangle(Mat& img, Rect r, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
.. ocv:function:: void rectangle( Mat& img, Rect rec, const Scalar& color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:pyfunction:: cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Rectangle(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
@ -416,8 +425,8 @@ Draws a simple, thick, or filled up-right rectangle.
:param pt1: Vertex of the rectangle.
:param pt2: Vertex of the rectangle opposite to ``pt1`` .
:param r: Alternative specification of the drawn rectangle.
:param rec: Alternative specification of the drawn rectangle.
:param color: Rectangle color or brightness (grayscale image).
@ -439,9 +448,9 @@ Draws several polygonal curves.
.. ocv:pyfunction:: cv2.polylines(img, pts, isClosed, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvPolyLine( CvArr* img, CvPoint** pts, int* npts, int contours, int isClosed, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, int is_closed, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.PolyLine(img, polys, isClosed, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:pyoldfunction:: cv.PolyLine(img, polys, is_closed, color, thickness=1, lineType=8, shift=0) -> None
:param img: Image.
@ -464,6 +473,88 @@ Draws several polygonal curves.
The function ``polylines`` draws one or more polygonal curves.
drawContours
----------------
Draws contour outlines or filled contours.
.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() )
.. ocv:pyfunction:: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> None
.. ocv:cfunction:: void cvDrawContours( CvArr * img, CvSeq* contour, CvScalar external_color, CvScalar hole_color, int max_level, int thickness=1, int line_type=8, CvPoint offset=cvPoint(0,0) )
.. ocv:pyoldfunction:: cv.DrawContours(img, contour, external_color, hole_color, max_level, thickness=1, lineType=8, offset=(0, 0))-> None
:param image: Destination image.
:param contours: All the input contours. Each contour is stored as a point vector.
:param contourIdx: Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
:param color: Color of the contours.
:param thickness: Thickness of lines the contours are drawn with. If it is negative (for example, ``thickness=CV_FILLED`` ), the contour interiors are drawn.
:param lineType: Line connectivity. See :ocv:func:`line` for details.
:param hierarchy: Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see ``maxLevel`` ).
:param maxLevel: Maximal level for drawn contours. If it is 0, only the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when ``hierarchy`` is available.
:param offset: Optional contour shift parameter. Shift all the drawn contours by the specified :math:`\texttt{offset}=(dx,dy)` .
:param contour: Pointer to the first contour.
:param external_color: Color of external contours.
:param hole_color: Color of internal contours (holes).
The function draws contour outlines in the image if
:math:`\texttt{thickness} \ge 0` or fills the area bounded by the contours if
:math:`\texttt{thickness}<0` . The example below shows how to retrieve connected components from the binary image and label them: ::
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main( int argc, char** argv )
{
Mat src;
// the first command-line parameter must be a filename of the binary
// (black-n-white) image
if( argc != 2 || !(src=imread(argv[1], 0)).data)
return -1;
Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
src = src > 1;
namedWindow( "Source", 1 );
imshow( "Source", src );
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours( src, contours, hierarchy,
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
// iterate through all the top-level contours,
// draw each connected component with its own random color
int idx = 0;
for( ; idx >= 0; idx = hierarchy[idx][0] )
{
Scalar color( rand()&255, rand()&255, rand()&255 );
drawContours( dst, contours, idx, color, CV_FILLED, 8, hierarchy );
}
namedWindow( "Components", 1 );
imshow( "Components", dst );
waitKey(0);
}
putText
-----------
@ -471,7 +562,7 @@ Draws a text string.
.. ocv:function:: void putText( Mat& img, const string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=8, bool bottomLeftOrigin=false )
.. ocv:pyfunction:: cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, linetype[, bottomLeftOrigin]]]) -> None
.. ocv:pyfunction:: cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> None
.. ocv:cfunction:: void cvPutText( CvArr* img, const char* text, CvPoint org, const CvFont* font, CvScalar color )
.. ocv:pyoldfunction:: cv.PutText(img, text, org, font, color)-> None

@ -10,27 +10,27 @@ CvMemStorage
.. ocv:struct:: CvMemStorage
A storage for various OpenCV dynamic data structures, such as ``CvSeq``, ``CvSet`` etc.
.. ocv:member:: CvMemBlock* bottom
the first memory block in the double-linked list of blocks
.. ocv:member:: CvMemBlock* top
the current partially allocated memory block in the list of blocks
.. ocv:member:: CvMemStorage* parent
the parent storage (if any) from which the new memory blocks are borrowed.
.. ocv:member:: int free_space
number of free bytes in the ``top`` block
.. ocv:member:: int block_size
the total size of the memory blocks
Memory storage is a low-level structure used to store dynamically growing data structures such as sequences, contours, graphs, subdivisions, etc. It is organized as a list of memory blocks of equal size -
the ``bottom`` field is the beginning of the list of blocks and ``top`` is the currently used block, but not necessarily the last block of the list. All blocks between ``bottom`` and ``top``, not including the
@ -64,38 +64,38 @@ CvSeq
.. ocv:struct:: CvSeq
Dynamically growing sequence.
.. ocv:member:: int flags
sequence flags, including the sequence signature (CV_SEQ_MAGIC_VAL or CV_SET_MAGIC_VAL), type of the elements and some other information about the sequence.
.. ocv:member:: int header_size
size of the sequence header. It should be sizeof(CvSeq) at minimum. See :ocv:cfunc:`CreateSeq`.
.. ocv:member:: CvSeq* h_prev
.. ocv:member:: CvSeq* h_next
.. ocv:member:: CvSeq* v_prev
.. ocv:member:: CvSeq* v_next
pointers to other sequences in a sequence tree. Sequence trees are used to store hierarchical contour structures, retrieved by :ocv:cfunc:`FindContours`
.. ocv:member:: int total
the number of sequence elements
.. ocv:member:: int elem_size
size of each sequence element in bytes
.. ocv:member:: CvMemStorage* storage
memory storage where the sequence resides. It can be a NULL pointer.
.. ocv:member:: CvSeqBlock* first
pointer to the first data block
The structure ``CvSeq`` is the base for all OpenCV dynamic data structures.
There are two types of sequences - dense and sparse. The base type for dense
@ -228,9 +228,9 @@ ClearSet
--------
Clears a set.
.. ocv:cfunction:: void cvClearSet( CvSet* setHeader )
.. ocv:cfunction:: void cvClearSet( CvSet* set_header )
:param setHeader: Cleared set
:param set_header: Cleared set
The function removes all elements from the set. It has O(1) time complexity.
@ -362,11 +362,12 @@ CreateMemStorage
----------------
Creates memory storage.
.. ocv:cfunction:: CvMemStorage* cvCreateMemStorage( int blockSize=0 )
.. ocv:cfunction:: CvMemStorage* cvCreateMemStorage( int block_size=0 )
.. ocv:pyoldfunction:: cv.CreateMemStorage(blockSize=0) -> memstorage
:param blockSize: Size of the storage blocks in bytes. If it is 0, the block size is set to a default value - currently it is about 64K.
:param block_size: Size of the storage blocks in bytes. If it is 0, the block size is set to a default value - currently it is about 64K.
The function creates an empty memory storage. See
:ocv:struct:`CvMemStorage`
@ -376,14 +377,14 @@ CreateSeq
---------
Creates a sequence.
.. ocv:cfunction:: CvSeq* cvCreateSeq( int seqFlags, int headerSize, int elemSize, CvMemStorage* storage)
.. ocv:cfunction:: CvSeq* cvCreateSeq( int seq_flags, size_t header_size, size_t elem_size, CvMemStorage* storage )
:param seqFlags: Flags of the created sequence. If the sequence is not passed to any function working with a specific type of sequences, the sequence value may be set to 0, otherwise the appropriate type must be selected from the list of predefined sequence types.
:param seq_flags: Flags of the created sequence. If the sequence is not passed to any function working with a specific type of sequences, the sequence value may be set to 0, otherwise the appropriate type must be selected from the list of predefined sequence types.
:param headerSize: Size of the sequence header; must be greater than or equal to ``sizeof(CvSeq)`` . If a specific type or its extension is indicated, this type must fit the base type header.
:param header_size: Size of the sequence header; must be greater than or equal to ``sizeof(CvSeq)`` . If a specific type or its extension is indicated, this type must fit the base type header.
:param elemSize: Size of the sequence elements in bytes. The size must be consistent with the sequence type. For example, for a sequence of points to be created, the element type ``CV_SEQ_ELTYPE_POINT`` should be specified and the parameter ``elemSize`` must be equal to ``sizeof(CvPoint)`` .
:param elem_size: Size of the sequence elements in bytes. The size must be consistent with the sequence type. For example, for a sequence of points to be created, the element type ``CV_SEQ_ELTYPE_POINT`` should be specified and the parameter ``elem_size`` must be equal to ``sizeof(CvPoint)`` .
:param storage: Sequence location
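A sketch of creating a point sequence and reading an element back (the values are illustrative): ::
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* seq = cvCreateSeq( CV_SEQ_ELTYPE_POINT, sizeof(CvSeq),
                          sizeof(CvPoint), storage );
CvPoint pt = cvPoint(10, 20);
cvSeqPush( seq, &pt ); // append one point
CvPoint* p0 = (CvPoint*)cvGetSeqElem( seq, 0 ); // read it back
cvReleaseMemStorage( &storage ); // releases the sequence as well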
@ -480,13 +481,13 @@ FindGraphEdgeByPtr
------------------
Finds an edge in a graph by using its pointer.
.. ocv:cfunction:: CvGraphEdge* cvFindGraphEdgeByPtr( const CvGraph* graph, const CvGraphVtx* startVtx, const CvGraphVtx* endVtx )
.. ocv:cfunction:: CvGraphEdge* cvFindGraphEdgeByPtr( const CvGraph* graph, const CvGraphVtx* start_vtx, const CvGraphVtx* end_vtx )
:param graph: Graph
:param startVtx: Pointer to the starting vertex of the edge
:param start_vtx: Pointer to the starting vertex of the edge
:param endVtx: Pointer to the ending vertex of the edge. For an unoriented graph, the order of the vertex parameters does not matter.
:param end_vtx: Pointer to the ending vertex of the edge. For an unoriented graph, the order of the vertex parameters does not matter.
::
@ -529,7 +530,7 @@ GetSeqElem
----------
Returns a pointer to a sequence element according to its index.
.. ocv:cfunction:: char* cvGetSeqElem( const CvSeq* seq, int index )
.. ocv:cfunction:: schar* cvGetSeqElem( const CvSeq* seq, int index )
:param seq: Sequence
@ -587,9 +588,9 @@ GetSetElem
----------
Finds a set element by its index.
.. ocv:cfunction:: CvSetElem* cvGetSetElem( const CvSet* setHeader, int index )
.. ocv:cfunction:: CvSetElem* cvGetSetElem( const CvSet* set_header, int index )
:param setHeader: Set
:param set_header: Set
:param index: Index of the set element within a sequence
@ -723,11 +724,11 @@ GraphVtxDegree
--------------
Counts the number of edges incident to the vertex.
.. ocv:cfunction:: int cvGraphVtxDegree( const CvGraph* graph, int vtxIdx )
.. ocv:cfunction:: int cvGraphVtxDegree( const CvGraph* graph, int vtx_idx )
:param graph: Graph
:param vtxIdx: Index of the graph vertex
:param vtx_idx: Index of the graph vertex
The function returns the number of edges incident to the specified vertex, both incoming and outgoing. To count the edges, the following code is used:
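The snippet below is a reconstruction of that loop (the diff truncates the original): ::
CvGraphEdge* edge = vertex->first; int count = 0;
while( edge )
{
    edge = CV_NEXT_GRAPH_EDGE( edge, vertex );
    count++;
}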
@ -1021,11 +1022,11 @@ SeqInsert
---------
Inserts an element in the middle of a sequence.
.. ocv:cfunction:: char* cvSeqInsert( CvSeq* seq, int beforeIndex, void* element=NULL )
.. ocv:cfunction:: schar* cvSeqInsert( CvSeq* seq, int before_index, const void* element=NULL )
:param seq: Sequence
:param beforeIndex: Index before which the element is inserted. Inserting before 0 (the minimal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPushFront` and inserting before ``seq->total`` (the maximal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPush` .
:param before_index: Index before which the element is inserted. Inserting before 0 (the minimal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPushFront` and inserting before ``seq->total`` (the maximal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPush` .
:param element: Inserted element
@ -1037,13 +1038,13 @@ SeqInsertSlice
--------------
Inserts an array in the middle of a sequence.
.. ocv:cfunction:: void cvSeqInsertSlice( CvSeq* seq, int beforeIndex, const CvArr* fromArr )
.. ocv:cfunction:: void cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr )
:param seq: Sequence
:param beforeIndex: Index before which the array is inserted
:param before_index: Index before which the array is inserted
:param fromArr: The array to take elements from
:param from_arr: The array to take elements from
The function inserts all
``fromArr``
@ -1109,7 +1110,7 @@ SeqPush
-------
Adds an element to the end of a sequence.
.. ocv:cfunction:: char* cvSeqPush( CvSeq* seq, void* element=NULL )
.. ocv:cfunction:: schar* cvSeqPush( CvSeq* seq, const void* element=NULL )
:param seq: Sequence
@ -1149,7 +1150,7 @@ SeqPushFront
------------
Adds an element to the beginning of a sequence.
.. ocv:cfunction:: char* cvSeqPushFront( CvSeq* seq, void* element=NULL )
.. ocv:cfunction:: schar* cvSeqPushFront( CvSeq* seq, const void* element=NULL )
:param seq: Sequence
@ -1163,7 +1164,7 @@ SeqPushMulti
------------
Pushes several elements to either end of a sequence.
.. ocv:cfunction:: void cvSeqPushMulti( CvSeq* seq, void* elements, int count, int in_front=0 )
.. ocv:cfunction:: void cvSeqPushMulti( CvSeq* seq, const void* elements, int count, int in_front=0 )
:param seq: Sequence
@ -1216,7 +1217,7 @@ SeqSearch
---------
Searches for an element in a sequence.
.. ocv:cfunction:: char* cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, int is_sorted, int* elem_idx, void* userdata=NULL )
.. ocv:cfunction:: schar* cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, int is_sorted, int* elem_idx, void* userdata=NULL )
:param seq: The sequence
@ -1325,9 +1326,9 @@ SetAdd
------
Occupies a node in the set.
.. ocv:cfunction:: int cvSetAdd( CvSet* setHeader, CvSetElem* elem=NULL, CvSetElem** inserted_elem=NULL )
.. ocv:cfunction:: int cvSetAdd( CvSet* set_header, CvSetElem* elem=NULL, CvSetElem** inserted_elem=NULL )
:param setHeader: Set
:param set_header: Set
:param elem: Optional input argument, an inserted element. If not NULL, the function copies the data to the allocated node (the MSB of the first integer field is cleared after copying).
@ -1346,9 +1347,9 @@ SetNew
------
Adds an element to a set (fast variant).
.. ocv:cfunction:: CvSetElem* cvSetNew( CvSet* setHeader )
.. ocv:cfunction:: CvSetElem* cvSetNew( CvSet* set_header )
:param setHeader: Set
:param set_header: Set
The function is an inline lightweight variant of
:ocv:cfunc:`SetAdd`
@ -1358,9 +1359,9 @@ SetRemove
---------
Removes an element from a set.
.. ocv:cfunction:: void cvSetRemove( CvSet* setHeader, int index )
.. ocv:cfunction:: void cvSetRemove( CvSet* set_header, int index )
:param setHeader: Set
:param set_header: Set
:param index: Index of the removed element
@ -1375,9 +1376,9 @@ SetRemoveByPtr
--------------
Removes a set element based on its pointer.
.. ocv:cfunction:: void cvSetRemoveByPtr( CvSet* setHeader, void* elem )
.. ocv:cfunction:: void cvSetRemoveByPtr( CvSet* set_header, void* elem )
:param setHeader: Set
:param set_header: Set
:param elem: Removed element
@ -1389,23 +1390,23 @@ SetSeqBlockSize
---------------
Sets up sequence block size.
.. ocv:cfunction:: void cvSetSeqBlockSize( CvSeq* seq, int deltaElems )
.. ocv:cfunction:: void cvSetSeqBlockSize( CvSeq* seq, int delta_elems )
:param seq: Sequence
:param deltaElems: Desirable sequence block size for elements
:param delta_elems: Desirable sequence block size for elements
The function affects memory allocation
granularity. When the free space in the sequence buffers has run out,
the function allocates the space for ``delta_elems`` sequence
elements. If this block immediately follows the one previously allocated,
the two blocks are concatenated; otherwise, a new sequence block is
created. Therefore, the bigger the parameter is, the lower the possible
sequence fragmentation, but the more space in the storage block is wasted. When
the sequence is created, the parameter ``delta_elems`` is set to
the default value of about 1K. The function can be called any time after
the sequence is created and affects future allocations. The function

File diff suppressed because it is too large

@ -48,41 +48,41 @@ CvFileNode
.. ocv:struct:: CvFileNode
File storage node. When an XML/YAML file is read, it is first parsed and stored in the memory as a hierarchical collection of nodes. Each node can be a "leaf", that is, contain a single number or a string, or be a collection of other nodes. Collections are also referred to as "structures" in the data writing functions. There can be named collections (mappings), where each element has a name and is accessed by name, and ordered collections (sequences), where elements do not have names but are accessed by index.
.. ocv:member:: int tag
type of the file node:
* CV_NODE_NONE - empty node
* CV_NODE_INT - an integer
* CV_NODE_REAL - a floating-point number
* CV_NODE_STR - text string
* CV_NODE_SEQ - a sequence
* CV_NODE_MAP - a mapping
The type of the node can be retrieved using the ``CV_NODE_TYPE(node->tag)`` macro.
.. ocv:member:: CvTypeInfo* info
optional pointer to the user type information. If you look at the matrix representation in XML and YAML, shown above, you may notice ``type_id="opencv-matrix"`` or ``!!opencv-matrix`` strings. They are used to specify that a certain element of a file is a representation of a data structure of a certain type ("opencv-matrix" corresponds to :ocv:struct:`CvMat`). When a file is parsed, such type identifiers are passed to :ocv:cfunc:`FindType` to find the type information, and the pointer to it is stored in the file node. See :ocv:struct:`CvTypeInfo` for more details.
.. ocv:member:: union data
the node data, declared as: ::
union
{
double f; /* scalar floating-point number */
int i; /* scalar integer number */
CvString str; /* text string */
CvSeq* seq; /* sequence (ordered collection of file nodes) */
struct CvMap* map; /* map (collection of named file nodes) */
} data;
..
Primitive nodes are read using :ocv:cfunc:`ReadInt`, :ocv:cfunc:`ReadReal` and :ocv:cfunc:`ReadString`. Sequences are read by iterating through ``node->data.seq`` (see "Dynamic Data Structures" section). Mappings are read using :ocv:cfunc:`GetFileNodeByName`. Nodes with the specified type (so that ``node->info != NULL``) can be read using :ocv:cfunc:`Read`.
CvAttrList
----------
@ -97,16 +97,16 @@ List of attributes. ::
struct CvAttrList* next; /* pointer to next chunk of the attributes list */
}
CvAttrList;
/* initializes CvAttrList structure */
inline CvAttrList cvAttrList( const char** attr=NULL, CvAttrList* next=NULL );
/* returns attribute value or 0 (NULL) if there is no such attribute */
const char* cvAttrValue( const CvAttrList* attr, const char* attr_name );
..
In the current implementation, attributes are used to pass extra parameters when writing user objects (see
:ocv:cfunc:`Write`). XML attributes inside tags are not supported, aside from the object type specification (``type_id`` attribute).
CvTypeInfo
@ -124,7 +124,7 @@ Type information. ::
const void* structPtr,
CvAttrList attributes );
typedef void* (CV_CDECL *CvCloneFunc)( const void* structPtr );
typedef struct CvTypeInfo
{
int flags; /* not used */
@ -132,7 +132,7 @@ Type information. ::
struct CvTypeInfo* prev; /* previous registered type in the list */
struct CvTypeInfo* next; /* next registered type in the list */
const char* type_name; /* type name, written to file storage */
/* methods */
CvIsInstanceFunc is_instance; /* checks if the passed object belongs to the type */
CvReleaseFunc release; /* releases object (memory etc.) */
@ -151,11 +151,11 @@ Clone
-----
Makes a clone of an object.
.. ocv:cfunction:: void* cvClone( const void* structPtr )
:param structPtr: The object to clone
.. ocv:cfunction:: void* cvClone( const void* struct_ptr )
:param struct_ptr: The object to clone
The function finds the type of a given object and calls ``clone`` with the passed object. Of course, if you know the object type, for example, ``structPtr`` is ``CvMat*``, it is faster to call the specific function, like :ocv:cfunc:`CloneMat`.
The function finds the type of a given object and calls ``clone`` with the passed object. Of course, if you know the object type, for example, ``struct_ptr`` is ``CvMat*``, it is faster to call the specific function, like :ocv:cfunc:`CloneMat`.
EndWriteStruct
--------------
@ -163,7 +163,7 @@ Finishes writing to a file node collection.
.. ocv:cfunction:: void cvEndWriteStruct(CvFileStorage* fs)
:param fs: File storage
.. seealso:: :ocv:cfunc:`StartWriteStruct`.
@ -171,9 +171,9 @@ FindType
--------
Finds a type by its name.
.. ocv:cfunction:: CvTypeInfo* cvFindType(const char* typeName)
:param typeName: Type name
.. ocv:cfunction:: CvTypeInfo* cvFindType( const char* type_name )
:param type_name: Type name
The function finds a registered type by its name. It returns NULL if there is no type with the specified name.
@ -189,15 +189,15 @@ GetFileNode
-----------
Finds a node in a map or file storage.
.. ocv:cfunction:: CvFileNode* cvGetFileNode( CvFileStorage* fs, CvFileNode* map, const CvStringHashNode* key, int createMissing=0 )
.. ocv:cfunction:: CvFileNode* cvGetFileNode( CvFileStorage* fs, CvFileNode* map, const CvStringHashNode* key, int create_missing=0 )
:param fs: File storage
:param map: The parent map. If it is NULL, the function searches a top-level node. If both ``map`` and ``key`` are NULLs, the function returns the root file node - a map that contains top-level nodes.
:param key: Unique pointer to the node name, retrieved with :ocv:cfunc:`GetHashedKey`
:param createMissing: Flag that specifies whether an absent node should be added to the map
:param create_missing: Flag that specifies whether an absent node should be added to the map
The function finds a file node. It is a faster version of :ocv:cfunc:`GetFileNodeByName`
(see :ocv:cfunc:`GetHashedKey` discussion). Also, the function can insert a new node, if it is not in the map yet.
@ -207,12 +207,12 @@ GetFileNodeByName
Finds a node in a map or file storage.
.. ocv:cfunction:: CvFileNode* cvGetFileNodeByName( const CvFileStorage* fs, const CvFileNode* map, const char* name)
:param fs: File storage
:param map: The parent map. If it is NULL, the function searches in all the top-level nodes (streams), starting with the first one.
:param name: The file node name
The function finds a file node by ``name``. The node is searched either in ``map`` or, if the pointer is NULL, among the top-level file storage nodes. Using this function for maps and :ocv:cfunc:`GetSeqElem` (or a sequence reader) for sequences, it is possible to navigate through the file storage. To speed up multiple queries for a certain key (e.g., in the case of an array of structures), one may use a combination of :ocv:cfunc:`GetHashedKey` and :ocv:cfunc:`GetFileNode`.
@ -223,7 +223,7 @@ Returns the name of a file node.
.. ocv:cfunction:: const char* cvGetFileNodeName( const CvFileNode* node )
:param node: File node
The function returns the name of a file node or NULL if the file node does not have a name or if ``node`` is ``NULL``.
@ -231,21 +231,21 @@ GetHashedKey
------------
Returns a unique pointer for a given name.
.. ocv:cfunction:: CvStringHashNode* cvGetHashedKey( CvFileStorage* fs, const char* name, int len=-1, int createMissing=0 )
.. ocv:cfunction:: CvStringHashNode* cvGetHashedKey( CvFileStorage* fs, const char* name, int len=-1, int create_missing=0 )
:param fs: File storage
:param name: Literal node name
:param len: Length of the name (if it is known a priori), or -1 if it needs to be calculated
:param createMissing: Flag that specifies whether an absent key should be added into the hash table
:param create_missing: Flag that specifies whether an absent key should be added into the hash table
The function returns a unique pointer for each particular file node name. This pointer can then be passed to the :ocv:cfunc:`GetFileNode` function, which is faster than :ocv:cfunc:`GetFileNodeByName` because it compares text strings by comparing pointers rather than the strings' content.
Consider the following example where an array of points is encoded as a sequence of 2-entry maps: ::
points:
- { x: 10, y: 10 }
- { x: 20, y: 20 }
@ -257,14 +257,14 @@ Consider the following example where an array of points is encoded as a sequence
Then, it is possible to get hashed "x" and "y" pointers to speed up decoding of the points. ::
#include "cxcore.h"
int main( int argc, char** argv )
{
CvFileStorage* fs = cvOpenFileStorage( "points.yml", 0, CV_STORAGE_READ );
CvStringHashNode* x_key = cvGetHashedNode( fs, "x", -1, 1 );
CvStringHashNode* y_key = cvGetHashedNode( fs, "y", -1, 1 );
CvFileNode* points = cvGetFileNodeByName( fs, 0, "points" );
if( CV_NODE_IS_SEQ(points->tag) )
{
CvSeq* seq = points->data.seq;
@ -312,10 +312,10 @@ GetRootFileNode
Retrieves one of the top-level nodes of the file storage.
.. ocv:cfunction:: CvFileNode* cvGetRootFileNode( const CvFileStorage* fs, int stream_index=0 )
:param fs: File storage
:param stream_index: Zero-based index of the stream. See :ocv:cfunc:`StartNextStream` . In most cases, there is only one stream in the file; however, there can be several.
The function returns one of the top-level file nodes. The top-level nodes do not have a name; they correspond to the streams that are stored one after another in the file storage. If the index is out of range, the function returns a NULL pointer, so all the top-level nodes can be iterated by subsequent calls to the function with ``stream_index=0,1,...``, until the NULL pointer is returned. This function
can be used as a base for recursive traversal of the file storage.
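For example, all the streams can be visited with a sketch like this: ::
for( int i = 0; ; i++ )
{
    CvFileNode* node = cvGetRootFileNode( fs, i );
    if( !node )
        break; // no more streams
    // ... process the i-th stream
}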
@ -325,16 +325,17 @@ Load
----
Loads an object from a file.
.. ocv:cfunction:: void* cvLoad( const char* filename, CvMemStorage* storage=NULL, const char* name=NULL, const char** realName=NULL )
.. ocv:cfunction:: void* cvLoad( const char* filename, CvMemStorage* memstorage=NULL, const char* name=NULL, const char** real_name=NULL )
.. ocv:pyoldfunction:: cv.Load(filename, storage=None, name=None)-> generic
:param filename: File name
:param storage: Memory storage for dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` . It is not used for matrices or images.
:param memstorage: Memory storage for dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` . It is not used for matrices or images.
:param name: Optional object name. If it is NULL, the first top-level object in the storage will be loaded.
:param realName: Optional output parameter that will contain the name of the loaded object (useful if ``name=NULL`` )
:param real_name: Optional output parameter that will contain the name of the loaded object (useful if ``name=NULL`` )
The function loads an object from a file. It basically reads the specified file, finds the first top-level node and calls :ocv:cfunc:`Read` for that node. If the file node does not have type information or the type information cannot be found by the type name, the function returns NULL. After the object is loaded, the file storage is closed and all the temporary buffers are deleted. Thus, to load a dynamic structure, such as a sequence, contour, or graph, one should pass a valid memory storage destination to the function.
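For example, a matrix previously saved with :ocv:cfunc:`Save` could be loaded with a sketch like this (the file name is illustrative): ::
CvMat* mat = (CvMat*)cvLoad( "my_matrix.xml" );
if( mat )
{
    // ... use the matrix
    cvReleaseMat( &mat ); // matrices are created in a heap, not in a storage
}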
@ -342,19 +343,19 @@ OpenFileStorage
---------------
Opens file storage for reading or writing data.
.. ocv:cfunction:: CvFileStorage* cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, int flags)
.. ocv:cfunction:: CvFileStorage* cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, int flags, const char* encoding=NULL )
:param filename: Name of the file associated with the storage
:param memstorage: Memory storage used for temporary data and for
storing dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` .
If it is NULL, a temporary memory storage is created and used.
:param flags: Can be one of the following:
* **CV_STORAGE_READ** the storage is open for reading
* **CV_STORAGE_WRITE** the storage is open for writing
The function opens file storage for reading or writing data. In the latter case, a new file is created or an existing file is rewritten. The type of the read or written file is determined by the filename extension: ``.xml`` for ``XML`` and ``.yml`` or ``.yaml`` for ``YAML``. The function returns a pointer to the :ocv:struct:`CvFileStorage` structure. If the file cannot be opened then the function returns ``NULL``.
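A minimal write-then-release sketch (the file name and the key are illustrative): ::
CvFileStorage* fs = cvOpenFileStorage( "data.yml", 0, CV_STORAGE_WRITE );
cvWriteInt( fs, "frame_count", 10 );
cvReleaseFileStorage( &fs );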
@ -363,12 +364,12 @@ Read
Decodes an object and returns a pointer to it.
.. ocv:cfunction:: void* cvRead( CvFileStorage* fs, CvFileNode* node, CvAttrList* attributes=NULL )
:param fs: File storage
:param node: The root object node
:param attributes: Unused parameter
The function decodes a user object (creates an object in a native representation from the file storage subtree) and returns it. The object to be decoded must be an instance of a registered type that supports the ``read`` method (see :ocv:struct:`CvTypeInfo`). The type of the object is determined by the type name that is encoded in the file. If the object is a dynamic structure, it is created either in the memory storage passed to :ocv:cfunc:`OpenFileStorage` or, if a NULL pointer was passed, in a temporary memory storage, which is released when :ocv:cfunc:`ReleaseFileStorage` is called. Otherwise, if the object is not a dynamic structure, it is created in a heap and should be released with a specialized function or by using the generic :ocv:cfunc:`Release`.
@ -378,14 +379,14 @@ ReadByName
Finds an object by name and decodes it.
.. ocv:cfunction:: void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, const char* name, CvAttrList* attributes=NULL )
:param fs: File storage
:param map: The parent map. If it is NULL, the function searches a top-level node.
:param name: The node name
:param attributes: Unused parameter
The function is a simple superposition of :ocv:cfunc:`GetFileNodeByName` and :ocv:cfunc:`Read`.
@ -393,29 +394,29 @@ ReadInt
-------
Retrieves an integer value from a file node.
.. ocv:cfunction:: int cvReadInt( const CvFileNode* node, int defaultValue=0 )
.. ocv:cfunction:: int cvReadInt( const CvFileNode* node, int default_value=0 )
:param node: File node
:param defaultValue: The value that is returned if ``node`` is NULL
:param default_value: The value that is returned if ``node`` is NULL
The function returns an integer that is represented by the file node. If the file node is NULL, the ``default_value`` is returned (thus, it is convenient to call the function right after :ocv:cfunc:`GetFileNode` without checking for a NULL pointer). If the file node has type ``CV_NODE_INT``, then ``node->data.i`` is returned. If the file node has type ``CV_NODE_REAL``, then ``node->data.f`` is converted to an integer and returned. Otherwise, an error is reported.
ReadIntByName
-------------
Finds a file node and returns its value.
.. ocv:cfunction:: int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, int defaultValue=0 )
.. ocv:cfunction:: int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, int default_value=0 )
:param fs: File storage
:param map: The parent map. If it is NULL, the function searches a top-level node.
:param name: The node name
:param defaultValue: The value that is returned if the file node is not found
:param default_value: The value that is returned if the file node is not found
The function is a simple superposition of :ocv:cfunc:`GetFileNodeByName` and :ocv:cfunc:`ReadInt`.
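For example (a sketch; the node name matches the write sketch shown for :ocv:cfunc:`OpenFileStorage`): ::
CvFileStorage* fs = cvOpenFileStorage( "data.yml", 0, CV_STORAGE_READ );
int frame_count = cvReadIntByName( fs, 0, "frame_count", 1 ); // 1 if absent
cvReleaseFileStorage( &fs );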
@ -425,13 +426,13 @@ Reads multiple numbers.
.. ocv:cfunction:: void cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, void* dst, const char* dt)
:param fs: File storage
:param src: The file node (a sequence) to read numbers from
:param dst: Pointer to the destination array
:param dt: Specification of each array element. It has the same format as in :ocv:cfunc:`WriteRawData` .
The function reads elements from a file node that represents a sequence of scalars.
@ -441,16 +442,16 @@ ReadRawDataSlice
Initializes file node sequence reader.
.. ocv:cfunction:: void cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, int count, void* dst, const char* dt )
:param fs: File storage
:param reader: The sequence reader. Initialize it with :ocv:cfunc:`StartReadRawData` .
:param count: The number of elements to read
:param dst: Pointer to the destination array
:param dt: Specification of each array element. It has the same format as in :ocv:cfunc:`WriteRawData` .
The function reads one or more elements from the file node, representing a sequence, to a user-specified array. The total number of read sequence elements is a product of ``count`` and the number of components in each array element. For example, if ``dt=2if``, the function will read ``count*3`` sequence elements. As with any sequence, some parts of the file node sequence can be skipped or read repeatedly by repositioning the reader using :ocv:cfunc:`SetSeqReaderPos`.
@ -459,27 +460,27 @@ ReadReal
--------
Retrieves a floating-point value from a file node.
.. ocv:cfunction:: double cvReadReal( const CvFileNode* node, double defaultValue=0. )
.. ocv:cfunction:: double cvReadReal( const CvFileNode* node, double default_value=0. )
:param node: File node
:param defaultValue: The value that is returned if ``node`` is NULL
:param default_value: The value that is returned if ``node`` is NULL
The function returns a floating-point value that is represented by the file node. If the file node is NULL, the ``default_value`` is returned (thus, it is convenient to call the function right after :ocv:cfunc:`GetFileNode` without checking for a NULL pointer). If the file node has type ``CV_NODE_REAL``, then ``node->data.f`` is returned. If the file node has type ``CV_NODE_INT``, then ``node->data.i`` is converted to floating-point and returned. Otherwise the result is not determined.
@ -489,19 +490,19 @@ ReadRealByName
--------------
Finds a file node and returns its value.
.. ocv:cfunction:: double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, double default_value=0. )
:param fs: File storage
:param map: The parent map. If it is NULL, the function searches a top-level node.
:param name: The node name
:param default_value: The value that is returned if the file node is not found
The function is a simple superposition of :ocv:cfunc:`GetFileNodeByName` and :ocv:cfunc:`ReadReal`.
@ -510,21 +511,21 @@ ReadString
----------
Retrieves a text string from a file node.
.. ocv:cfunction:: const char* cvReadString( const CvFileNode* node, const char* default_value=NULL )
:param node: File node
:param default_value: The value that is returned if ``node`` is NULL
The function returns a text string that is represented
by the file node. If the file node is NULL, the ``default_value``
is returned (thus, it is convenient to call the function right after
:ocv:cfunc:`GetFileNode` without checking for a NULL pointer). If
the file node has type ``CV_NODE_STR``, then ``node->data.str.ptr``
is returned. Otherwise the result is not determined.
@ -533,19 +534,19 @@ ReadStringByName
----------------
Finds a file node by its name and returns its value.
.. ocv:cfunction:: const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, const char* default_value=NULL )
:param fs: File storage
:param map: The parent map. If it is NULL, the function searches a top-level node.
:param name: The node name
:param default_value: The value that is returned if the file node is not found
The function is a simple superposition of :ocv:cfunc:`GetFileNodeByName` and :ocv:cfunc:`ReadString`.
@ -556,10 +557,10 @@ Registers a new type.
.. ocv:cfunction:: void cvRegisterType(const CvTypeInfo* info)
:param info: Type info structure
The function registers a new type, which is described by ``info``. The function creates a copy of the structure,
so the user should delete it after calling the function.
@ -569,11 +570,11 @@ Release
-------
Releases an object.
.. ocv:cfunction:: void cvRelease( void** struct_ptr )
:param struct_ptr: Double pointer to the object
The function finds the type of a given object and calls ``release`` with the double pointer.
@ -583,8 +584,8 @@ ReleaseFileStorage
Releases file storage.
.. ocv:cfunction:: void cvReleaseFileStorage(CvFileStorage** fs)
:param fs: Double pointer to the released file storage
The function closes the file associated with the storage and releases all the temporary structures. It must be called after all I/O operations with the storage are finished.
@ -593,20 +594,21 @@ Save
----
Saves an object to a file.
.. ocv:cfunction:: void cvSave( const char* filename, const void* struct_ptr, const char* name=NULL, const char* comment=NULL, CvAttrList attributes=cvAttrList() )
.. ocv:pyoldfunction:: cv.Save(filename, structPtr, name=None, comment=None)-> None
:param filename: File name
:param struct_ptr: Object to save
:param name: Optional object name. If it is NULL, the name will be formed from ``filename`` .
:param comment: Optional comment to put in the beginning of the file
:param attributes: Optional attributes passed to :ocv:cfunc:`Write`
The function saves an object to a file. It provides a simple interface to :ocv:cfunc:`Write`.
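For example, a one-call save of a matrix (a sketch, not from the original text): ::

    CvMat* mat = cvCreateMat( 3, 3, CV_32F );
    cvSetIdentity( mat );
    cvSave( "identity.xml", mat );  // the object name is derived from the file name
    cvReleaseMat( &mat );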
@ -617,7 +619,7 @@ Starts the next stream.
.. ocv:cfunction:: void cvStartNextStream(CvFileStorage* fs)
:param fs: File storage
The function finishes the currently written stream and starts the next stream. In the case of XML the file with multiple streams looks like this: ::
@ -646,11 +648,11 @@ Initializes the file node sequence reader.
.. ocv:cfunction:: void cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, CvSeqReader* reader)
:param fs: File storage
:param src: The file node (a sequence) to read numbers from
:param reader: Pointer to the sequence reader
The function initializes the sequence reader to read data from a file node. The initialized reader can be then passed to :ocv:cfunc:`ReadRawDataSlice`.
@ -659,31 +661,31 @@ StartWriteStruct
----------------
Starts writing a new structure.
.. ocv:cfunction:: void cvStartWriteStruct( CvFileStorage* fs, const char* name, int struct_flags, const char* type_name=NULL, CvAttrList attributes=cvAttrList() )
:param fs: File storage
:param name: Name of the written structure. The structure can be accessed by this name when the storage is read.
:param struct_flags: A combination of the following values:
* **CV_NODE_SEQ** the written structure is a sequence (see discussion of :ocv:struct:`CvFileStorage` ), that is, its elements do not have a name.
* **CV_NODE_MAP** the written structure is a map (see discussion of :ocv:struct:`CvFileStorage` ), that is, all its elements have names.
One and only one of the two above flags must be specified.
* **CV_NODE_FLOW** the optional flag that makes sense only for YAML streams. It means that the structure is written as a flow (not as a block), which is more compact. It is recommended to use this flag for structures or arrays whose elements are all scalars.
:param type_name: Optional parameter - the object type name. In
case of XML it is written as a ``type_id`` attribute of the
structure opening tag. In the case of YAML it is written after a colon
following the structure name (see the example in :ocv:struct:`CvFileStorage`
description). Mainly it is used with user objects. When the storage
is read, the encoded type name is used to determine the object type
(see :ocv:struct:`CvTypeInfo` and :ocv:cfunc:`FindType` ).
:param attributes: This parameter is not used in the current implementation
The function starts writing a compound structure (collection) that can be a sequence or a map. After all the structure fields, which can be scalars or structures, are written, :ocv:cfunc:`EndWriteStruct` should be called. The function can be used to group some objects or to implement the ``write`` function for some user object (see :ocv:struct:`CvTypeInfo`).
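A minimal sketch of writing a map (the node and field names are hypothetical; ``fs`` is an open storage in write mode): ::

    cvStartWriteStruct( fs, "camera", CV_NODE_MAP );
    cvWriteString( fs, "model", "example-cam", 0 );
    cvWriteInt( fs, "width", 640 );
    cvWriteInt( fs, "height", 480 );
    cvEndWriteStruct( fs );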
@ -692,9 +694,9 @@ TypeOf
------
Returns the type of an object.
.. ocv:cfunction:: CvTypeInfo* cvTypeOf( const void* struct_ptr )
:param struct_ptr: The object pointer
The function finds the type of a given object. It iterates through the list of registered types and calls the ``is_instance`` function/method for every type info structure with that object until one of them returns non-zero or until the whole list has been traversed. In the latter case, the function returns NULL.
@ -703,9 +705,9 @@ UnregisterType
--------------
Unregisters the type.
.. ocv:cfunction:: void cvUnregisterType( const char* type_name )
:param type_name: Name of an unregistered type
The function unregisters a type with a specified name. If the name is unknown, it is possible to locate the type info by an instance of the type using :ocv:cfunc:`TypeOf` or by iterating the type list, starting from :ocv:cfunc:`FirstType`, and then calling ``cvUnregisterType(info->type_name)``.
@ -715,12 +717,12 @@ Write
Writes an object to file storage.
.. ocv:cfunction:: void cvWrite( CvFileStorage* fs, const char* name, const void* ptr, CvAttrList attributes=cvAttrList() )
:param fs: File storage
:param name: Name of the written object. Should be NULL if and only if the parent structure is a sequence.
:param ptr: Pointer to the object
:param attributes: The attributes of the object. They are specific for each particular type (see the discussion below).
@ -731,37 +733,37 @@ Attributes are used to customize the writing procedure. The standard types suppo
#.
CvSeq
* **header_dt** description of user fields of the sequence header that follow CvSeq, or CvChain (if the sequence is a Freeman chain) or CvContour (if the sequence is a contour or point sequence)
* **dt** description of the sequence elements.
* **recursive** if the attribute is present and is not equal to "0" or "false", the whole tree of sequences (contours) is stored.
#.
CvGraph
* **header_dt** description of user fields of the graph header that follows CvGraph;
* **vertex_dt** description of user fields of graph vertices
* **edge_dt** description of user fields of graph edges (note that the edge weight is always written, so there is no need to specify it explicitly)
Below is the code that creates the YAML file shown in the ``CvFileStorage`` description:
::
#include "cxcore.h"
int main( int argc, char** argv )
{
CvMat* mat = cvCreateMat( 3, 3, CV_32F );
CvFileStorage* fs = cvOpenFileStorage( "example.yml", 0, CV_STORAGE_WRITE );
cvSetIdentity( mat );
cvWrite( fs, "A", mat, cvAttrList(0,0) );
cvReleaseFileStorage( &fs );
cvReleaseMat( &mat );
return 0;
}
@ -774,13 +776,13 @@ WriteComment
------------
Writes a comment.
.. ocv:cfunction:: void cvWriteComment( CvFileStorage* fs, const char* comment, int eol_comment )
:param fs: File storage
:param comment: The written comment, single-line or multi-line
:param eol_comment: If non-zero, the function tries to put the comment at the end of current line. If the flag is zero, if the comment is multi-line, or if it does not fit at the end of the current line, the comment starts a new line.
The function writes a comment into file storage. The comments are skipped when the storage is read.
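For illustration (a sketch under the assumption that ``fs`` is open for writing): ::

    cvWriteComment( fs, "generated automatically; do not edit", 0 ); // own line
    cvWriteInt( fs, "width", 640 );
    cvWriteComment( fs, "pixels", 1 ); // try to append to the current line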
@ -789,14 +791,14 @@ WriteFileNode
Writes a file node to another file storage.
.. ocv:cfunction:: void cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, const CvFileNode* node, int embed )
:param fs: Destination file storage
:param new_node_name: New name of the file node in the destination file storage. To keep the existing name, use :ocv:cfunc:`cvGetFileNodeName`
:param node: The written node
:param embed: If the written node is a collection and this parameter is not zero, no extra level of hierarchy is created. Instead, all the elements of ``node`` are written into the currently written structure. Of course, map elements can only be embedded into another map, and sequence elements can only be embedded into another sequence.
The function writes a copy of a file node to file storage. Possible applications of the function are merging several file storages into one and conversion between XML and YAML formats.
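A hedged sketch of such a YAML-to-XML conversion (file and node names are assumptions): ::

    CvFileStorage* src = cvOpenFileStorage( "in.yml",  0, CV_STORAGE_READ );
    CvFileStorage* dst = cvOpenFileStorage( "out.xml", 0, CV_STORAGE_WRITE );
    CvFileNode* node = cvGetFileNodeByName( src, 0, "cameraMatrix" );
    if( node ) // keep the original node name in the destination storage
        cvWriteFileNode( dst, cvGetFileNodeName(node), node, 0 );
    cvReleaseFileStorage( &dst );
    cvReleaseFileStorage( &src );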
@ -806,11 +808,11 @@ Writes an integer value.
.. ocv:cfunction:: void cvWriteInt( CvFileStorage* fs, const char* name, int value)
:param fs: File storage
:param name: Name of the written value. Should be NULL if and only if the parent structure is a sequence.
:param value: The written value
The function writes a single integer value (with or without a name) to the file storage.
@ -820,29 +822,29 @@ WriteRawData
Writes multiple numbers.
.. ocv:cfunction:: void cvWriteRawData( CvFileStorage* fs, const void* src, int len, const char* dt )
:param fs: File storage
:param src: Pointer to the written array
:param len: Number of the array elements to write
:param dt: Specification of each array element that has the following format ``([count]{'u'|'c'|'w'|'s'|'i'|'f'|'d'})...``
where the characters correspond to fundamental C types:
* **u** 8-bit unsigned number
* **c** 8-bit signed number
* **w** 16-bit unsigned number
* **s** 16-bit signed number
* **i** 32-bit signed number
* **f** single precision floating-point number
* **d** double precision floating-point number
* **r** pointer, 32 lower bits of which are written as a signed integer. The type can be used to store structures with links between the elements. ``count`` is the optional counter of values of a given type. For
example, ``2if`` means that each array element is a structure
@ -854,9 +856,9 @@ Writes multiple numbers.
The function writes an array, whose elements consist
of single or multiple numbers. The function call can be replaced with
a loop containing a few :ocv:cfunc:`WriteInt` and :ocv:cfunc:`WriteReal`
calls, but
a single call is more efficient. Note that because none of the elements
@ -868,12 +870,12 @@ WriteReal
Writes a floating-point value.
.. ocv:cfunction:: void cvWriteReal( CvFileStorage* fs, const char* name, double value )
:param fs: File storage
:param name: Name of the written value. Should be NULL if and only if the parent structure is a sequence.
:param value: The written value
The function writes a single floating-point value (with or without a name) to file storage. Special values are encoded as follows: NaN (Not A Number) as .NaN, infinity as +.Inf or -.Inf.
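For instance (a sketch; assumes ``<limits>`` is included and ``fs`` is open for writing): ::

    cvWriteReal( fs, "threshold", 0.75 );
    cvWriteReal( fs, "invalid", std::numeric_limits<double>::quiet_NaN() ); // stored as .NaN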
@ -900,12 +902,12 @@ Writes a text string.
.. ocv:cfunction:: void cvWriteString( CvFileStorage* fs, const char* name, const char* str, int quote=0 )
:param fs: File storage
:param name: Name of the written string. Should be NULL if and only if the parent structure is a sequence.
:param str: The written text string
:param quote: If non-zero, the written string is put in quotes, regardless of whether they are required. Otherwise, if the flag is zero, quotes are used only when they are required (e.g. when the string starts with a digit or contains spaces).
The function writes a text string to file storage.

File diff suppressed because it is too large

@ -93,9 +93,9 @@ Computes the cube root of an argument.
.. ocv:pyfunction:: cv2.cubeRoot(val) -> retval
.. ocv:cfunction:: float cvCbrt( float value )
.. ocv:pyoldfunction:: cv.Cbrt(value)-> float
:param val: A function argument.
@ -151,7 +151,7 @@ Determines if the argument is Infinity.
.. ocv:cfunction:: int cvIsInf(double value)
.. ocv:pyoldfunction:: cv.IsInf(value)-> int
:param value: The input floating-point value
The function returns 1 if the argument is a plus or minus infinity (as defined by IEEE754 standard) and 0 otherwise.
@ -162,7 +162,7 @@ Determines if the argument is Not A Number.
.. ocv:cfunction:: int cvIsNaN(double value)
.. ocv:pyoldfunction:: cv.IsNaN(value)-> int
:param value: The input floating-point value
The function returns 1 if the argument is Not A Number (as defined by IEEE754 standard), 0 otherwise.
@ -182,12 +182,12 @@ Signals an error and raises an exception.
.. ocv:function:: void error( const Exception& exc )
.. ocv:cfunction:: void cvError( int status, const char* func_name, const char* err_msg, const char* file_name, int line )
:param exc: Exception to throw.
:param status: Error code. Normally, it is a negative value. The list of pre-defined error codes can be found in ``cxerror.h`` .
:param err_msg: Text of the error message.
:param args: ``printf`` -like formatted error message in parentheses.
@ -209,7 +209,7 @@ The macro ``CV_Error_`` can be used to construct an error message on-fly to incl
Exception
---------
.. ocv:class:: Exception : public std::exception
Exception class passed to an error. ::
@ -244,7 +244,8 @@ fastMalloc
--------------
Allocates an aligned memory buffer.
.. ocv:function:: void* fastMalloc( size_t bufSize )
.. ocv:cfunction:: void* cvAlloc( size_t size )
:param size: Allocated buffer size.
@ -261,7 +262,7 @@ Deallocates a memory buffer.
.. ocv:cfunction:: void cvFree( void** pptr )
:param pptr: Double pointer to the allocated buffer
The function deallocates the buffer allocated with :ocv:func:`fastMalloc` . If NULL pointer is passed, the function does nothing. C version of the function clears the pointer ``*pptr`` to avoid problems with double memory deallocation.
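A minimal usage sketch (the buffer size is chosen arbitrarily): ::

    // aligned allocation suitable for vectorized code; pair with fastFree
    float* buf = (float*)cv::fastMalloc( 1024 * sizeof(float) );
    // ... use buf ...
    cv::fastFree( buf );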
@ -276,7 +277,7 @@ Returns a text string formatted using the ``printf``\ -like expression.
:param fmt: ``printf`` -compatible formatting specifiers.
The function acts like ``sprintf`` but forms and returns an STL string. It can be used to form an error message in the
:ocv:class:`Exception` constructor.
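For example (a sketch; the error code and message are illustrative): ::

    int badArg = -5;
    CV_Error( CV_StsBadArg, cv::format("argument must be non-negative, got %d", badArg) );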
@ -286,10 +287,10 @@ Returns true if the specified feature is supported by the host hardware.
.. ocv:function:: bool checkHardwareSupport(int feature)
.. ocv:cfunction:: int cvCheckHardwareSupport(int feature)
.. ocv:pyfunction:: cv2.checkHardwareSupport(feature) -> retval
:param feature: The feature of interest, one of:
* ``CV_CPU_MMX`` - MMX
* ``CV_CPU_SSE`` - SSE
* ``CV_CPU_SSE2`` - SSE 2
@ -312,7 +313,7 @@ The function returns the number of threads that is used by OpenCV.
.. seealso::
:ocv:func:`setNumThreads`,
:ocv:func:`getThreadNum`
@ -411,7 +412,7 @@ The function sets the number of threads used by OpenCV in parallel OpenMP region
.. seealso::
:ocv:func:`getNumThreads`,
:ocv:func:`getThreadNum`
@ -419,13 +420,13 @@ setUseOptimized
-----------------
Enables or disables the optimized code.
.. ocv:function:: void setUseOptimized(bool onoff)
.. ocv:cfunction:: int cvUseOptimized( int on_off )
.. ocv:pyfunction:: cv2.setUseOptimized(onoff) -> None
:param on_off: The boolean flag specifying whether the optimized code should be used (``on_off=true``) or not (``on_off=false``).
The function can be used to dynamically turn on and off optimized code (code that uses SSE2, AVX, and other instructions on the platforms that support it). It sets a global flag that is further checked by OpenCV functions. Since the flag is not checked in the inner OpenCV loops, it is only safe to call the function on the very top level in your application where you can be sure that no other OpenCV function is currently executed.
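A typical pattern, sketched under that constraint: ::

    cv::setUseOptimized( false ); // compare against the reference implementation
    // ... run the timing experiment ...
    bool enabled = cv::useOptimized(); // query the current global flag
    cv::setUseOptimized( true );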

@ -18,9 +18,9 @@ Here is an example: ::
#include "opencv2/opencv.hpp"
#include <time.h>
using namespace cv;
int main(int, char** argv)
{
FileStorage fs("test.yml", FileStorage::WRITE);
@ -76,18 +76,18 @@ As an exercise, you can replace ".yml" with ".xml" in the sample above and see,
Several things can be noted by looking at the sample code and the output:
*
The produced YAML (and XML) consists of heterogeneous collections that can be nested. There are 2 types of collections: named collections (mappings) and unnamed collections (sequences). In mappings each element has a name and is accessed by name. This is similar to structures and ``std::map`` in C/C++ and dictionaries in Python. In sequences elements do not have names, they are accessed by indices. This is similar to arrays and ``std::vector`` in C/C++ and lists, tuples in Python. "Heterogeneous" means that elements of each single collection can have different types.
The top-level collection in YAML/XML is a mapping. Each matrix is stored as a mapping, and the matrix elements are stored as a sequence. Then, there is a sequence of features, where each feature is represented as a mapping, and its lbp value is stored in a nested sequence.
*
When you write to a mapping (a structure), you write the element name followed by its value. When you write to a sequence, you simply write the elements one by one. OpenCV data structures (such as cv::Mat) are written in exactly the same way as simple C data structures - using the ``<<`` operator.
*
To write a mapping, you first write the special string **"{"** to the storage, then write the elements as pairs (``fs << <element_name> << <element_value>``) and then write the closing **"}"**.
*
To write a sequence, you first write the special string **"["**, then write the elements, then write the closing **"]"**.
*
In YAML (but not XML), mappings and sequences can be written in a compact Python-like inline form. In the sample above, the matrix elements, as well as each feature (including its lbp value), are stored in such inline form. To store a mapping/sequence in a compact form, put ":" after the opening character, e.g. use **"{:"** instead of **"{"** and **"[:"** instead of **"["**. When the data is written to XML, those extra ":" are ignored; see the sketch after this list.
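The following sketch (the file and key names are made up for illustration) combines these rules: ::

    FileStorage fs("sketch.yml", FileStorage::WRITE);
    fs << "settings" << "{";                       // open a mapping
    fs << "iterations" << 10 << "eps" << 0.1;
    fs << "}";                                     // close the mapping
    fs << "values" << "[:" << 1 << 2 << 3 << "]";  // compact inline sequence
    fs.release();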
@ -99,38 +99,38 @@ To read the previously written XML or YAML file, do the following:
#.
Open the file storage using :ocv:func:`FileStorage::FileStorage` constructor or :ocv:func:`FileStorage::open` method. In the current implementation the whole file is parsed and the whole representation of file storage is built in memory as a hierarchy of file nodes (see :ocv:class:`FileNode`)
#.
Read the data you are interested in. Use :ocv:func:`FileStorage::operator []`, :ocv:func:`FileNode::operator []` and/or :ocv:class:`FileNodeIterator`.
#.
Close the storage using :ocv:func:`FileStorage::release`.
Here is how to read the file created by the code sample above: ::
FileStorage fs2("test.yml", FileStorage::READ);
// first method: use (type) operator on FileNode.
int frameCount = (int)fs2["frameCount"];
std::string date;
// second method: use FileNode::operator >>
fs2["calibrationDate"] >> date;
Mat cameraMatrix2, distCoeffs2;
fs2["cameraMatrix"] >> cameraMatrix2;
fs2["distCoeffs"] >> distCoeffs2;
cout << "frameCount: " << frameCount << endl
<< "calibration date: " << date << endl
<< "camera matrix: " << cameraMatrix2 << endl
<< "distortion coeffs: " << distCoeffs2 << endl;
FileNode features = fs2["features"];
FileNodeIterator it = features.begin(), it_end = features.end();
int idx = 0;
std::vector<uchar> lbpval;
// iterate through a sequence using FileNodeIterator
for( ; it != it_end; ++it, idx++ )
{
@ -189,7 +189,7 @@ Checks whether the file is opened.
.. ocv:function:: bool FileStorage::isOpened() const
:returns: ``true`` if the object is associated with the current file and ``false`` otherwise.
It is a good practice to call this method after you tried to open a file.
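For example (a sketch with a hypothetical file name): ::

    FileStorage fs("config.yml", FileStorage::READ);
    if( !fs.isOpened() )
    {
        std::cerr << "failed to open config.yml" << std::endl;
        return -1; // or handle the error in another way
    }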
@ -254,22 +254,22 @@ Writes multiple numbers.
:param fmt: Specification of each array element that has the following format ``([count]{'u'|'c'|'w'|'s'|'i'|'f'|'d'})...`` where the characters correspond to fundamental C++ types:
* **u** 8-bit unsigned number
* **c** 8-bit signed number
* **w** 16-bit unsigned number
* **s** 16-bit signed number
* **i** 32-bit signed number
* **f** single precision floating-point number
* **d** double precision floating-point number
* **r** pointer, 32 lower bits of which are written as a signed integer. The type can be used to store structures with links between the elements.
``count`` is the optional counter of values of a given type. For example, ``2if`` means that each array element is a structure of 2 integers, followed by a single-precision floating-point number. The equivalent notations of the above specification are ' ``iif`` ', ' ``2i1f`` ' and so forth. Other examples: ``u`` means that the array consists of bytes, and ``2d`` means the array consists of pairs of doubles.
:param vec: Pointer to the written array.
@ -431,7 +431,7 @@ Checks whether the node is empty.
FileNode::isNone
----------------
Checks whether the node is a "none" object
.. ocv:function:: bool FileNode::isNone() const
@ -459,7 +459,7 @@ Checks whether the node is a mapping.
FileNode::isInt
---------------
Checks whether the node is an integer.
.. ocv:function:: bool FileNode::isInt() const
:returns: ``true`` if the node is an integer.
@ -544,7 +544,7 @@ Returns the node content as text string.
.. ocv:function:: FileNode::operator string() const
:returns: The node content as a text string.
FileNode::operator*
-------------------
@ -663,7 +663,7 @@ FileNodeIterator::operator +=
-----------------------------
Moves iterator forward by the specified offset.
.. ocv:function:: FileNodeIterator& FileNodeIterator::operator +=( int ofs )
:param ofs: Offset (possibly negative) to move the iterator.
@ -672,7 +672,7 @@ FileNodeIterator::operator -=
-----------------------------
Moves iterator backward by the specified offset (possibly negative).
.. ocv:function:: FileNodeIterator& FileNodeIterator::operator -=( int ofs )
:param ofs: Offset (possibly negative) to move the iterator.

File diff suppressed because it is too large

@ -1130,7 +1130,7 @@ CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z )
/******************************** CvSize's & CvBox **************************************/
typedef struct CvSize
{
int width;
int height;

@ -140,11 +140,11 @@ bool clipLine( Rect img_rect, Point& pt1, Point& pt2 )
pt1 -= tl; pt2 -= tl;
bool inside = clipLine(img_rect.size(), pt1, pt2);
pt1 += tl; pt2 += tl;
return inside;
}
/*
Initializes line iterator.
Returns number of points on the line or negative number if error.
*/
@ -195,7 +195,7 @@ LineIterator::LineIterator(const Mat& img, Point pt1, Point pt2,
step = (step ^ s) - s;
s = dy > dx ? -1 : 0;
/* conditional swaps */
dx ^= dy & s;
dy ^= dx & s;
@ -208,7 +208,7 @@ LineIterator::LineIterator(const Mat& img, Point pt1, Point pt2,
if( connectivity == 8 )
{
assert( dx >= 0 && dy >= 0 );
err = dx - (dy + dy);
plusDelta = dx + dx;
minusDelta = -(dy + dy);
@ -219,7 +219,7 @@ LineIterator::LineIterator(const Mat& img, Point pt1, Point pt2,
else /* connectivity == 4 */
{
assert( dx >= 0 && dy >= 0 );
err = 0;
plusDelta = (dx + dx) + (dy + dy);
minusDelta = -(dy + dy);
@ -227,7 +227,7 @@ LineIterator::LineIterator(const Mat& img, Point pt1, Point pt2,
minusStep = bt_pix;
count = dx + dy + 1;
}
this->ptr0 = img.data;
this->step = (int)img.step;
this->elemSize = bt_pix0;
@ -621,10 +621,10 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
tptr[1] = (uchar)cg; \
tptr[2] = (uchar)cr; \
}
ICV_PUT_POINT((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT,
(pt2.y + (XY_ONE >> 1)) >> XY_SHIFT);
if( ax > ay )
{
pt1.x >>= XY_SHIFT;
@ -640,7 +640,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
else
{
pt1.y >>= XY_SHIFT;
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x >> XY_SHIFT, pt1.y);
@ -664,8 +664,8 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
}
ICV_PUT_POINT((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT,
(pt2.y + (XY_ONE >> 1)) >> XY_SHIFT);
if( ax > ay )
{
pt1.x >>= XY_SHIFT;
@ -681,7 +681,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
else
{
pt1.y >>= XY_SHIFT;
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x >> XY_SHIFT, pt1.y);
@ -690,7 +690,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
ecount--;
}
}
#undef ICV_PUT_POINT
}
else
@ -706,12 +706,12 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
}
ICV_PUT_POINT((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT,
(pt2.y + (XY_ONE >> 1)) >> XY_SHIFT);
if( ax > ay )
{
pt1.x >>= XY_SHIFT;
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x, pt1.y >> XY_SHIFT);
@ -723,7 +723,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
else
{
pt1.y >>= XY_SHIFT;
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x >> XY_SHIFT, pt1.y);
@ -732,7 +732,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
ecount--;
}
}
#undef ICV_PUT_POINT
}
}
@ -830,7 +830,7 @@ sincos( int angle, float& cosval, float& sinval )
cosval = SinTable[450 - angle];
}
/*
constructs polygon that represents elliptic arc.
*/
void ellipse2Poly( Point center, Size axes, int angle,
@ -880,7 +880,7 @@ void ellipse2Poly( Point center, Size axes, int angle,
angle = arc_end;
if( angle < 0 )
angle += 360;
x = size_a * SinTable[450-angle];
y = size_b * SinTable[angle];
Point pt;
@ -922,7 +922,7 @@ EllipseEx( Mat& img, Point center, Size axes,
/****************************************************************************************\
*                                     Polygons filling                                  *
\****************************************************************************************/
/* helper macros: filling horizontal row */
@ -1010,7 +1010,7 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
LineAA( img, p0, p, color );
p0 = p;
}
xmin = (xmin + delta) >> shift;
xmax = (xmax + delta) >> shift;
ymin = (ymin + delta) >> shift;
@ -1118,7 +1118,7 @@ CollectPolyEdges( Mat& img, const Point* v, int count, vector<PolyEdge>& edges,
{
Point t0, t1;
PolyEdge edge;
pt1 = v[i];
pt1.x = (pt1.x + offset.x) << (XY_SHIFT - shift);
pt1.y = (pt1.y + delta) >> shift;
@ -1337,7 +1337,7 @@ Circle( Mat& img, Point center, int radius, const void* color, int fill )
{
uchar *tptr0 = ptr + y11 * step;
uchar *tptr1 = ptr + y12 * step;
if( !fill )
{
ICV_PUT_POINT( tptr0, x11 );
@ -1374,7 +1374,7 @@ Circle( Mat& img, Point center, int radius, const void* color, int fill )
x11 = std::max( x11, 0 );
x12 = MIN( x12, size.width - 1 );
}
if( (unsigned)y11 < (unsigned)size.height )
{
uchar *tptr = ptr + y11 * step;
@ -1523,7 +1523,7 @@ ThickLine( Mat& img, Point p0, Point p1, const void* color,
Point center;
center.x = (p0.x + (XY_ONE>>1)) >> XY_SHIFT;
center.y = (p0.y + (XY_ONE>>1)) >> XY_SHIFT;
Circle( img, center, (thickness + (XY_ONE>>1)) >> XY_SHIFT, color, 1 );
}
else
{
@ -1544,7 +1544,7 @@ PolyLine( Mat& img, const Point* v, int count, bool is_closed,
{
if( !v || count <= 0 )
return;
int i = is_closed ? count - 1 : 0;
int flags = 2 + !is_closed;
Point p0;
@ -1575,7 +1575,7 @@ void line( Mat& img, Point pt1, Point pt2, const Scalar& color,
double buf[4];
scalarToRawData( color, buf, img.type(), 0 );
ThickLine( img, pt1, pt2, buf, thickness, line_type, 3, shift );
}
void rectangle( Mat& img, Point pt1, Point pt2,
@ -1606,7 +1606,7 @@ void rectangle( Mat& img, Point pt1, Point pt2,
FillConvexPoly( img, pt, 4, buf, lineType, shift );
}
void rectangle( Mat& img, Rect rec,
const Scalar& color, int thickness,
int lineType, int shift )
@ -1617,7 +1617,7 @@ void rectangle( Mat& img, Rect rec,
color, thickness, lineType, shift );
}
void circle( Mat& img, Point center, int radius,
const Scalar& color, int thickness, int line_type, int shift )
{
@ -1667,25 +1667,25 @@ void ellipse( Mat& img, Point center, Size axes,
EllipseEx( img, center, axes, _angle, _start_angle,
_end_angle, buf, thickness, line_type );
}
void ellipse(Mat& img, const RotatedRect& box, const Scalar& color,
int thickness, int lineType)
{
if( lineType == CV_AA && img.depth() != CV_8U )
lineType = 8;
CV_Assert( box.size.width >= 0 && box.size.height >= 0 &&
thickness <= 255 );
double buf[4];
scalarToRawData(color, buf, img.type(), 0);
int _angle = cvRound(box.angle);
Point center(cvRound(box.center.x*(1 << XY_SHIFT)),
cvRound(box.center.y*(1 << XY_SHIFT)));
Size axes(cvRound(box.size.width*(1 << (XY_SHIFT - 1))),
cvRound(box.size.height*(1 << (XY_SHIFT - 1))));
EllipseEx( img, center, axes, _angle, 0, 360, buf, thickness, lineType );
}
void fillConvexPoly( Mat& img, const Point* pts, int npts,
@ -1875,12 +1875,12 @@ static const int HersheyScriptComplex[] = {
2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676,
2225, 2229, 2226, 2246 };
static const int* getFontData(int fontFace)
{
bool isItalic = (fontFace & FONT_ITALIC) != 0;
const int* ascii = 0;
switch( fontFace & 15 )
{
case FONT_HERSHEY_SIMPLEX:
@ -1912,15 +1912,15 @@ static const int* getFontData(int fontFace)
}
return ascii;
}
void putText( Mat& img, const string& text, Point org,
int fontFace, double fontScale, Scalar color,
int thickness, int line_type, bool bottomLeftOrigin )
{
const int* ascii = getFontData(fontFace);
double buf[4];
scalarToRawData(color, buf, img.type(), 0);
@ -1959,7 +1959,7 @@ void putText( Mat& img, const string& text, Point org,
if( *ptr == ' ' || !*ptr )
{
if( pts.size() > 1 )
PolyLine( img, &pts[0], (int)pts.size(), false, buf, thickness, line_type, XY_SHIFT );
if( !*ptr++ )
break;
pts.resize(0);
@ -2030,7 +2030,7 @@ void cv::fillPoly(InputOutputArray _img, InputArrayOfArrays pts,
AutoBuffer<int> _npts(ncontours);
Point** ptsptr = _ptsptr;
int* npts = _npts;
for( i = 0; i < ncontours; i++ )
{
Mat p = pts.getMat(i);
@ -2056,7 +2056,7 @@ void cv::polylines(InputOutputArray _img, InputArrayOfArrays pts,
AutoBuffer<int> _npts(ncontours);
Point** ptsptr = _ptsptr;
int* npts = _npts;
for( i = 0; i < ncontours; i++ )
{
Mat p = pts.getMat(manyContours ? i : -1);
@ -2078,7 +2078,7 @@ static const int CodeDeltas[8][2] =
CV_IMPL void
cvDrawContours( void* _img, CvSeq* contour,
CvScalar _externalColor, CvScalar _holeColor,
int maxLevel, int thickness,
int line_type, CvPoint _offset )
{
@ -2104,7 +2104,7 @@ cvDrawContours( void* _img, CvSeq* contour,
maxLevel = MAX(maxLevel, INT_MIN+2);
maxLevel = MIN(maxLevel, INT_MAX-1);
if( maxLevel < 0 )
{
h_next = contour->h_next;
@ -2148,7 +2148,7 @@ cvDrawContours( void* _img, CvSeq* contour,
pts.push_back(pt);
prev_pt = pt;
}
pt.x += CodeDeltas[(int)code][0];
pt.y += CodeDeltas[(int)code][1];
}
@ -2166,7 +2166,7 @@ cvDrawContours( void* _img, CvSeq* contour,
CV_Assert( elem_type == CV_32SC2 );
cv::Point pt1, pt2;
int shift = 0;
count -= !CV_IS_SEQ_CLOSED(contour);
CV_READ_SEQ_ELEM( pt1, reader );
pt1 += offset;
@ -2218,7 +2218,7 @@ CV_IMPL CvScalar
cvColorToScalar( double packed_color, int type )
{
CvScalar scalar;
if( CV_MAT_DEPTH( type ) == CV_8U )
{
int icolor = cvRound( packed_color );

@ -13,7 +13,7 @@ descriptor extractors inherit the
DescriptorExtractor
-------------------
.. ocv:class:: DescriptorExtractor : public Algorithm
Abstract base class for computing descriptors for image keypoints. ::
@ -65,25 +65,6 @@ Computes the descriptors for a set of keypoints detected in an image (first vari
:param descriptors: Computed descriptors. In the second variant of the method, ``descriptors[i]`` are the descriptors computed for the keypoint set ``keypoints[i]``. Row ``j`` of ``descriptors`` (or of ``descriptors[i]``) is the descriptor for the ``j``-th keypoint.
DescriptorExtractor::read
-----------------------------
Reads the object of a descriptor extractor from a file node.
.. ocv:function:: void DescriptorExtractor::read( const FileNode& fn )
:param fn: File node from which the detector is read.
DescriptorExtractor::write
------------------------------
Writes the object of a descriptor extractor to a file storage.
.. ocv:function:: void DescriptorExtractor::write( FileStorage& fs ) const
:param fs: File storage where the detector is written.
DescriptorExtractor::create
-------------------------------
@ -107,7 +88,7 @@ for example: ``"OpponentSIFT"`` .
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor : public DescriptorExtractor
Class adapting a descriptor extractor to compute descriptors in the Opponent Color Space
(refer to Van de Sande et al., CGIV 2008 *Color Descriptors for Object Category Recognition*).
@ -132,7 +113,7 @@ them into a single color descriptor. ::
BriefDescriptorExtractor
------------------------
.. ocv:class:: BriefDescriptorExtractor : public DescriptorExtractor
Class for computing BRIEF descriptors described in a paper of Calonder M., Lepetit V.,
Strecha C., Fua P. *BRIEF: Binary Robust Independent Elementary Features* ,

@ -11,7 +11,7 @@ descriptor matchers inherit the
DMatch
------
.. ocv:struct:: DMatch
Class for matching keypoint descriptors: query descriptor index,
train descriptor index, train image index, and distance between descriptors. ::
@ -40,7 +40,7 @@ train descriptor index, train image index, and distance between descriptors. ::
DescriptorMatcher
-----------------
.. ocv:class:: DescriptorMatcher : public Algorithm
Abstract base class for matching keypoint descriptors. It has two groups
of match methods: for matching descriptors of an image with another image or
@ -111,7 +111,7 @@ Returns a constant link to the train descriptor collection ``trainDescCollection
.. ocv:function:: const vector<Mat>& DescriptorMatcher::getTrainDescriptors() const
@ -167,7 +167,7 @@ Finds the best match for each descriptor from a query set.
:param masks: Set of masks. Each ``masks[i]`` specifies permissible matches between the input query descriptors and stored train descriptors from the i-th image ``trainDescCollection[i]``.
In the first variant of this method, the train descriptors are passed as an input argument. In the second variant of the method, train descriptors collection that was set by ``DescriptorMatcher::add`` is used. Optional mask (or masks) can be passed to specify which query and training descriptors can be matched. Namely, ``queryDescriptors[i]`` can be matched with ``trainDescriptors[j]`` only if ``mask.at<uchar>(i,j)`` is non-zero.
@ -193,7 +193,7 @@ Finds the k best matches for each descriptor from a query set.
:param compactResult: Parameter used when the mask (or masks) is not empty. If ``compactResult`` is false, the ``matches`` vector has the same size as ``queryDescriptors`` rows. If ``compactResult`` is true, the ``matches`` vector does not contain matches for fully masked-out query descriptors.
These extended variants of :ocv:func:`DescriptorMatcher::match` methods find several best matches for each query descriptor. The matches are returned in the distance increasing order. See :ocv:func:`DescriptorMatcher::match` for the details about query and train descriptors.
@ -218,7 +218,7 @@ For each query descriptor, finds the training descriptors not farther than the s
:param compactResult: Parameter used when the mask (or masks) is not empty. If ``compactResult`` is false, the ``matches`` vector has the same size as ``queryDescriptors`` rows. If ``compactResult`` is true, the ``matches`` vector does not contain matches for fully masked-out query descriptors.
:param maxDistance: Threshold for the distance between matched descriptors.
For each query descriptor, the methods find such training descriptors that the distance between the query descriptor and the training descriptor is equal or smaller than ``maxDistance``. Found matches are returned in the distance increasing order.
@ -227,7 +227,7 @@ DescriptorMatcher::clone
----------------------------
Clones the matcher.
.. ocv:function:: Ptr<DescriptorMatcher> DescriptorMatcher::clone( bool emptyTrainData=false )
:param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters but with empty train data.
@ -241,15 +241,15 @@ Creates a descriptor matcher of a given type with the default parameters (using
:param descriptorMatcherType: Descriptor matcher type. Now the following matcher types are supported:
* ``BruteForce`` (it uses ``L2`` )
* ``BruteForce-L1``
* ``BruteForce-Hamming``
* ``BruteForce-Hamming(2)``
* ``FlannBased``
@ -258,25 +258,25 @@ Creates a descriptor matcher of a given type with the default parameters (using
BFMatcher
-----------------
.. ocv:class:: BFMatcher : public DescriptorMatcher
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets.
BFMatcher::BFMatcher
--------------------
Brute-force matcher constructor.
.. ocv:function:: BFMatcher::BFMatcher( int normType, bool crossCheck=false )
:param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see the ORB::ORB constructor description).
:param crossCheck: If it is false, this will be the default ``BFMatcher`` behaviour: it finds the k nearest neighbours for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for the ``i``-th query descriptor the ``j``-th descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. This technique usually produces the best results with a minimal number of outliers when there are enough matches. It is an alternative to the ratio test used by D. Lowe in the SIFT paper.
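A brief usage sketch (``descriptors1`` and ``descriptors2`` are hypothetical ORB descriptor matrices): ::

    BFMatcher matcher(NORM_HAMMING, true);   // cross-check enabled
    std::vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches); // consistent pairs only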
FlannBasedMatcher
-----------------
.. ocv:class:: FlannBasedMatcher : public DescriptorMatcher
Flann-based descriptor matcher. This matcher trains :ocv:class:`flann::Index_` on a train descriptor collection and calls its nearest search methods to find the best matches. So, this matcher may be faster when matching a large train collection than the brute force matcher. ``FlannBasedMatcher`` does not support masking permissible matches of descriptor sets because ``flann::Index`` does not support this. ::

@ -12,31 +12,31 @@ KeyPoint
--------
.. ocv:class:: KeyPoint
Data structure for salient point detectors.
.. ocv:member:: Point2f pt
coordinates of the keypoint
.. ocv:member:: float size
diameter of the meaningful keypoint neighborhood
.. ocv:member:: float angle
computed orientation of the keypoint (-1 if not applicable)
.. ocv:member:: float response
the response by which the strongest keypoints have been selected. Can be used for further sorting or subsampling
.. ocv:member:: int octave
octave (pyramid layer) from which the keypoint has been extracted
.. ocv:member:: int class_id
object id that can be used to cluster keypoints by the object they belong to
KeyPoint::KeyPoint
------------------
@ -48,7 +48,7 @@ The keypoint constructors
.. ocv:function:: KeyPoint::KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1)
.. ocv:pyfunction:: cv2.KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) -> <KeyPoint object>
:param x: x-coordinate of the keypoint
@ -69,7 +69,7 @@ The keypoint constructors
FeatureDetector
---------------
.. ocv:class:: FeatureDetector : public Algorithm
Abstract base class for 2D image feature detectors. ::
@ -112,22 +112,6 @@ Detects keypoints in an image (first variant) or image set (second variant).
:param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``.
FeatureDetector::read
-------------------------
Reads a feature detector object from a file node.
.. ocv:function:: void FeatureDetector::read( const FileNode& fn )
:param fn: File node from which the detector is read.
FeatureDetector::write
--------------------------
Writes a feature detector object to a file storage.
.. ocv:function:: void FeatureDetector::write( FileStorage& fs ) const
:param fs: File storage where the detector is written.
FeatureDetector::create
---------------------------
Creates a feature detector by its name.
@ -156,7 +140,7 @@ for example: ``"GridFAST"``, ``"PyramidSTAR"`` .
FastFeatureDetector
-------------------
.. ocv:class:: FastFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:func:`FAST` method. ::
@ -173,7 +157,7 @@ Wrapping class for feature detection using the
GoodFeaturesToTrackDetector
---------------------------
.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:func:`goodFeaturesToTrack` function. ::
@ -211,7 +195,7 @@ Wrapping class for feature detection using the
MserFeatureDetector
-------------------
.. ocv:class:: MserFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:class:`MSER` class. ::
@ -233,7 +217,7 @@ Wrapping class for feature detection using the
StarFeatureDetector
-------------------
.. ocv:class:: StarFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:class:`StarDetector` class. ::
@ -252,7 +236,7 @@ Wrapping class for feature detection using the
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector : public FeatureDetector
Class for generation of image features which are distributed densely and regularly over the image. ::
@ -279,7 +263,7 @@ The detector generates several levels (in the amount of ``featureScaleLevels``)
SimpleBlobDetector
-------------------
.. ocv:class:: SimpleBlobDetector : public FeatureDetector
Class for extracting blobs from an image. ::
@ -344,7 +328,7 @@ Default values of parameters are tuned to extract dark circular blobs.
GridAdaptedFeatureDetector
--------------------------
.. ocv:class:: GridAdaptedFeatureDetector : public FeatureDetector
Class adapting a detector to partition the source image into a grid and detect points in each cell. ::
@ -369,7 +353,7 @@ Class adapting a detector to partition the source image into a grid and detect p
PyramidAdaptedFeatureDetector
-----------------------------
.. ocv:class:: PyramidAdaptedFeatureDetector : public FeatureDetector
Class adapting a detector to detect points over multiple levels of a Gaussian pyramid. Consider using this class for detectors that are not inherently scaled. ::
@ -387,7 +371,7 @@ Class adapting a detector to detect points over multiple levels of a Gaussian py
DynamicAdaptedFeatureDetector
-----------------------------
.. ocv:class:: DynamicAdaptedFeatureDetector : public FeatureDetector
Adaptively adjusting detector that iteratively detects features until the desired number is found. ::
@ -431,7 +415,7 @@ DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector
----------------------------------------------------------------
The constructor
.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 )
:param adjuster: :ocv:class:`AdjusterAdapter` that detects features and adjusts parameters.
@ -443,7 +427,7 @@ The constructor
AdjusterAdapter
---------------
.. ocv:class:: AdjusterAdapter : public FeatureDetector
Class providing an interface for adjusting parameters of a feature detector. This interface is used by :ocv:class:`DynamicAdaptedFeatureDetector` . It is a wrapper for :ocv:class:`FeatureDetector` that enables adjusting parameters after feature detection. ::
@ -522,7 +506,7 @@ Creates an adjuster adapter by name
FastAdjuster
------------
.. ocv:class:: FastAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for :ocv:class:`FastFeatureDetector`. This class decreases or increases the threshold value by 1. ::
@ -535,7 +519,7 @@ FastAdjuster
StarAdjuster
------------
.. ocv:class:: StarAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for :ocv:class:`StarFeatureDetector`. This class adjusts the ``responseThreshold`` of ``StarFeatureDetector``. ::

@ -3,7 +3,7 @@ Common Interfaces of Generic Descriptor Matchers
.. highlight:: cpp
Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to easily switch
between different algorithms solving the same problem. This section is devoted to matching descriptors
that cannot be represented as vectors in a multidimensional space. ``GenericDescriptorMatcher`` is a more generic interface for descriptors. It does not make any assumptions about descriptor representation.
Every descriptor with the
@ -130,7 +130,7 @@ GenericDescriptorMatcher::isMaskSupported
---------------------------------------------
Returns ``true`` if a generic descriptor matcher supports masking permissible matches.
.. ocv:function:: void GenericDescriptorMatcher::isMaskSupported()
.. ocv:function:: bool GenericDescriptorMatcher::isMaskSupported()
@ -151,12 +151,12 @@ Classifies keypoints from a query set.
:param trainKeypoints: Keypoints from a train image.
The method classifies each keypoint from a query set. The first variant of the method takes a train image and its keypoints as an input argument. The second variant uses the internally stored training collection that can be built using the ``GenericDescriptorMatcher::add`` method.
The methods do the following:
#.
Call the ``GenericDescriptorMatcher::match`` method to find correspondence between the query set and the training set.
#.
Set the ``class_id`` field of each keypoint from the query set to ``class_id`` of the corresponding keypoint from the training set.
@ -195,7 +195,7 @@ Finds the ``k`` best matches for each query keypoint.
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, const Mat& trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, int k, const Mat& mask=Mat(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, int k, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )
The methods are extended variants of ``GenericDescriptorMatcher::match``. The parameters and semantics are similar to those of ``DescriptorMatcher::knnMatch``, but this class does not require explicitly computed keypoint descriptors.
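A minimal sketch, assuming a :ocv:class:`VectorDescriptorMatcher` built from ORB descriptors and a brute-force Hamming matcher (any concrete ``GenericDescriptorMatcher`` would do): ::

    // query/train images and their keypoints are assumed to be prepared
    // by a feature detector beforehand
    VectorDescriptorMatcher matcher(DescriptorExtractor::create("ORB"),
                                    DescriptorMatcher::create("BruteForce-Hamming"));
    std::vector<std::vector<DMatch> > matches;
    matcher.knnMatch(queryImage, queryKeypoints,
                     trainImage, trainKeypoints, matches, 2);  // k = 2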
@ -231,7 +231,7 @@ GenericDescriptorMatcher::clone
-----------------------------------
Clones the matcher.
.. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData ) const
.. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData=false ) const
:param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies
both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters
@ -240,7 +240,7 @@ Clones the matcher.
VectorDescriptorMatcher
-----------------------
.. ocv:class:: VectorDescriptorMatcher
.. ocv:class:: VectorDescriptorMatcher : public GenericDescriptorMatcher
Class used for matching descriptors that can be described as vectors in a finite-dimensional space. ::

@ -7,9 +7,9 @@ drawMatches
---------------
Draws the found matches of keypoints from two images.
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char>>& matchesMask= vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
:param img1: First source image.
@ -31,7 +31,7 @@ Draws the found matches of keypoints from two images.
:param matchesMask: Mask determining which matches are drawn. If the mask is empty, all matches are drawn.
:param flags: Flags setting drawing features. Possible ``flags`` bit values are defined by ``DrawMatchesFlags``.
This function draws matches of keypoints from two images in the output image. A match is a line connecting two keypoints (circles). The structure ``DrawMatchesFlags`` is defined as follows:
.. code-block:: cpp
@ -65,13 +65,13 @@ drawKeypoints
-----------------
Draws keypoints.
.. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImg, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
:param image: Source image.
:param keypoints: Keypoints from the source image.
:param outImg: Output image. Its content depends on the ``flags`` value defining what is drawn in the output image. See possible ``flags`` bit values below.
:param outImage: Output image. Its content depends on the ``flags`` value defining what is drawn in the output image. See possible ``flags`` bit values below.
:param color: Color of keypoints.
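For example, a minimal sketch that visualizes detected keypoints with their size and orientation (``DRAW_RICH_KEYPOINTS`` is one of the ``DrawMatchesFlags`` bit values; ``image`` and ``keypoints`` are assumed to come from a detector): ::

    Mat outImage;
    drawKeypoints(image, keypoints, outImage, Scalar::all(-1),
                  DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imshow("keypoints", outImage);
    waitKey();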

@ -24,7 +24,7 @@ Detects corners using the FAST algorithm by [Rosten06]_.
MSER
----
.. ocv:class:: MSER
.. ocv:class:: MSER : public FeatureDetector
Maximally stable extremal region extractor. ::
@ -50,7 +50,7 @@ http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http:/
ORB
---
.. ocv:class:: ORB
.. ocv:class:: ORB : public Feature2D
Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation).
@ -60,39 +60,37 @@ ORB::ORB
--------
The ORB constructor
.. ocv:function:: ORB::ORB()
.. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31)
:param nfeatures: The maximum number of features to retain.
:param scaleFactor: Pyramid decimation ratio, greater than 1. ``scaleFactor==2`` means the classical pyramid, where each next level has 4x fewer pixels than the previous one, but such a big scale factor degrades feature matching scores dramatically. On the other hand, a scale factor too close to 1 means that covering a certain scale range requires more pyramid levels, so the speed suffers.
:param nlevels: The number of pyramid levels. The smallest level will have linear size equal to ``input_image_linear_size/pow(scaleFactor, nlevels)``.
:param edgeThreshold: This is the size of the border where the features are not detected. It should roughly match the ``patchSize`` parameter.
:param firstLevel: It should be 0 in the current implementation.
:param WTA_K: The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get a 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from a pre-defined seed, so each element of the BRIEF descriptor is computed deterministically from the pixel rectangle), find the point of maximum brightness, and output the index of the winner (0, 1, or 2). Such output occupies 2 bits and therefore needs a special variant of the Hamming distance, denoted ``NORM_HAMMING2`` (2 bits per bin). When ``WTA_K=4``, we take 4 random points to compute each bin (which also occupies 2 bits, with possible values 0, 1, 2, or 3).
:param scoreType: The default ``HARRIS_SCORE`` means that the Harris algorithm is used to rank features (the score is written to ``KeyPoint::response`` and is used to retain the best ``nfeatures`` features); ``FAST_SCORE`` is an alternative value of the parameter that produces slightly less stable keypoints but is a little faster to compute.
:param patchSize: Size of the patch used by the oriented BRIEF descriptor. Of course, on smaller pyramid layers the perceived image area covered by a feature will be larger.
ORB::operator()
---------------
Finds keypoints in an image and computes their descriptors
.. ocv:function:: void ORB::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const
:param image: The input 8-bit grayscale image.
:param mask: The operation mask.
:param keypoints: The output vector of keypoints.
:param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
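Putting the constructor and the operator together, a minimal sketch (parameter values are illustrative): ::

    Mat image = imread("image.png", 0);   // 8-bit grayscale input (hypothetical file)
    ORB orb(1000, 1.2f, 8);               // keep at most 1000 keypoints
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    orb(image, Mat(), keypoints, descriptors);   // detect and compute in one call
    // descriptors has one 32-byte row (CV_8U) per keypoint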

@ -54,7 +54,7 @@ BOWTrainer::descripotorsCount
---------------------------------
Returns the count of all descriptors stored in the training set.
.. ocv:function:: const vector<Mat>& BOWTrainer::descripotorsCount() const
.. ocv:function:: int BOWTrainer::descripotorsCount() const
@ -72,7 +72,7 @@ The vocabulary consists of cluster centers. So, this method returns the vocabula
BOWKMeansTrainer
----------------
.. ocv:class:: BOWKMeansTrainer
.. ocv:class:: BOWKMeansTrainer : public BOWTrainer
:ocv:func:`kmeans` -based class to train visual vocabulary using the *bag of visual words* approach.
::

@ -51,15 +51,15 @@
namespace cv
{
CV_EXPORTS bool initModule_features2d();
/*!
The Keypoint Class
The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint detectors, such as
Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector etc.
The keypoint is characterized by the 2D position, scale
(proportional to the diameter of the neighborhood that needs to be taken into account),
orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor
@ -81,9 +81,9 @@ public:
float _response=0, int _octave=0, int _class_id=-1)
: pt(x, y), size(_size), angle(_angle),
response(_response), octave(_octave), class_id(_class_id) {}
size_t hash() const;
//! converts vector of keypoints to vector of points
static void convert(const vector<KeyPoint>& keypoints,
CV_OUT vector<Point2f>& points2f,
@ -103,13 +103,13 @@ public:
CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable)
CV_PROP_RW float response; //!< the response by which the strongest keypoints have been selected. Can be used for further sorting or subsampling
CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted
CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to)
};
//! writes vector of keypoints to the file storage
CV_EXPORTS void write(FileStorage& fs, const string& name, const vector<KeyPoint>& keypoints);
//! reads vector of keypoints from the specified file storage node
CV_EXPORTS void read(const FileNode& node, CV_OUT vector<KeyPoint>& keypoints);
/*
* A class that filters a vector of keypoints.
@ -138,14 +138,14 @@ public:
* Remove duplicated keypoints.
*/
static void removeDuplicated( vector<KeyPoint>& keypoints );
/*
* Retain the specified number of the best keypoints (according to the response)
*/
static void retainBest(vector<KeyPoint>& keypoints, int npoints);
};
/************************************ Base Classes ************************************/
/*
@ -155,7 +155,7 @@ class CV_EXPORTS_W FeatureDetector : public virtual Algorithm
{
public:
virtual ~FeatureDetector();
/*
* Detect keypoints in an image.
* image The image.
@ -164,7 +164,7 @@ public:
* matrix with non-zero values in the region of interest.
*/
CV_WRAP void detect( const Mat& image, CV_OUT vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
/*
* Detect keypoints in an image set.
* images Image collection.
@ -172,16 +172,16 @@ public:
* masks Masks for image set. masks[i] is a mask for images[i].
*/
void detect( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, const vector<Mat>& masks=vector<Mat>() ) const;
// Return true if detector object is empty
CV_WRAP virtual bool empty() const;
// Create feature detector by detector name.
CV_WRAP static Ptr<FeatureDetector> create( const string& detectorType );
protected:
virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const = 0;
/*
* Remove keypoints that are not in the mask.
* Helper function, useful when wrapping a library call for keypoint detection that
@ -189,8 +189,8 @@ protected:
*/
static void removeInvalidPoints( const Mat& mask, vector<KeyPoint>& keypoints );
};
/*
* Abstract base class for computing descriptors for image keypoints.
*
@ -204,7 +204,7 @@ class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm
{
public:
virtual ~DescriptorExtractor();
/*
* Compute the descriptors for a set of keypoints in an image.
* image The image.
@ -212,7 +212,7 @@ public:
* descriptors Computed descriptors. Row i is the descriptor for keypoint i.
*/
CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const;
/*
* Compute the descriptors for a keypoints collection detected in image collection.
* images Image collection.
@ -221,26 +221,26 @@ public:
* descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i].
*/
void compute( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, vector<Mat>& descriptors ) const;
CV_WRAP virtual int descriptorSize() const = 0;
CV_WRAP virtual int descriptorType() const = 0;
CV_WRAP virtual bool empty() const;
CV_WRAP static Ptr<DescriptorExtractor> create( const string& descriptorExtractorType );
protected:
virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const = 0;
/*
* Remove keypoints within borderPixels of an image edge.
*/
static void removeBorderKeypoints( vector<KeyPoint>& keypoints,
Size imageSize, int borderSize );
};
/*
* Abstract base class for simultaneous 2D feature detection and descriptor extraction.
*/
@ -254,18 +254,18 @@ public:
* mask Mask specifying where to look for keypoints (optional). Must be a char
* matrix with non-zero values in the region of interest.
* useProvidedKeypoints If true, the method will skip the detection phase and will compute
* descriptors for the provided keypoints
*/
CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false ) const = 0;
// Create feature detector and descriptor extractor by name.
static Ptr<Feature2D> create( const string& name );
};
/*!
ORB implementation.
*/
@ -276,7 +276,7 @@ public:
enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31,
int firstLevel = 0, int WTA_K=2, int scoreType=0, int patchSize=31 );
int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31 );
// returns the descriptor size in bytes
int descriptorSize() const;
@ -289,14 +289,14 @@ public:
// Compute the ORB features and descriptors on an image
void operator()( InputArray image, InputArray mask, vector<KeyPoint>& keypoints,
OutputArray descriptors, bool useProvidedKeypoints=false ) const;
AlgorithmInfo* info() const;
protected:
void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
CV_PROP_RW int nfeatures;
CV_PROP_RW double scaleFactor;
CV_PROP_RW int nlevels;
@ -306,18 +306,18 @@ protected:
CV_PROP_RW int scoreType;
CV_PROP_RW int patchSize;
};
typedef ORB OrbFeatureDetector;
typedef ORB OrbDescriptorExtractor;
/*!
Maximal Stable Extremal Regions class.
The class implements MSER algorithm introduced by J. Matas.
Unlike SIFT, SURF and many other detectors in OpenCV, this is a salient region detector,
not a salient point detector.
It returns the regions, each of which is encoded as a contour.
*/
class CV_EXPORTS_W MSER : public FeatureDetector
@ -328,15 +328,15 @@ public:
double _max_variation=0.25, double _min_diversity=.2,
int _max_evolution=200, double _area_threshold=1.01,
double _min_margin=0.003, int _edge_blur_size=5 );
//! the operator that extracts the MSERs from the image or the specific part of it
CV_WRAP_AS(detect) void operator()( const Mat& image, CV_OUT vector<vector<Point> >& msers,
const Mat& mask=Mat() ) const;
AlgorithmInfo* info() const;
protected:
void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
int delta;
int minArea;
int maxArea;
@ -347,12 +347,12 @@ protected:
double minMargin;
int edgeBlurSize;
};
typedef MSER MserFeatureDetector;
/*!
The "Star" Detector.
The class implements the keypoint detector introduced by K. Konolige.
*/
class CV_EXPORTS_W StarDetector : public FeatureDetector
@ -363,16 +363,16 @@ public:
int _lineThresholdProjected=10,
int _lineThresholdBinarized=8,
int _suppressNonmaxSize=5);
//! finds the keypoints in the image
CV_WRAP_AS(detect) void operator()(const Mat& image,
CV_OUT vector<KeyPoint>& keypoints) const;
AlgorithmInfo* info() const;
protected:
void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
int maxSize;
int responseThreshold;
int lineThresholdProjected;
@ -389,7 +389,7 @@ class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector
public:
CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true );
AlgorithmInfo* info() const;
protected:
virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
@ -469,7 +469,7 @@ protected:
Params params;
};
class CV_EXPORTS DenseFeatureDetector : public FeatureDetector
{
public:
@ -486,10 +486,10 @@ protected:
double initFeatureScale;
int featureScaleLevels;
double featureScaleMul;
int initXyStep;
int initImgBound;
bool varyXyStepWithScale;
bool varyImgBoundWithScale;
};
@ -511,7 +511,7 @@ public:
CV_WRAP GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
int maxTotalKeypoints=1000,
int gridRows=4, int gridCols=4 );
// TODO implement read/write
virtual bool empty() const;
@ -533,7 +533,7 @@ class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector
public:
// maxLevel - The 0-based index of the last pyramid layer
CV_WRAP PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector, int maxLevel=2 );
// TODO implement read/write
virtual bool empty() const;
@ -549,7 +549,7 @@ protected:
*/
class CV_EXPORTS AdjusterAdapter: public FeatureDetector
{
public:
/** pure virtual interface
*/
virtual ~AdjusterAdapter() {}
@ -588,13 +588,13 @@ class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector
{
public:
/** \param adjaster an AdjusterAdapter that will do the detection and parameter adjustment
/** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment
* \param max_features the maximum desired number of features
* \param max_iters the maximum number of times to try to adjust the feature detector params
* for the FastAdjuster this can be high, but with Star or Surf this can get time consuming
* \param min_features the minimum desired features
*/
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjaster, int min_features=400, int max_features=500, int max_iters=5 );
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 );
virtual bool empty() const;
@ -741,7 +741,7 @@ protected:
/****************************************************************************************\
* Distance *
\****************************************************************************************/
template<typename T>
struct CV_EXPORTS Accumulator
{
@ -762,7 +762,7 @@ struct CV_EXPORTS SL2
enum { normType = NORM_L2SQR };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const
{
return normL2Sqr<ValueType, ResultType>(a, b, size);
@ -778,7 +778,7 @@ struct CV_EXPORTS L2
enum { normType = NORM_L2 };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const
{
return (ResultType)sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
@ -794,7 +794,7 @@ struct CV_EXPORTS L1
enum { normType = NORM_L1 };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const
{
return normL1<ValueType, ResultType>(a, b, size);
@ -810,7 +810,7 @@ struct CV_EXPORTS Hamming
enum { normType = NORM_HAMMING };
typedef unsigned char ValueType;
typedef int ResultType;
/** this will count the bits in a ^ b
*/
ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const
@ -826,13 +826,13 @@ template<int cellsize> struct CV_EXPORTS HammingMultilevel
enum { normType = NORM_HAMMING + (cellsize>1) };
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const
{
return normHamming(a, b, size, cellsize);
}
};
/****************************************************************************************\
* DMatch *
\****************************************************************************************/
@ -1042,7 +1042,7 @@ public:
virtual void train();
virtual bool isMaskSupported() const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
@ -1158,9 +1158,9 @@ public:
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
// Reads matcher object from a file node
virtual void read( const FileNode& );
virtual void read( const FileNode& fn );
// Writes matcher object to a file storage
virtual void write( FileStorage& ) const;
virtual void write( FileStorage& fs ) const;
// Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty)
virtual bool empty() const;

@ -289,7 +289,7 @@ Enables the :ocv:class:`gpu::StereoConstantSpaceBP` constructors.
.. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP, int iters = DEFAULT_ITERS, int levels = DEFAULT_LEVELS, int nr_plane = DEFAULT_NR_PLANE, int msg_type = CV_32F)
.. ocv:function:: StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th = 0, int msg_type = CV_32F)
.. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th = 0, int msg_type = CV_32F)
:param ndisp: Number of disparities.

@ -338,7 +338,7 @@ Blocks the current CPU thread until all operations in the stream are complete.
gpu::StreamAccessor
-------------------
.. ocv:class:: gpu::StreamAccessor
.. ocv:struct:: gpu::StreamAccessor
Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is declared in ``stream_accessor.hpp`` because it is the only public header that depends on the CUDA Runtime API. Including it brings a dependency on the CUDA Runtime API into your code. ::

@ -346,19 +346,19 @@ Detects keypoints and computes descriptors for them.
gpu::ORB_GPU::downloadKeypoints
gpu::ORB_GPU::downloadKeyPoints
-------------------------------------
Downloads keypoints from GPU to CPU memory.
.. ocv:function:: void gpu::ORB_GPU::downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints)
.. ocv:function:: void gpu::ORB_GPU::downloadKeyPoints( GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints )
gpu::ORB_GPU::convertKeypoints
gpu::ORB_GPU::convertKeyPoints
-------------------------------------
Converts keypoints from GPU representation to vector of ``KeyPoint``.
.. ocv:function:: void gpu::ORB_GPU::convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints)
.. ocv:function:: void gpu::ORB_GPU::convertKeyPoints( Mat& d_keypoints, std::vector<KeyPoint>& keypoints )

@ -195,7 +195,7 @@ Creates a normalized 2D box filter.
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createBoxFilter_GPU(int srcType, int dstType, const Size& ksize, const Point& anchor = Point(-1,-1))
.. ocv:function:: Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1))
.. ocv:function:: Ptr<BaseFilter_GPU> gpu::getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1))
:param srcType: Input image type supporting ``CV_8UC1`` and ``CV_8UC4`` .
@ -285,7 +285,9 @@ gpu::erode
--------------
Erodes an image by using a specific structuring element.
.. ocv:function:: void gpu::erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::erode( const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor=Point(-1, -1), int iterations=1 )
.. ocv:function:: void gpu::erode( const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, Point anchor=Point(-1, -1), int iterations=1, Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC1`` and ``CV_8UC4`` types are supported.
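A minimal sketch (assuming ``src`` is a ``CV_8UC1`` host image): ::

    Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
    gpu::GpuMat d_src(src), d_dst;
    gpu::erode(d_src, d_dst, kernel);   // single erosion with a 3x3 box element
    Mat dst(d_dst);                     // download the result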
@ -309,7 +311,9 @@ gpu::dilate
---------------
Dilates an image by using a specific structuring element.
.. ocv:function:: void gpu::dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::dilate( const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor=Point(-1, -1), int iterations=1 )
.. ocv:function:: void gpu::dilate( const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, Point anchor=Point(-1, -1), int iterations=1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` and ``CV_8UC4`` source types are supported.
@ -333,7 +337,9 @@ gpu::morphologyEx
---------------------
Applies an advanced morphological operation to an image.
.. ocv:function:: void gpu::morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::morphologyEx( const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor=Point(-1, -1), int iterations=1 )
.. ocv:function:: void gpu::morphologyEx( const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, GpuMat& buf1, GpuMat& buf2, Point anchor=Point(-1, -1), int iterations=1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` and ``CV_8UC4`` source types are supported.
@ -371,8 +377,6 @@ Creates a non-separable linear filter.
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
.. ocv:function:: Ptr<BaseFilter_GPU> gpu::getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, const Size& ksize, Point anchor = Point(-1, -1))
:param srcType: Input image type. Supports ``CV_8U`` , ``CV_16U`` and ``CV_32F`` one- and four-channel images.
:param dstType: Output image type. The same type as ``src`` is supported.
@ -441,7 +445,7 @@ gpu::getLinearRowFilter_GPU
-------------------------------
Creates a primitive row filter with the specified kernel.
.. ocv:function:: Ptr<BaseRowFilter_GPU> gpu::getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel, int anchor = -1, int borderType = BORDER_CONSTANT)
.. ocv:function:: Ptr<BaseRowFilter_GPU> gpu::getLinearRowFilter_GPU( int srcType, int bufType, const Mat& rowKernel, int anchor=-1, int borderType=BORDER_DEFAULT )
:param srcType: Source array type. Only ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -467,7 +471,7 @@ gpu::getLinearColumnFilter_GPU
----------------------------------
Creates a primitive column filter with the specified kernel.
.. ocv:function:: Ptr<BaseColumnFilter_GPU> gpu::getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel, int anchor = -1, int borderType = BORDER_CONSTANT)
.. ocv:function:: Ptr<BaseColumnFilter_GPU> gpu::getLinearColumnFilter_GPU( int bufType, int dstType, const Mat& columnKernel, int anchor=-1, int borderType=BORDER_DEFAULT )
:param bufType: Intermediate buffer type with as many channels as ``dstType`` .
@ -517,7 +521,10 @@ gpu::sepFilter2D
--------------------
Applies a separable 2D linear filter to an image.
.. ocv:function:: void gpu::sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::sepFilter2D( const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, Point anchor=Point(-1,-1), int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::sepFilter2D( const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, GpuMat& buf, Point anchor=Point(-1,-1), int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -569,7 +576,9 @@ gpu::Sobel
--------------
Applies the generalized Sobel operator to an image.
.. ocv:function:: void gpu::Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::Sobel( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize=3, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::Sobel( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, int ksize=3, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
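A minimal sketch computing the first x-derivative on a float image (the conversion to ``CV_32F`` keeps the example within the supported source types listed above): ::

    Mat srcf;
    src.convertTo(srcf, CV_32F);             // src is an 8-bit host image
    gpu::GpuMat d_src(srcf), d_dx;
    gpu::Sobel(d_src, d_dx, CV_32F, 1, 0);   // dI/dx with the default 3x3 aperture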
@ -599,7 +608,9 @@ gpu::Scharr
---------------
Calculates the first x- or y- image derivative using the Scharr operator.
.. ocv:function:: void gpu::Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::Scharr( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::Scharr( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -627,15 +638,15 @@ gpu::createGaussianFilter_GPU
---------------------------------
Creates a Gaussian filter engine.
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createGaussianFilter_GPU(int type, Size ksize, double sigmaX, double sigmaY = 0, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1)
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createGaussianFilter_GPU( int type, Size ksize, double sigma1, double sigma2=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
:param type: Source and destination image type. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` are supported.
:param ksize: Aperture size. See :ocv:func:`getGaussianKernel` for details.
:param sigmaX: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` for details.
:param sigma1: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` for details.
:param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` .
:param sigma2: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigma2}\leftarrow\texttt{sigma1}` .
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
@ -649,17 +660,19 @@ gpu::GaussianBlur
---------------------
Smooths an image using the Gaussian filter.
.. ocv:function:: void gpu::GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigmaX, double sigmaY = 0, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::GaussianBlur( const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::GaussianBlur( const GpuMat& src, GpuMat& dst, Size ksize, GpuMat& buf, double sigma1, double sigma2=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
:param dst: Destination image with the same size and type as ``src`` .
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. If they are zeros, they are computed from ``sigmaX`` and ``sigmaY`` .
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. If they are zeros, they are computed from ``sigma1`` and ``sigma2`` .
:param sigmaX: Gaussian kernel standard deviation in X direction.
:param sigma1: Gaussian kernel standard deviation in X direction.
:param sigmaY: Gaussian kernel standard deviation in Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize`` , ``sigmaX`` , and ``sigmaY`` .
:param sigma2: Gaussian kernel standard deviation in Y direction. If ``sigma2`` is zero, it is set to be equal to ``sigma1`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize`` , ``sigma1`` , and ``sigma2`` .
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
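For example (a minimal sketch; the kernel size and sigma are sample values): ::

    gpu::GpuMat d_src(src), d_dst;   // src: e.g. a CV_8UC1 host image
    gpu::GaussianBlur(d_src, d_dst, Size(5, 5), 1.5);   // sigma2 defaults to sigma1
    Mat dst(d_dst);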

@ -9,7 +9,7 @@ gpu::meanShiftFiltering
---------------------------
Performs mean-shift filtering for each point of the source image.
.. ocv:function:: void gpu::meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1))
.. ocv:function:: void gpu::meanShiftFiltering( const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
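A minimal sketch (the input must be converted to ``CV_8UC4`` first; the window radii are sample values): ::

    Mat bgra;
    cvtColor(src, bgra, CV_BGR2BGRA);   // meanShift* requires CV_8UC4
    gpu::GpuMat d_src(bgra), d_dst;
    gpu::meanShiftFiltering(d_src, d_dst, 20, 20);   // sp=20, sr=20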
@ -29,7 +29,7 @@ gpu::meanShiftProc
----------------------
Performs a mean-shift procedure and stores information about processed points (their colors and positions) in two images.
.. ocv:function:: void gpu::meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1))
.. ocv:function:: void gpu::meanShiftProc( const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
@ -159,7 +159,7 @@ gpu::mulSpectrums
---------------------
Performs a per-element multiplication of two Fourier spectrums.
.. ocv:function:: void gpu::mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false)
.. ocv:function:: void gpu::mulSpectrums( const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream=Stream::Null() )
:param a: First spectrum.
@ -181,7 +181,7 @@ gpu::mulAndScaleSpectrums
-----------------------------
Performs a per-element multiplication of two Fourier spectrums and scales the result.
.. ocv:function:: void gpu::mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false)
.. ocv:function:: void gpu::mulAndScaleSpectrums( const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream=Stream::Null() )
:param a: First spectrum.
@ -205,7 +205,7 @@ gpu::dft
------------
Performs a forward or inverse discrete Fourier transform (1D or 2D) of the floating point matrix.
.. ocv:function:: void gpu::dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0)
.. ocv:function:: void gpu::dft( const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream=Stream::Null() )
:param src: Source matrix (real or complex).
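A minimal sketch of a forward and inverse transform of a real matrix (``DFT_SCALE`` is added so the round trip reproduces the input): ::

    gpu::GpuMat d_signal(realInput), d_spectrum, d_back;   // realInput: CV_32FC1
    gpu::dft(d_signal, d_spectrum, realInput.size());      // forward: CV_32FC1 -> CV_32FC2
    gpu::dft(d_spectrum, d_back, realInput.size(),
             DFT_INVERSE | DFT_REAL_OUTPUT | DFT_SCALE);   // back to CV_32FC1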
@ -238,7 +238,7 @@ The source matrix should be continuous, otherwise reallocation and data copying
gpu::ConvolveBuf
----------------
.. ocv:class:: gpu::ConvolveBuf
.. ocv:struct:: gpu::ConvolveBuf
Class that provides a memory buffer for the :ocv:func:`gpu::convolve` function and allows adjusting some specific parameters. ::
@ -261,7 +261,7 @@ You can use field `user_block_size` to set specific block size for :ocv:func:`gp
gpu::ConvolveBuf::create
------------------------
.. ocv:function:: ConvolveBuf::create(Size image_size, Size templ_size)
.. ocv:function:: gpu::ConvolveBuf::create(Size image_size, Size templ_size)
Constructs a buffer for :ocv:func:`gpu::convolve` function with respective arguments.
@ -272,7 +272,7 @@ Computes a convolution (or cross-correlation) of two images.
.. ocv:function:: void gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr=false)
.. ocv:function:: void gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream &stream = Stream::Null())
.. ocv:function:: void gpu::convolve( const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream=Stream::Null() )
:param image: Source image. Only ``CV_32FC1`` images are supported for now.
@ -282,7 +282,7 @@ Computes a convolution (or cross-correlation) of two images.
:param ccorr: Flags to evaluate cross-correlation instead of convolution.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:class:`gpu::ConvolveBuf`.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::ConvolveBuf`.
:param stream: Stream for the asynchronous version.
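A minimal sketch reusing a :ocv:struct:`gpu::ConvolveBuf` across calls: ::

    gpu::GpuMat d_image(image), d_templ(templ), d_result;   // both CV_32FC1
    gpu::ConvolveBuf buf;
    buf.create(image.size(), templ.size());     // pre-allocate work buffers once
    gpu::convolve(d_image, d_templ, d_result, false, buf);  // false = convolution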
@ -290,7 +290,7 @@ Computes a convolution (or cross-correlation) of two images.
gpu::MatchTemplateBuf
---------------------
.. ocv:class:: gpu::MatchTemplateBuf
.. ocv:struct:: gpu::MatchTemplateBuf
Class that provides memory buffers for the :ocv:func:`gpu::matchTemplate` function and allows adjusting some specific parameters. ::
@ -321,7 +321,7 @@ Computes a proximity map for a raster template and an image where the template i
:param method: Specifies the way to compare the template with the image.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:class:`gpu::MatchTemplateBuf`.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::MatchTemplateBuf`.
:param stream: Stream for the asynchronous version.
@ -346,7 +346,7 @@ gpu::remap
--------------
Applies a generic geometrical transformation to an image.
.. ocv:function:: void gpu::remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, int interpolation, int borderMode = BORDER_CONSTANT, const Scalar& borderValue = Scalar(), Stream& stream = Stream::Null())
.. ocv:function:: void gpu::remap( const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, int interpolation, int borderMode=BORDER_CONSTANT, Scalar borderValue=Scalar(), Stream& stream=Stream::Null() )
:param src: Source image.
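A minimal sketch that shifts an image 10 pixels to the right using explicit ``CV_32FC1`` maps: ::

    Mat xmap(src.size(), CV_32FC1), ymap(src.size(), CV_32FC1);
    for (int y = 0; y < src.rows; ++y)
        for (int x = 0; x < src.cols; ++x)
        {
            xmap.at<float>(y, x) = x - 10.f;   // dst(x,y) = src(x-10, y)
            ymap.at<float>(y, x) = (float)y;
        }
    gpu::GpuMat d_src(src), d_xmap(xmap), d_ymap(ymap), d_dst;
    gpu::remap(d_src, d_dst, d_xmap, d_ymap, INTER_LINEAR);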
@ -477,7 +477,7 @@ gpu::warpAffine
-------------------
Applies an affine transformation to an image.
.. ocv:function:: void gpu::warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::warpAffine( const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, Scalar borderValue=Scalar(), Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U`` , ``CV_16U`` , ``CV_32S`` , or ``CV_32F`` depth and 1, 3, or 4 channels are supported.
@ -499,7 +499,7 @@ gpu::buildWarpAffineMaps
------------------------
Builds transformation maps for affine transformation.
.. ocv:function:: void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
:param M: *2x3* transformation matrix.
@ -521,7 +521,7 @@ gpu::warpPerspective
------------------------
Applies a perspective transformation to an image.
.. ocv:function:: void gpu::warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::warpPerspective( const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, Scalar borderValue=Scalar(), Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U`` , ``CV_16U`` , ``CV_32S`` , or ``CV_32F`` depth and 1, 3, or 4 channels are supported.
@ -543,7 +543,7 @@ gpu::buildWarpPerspectiveMaps
-----------------------------
Builds transformation maps for perspective transformation.
.. ocv:function:: void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
:param M: *3x3* transformation matrix.
@ -657,9 +657,9 @@ Calculates a histogram with evenly distributed bins.
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat* hist, int* histSize, int* lowerLevel, int* upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat* hist, GpuMat& buf, int* histSize, int* lowerLevel, int* upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U``, ``CV_16U``, or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
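A minimal sketch for an 8-bit single-channel image, one bin per gray level: ::

    gpu::GpuMat d_src(src), d_hist, d_buf;   // src: CV_8UC1
    gpu::histEven(d_src, d_hist, d_buf, 256, 0, 256);
    Mat hist(d_hist);                        // one row, 256 columns, CV_32SC1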
@ -685,10 +685,6 @@ Calculates a histogram with bins determined by the ``levels`` array.
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat* hist, const GpuMat* levels, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat* hist, const GpuMat* levels, GpuMat& buf, Stream& stream = Stream::Null())
:param src: Source image. ``CV_8U`` , ``CV_16U`` , or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
:param hist: Destination histogram with one row, ``(levels.cols-1)`` columns, and the ``CV_32SC1`` type.
@ -747,7 +743,7 @@ gpu::buildWarpPlaneMaps
-----------------------
Builds plane warping maps.
.. ocv:function:: void gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, double dist, GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpPlaneMaps( Size src_size, Rect dst_roi, const Mat & K, const Mat& R, const Mat & T, float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream=Stream::Null() )
:param stream: Stream for the asynchronous version.
@ -757,7 +753,7 @@ gpu::buildWarpCylindricalMaps
-----------------------------
Builds cylindrical warping maps.
.. ocv:function:: void gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpCylindricalMaps( Size src_size, Rect dst_roi, const Mat & K, const Mat& R, float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream=Stream::Null() )
:param stream: Stream for the asynchronous version.
@ -767,7 +763,7 @@ gpu::buildWarpSphericalMaps
---------------------------
Builds spherical warping maps.
.. ocv:function:: void gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpSphericalMaps( Size src_size, Rect dst_roi, const Mat & K, const Mat& R, float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream=Stream::Null() )
:param stream: Stream for the asynchronous version.

@ -47,7 +47,6 @@ Any subsequent API call to this device will reinitialize the device.
gpu::FeatureSet
---------------
.. ocv:class:: gpu::FeatureSet
Class providing GPU computing features. ::
@ -74,9 +73,9 @@ Class providing a set of static methods to check what NVIDIA* card architecture
The following method checks whether the module was built with the support of the given feature:
.. ocv:function:: static bool gpu::TargetArchs::builtWith(FeatureSet feature)
.. ocv:function:: static bool gpu::TargetArchs::builtWith( FeatureSet feature_set )
:param feature: Feature to be checked. See :ocv:class:`gpu::FeatureSet`.
:param feature_set: Features to be checked. See :ocv:class:`gpu::FeatureSet`.
There is a set of methods to check whether the module contains intermediate (PTX) or binary GPU code for the given architecture(s):
@ -150,7 +149,7 @@ gpu::DeviceInfo::name
-------------------------
Returns the device name.
.. ocv:function:: string gpu::DeviceInfo::name()
.. ocv:function:: string gpu::DeviceInfo::name() const
@ -198,9 +197,9 @@ gpu::DeviceInfo::supports
-----------------------------
Provides information on GPU feature support.
.. ocv:function:: bool gpu::DeviceInfo::supports(FeatureSet feature)
.. ocv:function:: bool gpu::DeviceInfo::supports( FeatureSet feature_set ) const
:param feature: Feature to be checked. See :ocv:class:`gpu::FeatureSet`.
:param feature_set: Features to be checked. See :ocv:class:`gpu::FeatureSet`.
This function returns ``true`` if the device has the specified GPU feature. Otherwise, it returns ``false`` .

@ -32,7 +32,7 @@ By default, the OpenCV GPU module includes:
PTX code for compute capabilities 1.1 and 1.3 (controlled by ``CUDA_ARCH_PTX`` in ``CMake``)
This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
:ocv:func:`Exception`. For platforms where JIT compilation is performed first, the run is slow.
:ocv:class:`Exception`. For platforms where JIT compilation is performed first, the run is slow.
On a GPU with CC 1.0, you can still compile the GPU module and most of the functions will run flawlessly. To achieve this, add "1.0" to the list of binaries, for example, ``CUDA_ARCH_BIN="1.0 1.3 2.0"`` . The functions that cannot be run on CC 1.0 GPUs throw an exception.

@ -7,7 +7,7 @@ Object Detection
gpu::HOGDescriptor
------------------
.. ocv:class:: gpu::HOGDescriptor
.. ocv:struct:: gpu::HOGDescriptor
The class implements the Histogram of Oriented Gradients ([Dalal2005]_) object detector. ::
@ -235,7 +235,7 @@ gpu::CascadeClassifier_GPU::CascadeClassifier_GPU
-----------------------------------------------------
Loads the classifier from a file.
.. ocv:function:: gpu::CascadeClassifier_GPU(const string& filename)
.. ocv:function:: gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string& filename)
:param filename: Name of the file from which the classifier is loaded. Only the old ``haar`` classifier (trained by the ``haar`` training application) and NVIDIA's ``nvbin`` are supported.

@ -9,7 +9,7 @@ gpu::gemm
------------------
Performs generalized matrix multiplication.
.. ocv:function:: void gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())
:param src1: First multiplied input matrix that should have ``CV_32FC1`` , ``CV_64FC1`` , ``CV_32FC2`` , or ``CV_64FC2`` type.
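A minimal sketch computing ``dst = alpha*src1*src2 + beta*src3`` with ``CV_32FC1`` inputs: ::

    gpu::GpuMat d_A(A), d_B(B), d_C(C), d_dst;
    gpu::gemm(d_A, d_B, 1.0, d_C, 1.0, d_dst);   // dst = A*B + C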
@ -47,9 +47,9 @@ gpu::transpose
------------------
Transposes a matrix.
.. ocv:function:: void gpu::transpose(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::transpose( const GpuMat& src1, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).
:param src1: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).
:param dst: Destination matrix.
@ -63,11 +63,11 @@ gpu::flip
-------------
Flips a 2D matrix around vertical, horizontal, or both axes.
.. ocv:function:: void gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::flip( const GpuMat& a, GpuMat& b, int flipCode, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1-, 3- and 4-channel images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.
:param a: Source matrix. Supports 1-, 3- and 4-channel images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.
:param dst: Destination matrix.
:param b: Destination matrix.
:param flipCode: Flip mode for the source:
@ -143,7 +143,7 @@ gpu::magnitude
------------------
Computes magnitudes of complex matrix elements.
.. ocv:function:: void gpu::magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::magnitude( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())
@ -165,7 +165,7 @@ gpu::magnitudeSqr
---------------------
Computes squared magnitudes of complex matrix elements.
.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::magnitudeSqr( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())

@ -9,18 +9,20 @@ gpu::add
------------
Computes a matrix-matrix or matrix-scalar sum.
.. ocv:function:: void gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::add( const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::add(const GpuMat& src1, const Scalar& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::add( const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param b: Second source matrix to be added to ``a`` . Matrix should have the same size and type as ``a`` .
:param src2: Second source matrix or a scalar to be added to ``src1`` . Matrix should have the same size and type as ``src1`` .
:param sc: A scalar to be added to ``a`` .
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param mask: Optional operation mask, 8-bit single channel array, that specifies elements of the destination array to be changed.
:param dtype: Optional depth of the output array.
:param stream: Stream for the asynchronous version.
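Both overloads in a minimal sketch: ::

    gpu::GpuMat d_a(a), d_b(b), d_c;   // a, b: same size and type
    gpu::add(d_a, d_b, d_c);           // c = a + b
    gpu::add(d_a, Scalar(10), d_c);    // c = a + 10, per element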
@ -33,18 +35,20 @@ gpu::subtract
-----------------
Computes a matrix-matrix or matrix-scalar difference.
.. ocv:function:: void gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::subtract( const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::subtract(const GpuMat& src1, const Scalar& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::subtract( const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param b: Second source matrix to be subtracted from ``a`` . Matrix should have the same size and type as ``a`` .
:param sc: A scalar to be subtracted from ``a`` .
:param src2: Second source matrix or a scalar to be subtracted from ``src1`` . Matrix should have the same size and type as ``src1`` .
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param mask: Optional operation mask, 8-bit single channel array, that specifies elements of the destination array to be changed.
:param dtype: Optional depth of the output array.
:param stream: Stream for the asynchronous version.
@ -57,18 +61,20 @@ gpu::multiply
-----------------
Computes a matrix-matrix or matrix-scalar per-element product.
.. ocv:function:: void gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::multiply( const GpuMat& a, const GpuMat& b, GpuMat& c, double scale=1, int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::multiply(const GpuMat& src1, const Scalar& src2, GpuMat& dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::multiply( const GpuMat& a, const Scalar& sc, GpuMat& c, double scale=1, int dtype=-1, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param src2: Second source matrix or a scalar to be multiplied by ``src1`` elements.
:param b: Second source matrix to be multiplied by ``a`` elements.
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param sc: A scalar to be multiplied by ``a`` elements.
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param scale: Optional scale factor.
:param dtype: Optional depth of the output array.
:param stream: Stream for the asynchronous version.
@ -81,18 +87,22 @@ gpu::divide
-----------
Computes a matrix-matrix or matrix-scalar division.
.. ocv:function:: void gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::divide( const GpuMat& a, const GpuMat& b, GpuMat& c, double scale=1, int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::divide(double src1, const GpuMat& src2, GpuMat& dst, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
:param src1: First source matrix or a scalar.
.. ocv:function:: void gpu::divide( double scale, const GpuMat& b, GpuMat& c, int dtype=-1, Stream& stream=Stream::Null() )
:param src2: Second source matrix or a scalar. The ``src1`` elements are divided by it.
:param a: First source matrix.
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param b: Second source matrix. The ``a`` elements are divided by it.
:param sc: A scalar the elements of ``a`` are divided by.
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param scale: Optional scale factor.
:param dtype: Optional depth of the output array.
:param stream: Stream for the asynchronous version.
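The last overload computes a weighted per-element reciprocal of a matrix. A short sketch (values are illustrative) ::

    gpu::GpuMat gb, gc;
    gb.upload(Mat(4, 4, CV_32F, Scalar(8)));
    gpu::divide(2.0, gb, gc);    // c(i,j) = 2.0 / b(i,j) = 0.25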
@ -113,13 +123,13 @@ Computes the weighted sum of two arrays.
:param alpha: Weight for the first array elements.
:param src2: Second source array of the same size and channel number as ``src1`` .
:param beta: Weight for the second array elements.
:param dst: Destination array that has the same size and number of channels as the input arrays.
:param gamma: Scalar added to each sum.
:param dtype: Optional depth of the destination array. When both input arrays have the same depth, ``dtype`` can be set to ``-1``, which will be equivalent to ``src1.depth()``.
:param stream: Stream for the asynchronous version.
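For example, an equal-weight blend of two images with no extra offset (``img1`` and ``img2`` are assumed host images of the same size and type) ::

    gpu::GpuMat g1, g2, gdst;
    g1.upload(img1);
    g2.upload(img2);
    gpu::addWeighted(g1, 0.5, g2, 0.5, 0.0, gdst);    // dst = 0.5*src1 + 0.5*src2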
@ -186,11 +196,11 @@ gpu::exp
------------
Computes an exponent of each matrix element.
.. ocv:function:: void gpu::exp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::exp( const GpuMat& a, GpuMat& b, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param a: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param dst: Destination matrix with the same size and type as ``src`` .
:param b: Destination matrix with the same size and type as ``a`` .
:param stream: Stream for the asynchronous version.
@ -202,11 +212,11 @@ gpu::log
------------
Computes a natural logarithm of the absolute value of each matrix element.
.. ocv:function:: void gpu::log(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::log( const GpuMat& a, GpuMat& b, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param a: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param dst: Destination matrix with the same size and type as ``src`` .
:param b: Destination matrix with the same size and type as ``a`` .
:param stream: Stream for the asynchronous version.
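Because ``gpu::log`` operates on the absolute value, chaining it with ``gpu::exp`` recovers the magnitude of the input. A small sketch (values are illustrative) ::

    gpu::GpuMat gsrc, gtmp, gdst;
    gsrc.upload(Mat(4, 4, CV_32F, Scalar(5)));
    gpu::log(gsrc, gtmp);    // tmp = log(|src|)
    gpu::exp(gtmp, gdst);    // dst = exp(tmp) = |src| = 5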
@ -242,15 +252,17 @@ gpu::absdiff
----------------
Computes per-element absolute difference of two matrices (or of a matrix and scalar).
.. ocv:function:: void gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::absdiff( const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::absdiff(const GpuMat& src1, const Scalar& src2, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::absdiff( const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param src2: Second source matrix or a scalar to compute the absolute difference with ``src1`` .
:param b: Second source matrix to compute the absolute difference with ``a`` .
:param dst: Destination matrix with the same size and type as ``src1`` .
:param s: A scalar to compute the absolute difference with ``a`` .
:param c: Destination matrix with the same size and type as ``a`` .
:param stream: Stream for the asynchronous version.
@ -262,22 +274,26 @@ gpu::compare
----------------
Compares elements of two matrices.
.. ocv:function:: void gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int cmpop, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::compare( const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream=Stream::Null() )
:param src1: First source matrix.
.. ocv:function:: void gpu::compare(const GpuMat& a, Scalar sc, GpuMat& c, int cmpop, Stream& stream = Stream::Null())
:param src2: Second source matrix with the same size and type as ``src1`` .
:param a: First source matrix.
:param b: Second source matrix with the same size and type as ``a`` .
:param sc: A scalar to be compared with ``a`` .
:param dst: Destination matrix with the same size as ``src1`` and the ``CV_8UC1`` type.
:param c: Destination matrix with the same size as ``a`` and the ``CV_8UC1`` type.
:param cmpop: Flag specifying the relation between the elements to be checked:
* **CMP_EQ:** ``src1(.) == src2(.)``
* **CMP_GT:** ``src1(.) > src2(.)``
* **CMP_GE:** ``src1(.) >= src2(.)``
* **CMP_LT:** ``src1(.) < src2(.)``
* **CMP_LE:** ``src1(.) <= src2(.)``
* **CMP_NE:** ``src1(.) != src2(.)``
* **CMP_EQ:** ``a(.) == b(.)``
* **CMP_GT:** ``a(.) > b(.)``
* **CMP_GE:** ``a(.) >= b(.)``
* **CMP_LT:** ``a(.) < b(.)``
* **CMP_LE:** ``a(.) <= b(.)``
* **CMP_NE:** ``a(.) != b(.)``
:param stream: Stream for the asynchronous version.
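The destination matrix holds 255 where the relation holds and 0 elsewhere, so it can serve directly as an operation mask. A sketch (``a`` and ``b`` are assumed host matrices of the same size and type) ::

    gpu::GpuMat ga, gb, gmask;
    ga.upload(a);
    gb.upload(b);
    gpu::compare(ga, gb, gmask, CMP_GT);    // gmask(i,j) = 255 if a(i,j) > b(i,j), else 0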
@ -362,7 +378,7 @@ gpu::rshift
--------------------
Performs pixel by pixel right shift of an image by a constant value.
.. ocv:function:: void gpu::rshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::rshift( const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1, 3 and 4 channels images with integers elements.
@ -378,7 +394,7 @@ gpu::lshift
--------------------
Performs pixel by pixel left shift of an image by a constant value.
.. ocv:function:: void gpu::lshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::lshift( const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U`` , ``CV_16U`` or ``CV_32S`` depth.
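A left shift by one doubles each integer pixel value (results wrap according to the element type). A sketch (values are illustrative) ::

    gpu::GpuMat gsrc, gdst;
    gsrc.upload(Mat(4, 4, CV_8U, Scalar(3)));
    gpu::lshift(gsrc, Scalar_<int>(1), gdst);    // dst(i,j) = 3 << 1 = 6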

@ -431,11 +431,11 @@ CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = S
//! computes magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitude(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null());
CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
//! computes squared magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitudeSqr(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null());
CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
//! computes magnitude of each (x(i), y(i)) vector
//! supports only floating-point source
@ -480,7 +480,7 @@ CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale
//! computes element-wise weighted quotient of matrix and scalar (c = a / s)
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
CV_EXPORTS void divide(double scale, const GpuMat& src2, GpuMat& dst, int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null());
//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
@ -1697,15 +1697,15 @@ public:
class CV_EXPORTS GoodFeaturesToTrackDetector_GPU
{
public:
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners_ = 1000, double qualityLevel_ = 0.01, double minDistance_ = 0.0,
int blockSize_ = 3, bool useHarrisDetector_ = false, double harrisK_ = 0.04)
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04)
{
maxCorners = maxCorners_;
qualityLevel = qualityLevel_;
minDistance = minDistance_;
blockSize = blockSize_;
useHarrisDetector = useHarrisDetector_;
harrisK = harrisK_;
this->maxCorners = maxCorners;
this->qualityLevel = qualityLevel;
this->minDistance = minDistance;
this->blockSize = blockSize;
this->useHarrisDetector = useHarrisDetector;
this->harrisK = harrisK;
}
//! return 1 rows matrix with CV_32FC2 type
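// A hypothetical usage sketch (not part of the header): the detector's functor
// interface is not shown in this excerpt, and img is an assumed CV_8UC1 host image.
//
//     gpu::GoodFeaturesToTrackDetector_GPU detector(500, 0.01, 10.0);
//     gpu::GpuMat gimg, gcorners;
//     gimg.upload(img);
//     detector(gimg, gcorners);   // gcorners: 1 x N row matrix of CV_32FC2 corner positions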

@ -61,35 +61,35 @@ setWindowProperty
---------------------
Changes parameters of a window dynamically.
.. ocv:function:: void setWindowProperty(const string& name, int prop_id, double prop_value)
.. ocv:function:: void setWindowProperty( const string& winname, int prop_id, double prop_value )
.. ocv:pyfunction:: cv2.setWindowProperty(winname, prop_id, prop_value) -> None
.. ocv:cfunction:: void cvSetWindowProperty(const char* name, int propId, double propValue)
.. ocv:cfunction:: void cvSetWindowProperty( const char* name, int prop_id, double prop_value )
:param name: Name of the window.
:param prop_id: Window property to edit. The following operation flags are available:
* **CV_WND_PROP_FULLSCREEN** Change if the window is fullscreen ( ``CV_WINDOW_NORMAL`` or ``CV_WINDOW_FULLSCREEN`` ).
* **CV_WND_PROP_AUTOSIZE** Change if the window is resizable (``CV_WINDOW_NORMAL`` or ``CV_WINDOW_AUTOSIZE`` ).
* **CV_WND_PROP_ASPECTRATIO** Change if the aspect ratio of the image is preserved ( ``CV_WINDOW_FREERATIO`` or ``CV_WINDOW_KEEPRATIO`` ).
:param prop_value: New value of the window property. The following operation flags are available:
* **CV_WINDOW_NORMAL** Change the window to normal size or make the window resizable.
* **CV_WINDOW_AUTOSIZE** Constrain the size by the displayed image. The window is not resizable.
* **CV_WINDOW_FULLSCREEN** Change the window to fullscreen.
* **CV_WINDOW_FREERATIO** Make the window resizable without any ratio constraints.
* **CV_WINDOW_KEEPRATIO** Make the window resizable, but preserve the proportions of the displayed image.
The function ``setWindowProperty`` enables changing properties of a window.
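For instance, switching an existing window to fullscreen and back ::

    namedWindow("view", CV_WINDOW_NORMAL);
    setWindowProperty("view", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    // ... later, restore windowed mode
    setWindowProperty("view", CV_WND_PROP_FULLSCREEN, CV_WINDOW_NORMAL);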
@ -97,22 +97,22 @@ getWindowProperty
---------------------
Provides parameters of a window.
.. ocv:function:: void getWindowProperty(const string& name, int prop_id)
.. ocv:function:: double getWindowProperty( const string& winname, int prop_id )
.. ocv:pyfunction:: cv2.getWindowProperty(winname, prop_id) -> retval
.. ocv:cfunction:: void cvGetWindowProperty(const char* name, int propId)
.. ocv:cfunction:: double cvGetWindowProperty( const char* name, int prop_id )
:param name: Name of the window.
:param prop_id: Window property to retrieve. The following operation flags are available:
* **CV_WND_PROP_FULLSCREEN** Whether the window is fullscreen ( ``CV_WINDOW_NORMAL`` or ``CV_WINDOW_FULLSCREEN`` ).
* **CV_WND_PROP_AUTOSIZE** Whether the window is resizable (``CV_WINDOW_NORMAL`` or ``CV_WINDOW_AUTOSIZE`` ).
* **CV_WND_PROP_ASPECTRATIO** Whether the aspect ratio of the image is preserved (``CV_WINDOW_FREERATIO`` or ``CV_WINDOW_KEEPRATIO`` ).
See
:ocv:func:`setWindowProperty` to know the meaning of the returned values.
@ -134,27 +134,27 @@ Creates the font to draw a text on an image.
:param color: Color of the font in BGRA where A = 255 is fully transparent. Use the macro ``CV_RGB`` for simplicity.
:param weight: Font weight. The following operation flags are available:
* **CV_FONT_LIGHT** Weight of 25
* **CV_FONT_NORMAL** Weight of 50
* **CV_FONT_DEMIBOLD** Weight of 63
* **CV_FONT_BOLD** Weight of 75
* **CV_FONT_BLACK** Weight of 87
You can also specify a positive integer for better control.
:param style: Font style. The following operation flags are available:
* **CV_STYLE_NORMAL** Normal font
* **CV_STYLE_ITALIC** Italic font
* **CV_STYLE_OBLIQUE** Oblique font
:param spacing: Spacing between characters. It can be negative or positive.
The function ``fontQt`` creates a ``CvFont`` object. This ``CvFont`` is not compatible with ``putText`` .
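A short sketch combining ``fontQt`` with ``addText`` (requires the Qt backend; the font name must exist on the system, and ``img`` is an assumed 8-bit 3-channel image) ::

    CvFont font = fontQt("Times", 20, Scalar(0, 0, 0));    // black, 20-point Times
    addText(img, "Hello", Point(50, 50), font);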
@ -169,15 +169,15 @@ addText
-----------
Draws a text on an image.
.. ocv:function:: void addText(const Mat& img, const string& text, Point location, CvFont *font)
.. ocv:function:: void addText( const Mat& img, const string& text, Point org, CvFont font )
.. ocv:cfunction:: void cvAddText(const CvArr* img, const char* text, CvPoint location, CvFont *font)
.. ocv:cfunction:: void cvAddText( const CvArr* img, const char* text, CvPoint org, CvFont * arg2 )
:param img: 8-bit 3-channel image where the text should be drawn.
:param text: Text to write on an image.
:param location: Point(x,y) where the text should start on an image.
:param org: Point(x,y) where the text should start on an image.
:param font: Font to use to draw a text.
@ -193,9 +193,9 @@ using a specific font
displayOverlay
------------------
Displays a text on a window image as an overlay for a specified duration.
.. ocv:function:: void displayOverlay(const string& name, const string& text, int delayms = 0)
.. ocv:function:: void displayOverlay( const string& winname, const string& text, int delayms=0 )
.. ocv:cfunction:: void cvDisplayOverlay(const char* name, const char* text, int delayms = 0)
@ -212,7 +212,7 @@ displayStatusBar
--------------------
Displays a text on the window statusbar during the specified period of time.
.. ocv:function:: void displayStatusBar(const string& name, const string& text, int delayms = 0)
.. ocv:function:: void displayStatusBar( const string& winname, const string& text, int delayms=0 )
.. ocv:cfunction:: void cvDisplayStatusBar(const char* name, const char* text, int delayms = 0)
@ -226,27 +226,21 @@ The function ``displayOverlay`` displays useful information/tips on top of the w
*delayms*
. This information is displayed on the window statusbar (the window must be created with the ``CV_GUI_EXPANDED`` flags).
createOpenGLCallback
setOpenGlDrawCallback
------------------------
Creates a callback function called to draw OpenGL on top of the image displayed in the window ``windowname``.
Sets a callback function to be called to draw on top of the displayed image.
.. ocv:function:: void createOpenGLCallback( const string& window_name, OpenGLCallback callbackOpenGL, void* userdata =NULL, double angle=-1, double zmin=-1, double zmax=-1)
.. ocv:function:: void setOpenGlDrawCallback( const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata=0 )
.. ocv:cfunction:: void cvCreateOpenGLCallback( const char* windowName, CvOpenGLCallback callbackOpenGL, void* userdata=NULL, double angle=-1, double zmin=-1, double zmax=-1 )
.. ocv:cfunction:: void cvSetOpenGlDrawCallback( const char* window_name, CvOpenGlDrawCallback callback, void* userdata=NULL )
:param window_name: Name of the window.
:param callbackOpenGL: Pointer to the function to be called every frame. This function should be prototyped as ``void Foo(*void);`` .
:param onOpenGlDraw: Pointer to the function to be called every frame. This function should be prototyped as ``void Foo(void*)`` .
:param userdata: Pointer passed to the callback function. *(Optional)*
:param angle: Parameter specifying the field of a view angle, in degrees, in the y direction. Default value is 45 degrees. *(Optional)*
:param zmin: Parameter specifying the distance from the viewer to the near clipping plane (always positive). Default value is 0.01. *(Optional)*
:param zmax: Parameter specifying the distance from the viewer to the far clipping plane (always positive). Default value is 1000. *(Optional)*
The function ``createOpenGLCallback`` can be used to draw 3D data on the window. See the example of callback use below: ::
The function ``setOpenGlDrawCallback`` can be used to draw 3D data on the window. See the example of callback function below: ::
void on_opengl(void* param)
{
@ -282,7 +276,7 @@ saveWindowParameters
------------------------
Saves parameters of the specified window.
.. ocv:function:: void saveWindowParameters(const string& name)
.. ocv:function:: void saveWindowParameters( const string& windowName )
.. ocv:cfunction:: void cvSaveWindowParameters(const char* name)
@ -295,7 +289,7 @@ loadWindowParameters
------------------------
Loads parameters of the specified window.
.. ocv:function:: void loadWindowParameters(const string& name)
.. ocv:function:: void loadWindowParameters( const string& windowName )
.. ocv:cfunction:: void cvLoadWindowParameters(const char* name)
@ -308,9 +302,9 @@ createButton
----------------
Attaches a button to the control panel.
.. ocv:function:: createButton( const string& button_name=NULL, ButtonCallback on_change=NULL, void* userdata=NULL, int button_type=CV_PUSH_BUTTON, int initial_button_state=0 )
.. ocv:function:: int createButton( const string& bar_name, ButtonCallback on_change, void* userdata=NULL, int type=CV_PUSH_BUTTON, bool initial_button_state=0 )
.. ocv:cfunction:: cvCreateButton( const char* buttonName=NULL, CvButtonCallback onChange=NULL, void* userdata=NULL, int buttonType=CV_PUSH_BUTTON, int initialButtonState=0 )
.. ocv:cfunction:: int cvCreateButton( const char* button_name=NULL, CvButtonCallback on_change=NULL, void* userdata=NULL, int button_type=CV_PUSH_BUTTON, int initial_button_state=0 )
:param button_name: Name of the button.

@ -18,7 +18,7 @@ Reads an image from a buffer in memory.
:param buf: Input array or vector of bytes.
:param flags: The same flags as in :ocv:func:`imread` .
The function reads an image from the specified buffer in memory.
If the buffer is too short or contains invalid data, an empty matrix/image is returned.
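For example, decoding an image that has already been read into a byte buffer (how ``buf`` is filled, e.g. from a network stream, is left out) ::

    std::vector<uchar> buf;    // assumed to contain an encoded image, e.g. a JPEG
    Mat img = imdecode(buf, CV_LOAD_IMAGE_COLOR);
    if( img.empty() )
        printf("decoding failed\n");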
@ -31,9 +31,9 @@ Encodes an image into a memory buffer.
.. ocv:function:: bool imencode( const string& ext, InputArray img, vector<uchar>& buf, const vector<int>& params=vector<int>())
.. ocv:cfunction:: CvMat* cvEncodeImage(const char* ext, const CvArr* image, const int* params=NULL )
.. ocv:cfunction:: CvMat* cvEncodeImage( const char* ext, const CvArr* image, const int* params=0 )
.. ocv:pyfunction:: cv2.imencode(ext, img, buf[, params]) -> retval
.. ocv:pyfunction:: cv2.imencode(ext, img[, params]) -> retval, buf
:param ext: File extension that defines the output format.
@ -57,13 +57,13 @@ Loads an image from a file.
.. ocv:pyfunction:: cv2.imread(filename[, flags]) -> retval
.. ocv:cfunction:: IplImage* cvLoadImage( const char* filename, int flags=CV_LOAD_IMAGE_COLOR )
.. ocv:cfunction:: IplImage* cvLoadImage( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR )
.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int flags=CV_LOAD_IMAGE_COLOR )
.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR )
.. ocv:pyoldfunction:: cv.LoadImage(filename, flags=CV_LOAD_IMAGE_COLOR)->None
.. ocv:pyoldfunction:: cv.LoadImage(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None
.. ocv:pyoldfunction:: cv.LoadImageM(filename, flags=CV_LOAD_IMAGE_COLOR)->None
.. ocv:pyoldfunction:: cv.LoadImageM(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None
:param filename: Name of file to be loaded.
@ -103,11 +103,11 @@ imwrite
-----------
Saves an image to a specified file.
.. ocv:function:: bool imwrite( const string& filename, InputArray image, const vector<int>& params=vector<int>())
.. ocv:function:: bool imwrite( const string& filename, InputArray img, const vector<int>& params=vector<int>() )
.. ocv:pyfunction:: cv2.imwrite(filename, image[, params]) -> retval
.. ocv:pyfunction:: cv2.imwrite(filename, img[, params]) -> retval
.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image )
.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image, const int* params=0 )
.. ocv:pyoldfunction:: cv.SaveImage(filename, image)-> None
@ -129,11 +129,11 @@ The function ``imwrite`` saves the image to the specified file. The image format
:ocv:func:`cvtColor` to convert it before saving. Or, use the universal XML I/O functions to save the image to XML or YAML format.
It is possible to store PNG images with an alpha channel using this function. To do this, create an 8-bit (or 16-bit) 4-channel BGRA image, where the alpha channel goes last. Fully transparent pixels should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535. The sample below shows how to create such a BGRA image and store it to a PNG file. It also demonstrates how to set custom compression parameters ::
#include <vector>
#include <stdio.h>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
@ -225,7 +225,7 @@ VideoCapture constructors.
.. ocv:pyfunction:: cv2.VideoCapture(device) -> <VideoCapture object>
.. ocv:cfunction:: CvCapture* cvCaptureFromCAM( int device )
.. ocv:pyoldfunction:: cv.CaptureFromCAM(device) -> CvCapture
.. ocv:pyoldfunction:: cv.CaptureFromCAM(index) -> CvCapture
.. ocv:cfunction:: CvCapture* cvCaptureFromFile( const char* filename )
.. ocv:pyoldfunction:: cv.CaptureFromFile(filename) -> CvCapture
@ -243,14 +243,14 @@ Open video file or a capturing device for video capturing
.. ocv:function:: bool VideoCapture::open(const string& filename)
.. ocv:function:: bool VideoCapture::open(int device)
.. ocv:pyfunction:: cv2.VideoCapture.open(filename) -> successFlag
.. ocv:pyfunction:: cv2.VideoCapture.open(device) -> successFlag
.. ocv:pyfunction:: cv2.VideoCapture.open(filename) -> retval
.. ocv:pyfunction:: cv2.VideoCapture.open(device) -> retval
:param filename: name of the opened video file
:param device: id of the opened video capturing device (i.e. a camera index).
The methods first call :ocv:func:`VideoCapture::release` to close the already opened file or camera.
VideoCapture::isOpened
@ -259,7 +259,7 @@ Returns true if video capturing has been initialized already.
.. ocv:function:: bool VideoCapture::isOpened()
.. ocv:pyfunction:: cv2.VideoCapture.isOpened() -> flag
.. ocv:pyfunction:: cv2.VideoCapture.isOpened() -> retval
If the previous call to ``VideoCapture`` constructor or ``VideoCapture::open`` succeeded, the method returns true.
@ -269,7 +269,7 @@ Closes video file or capturing device.
.. ocv:function:: void VideoCapture::release()
.. ocv:pyfunction:: cv2.VideoCapture.release()
.. ocv:pyfunction:: cv2.VideoCapture.release() -> None
.. ocv:cfunction:: void cvReleaseCapture(CvCapture** capture)
@ -284,7 +284,7 @@ Grabs the next frame from video file or capturing device.
.. ocv:function:: bool VideoCapture::grab()
.. ocv:pyfunction:: cv2.VideoCapture.grab() -> successFlag
.. ocv:pyfunction:: cv2.VideoCapture.grab() -> retval
.. ocv:cfunction:: int cvGrabFrame(CvCapture* capture)
@ -303,11 +303,11 @@ Decodes and returns the grabbed video frame.
.. ocv:function:: bool VideoCapture::retrieve(Mat& image, int channel=0)
.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, channel]]) -> successFlag, image
.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, channel]]) -> retval, image
.. ocv:cfunction:: IplImage* cvRetrieveFrame(CvCapture* capture)
.. ocv:cfunction:: IplImage* cvRetrieveFrame( CvCapture* capture, int streamIdx=0 )
.. ocv:pyoldfunction:: cv.RetrieveFrame(capture) -> iplimage
.. ocv:pyoldfunction:: cv.RetrieveFrame(capture) -> image
The methods/functions decode and return the just grabbed frame. If no frames have been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
@ -322,11 +322,11 @@ Grabs, decodes and returns the next video frame.
.. ocv:function:: bool VideoCapture::read(Mat& image)
.. ocv:pyfunction:: cv2.VideoCapture.read([image]) -> successFlag, image
.. ocv:pyfunction:: cv2.VideoCapture.read([image]) -> retval, image
.. ocv:cfunction:: IplImage* cvQueryFrame(CvCapture* capture)
.. ocv:pyoldfunction:: cv.QueryFrame(capture) -> iplimage
.. ocv:pyoldfunction:: cv.QueryFrame(capture) -> image
The methods/functions combine :ocv:func:`VideoCapture::grab` and :ocv:func:`VideoCapture::retrieve` in one call. This is the most convenient method for reading video files or capturing data from cameras. If no frames have been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
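A typical capture loop built on these methods (camera index 0 is an assumption) ::

    VideoCapture cap(0);
    if( !cap.isOpened() )
        return -1;
    Mat frame;
    for(;;)
    {
        if( !cap.read(frame) )    // grab + retrieve in one call
            break;                // camera disconnected or end of stream
        imshow("frame", frame);
        if( waitKey(30) >= 0 )
            break;
    }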
@ -335,15 +335,15 @@ The methods/functions combine :ocv:func:`VideoCapture::grab` and :ocv:func:`Vide
VideoCapture::get
---------------------
Returns the specified ``VideoCapture`` property
.. ocv:function:: double VideoCapture::get(int propId)
.. ocv:pyfunction:: cv2.VideoCapture.get(propId) -> retval
.. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int propId )
.. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int property_id )
.. ocv:pyoldfunction:: cv.GetCaptureProperty(capture, propId)->double
.. ocv:pyoldfunction:: cv.GetCaptureProperty(capture, property_id) -> float
:param propId: Property identifier. It can be one of the following:
@ -393,13 +393,13 @@ VideoCapture::set
---------------------
Sets a property in the ``VideoCapture``.
.. ocv:function:: bool VideoCapture::set(int propertyId, double value)
.. ocv:function:: bool VideoCapture::set( int propId, double value )
.. ocv:pyfunction:: cv2.VideoCapture.set(propId, value) -> retval
.. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int propId, double value )
.. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int property_id, double value )
.. ocv:pyoldfunction:: cv.SetCaptureProperty(capture, propId, value)->None
.. ocv:pyoldfunction:: cv.SetCaptureProperty(capture, property_id, value) -> retval
:param propId: Property identifier. It can be one of the following:
@ -463,22 +463,22 @@ VideoWriter constructors
.. ocv:pyfunction:: cv2.VideoWriter([filename, fourcc, fps, frameSize[, isColor]]) -> <VideoWriter object>
.. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frameSize, int isColor=1 )
.. ocv:pyoldfunction:: cv.CreateVideoWriter(filename, fourcc, fps, frameSize, isColor) -> CvVideoWriter
.. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 )
.. ocv:pyoldfunction:: cv.CreateVideoWriter(filename, fourcc, fps, frame_size, is_color=true) -> CvVideoWriter
.. ocv:pyfunction:: cv2.VideoWriter.isOpened() -> retval
.. ocv:pyfunction:: cv2.VideoWriter.open(filename, fourcc, fps, frameSize[, isColor]) -> retval
.. ocv:pyfunction:: cv2.VideoWriter.write(image) -> None
:param filename: Name of the output video file.
:param fourcc: 4-character code of codec used to compress the frames. For example, ``CV_FOURCC('P','I','M','1')`` is a MPEG-1 codec, ``CV_FOURCC('M','J','P','G')`` is a motion-jpeg codec etc.
:param fps: Framerate of the created video stream.
:param frameSize: Size of the video frames.
:param isColor: If it is not zero, the encoder will expect and encode color frames, otherwise it will work with grayscale frames (the flag is currently supported on Windows only).
The constructors/functions initialize video writers. On Linux FFMPEG is used to write videos; on Windows FFMPEG or VFW is used; on MacOSX QTKit is used.
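A minimal writing sketch (codec availability depends on the platform; the file name and frame parameters are illustrative, and ``frame`` is an assumed BGR image of exactly 640x480) ::

    VideoWriter writer("out.avi", CV_FOURCC('M','J','P','G'), 30, Size(640, 480));
    if( !writer.isOpened() )
        return -1;
    writer << frame;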
@ -526,9 +526,9 @@ Writes the next video frame
.. ocv:cfunction:: int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
.. ocv:pyoldfunction:: cv.WriteFrame(writer, image)->int
:param writer: Video writer structure (OpenCV 1.x API)
:param image: The written frame
The functions/methods write the specified image to the video file. The image must have the same size as was specified when the video writer was opened.

@ -9,7 +9,7 @@ Creates a trackbar and attaches it to the specified window.
.. ocv:function:: int createTrackbar( const string& trackbarname, const string& winname, int* value, int count, TrackbarCallback onChange=0, void* userdata=0)
.. ocv:cfunction:: int cvCreateTrackbar( const char* trackbarName, const char* windowName, int* value, int count, CvTrackbarCallback onChange )
.. ocv:cfunction:: int cvCreateTrackbar( const char* trackbar_name, const char* window_name, int* value, int count, CvTrackbarCallback on_change=NULL )
.. ocv:pyoldfunction:: cv.CreateTrackbar(trackbarName, windowName, value, count, onChange) -> None
:param trackbarname: Name of the created trackbar.
@ -27,7 +27,7 @@ Creates a trackbar and attaches it to the specified window.
The function ``createTrackbar`` creates a trackbar (a slider or range control) with the specified name and range, assigns a variable ``value`` to be a position synchronized with the trackbar and specifies the callback function ``onChange`` to be called on the trackbar position change. The created trackbar is displayed in the specified window ``winname``.
.. note::
**[Qt Backend Only]** ``winname`` can be empty (or NULL) if the trackbar should be attached to the control panel.
Clicking the label of each trackbar enables editing the trackbar values manually.
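A small sketch wiring a trackbar to a callback (all names are illustrative) ::

    static void onThreshold(int pos, void*)
    {
        printf("threshold = %d\n", pos);
    }

    // in the GUI setup code:
    int value = 128;
    namedWindow("controls");
    createTrackbar("threshold", "controls", &value, 255, onThreshold);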
@ -40,8 +40,8 @@ Returns the trackbar position.
.. ocv:pyfunction:: cv2.getTrackbarPos(trackbarname, winname) -> retval
.. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbarName, const char* windowName )
.. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName)-> None
.. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbar_name, const char* window_name )
.. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName) -> retval
:param trackbarname: Name of the trackbar.
@ -57,12 +57,12 @@ imshow
----------
Displays an image in the specified window.
.. ocv:function:: void imshow( const string& winname, InputArray image )
.. ocv:function:: void imshow( const string& winname, InputArray mat )
.. ocv:pyfunction:: cv2.imshow(winname, image) -> None
.. ocv:pyfunction:: cv2.imshow(winname, mat) -> None
.. ocv:cfunction:: void cvShowImage( const char* winname, const CvArr* image )
.. ocv:pyoldfunction:: cv.ShowImage(winname, image)-> None
.. ocv:cfunction:: void cvShowImage( const char* name, const CvArr* image )
.. ocv:pyoldfunction:: cv.ShowImage(name, image) -> None
:param winname: Name of the window.
@ -81,11 +81,11 @@ namedWindow
---------------
Creates a window.
.. ocv:function:: void namedWindow( const string& winname, int flags )
.. ocv:function:: void namedWindow( const string& winname, int flags=WINDOW_AUTOSIZE )
.. ocv:pyfunction:: cv2.namedWindow(winname[, flags]) -> None
.. ocv:cfunction:: int cvNamedWindow( const char* name, int flags )
.. ocv:cfunction:: int cvNamedWindow( const char* name, int flags=CV_WINDOW_AUTOSIZE )
.. ocv:pyoldfunction:: cv.NamedWindow(name, flags=CV_WINDOW_AUTOSIZE)-> None
:param name: Name of the window in the window caption that may be used as a window identifier.
@ -115,15 +115,15 @@ destroyWindow
-------------
Destroys a window.
.. ocv:function:: void destroyWindow( const string &winname )
.. ocv:function:: void destroyWindow( const string& winname )
.. ocv:pyfunction:: cv2.destroyWindow(winname) -> None
.. ocv:cfunction:: void cvDestroyWindow( const char* name )
.. ocv:pyoldfunction:: cv.DestroyWindow(name)-> None
:param winname: Name of the window to be destroyed.
The function ``destroyWindow`` destroys the window with the given name.
@ -149,9 +149,9 @@ Moves window to the specified position
.. ocv:pyoldfunction:: cv.MoveWindow(name, x, y)-> None
:param name: Window name
:param x: The new x-coordinate of the window
:param y: The new y-coordinate of the window
@ -171,7 +171,7 @@ Resizes window to the specified size
.. note::
* The specified window size is for the image area. Toolbars are not counted.
* Only windows created without CV_WINDOW_AUTOSIZE flag can be resized.
@ -179,13 +179,13 @@ SetMouseCallback
----------------
Sets mouse handler for the specified window
.. ocv:cfunction:: void cvSetMouseCallback( const char* name, CvMouseCallback onMouse, void* param=NULL )
.. ocv:pyoldfunction:: cv.SetMouseCallback(name, onMouse, param) -> None
.. ocv:cfunction:: void cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param=NULL )
.. ocv:pyoldfunction:: cv.SetMouseCallback(windowName, onMouse, param=None) -> None
:param window_name: Window name
:param on_mouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param name: Window name
:param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param param: The optional parameter passed to the callback.
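A callback sketch for the C++ counterpart ``setMouseCallback``, which has the same semantics as the C function documented here (the window name is illustrative) ::

    static void onMouse(int event, int x, int y, int, void*)
    {
        if( event == CV_EVENT_LBUTTONDOWN )
            printf("click at (%d, %d)\n", x, y);
    }

    // in the GUI setup code:
    setMouseCallback("view", onMouse, 0);    // "view" must be an existing window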
@ -197,7 +197,7 @@ Sets the trackbar position.
.. ocv:pyfunction:: cv2.setTrackbarPos(trackbarname, winname, pos) -> None
.. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbarName, const char* windowName, int pos )
.. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos )
.. ocv:pyoldfunction:: cv.SetTrackbarPos(trackbarName, windowName, pos)-> None
:param trackbarname: Name of the trackbar.
@ -209,7 +209,7 @@ Sets the trackbar position.
The function sets the position of the specified trackbar in the specified window.
.. note::
**[Qt Backend Only]** ``winname`` can be empty (or NULL) if the trackbar is attached to the control panel.
waitKey
@ -218,7 +218,7 @@ Waits for a pressed key.
.. ocv:function:: int waitKey(int delay=0)
.. ocv:pyfunction:: cv2.waitKey([, delay]) -> retval
.. ocv:pyfunction:: cv2.waitKey([delay]) -> retval
.. ocv:cfunction:: int cvWaitKey( int delay=0 )
.. ocv:pyoldfunction:: cv.WaitKey(delay=0)-> int
@ -233,5 +233,5 @@ The function ``waitKey`` waits for a key event infinitely (when
This function is the only method in HighGUI that can fetch and handle events, so it needs to be called periodically for normal event processing unless HighGUI is used within an environment that takes care of event processing.
.. note::
The function only works if there is at least one HighGUI window created and the window is active. If there are several HighGUI windows, any of them can be active.

@ -63,9 +63,9 @@
//Written by Theodore Watson - theo.watson@gmail.com //
//Do whatever you want with this code but if you find //
//a bug or make an improvement I would love to know! //
// //
//Warning This code is experimental //
//use at your own risk :) //
//////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
/* Shoutouts
@ -164,7 +164,7 @@ interface IMPEG2PIDMap : public IUnknown
/*
MEDIASUBTYPE_I420 : TGUID ='{30323449-0000-0010-8000-00AA00389B71}';
MEDIASUBTYPE_Y800 : TGUID ='{30303859-0000-0010-8000-00AA00389B71}';
MEDIASUBTYPE_Y8 : TGUID ='{20203859-0000-0010-8000-00AA00389B71}';
MEDIASUBTYPE_Y160 : TGUID ='{30363159-0000-0010-8000-00AA00389B71}';
MEDIASUBTYPE_YV16 : TGUID ='{32315659-0000-0010-8000-00AA00389B71}';
MEDIASUBTYPE_Y422 : TGUID ='{32323459-0000-0010-8000-00AA00389B71}';
@ -285,13 +285,13 @@ interface ISampleGrabber : public IUnknown
//setup the first device - there are a number of options:
VI.setupDevice(device1); //setup the first device with the default settings
//VI.setupDevice(device1, VI_COMPOSITE); //or setup device with specific connection type
//VI.setupDevice(device1, 320, 240); //or setup device with specified video size
//VI.setupDevice(device1, 320, 240, VI_COMPOSITE); //or setup device with video size and connection type

//VI.setFormat(device1, VI_NTSC_M); //if your card doesn't remember what format it should be
//call this with the appropriate format listed above
//NOTE: must be called after setupDevice!
//optionally setup a second (or third, fourth ...) device - same options as above
VI.setupDevice(device2);
@ -299,8 +299,8 @@ interface ISampleGrabber : public IUnknown
//As requested width and height cannot always be accommodated
//make sure to check the size once the device is setup
int width = VI.getWidth(device1);
int height = VI.getHeight(device1);
int size = VI.getSize(device1);
unsigned char * yourBuffer1 = new unsigned char[size];
@ -308,7 +308,7 @@ interface ISampleGrabber : public IUnknown
//to get the data from the device first check if the data is new
if(VI.isFrameNew(device1)){
VI.getPixels(device1, yourBuffer1, false, false); //fills pixels as a BGR (for openCV) unsigned char array - no flipping
VI.getPixels(device1, yourBuffer2, true, true); //fills pixels as a RGB (for openGL) unsigned char array - flipping!
}
@ -338,7 +338,7 @@ static bool verbose = true;
//STUFF YOU DON'T CHANGE
//videoInput defines
#define VI_VERSION 0.1995
#define VI_MAX_CAMERAS 20
#define VI_NUM_TYPES 20 //MGB
#define VI_NUM_FORMATS 18 //DON'T TOUCH
@ -348,10 +348,10 @@ static bool verbose = true;
#define VI_S_VIDEO 1
#define VI_TUNER 2
#define VI_USB 3
#define VI_1394 4
//defines for formats
#define VI_NTSC_M 0
#define VI_PAL_B 1
#define VI_PAL_D 2
#define VI_PAL_G 3
@ -359,16 +359,16 @@ static bool verbose = true;
#define VI_PAL_I 5
#define VI_PAL_M 6
#define VI_PAL_N 7
#define VI_PAL_NC 8
#define VI_SECAM_B 9
#define VI_SECAM_D 10
#define VI_SECAM_G 11
#define VI_SECAM_H 12
#define VI_SECAM_K 13
#define VI_SECAM_K1 14
#define VI_SECAM_L 15
#define VI_NTSC_M_J 16
#define VI_NTSC_433 17
//allows us to directShow classes here with the includes in the cpp
@ -731,48 +731,48 @@ public:
videoDevice::videoDevice(){
    pCaptureGraph     = NULL;   // Capture graph builder object
    pGraph            = NULL;   // Graph builder object
    pControl          = NULL;   // Media control object
    pVideoInputFilter = NULL;   // Video Capture filter
    pGrabber          = NULL;   // Grabs frame
    pDestFilter       = NULL;   // Null Renderer Filter
    pGrabberF         = NULL;   // Grabber Filter
    pMediaEvent       = NULL;
    streamConf        = NULL;
    pAmMediaType      = NULL;

    //This is our callback class that processes the frame.
    sgCallback           = new SampleGrabberCallback();
    sgCallback->newFrame = false;

    //Default values for capture type
    videoType  = MEDIASUBTYPE_RGB24;
    connection = PhysConn_Video_Composite;
    storeConn  = 0;

    videoSize = 0;
    width     = 0;
    height    = 0;

    tryWidth            = 640;
    tryHeight           = 480;
    tryVideoType        = MEDIASUBTYPE_RGB24;
    nFramesForReconnect = 10000;
    nFramesRunning      = 0;
    myID                = -1;

    tryDiffSize    = true;
    useCrossbar    = false;
    readyToCapture = false;
    sizeSet        = false;
    setupStarted   = false;
    specificFormat = false;
    autoReconnect  = false;
    requestedFrameTime = -1;

    memset(wDeviceName, 0, sizeof(WCHAR) * 255);
    memset(nDeviceName, 0, sizeof(char) * 255);
}
@ -808,39 +808,39 @@ void videoDevice::setSize(int w, int h){
// ----------------------------------------------------------------------
void videoDevice::NukeDownstream(IBaseFilter *pBF){
    IPin *pP, *pTo;
    ULONG u;
    IEnumPins *pins = NULL;
    PIN_INFO pininfo;
    HRESULT hr = pBF->EnumPins(&pins);
    pins->Reset();
    while (hr == NOERROR)
    {
        hr = pins->Next(1, &pP, &u);
        if (hr == S_OK && pP)
        {
            pP->ConnectedTo(&pTo);
            if (pTo)
            {
                hr = pTo->QueryPinInfo(&pininfo);
                if (hr == NOERROR)
                {
                    if (pininfo.dir == PINDIR_INPUT)
                    {
                        NukeDownstream(pininfo.pFilter);
                        pGraph->Disconnect(pTo);
                        pGraph->Disconnect(pP);
                        pGraph->RemoveFilter(pininfo.pFilter);
                    }
                    pininfo.pFilter->Release();
                    pininfo.pFilter = NULL;
                }
                pTo->Release();
            }
            pP->Release();
        }
    }
    if (pins) pins->Release();
}
@ -949,27 +949,27 @@ videoDevice::~videoDevice(){
if( (pVideoInputFilter) )NukeDownstream(pVideoInputFilter);
//Release and zero pointers to our filters etc
if( (pDestFilter) ){ if(verbose)printf("SETUP: freeing Renderer \n");
(pDestFilter)->Release();
(pDestFilter) = 0;
}
if( (pVideoInputFilter) ){ if(verbose)printf("SETUP: freeing Capture Source \n");
(pVideoInputFilter)->Release();
(pVideoInputFilter) = 0;
}
if( (pGrabberF) ){ if(verbose)printf("SETUP: freeing Grabber Filter \n");
(pGrabberF)->Release();
(pGrabberF) = 0;
}
if( (pGrabber) ){ if(verbose)printf("SETUP: freeing Grabber \n");
(pGrabber)->Release();
(pGrabber) = 0;
}
if( (pControl) ){ if(verbose)printf("SETUP: freeing Control \n");
(pControl)->Release();
(pControl) = 0;
}
if( (pMediaEvent) ){ if(verbose)printf("SETUP: freeing Media Event \n");
(pMediaEvent)->Release();
(pMediaEvent) = 0;
}
@ -978,7 +978,7 @@ videoDevice::~videoDevice(){
(streamConf) = 0;
}
if( (pAmMediaType) ){ if(verbose)printf("SETUP: freeing Media Type \n");
MyDeleteMediaType(pAmMediaType);
}
@ -992,7 +992,7 @@ videoDevice::~videoDevice(){
if( (pGraph) )destroyGraph();
//Release and zero our capture graph and our main graph
if( (pCaptureGraph) ){ if(verbose)printf("SETUP: freeing Capture Graph \n");
(pCaptureGraph)->Release();
(pCaptureGraph) = 0;
}
@ -1031,7 +1031,7 @@ videoInput::videoInput(){
devicesFound = 0;
callbackSetCount = 0;
bCallback = true;
//setup a max no of device objects
for(int i=0; i<VI_MAX_CAMERAS; i++) VDList[i] = new videoDevice();
@ -1057,9 +1057,9 @@ videoInput::videoInput(){
mediaSubtypes[8] = MEDIASUBTYPE_UYVY;
mediaSubtypes[9] = MEDIASUBTYPE_YV12;
mediaSubtypes[10] = MEDIASUBTYPE_YVU9;
mediaSubtypes[11] = MEDIASUBTYPE_Y411;
mediaSubtypes[12] = MEDIASUBTYPE_Y41P;
mediaSubtypes[13] = MEDIASUBTYPE_Y211;
mediaSubtypes[14] = MEDIASUBTYPE_AYUV;
mediaSubtypes[15] = MEDIASUBTYPE_MJPG; // MGB
@ -1067,31 +1067,29 @@ videoInput::videoInput(){
mediaSubtypes[16] = MEDIASUBTYPE_Y800;
mediaSubtypes[17] = MEDIASUBTYPE_Y8;
mediaSubtypes[18] = MEDIASUBTYPE_GREY;
mediaSubtypes[19] = MEDIASUBTYPE_I420;
//The video formats we support
formatTypes[VI_NTSC_M] = AnalogVideo_NTSC_M;
formatTypes[VI_NTSC_M_J] = AnalogVideo_NTSC_M_J;
formatTypes[VI_NTSC_433] = AnalogVideo_NTSC_433;
formatTypes[VI_PAL_B]   = AnalogVideo_PAL_B;
formatTypes[VI_PAL_D]   = AnalogVideo_PAL_D;
formatTypes[VI_PAL_G]   = AnalogVideo_PAL_G;
formatTypes[VI_PAL_H]   = AnalogVideo_PAL_H;
formatTypes[VI_PAL_I]   = AnalogVideo_PAL_I;
formatTypes[VI_PAL_M]   = AnalogVideo_PAL_M;
formatTypes[VI_PAL_N]   = AnalogVideo_PAL_N;
formatTypes[VI_PAL_NC]  = AnalogVideo_PAL_N_COMBO;
formatTypes[VI_SECAM_B] = AnalogVideo_SECAM_B;
formatTypes[VI_SECAM_D] = AnalogVideo_SECAM_D;
formatTypes[VI_SECAM_G] = AnalogVideo_SECAM_G;
formatTypes[VI_SECAM_H] = AnalogVideo_SECAM_H;
formatTypes[VI_SECAM_K] = AnalogVideo_SECAM_K;
formatTypes[VI_SECAM_K1] = AnalogVideo_SECAM_K1;
formatTypes[VI_SECAM_L] = AnalogVideo_SECAM_L;
}
@ -2029,30 +2027,30 @@ void videoInput::setAttemptCaptureSize(int id, int w, int h,GUID mediaType){
void videoInput::setPhyCon(int id, int conn){
    switch(conn){
        case 0:
            VDList[id]->connection = PhysConn_Video_Composite;
            break;
        case 1:
            VDList[id]->connection = PhysConn_Video_SVideo;
            break;
        case 2:
            VDList[id]->connection = PhysConn_Video_Tuner;
            break;
        case 3:
            VDList[id]->connection = PhysConn_Video_USB;
            break;
        case 4:
            VDList[id]->connection = PhysConn_Video_1394;
            break;
        default:
            return; //if it is not these types don't set crossbar
    }

    VDList[id]->storeConn = conn;
    VDList[id]->useCrossbar = true;
}
@ -2154,26 +2152,26 @@ void videoInput::processPixels(unsigned char * src, unsigned char * dst, int wid
void videoInput::getMediaSubtypeAsString(GUID type, char * typeAsString){
char tmpStr[8];
    if( type == MEDIASUBTYPE_RGB24)     sprintf(tmpStr, "RGB24");
    else if(type == MEDIASUBTYPE_RGB32) sprintf(tmpStr, "RGB32");
    else if(type == MEDIASUBTYPE_RGB555)sprintf(tmpStr, "RGB555");
    else if(type == MEDIASUBTYPE_RGB565)sprintf(tmpStr, "RGB565");
    else if(type == MEDIASUBTYPE_YUY2)  sprintf(tmpStr, "YUY2");
    else if(type == MEDIASUBTYPE_YVYU)  sprintf(tmpStr, "YVYU");
    else if(type == MEDIASUBTYPE_YUYV)  sprintf(tmpStr, "YUYV");
    else if(type == MEDIASUBTYPE_IYUV)  sprintf(tmpStr, "IYUV");
    else if(type == MEDIASUBTYPE_UYVY)  sprintf(tmpStr, "UYVY");
    else if(type == MEDIASUBTYPE_YV12)  sprintf(tmpStr, "YV12");
    else if(type == MEDIASUBTYPE_YVU9)  sprintf(tmpStr, "YVU9");
    else if(type == MEDIASUBTYPE_Y411)  sprintf(tmpStr, "Y411");
    else if(type == MEDIASUBTYPE_Y41P)  sprintf(tmpStr, "Y41P");
    else if(type == MEDIASUBTYPE_Y211)  sprintf(tmpStr, "Y211");
    else if(type == MEDIASUBTYPE_AYUV)  sprintf(tmpStr, "AYUV");
    else if(type == MEDIASUBTYPE_MJPG)  sprintf(tmpStr, "MJPG");
    else if(type == MEDIASUBTYPE_Y800)  sprintf(tmpStr, "Y800");
    else if(type == MEDIASUBTYPE_Y8)    sprintf(tmpStr, "Y8");
    else if(type == MEDIASUBTYPE_GREY)  sprintf(tmpStr, "GREY");
    else if(type == MEDIASUBTYPE_I420)  sprintf(tmpStr, "I420");
else sprintf(tmpStr, "OTHER");
memcpy(typeAsString, tmpStr, sizeof(char)*8);
@ -2245,7 +2243,7 @@ int videoInput::getVideoPropertyFromCV(int cv_property){
return VideoProcAmp_BacklightCompensation;
case CV_CAP_PROP_GAIN:
return VideoProcAmp_Gain;
}
return -1;
}
@ -2254,7 +2252,6 @@ int videoInput::getCameraPropertyFromCV(int cv_property){
// see CameraControlProperty in strmif.h
switch (cv_property) {
case CV_CAP_PROP_PAN:
return CameraControl_Pan;
@ -3075,7 +3072,7 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter *
}
Crossbar->Route(pOIndex,pIndex);
}else{
if(verbose)printf("SETUP: Didn't find specified Physical Connection type. Using Defualt. \n");
if(verbose) printf("SETUP: Didn't find specified Physical Connection type. Using Defualt. \n");
}
//we only free the crossbar when we close or restart the device
@ -3087,7 +3084,7 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter *
if(pXBar1)pXBar1 = NULL;
}else{
if(verbose)printf("SETUP: You are a webcam or snazzy firewire cam! No Crossbar needed\n");
if(verbose) printf("SETUP: You are a webcam or snazzy firewire cam! No Crossbar needed\n");
return hr;
}
@ -3110,8 +3107,6 @@ public:
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_DSHOW; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
void init();
@ -3217,60 +3212,29 @@ double CvCaptureCAM_DShow::getProperty( int property_id )
switch( property_id )
{
case CV_CAP_PROP_BRIGHTNESS:
if ( VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BRIGHTNESS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_CONTRAST:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_CONTRAST),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_HUE:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_HUE),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_SATURATION:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SATURATION),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_SHARPNESS:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SHARPNESS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_GAMMA:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAMMA),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_MONOCROME:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_MONOCROME),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_WHITE_BALANCE_BLUE_U),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_BACKLIGHT:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BACKLIGHT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_BACKLIGHT:
case CV_CAP_PROP_GAIN:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAIN),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
}
// camera properties
switch( property_id )
{
case CV_CAP_PROP_BACKLIGHT:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_BACKLIGHT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_PAN:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_PAN),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_TILT:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_TILT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_ROLL:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_ROLL),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_ZOOM:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_BACKLIGHT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_EXPOSURE:
case CV_CAP_PROP_IRIS:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_IRIS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_FOCUS:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_FOCUS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
}
@ -3282,36 +3246,36 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
{
// image capture properties
bool handled = false;
switch( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
width = cvRound(value);
handled = true;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
height = cvRound(value);
handled = true;
break;
case CV_CAP_PROP_FOURCC:
fourcc = cvRound(value);
if ( fourcc < 0 ) {
// following cvCreateVideo usage will pop up a capture pin dialog here if fourcc=-1
// TODO - how to create a capture pin dialog
}
handled = true;
break;
case CV_CAP_PROP_FPS:
int fps = cvRound(value);
if (fps != VI.getFPS(0))
{
VI.stopDevice(index);
VI.setIdealFramerate(index,fps);
VI.setupDevice(index);
}
break;
}
@ -3329,7 +3293,7 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
width = height = fourcc = -1;
return VI.isDeviceSetup(index);
}
return true;
}
// show video/camera filter dialog
@ -3341,67 +3305,32 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
//video Filter properties
switch( property_id )
{
case CV_CAP_PROP_BRIGHTNESS:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BRIGHTNESS),(long)value);
case CV_CAP_PROP_CONTRAST:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_CONTRAST),(long)value);
case CV_CAP_PROP_HUE:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_HUE),(long)value);
case CV_CAP_PROP_SATURATION:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SATURATION),(long)value);
case CV_CAP_PROP_SHARPNESS:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SHARPNESS),(long)value);
case CV_CAP_PROP_GAMMA:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAMMA),(long)value);
case CV_CAP_PROP_MONOCROME:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_MONOCROME),(long)value);
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_WHITE_BALANCE_BLUE_U),(long)value);
case CV_CAP_PROP_BACKLIGHT:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BACKLIGHT),(long)value);
case CV_CAP_PROP_GAIN:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAIN),(long)value);
default:
;
case CV_CAP_PROP_BRIGHTNESS:
case CV_CAP_PROP_CONTRAST:
case CV_CAP_PROP_HUE:
case CV_CAP_PROP_SATURATION:
case CV_CAP_PROP_SHARPNESS:
case CV_CAP_PROP_GAMMA:
case CV_CAP_PROP_MONOCROME:
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CV_CAP_PROP_BACKLIGHT:
case CV_CAP_PROP_GAIN:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),(long)value);
}
//camera properties
switch( property_id )
{
case CV_CAP_PROP_PAN:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_PAN),(long)value);
case CV_CAP_PROP_TILT:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_TILT),(long)value);
case CV_CAP_PROP_ROLL:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_ROLL),(long)value);
case CV_CAP_PROP_ZOOM:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_ZOOM),(long)value);
case CV_CAP_PROP_EXPOSURE:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_EXPOSURE),(long)value);
case CV_CAP_PROP_IRIS:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_IRIS),(long)value);
case CV_CAP_PROP_FOCUS:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_FOCUS),(long)value);
case CV_CAP_PROP_PAN:
case CV_CAP_PROP_TILT:
case CV_CAP_PROP_ROLL:
case CV_CAP_PROP_ZOOM:
case CV_CAP_PROP_EXPOSURE:
case CV_CAP_PROP_IRIS:
case CV_CAP_PROP_FOCUS:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),(long)value);
}
return false;
}
@ -3410,8 +3339,16 @@ CvCapture* cvCreateCameraCapture_DShow( int index )
{
CvCaptureCAM_DShow* capture = new CvCaptureCAM_DShow;
if( capture->open( index ))
return capture;
try
{
if( capture->open( index ))
return capture;
}
catch(...)
{
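// free the partially constructed capture object, then propagate the exception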
delete capture;
throw;
}
delete capture;
return 0;

@ -521,7 +521,7 @@ void CV_SpecificVideoTest::run(int)
TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
#endif
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && !defined(__APPLE__)
TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); }
TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
#endif

@ -13,8 +13,9 @@ Finds edges in an image using the [Canny86]_ algorithm.
.. ocv:pyfunction:: cv2.Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) -> edges
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int apertureSize=3 )
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, apertureSize=3)-> None
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, aperture_size=3) -> None
:param image: Single-channel 8-bit input image.
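A minimal C++ usage sketch (the file name and threshold values are illustrative, not part of the reference) ::

    cv::Mat gray = cv::imread("input.png", 0); // load as single-channel 8-bit
    cv::Mat edges;
    // hysteresis thresholds 50/150, default 3x3 Sobel aperture
    cv::Canny(gray, edges, 50, 150, 3);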
@ -37,20 +38,21 @@ cornerEigenValsAndVecs
----------------------
Calculates eigenvalues and eigenvectors of image blocks for corner detection.
.. ocv:function:: void cornerEigenValsAndVecs( InputArray src, OutputArray dst, int blockSize, int apertureSize, int borderType=BORDER_DEFAULT )
.. ocv:function:: void cornerEigenValsAndVecs( InputArray src, OutputArray dst, int blockSize, int ksize, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.cornerEigenValsAndVecs(src, blockSize, ksize[, dst[, borderType]]) -> dst
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int blockSize, int apertureSize=3 )
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, apertureSize=3)-> None
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int block_size, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, aperture_size=3) -> None
:param src: Input single-channel 8-bit or floating-point image.
:param dst: Image to store the results. It has the same size as ``src`` and the type ``CV_32FC(6)`` .
:param blockSize: Neighborhood size (see details below).
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
@ -72,7 +74,7 @@ After that, it finds eigenvectors and eigenvalues of
* :math:`\lambda_1, \lambda_2` are the non-sorted eigenvalues of :math:`M`
* :math:`x_1, y_1` are the eigenvectors corresponding to :math:`\lambda_1`
* :math:`x_2, y_2` are the eigenvectors corresponding to :math:`\lambda_2`
The output of the function can be used for robust edge or corner detection.
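As a hedged sketch of reading the packed 6-channel output (the input name and the inspected pixel are illustrative) ::

    cv::Mat src = cv::imread("input.png", 0), dst;
    cv::cornerEigenValsAndVecs(src, dst, 3, 3); // blockSize=3, Sobel aperture=3
    // each dst element packs (lambda1, lambda2, x1, y1, x2, y2)
    cv::Vec6f v = dst.at<cv::Vec6f>(10, 10);
    float lambda1 = v[0], lambda2 = v[1];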
@ -89,20 +91,21 @@ cornerHarris
------------
Harris edge detector.
.. ocv:function:: void cornerHarris( InputArray src, OutputArray dst, int blockSize, int apertureSize, double k, int borderType=BORDER_DEFAULT )
.. ocv:function:: void cornerHarris( InputArray src, OutputArray dst, int blockSize, int ksize, double k, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.cornerHarris(src, blockSize, ksize, k[, dst[, borderType]]) -> dst
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harrisDst, int blockSize, int apertureSize=3, double k=0.04 )
.. ocv:pyoldfunction:: cv.CornerHarris(image, harrisDst, blockSize, apertureSize=3, k=0.04)-> None
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harris_responce, int block_size, int aperture_size=3, double k=0.04 )
.. ocv:pyoldfunction:: cv.CornerHarris(image, harris_dst, blockSize, aperture_size=3, k=0.04) -> None
:param src: Input single-channel 8-bit or floating-point image.
:param dst: Image to store the Harris detector responses. It has the type ``CV_32FC1`` and the same size as ``src`` .
:param blockSize: Neighborhood size (see the details on :ocv:func:`cornerEigenValsAndVecs` ).
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param k: Harris detector free parameter. See the formula below.
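For instance, a minimal sketch (parameter values are illustrative) ::

    cv::Mat gray = cv::imread("input.png", 0), response;
    cv::cornerHarris(gray, response, 2, 3, 0.04); // blockSize=2, ksize=3, k=0.04
    // corners are strong local maxima of the response; a crude global threshold:
    cv::Mat cornerMask = response > 0.01;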
@ -128,21 +131,21 @@ cornerMinEigenVal
-----------------
Calculates the minimal eigenvalue of gradient matrices for corner detection.
.. ocv:function:: void cornerMinEigenVal( InputArray src, OutputArray dst, int blockSize, int apertureSize=3, int borderType=BORDER_DEFAULT )
.. ocv:function:: void cornerMinEigenVal( InputArray src, OutputArray dst, int blockSize, int ksize=3, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.cornerMinEigenVal(src, blockSize[, dst[, ksize[, borderType]]]) -> dst
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int blockSize, int apertureSize=3 )
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int block_size, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, apertureSize=3)-> None
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, aperture_size=3) -> None
:param src: Input single-channel 8-bit or floating-point image.
:param dst: Image to store the minimal eigenvalues. It has the type ``CV_32FC1`` and the same size as ``src`` .
:param blockSize: Neighborhood size (see the details on :ocv:func:`cornerEigenValsAndVecs` ).
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
@ -161,9 +164,9 @@ Refines the corner locations.
.. ocv:pyfunction:: cv2.cornerSubPix(image, corners, winSize, zeroZone, criteria) -> None
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize winSize, CvSize zeroZone, CvTermCriteria criteria )
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria )
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, winSize, zeroZone, criteria)-> corners
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, win, zero_zone, criteria) -> corners
:param image: Input image.
@ -223,15 +226,15 @@ Determines strong corners on an image.
.. ocv:pyfunction:: cv2.goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) -> corners
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eigImage, CvArr* tempImage, CvPoint2D32f* corners, int* cornerCount, double qualityLevel, double minDistance, const CvArr* mask=NULL, int blockSize=3, int useHarris=0, double k=0.04 )
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, CvArr* temp_image, CvPoint2D32f* corners, int* corner_count, double quality_level, double min_distance, const CvArr* mask=NULL, int block_size=3, int use_harris=0, double k=0.04 )
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04)-> corners
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04) -> cornerCount
:param image: Input 8-bit or floating-point 32-bit, single-channel image.
:param eigImage: The parameter is ignored.
:param tempImage: The parameter is ignored.
:param eig_image: The parameter is ignored.
:param temp_image: The parameter is ignored.
:param corners: Output vector of detected corners.
@ -244,9 +247,9 @@ Determines strong corners on an image.
:param mask: Optional region of interest. If the image is not empty (it needs to have the type ``CV_8UC1`` and the same size as ``image`` ), it specifies the region in which the corners are detected.
:param blockSize: Size of an average block for computing a derivative covariation matrix over each pixel neighborhood. See :ocv:func:`cornerEigenValsAndVecs` .
:param useHarrisDetector: Parameter indicating whether to use a Harris detector (see :ocv:func:`cornerHarris`) or :ocv:func:`cornerMinEigenVal`.
:param k: Free parameter of the Harris detector.
The function finds the most prominent corners in the image or in the specified image region, as described in [Shi94]_:
@ -255,7 +258,7 @@ The function finds the most prominent corners in the image or in the specified i
Function calculates the corner quality measure at every source image pixel using the
:ocv:func:`cornerMinEigenVal` or
:ocv:func:`cornerHarris` .
#.
Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are retained).
@ -268,16 +271,16 @@ The function finds the most prominent corners in the image or in the specified i
#.
Function throws away each corner for which there is a stronger corner at a distance less than ``minDistance``.
The function can be used to initialize a point-based tracker of an object.
.. note:: If the function is called with different values ``A`` and ``B`` of the parameter ``qualityLevel`` , and ``A`` > ``B``, the vector of returned corners with ``qualityLevel=A`` will be the prefix of the output vector with ``qualityLevel=B`` .
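A typical detection pipeline, sketched with illustrative parameter values, finds corners and then refines them to sub-pixel accuracy with :ocv:func:`cornerSubPix` ::

    cv::Mat gray = cv::imread("input.png", 0);
    std::vector<cv::Point2f> corners;
    // at most 100 corners, quality ratio 0.01, at least 10 px apart
    cv::goodFeaturesToTrack(gray, corners, 100, 0.01, 10);
    cv::cornerSubPix(gray, corners, cv::Size(5, 5), cv::Size(-1, -1),
                     cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01));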
.. seealso::
:ocv:func:`cornerMinEigenVal`,
:ocv:func:`cornerHarris`,
:ocv:func:`calcOpticalFlowPyrLK`,
:ocv:func:`estimateRigidTransform`,
@ -287,16 +290,16 @@ Finds circles in a grayscale image using the Hough transform.
.. ocv:function:: void HoughCircles( InputArray image, OutputArray circles, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
.. ocv:cfunction:: CvSeq* cvHoughCircles( CvArr* image, CvMemStorage* circleStorage, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
.. ocv:cfunction:: CvSeq* cvHoughCircles( CvArr* image, void* circle_storage, int method, double dp, double min_dist, double param1=100, double param2=100, int min_radius=0, int max_radius=0 )
.. ocv:pyfunction:: cv2.HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles
:param image: 8-bit, single-channel, grayscale input image.
:param circles: Output vector of found circles. Each vector is encoded as a 3-element floating-point vector :math:`(x, y, radius)` .
:param circleStorage: In C function this is a memory storage that will contain the output sequence of found circles.
:param circle_storage: In C function this is a memory storage that will contain the output sequence of found circles.
:param method: Detection method to use. Currently, the only implemented method is ``CV_HOUGH_GRADIENT`` , which is basically *21HT* , described in [Yuen90]_.
:param dp: Inverse ratio of the accumulator resolution to the image resolution. For example, if ``dp=1`` , the accumulator has the same resolution as the input image. If ``dp=2`` , the accumulator has half the width and height of the input image.
@ -311,7 +314,7 @@ Finds circles in a grayscale image using the Hough transform.
:param maxRadius: Maximum circle radius.
The function finds circles in a grayscale image using a modification of the Hough transform.
Example: ::
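    // an illustrative sketch (the original sample is abbreviated here);
    // the input file name and detector parameters are placeholders
    cv::Mat img = cv::imread("coins.png", 1), gray;
    cv::cvtColor(img, gray, CV_BGR2GRAY);
    // smooth it, otherwise a lot of false circles may be detected
    cv::GaussianBlur(gray, gray, cv::Size(9, 9), 2, 2);
    std::vector<cv::Vec3f> circles;
    cv::HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, gray.rows/4, 200, 100);
    for (size_t i = 0; i < circles.size(); i++)
    {
        cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        cv::circle(img, center, 3, cv::Scalar(0, 255, 0), -1);     // center
        cv::circle(img, center, radius, cv::Scalar(0, 0, 255), 3); // outline
    }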
@ -362,7 +365,7 @@ Finds lines in a binary image using the standard Hough transform.
.. ocv:pyfunction:: cv2.HoughLines(image, rho, theta, threshold[, lines[, srn[, stn]]]) -> lines
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* line_storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
.. ocv:pyoldfunction:: cv.HoughLines2(image, storage, method, rho, theta, threshold, param1=0, param2=0)-> lines
@ -379,31 +382,31 @@ Finds lines in a binary image using the standard Hough transform.
:param srn: For the multi-scale Hough transform, it is a divisor for the distance resolution ``rho`` . The coarse accumulator distance resolution is ``rho`` and the accurate accumulator resolution is ``rho/srn`` . If both ``srn=0`` and ``stn=0`` , the classical Hough transform is used. Otherwise, both these parameters should be positive.
:param stn: For the multi-scale Hough transform, it is a divisor for the distance resolution ``theta``.
:param method: One of the following Hough transform variants:
* **CV_HOUGH_STANDARD** classical or standard Hough transform. Every line is represented by two floating-point numbers :math:`(\rho, \theta)` , where :math:`\rho` is the distance between the point (0,0) and the line, and :math:`\theta` is the angle between the x-axis and the normal to the line. Thus, the matrix must be (the created sequence will be) of the ``CV_32FC2`` type
* **CV_HOUGH_PROBABILISTIC** probabilistic Hough transform (more efficient if the picture contains a few long linear segments). It returns line segments rather than whole lines. Each segment is represented by its starting and ending points, and the matrix must be (the created sequence will be) of the ``CV_32SC4`` type.
* **CV_HOUGH_MULTI_SCALE** multi-scale variant of the classical Hough transform. The lines are encoded the same way as ``CV_HOUGH_STANDARD``.
:param param1: First method-dependent parameter:
* For the classical Hough transform, it is not used (0).
* For the probabilistic Hough transform, it is the minimum line length.
* For the multi-scale Hough transform, it is ``srn``.
:param param2: Second method-dependent parameter:
* For the classical Hough transform, it is not used (0).
* For the probabilistic Hough transform, it is the maximum gap between line segments lying on the same line to treat them as a single line segment (that is, to join them).
* For the multi-scale Hough transform, it is ``stn``.
The function implements the standard or multi-scale Hough transform algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm for a good explanation of the Hough transform.
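Each returned :math:`(\rho, \theta)` pair can be converted back to two points on the line for drawing; a hedged sketch (``edges``, ``img``, and the drawing length 1000 are illustrative) ::

    std::vector<cv::Vec2f> lines;
    cv::HoughLines(edges, lines, 1, CV_PI/180, 100); // 1 px and 1 degree resolution
    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0], theta = lines[i][1];
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho; // point of the line closest to the origin
        cv::Point p1(cvRound(x0 + 1000*(-b)), cvRound(y0 + 1000*a));
        cv::Point p2(cvRound(x0 - 1000*(-b)), cvRound(y0 - 1000*a));
        cv::line(img, p1, p2, cv::Scalar(0, 0, 255));
    }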
@ -501,21 +504,22 @@ preCornerDetect
---------------
Calculates a feature map for corner detection.
.. ocv:function:: void preCornerDetect( InputArray src, OutputArray dst, int apertureSize, int borderType=BORDER_DEFAULT )
.. ocv:function:: void preCornerDetect( InputArray src, OutputArray dst, int ksize, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.preCornerDetect(src, ksize[, dst[, borderType]]) -> dst
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int apertureSize=3 )
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.PreCornerDetect(image, corners, apertureSize=3)-> None
:param src: Source single-channel 8-bit or floating-point image.
:param dst: Output image that has the type ``CV_32F`` and the same size as ``src`` .
:param apertureSize: Aperture size of the :ocv:func:`Sobel` .
:param ksize: Aperture size of the :ocv:func:`Sobel` .
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
The function calculates the complex spatial derivative-based function of the source image
.. math::

    \texttt{dst} = (D_x \texttt{src} )^2 \cdot D_{yy} \texttt{src} + (D_y \texttt{src} )^2 \cdot D_{xx} \texttt{src} - 2 D_x \texttt{src} \cdot D_y \texttt{src} \cdot D_{xy} \texttt{src}

where :math:`D_x`, :math:`D_y` are the first image derivatives, :math:`D_{xx}`, :math:`D_{yy}` are the second image derivatives, and :math:`D_{xy}` is the mixed derivative.

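The corners can then be found as local maxima of the map; a sketch (``image`` stands for any supported input) ::

    cv::Mat corners, dilated;
    cv::preCornerDetect(image, corners, 3);
    // non-maximum suppression: a pixel is kept only if it equals the
    // maximum of its 3x3 neighborhood
    cv::dilate(corners, dilated, cv::Mat());
    cv::Mat cornerMask = (corners == dilated);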
@ -14,11 +14,11 @@ OpenCV enables you to specify the extrapolation method. For details, see the fun
/*
Various border types, image boundaries are denoted with '|'
* BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
* BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
*/
@ -390,9 +390,9 @@ Applies the bilateral filter to an image.
:param src: Source 8-bit or floating-point, 1-channel or 3-channel image.
:param dst: Destination image of the same size and type as ``src`` .
:param d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from ``sigmaSpace`` .
:param sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see ``sigmaSpace`` ) will be mixed together, resulting in larger areas of semi-equal color.
:param sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see ``sigmaColor`` ). When ``d>0`` , it specifies the neighborhood size regardless of ``sigmaSpace`` . Otherwise, ``d`` is proportional to ``sigmaSpace`` .
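A short sketch with commonly used (illustrative) settings; note that the filter does not work in place, so ``dst`` must be a different image than ``src`` ::

    cv::Mat filtered;
    // 9-pixel diameter neighborhood, moderately strong color and space sigmas
    cv::bilateralFilter(src, filtered, 9, 75, 75);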
@ -421,7 +421,7 @@ Smoothes an image using the normalized box filter.
:param src: Source image. The image can have any number of channels, which are processed independently. The depth should be ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param dst: Destination image of the same size and type as ``src`` .
:param ksize: Smoothing kernel size.
:param anchor: Anchor point. The default value ``Point(-1,-1)`` means that the anchor is at the kernel center.
@ -441,7 +441,7 @@ The call ``blur(src, dst, ksize, anchor, borderType)`` is equivalent to ``boxFil
:ocv:func:`boxFilter`,
:ocv:func:`bilateralFilter`,
:ocv:func:`GaussianBlur`,
:ocv:func:`medianBlur`
borderInterpolate
@ -453,7 +453,7 @@ Computes the source location of an extrapolated pixel.
.. ocv:pyfunction:: cv2.borderInterpolate(p, len, borderType) -> retval
:param p: 0-based coordinate of the extrapolated pixel along one of the axes, likely <0 or >= ``len`` .
:param len: Length of the array along the corresponding axis.
:param borderType: Border type, one of the ``BORDER_*`` , except for ``BORDER_TRANSPARENT`` and ``BORDER_ISOLATED`` . When ``borderType==BORDER_CONSTANT`` , the function always returns -1, regardless of ``p`` and ``len`` .
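For example, the value of the "virtual" pixel ``img.at<float>(100, -5)`` under mixed border modes can be computed as follows (assuming ``img`` is a ``CV_32F`` image) ::

    float val = img.at<float>(cv::borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101),
                              cv::borderInterpolate(-5, img.cols, cv::BORDER_WRAP));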
@ -486,7 +486,7 @@ Smoothes an image using the box filter.
:param src: Source image.
:param dst: Destination image of the same size and type as ``src`` .
:param ksize: Smoothing kernel size.
:param anchor: Anchor point. The default value ``Point(-1,-1)`` means that the anchor is at the kernel center.
@ -515,7 +515,7 @@ Unnormalized box filter is useful for computing various integral characteristics
:ocv:func:`bilateralFilter`,
:ocv:func:`GaussianBlur`,
:ocv:func:`medianBlur`,
:ocv:func:`integral`
@ -523,7 +523,7 @@ buildPyramid
----------------
Constructs the Gaussian pyramid for an image.
.. ocv:function:: void buildPyramid( InputArray src, OutputArrayOfArrays dst, int maxlevel )
.. ocv:function:: void buildPyramid( InputArray src, OutputArrayOfArrays dst, int maxlevel, int borderType=BORDER_DEFAULT )
:param src: Source image. Check :ocv:func:`pyrDown` for the list of supported types.
@ -550,19 +550,19 @@ Forms a border around an image.
:param src: Source image.
:param dst: Destination image of the same type as ``src`` and the size ``Size(src.cols+left+right, src.rows+top+bottom)`` .
:param top:
:param bottom:
:param left:
:param right: Parameter specifying how many pixels in each direction from the source image rectangle to extrapolate. For example, ``top=1, bottom=1, left=1, right=1`` mean that 1 pixel-wide border needs to be built.
:param borderType: Border type. See :ocv:func:`borderInterpolate` for details.
:param value: Border value if ``borderType==BORDER_CONSTANT`` .
The function copies the source image into the middle of the destination image. The areas to the left, to the right, above and below the copied source image will be filled with extrapolated pixels. This is not what
:ocv:class:`FilterEngine` or filtering functions based on it do (they extrapolate pixels on the fly), but what other more complex functions, including your own, may do to simplify image boundary handling.
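A minimal sketch that pads an image with a 10-pixel replicated border (the border width is illustrative) ::

    cv::Mat bordered;
    int b = 10;
    cv::copyMakeBorder(src, bordered, b, b, b, b, cv::BORDER_REPLICATE);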
@ -605,17 +605,17 @@ Returns a box filter engine.
:param srcType: Source image type.
:param sumType: Intermediate horizontal sum type that must have as many channels as ``srcType`` .
:param dstType: Destination image type that must have as many channels as ``srcType`` .
:param ksize: Aperture size.
:param anchor: Anchor position with the kernel. Negative values mean that the anchor is at the kernel center.
:param normalize: Flag specifying whether the sums are normalized or not. See :ocv:func:`boxFilter` for details.
:param scale: Another way to specify normalization in lower-level ``getColumnSumFilter`` .
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
The function is a convenience function that retrieves the horizontal sum primitive filter with
@ -631,7 +631,7 @@ The function itself is used by
:ocv:class:`FilterEngine`,
:ocv:func:`blur`,
:ocv:func:`boxFilter`
@ -644,13 +644,13 @@ Returns an engine for computing image derivatives.
:param srcType: Source image type.
:param dstType: Destination image type that must have as many channels as ``srcType`` .
:param dx: Derivative order with respect to x.
:param dy: Derivative order with respect to y.
:param ksize: Aperture size See :ocv:func:`getDerivKernels` .
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
The function :ocv:func:`createDerivFilter` is a small convenience function that retrieves linear filter coefficients for computing image derivatives using
@ -664,7 +664,7 @@ The function :ocv:func:`createDerivFilter` is a small convenience function that
:ocv:func:`createSeparableLinearFilter`,
:ocv:func:`getDerivKernels`,
:ocv:func:`Scharr`,
:ocv:func:`Sobel`
@ -672,16 +672,16 @@ createGaussianFilter
------------------------
Returns an engine for smoothing images with the Gaussian filter.
.. ocv:function:: Ptr<FilterEngine> createGaussianFilter( int type, Size ksize, double sigmaX, double sigmaY=0, int borderType=BORDER_DEFAULT)
.. ocv:function:: Ptr<FilterEngine> createGaussianFilter( int type, Size ksize, double sigma1, double sigma2=0, int borderType=BORDER_DEFAULT )
:param type: Source and destination image type.
:param ksize: Aperture size. See :ocv:func:`getGaussianKernel` .
:param sigmaX: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` .
:param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` .
:param sigma1: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` .
:param sigma2: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigma2}\leftarrow\texttt{sigma1}` .
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
The function :ocv:func:`createGaussianFilter` computes Gaussian kernel coefficients and then returns a separable linear filter for that kernel. The function is used by
@ -693,7 +693,7 @@ The function :ocv:func:`createGaussianFilter` computes Gaussian kernel coefficie
:ocv:func:`createSeparableLinearFilter`,
:ocv:func:`getGaussianKernel`,
:ocv:func:`GaussianBlur`
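A hedged sketch of using the returned engine, assuming the C++ ``FilterEngine::apply(src, dst)`` interface; the kernel size and sigma are illustrative ::

    cv::Ptr<cv::FilterEngine> f =
        cv::createGaussianFilter(CV_8UC3, cv::Size(5, 5), 1.5);
    cv::Mat dst(src.size(), src.type());
    f->apply(src, dst); // the same engine can be reused for many images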
@ -701,14 +701,14 @@ createLinearFilter
----------------------
Creates a non-separable linear filter engine.
.. ocv:function:: Ptr<FilterEngine> createLinearFilter(int srcType, int dstType, InputArray kernel, Point _anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar())
.. ocv:function:: Ptr<FilterEngine> createLinearFilter( int srcType, int dstType, InputArray kernel, Point _anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar() )
.. ocv:function:: Ptr<BaseFilter> getLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor=Point(-1,-1), double delta=0, int bits=0)
:param srcType: Source image type.
:param dstType: Destination image type that must have as many channels as ``srcType`` .
:param kernel: 2D array of filter coefficients.
:param anchor: Anchor point within the kernel. Special value ``Point(-1,-1)`` means that the anchor is at the kernel center.
@ -718,9 +718,9 @@ Creates a non-separable linear filter engine.
:param bits: Number of the fractional bits. The parameter is used when the kernel is an integer matrix representing fixed-point filter coefficients.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
:param borderValue: Border value used in case of a constant border.
The function returns a pointer to a 2D linear filter for the specified kernel, the source array type, and the destination array type. The function is a higher-level function that calls ``getLinearFilter`` and passes the retrieved 2D filter to the
@ -737,13 +737,13 @@ createMorphologyFilter
--------------------------
Creates an engine for non-separable morphological operations.
.. ocv:function:: Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray element, Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, int columnBorderType=-1, const Scalar& borderValue=morphologyDefaultBorderValue())
.. ocv:function:: Ptr<FilterEngine> createMorphologyFilter( int op, int type, InputArray kernel, Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, int columnBorderType=-1, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray element, Point anchor=Point(-1,-1))
.. ocv:function:: Ptr<BaseFilter> getMorphologyFilter( int op, int type, InputArray kernel, Point anchor=Point(-1,-1) )
.. ocv:function:: Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int esize, int anchor=-1)
.. ocv:function:: Ptr<BaseRowFilter> getMorphologyRowFilter( int op, int type, int ksize, int anchor=-1 )
.. ocv:function:: Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int esize, int anchor=-1)
.. ocv:function:: Ptr<BaseColumnFilter> getMorphologyColumnFilter( int op, int type, int ksize, int anchor=-1 )
.. ocv:function:: Scalar morphologyDefaultBorderValue()
@ -751,16 +751,16 @@ Creates an engine for non-separable morphological operations.
:param type: Input/output image type. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param element: 2D 8-bit structuring element for a morphological operation. Non-zero elements indicate the pixels that belong to the element.
:param kernel: 2D 8-bit structuring element for a morphological operation. Non-zero elements indicate the pixels that belong to the element.
:param esize: Horizontal or vertical structuring element size for separable morphological operations.
:param ksize: Horizontal or vertical structuring element size for separable morphological operations.
:param anchor: Anchor position within the structuring element. Negative values mean that the anchor is at the kernel center.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
:param borderValue: Border value in case of a constant border. The default value, \ ``morphologyDefaultBorderValue`` , has a special meaning. It is transformed to :math:`+\inf` for the erosion and to :math:`-\inf` for the dilation, which means that the minimum (maximum) is effectively computed only over the pixels that are inside the image.
The functions construct primitive morphological filtering operations or a filter engine based on them. Normally it is enough to use
@ -783,18 +783,18 @@ createSeparableLinearFilter
-------------------------------
Creates an engine for a separable linear filter.
.. ocv:function:: Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar())
.. ocv:function:: Ptr<FilterEngine> createSeparableLinearFilter( int srcType, int dstType, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar() )
.. ocv:function:: Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType, InputArray columnKernel, int anchor, int symmetryType, double delta=0, int bits=0)
.. ocv:function:: Ptr<BaseColumnFilter> getLinearColumnFilter( int bufType, int dstType, InputArray kernel, int anchor, int symmetryType, double delta=0, int bits=0 )
.. ocv:function:: Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType, InputArray rowKernel, int anchor, int symmetryType)
.. ocv:function:: Ptr<BaseRowFilter> getLinearRowFilter( int srcType, int bufType, InputArray kernel, int anchor, int symmetryType )
:param srcType: Source array type.
:param dstType: Destination image type that must have as many channels as ``srcType`` .
:param bufType: Intermediate buffer type that must have as many channels as ``srcType`` .
:param rowKernel: Coefficients for filtering each row.
:param columnKernel: Coefficients for filtering each column.
@ -806,12 +806,12 @@ Creates an engine for a separable linear filter.
:param bits: Number of the fractional bits. The parameter is used when the kernel is an integer matrix representing fixed-point filter coefficients.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
:param borderValue: Border value used in case of a constant border.
:param symmetryType: Type of each row and column kernel. See :ocv:func:`getKernelType` .
:param symmetryType: Type of each row and column kernel. See :ocv:func:`getKernelType` .
The functions construct primitive separable linear filtering operations or a filter engine based on them. Normally it is enough to use
:ocv:func:`createSeparableLinearFilter` or even higher-level
@ -831,7 +831,7 @@ dilate
----------
Dilates an image by using a specific structuring element.
.. ocv:function:: void dilate( InputArray src, OutputArray dst, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: void dilate( InputArray src, OutputArray dst, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:pyfunction:: cv2.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
@ -841,7 +841,7 @@ Dilates an image by using a specific structuring element.
:param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param dst: Destination image of the same size and type as ``src`` .
:param element: Structuring element used for dilation. If ``element=Mat()`` , a ``3 x 3`` rectangular structuring element is used.
:param anchor: Position of the anchor within the element. The default value ``(-1, -1)`` means that the anchor is at the element center.
@ -849,9 +849,9 @@ Dilates an image by using a specific structuring element.
:param iterations: Number of times dilation is applied.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
:param borderValue: Border value in case of a constant border. The default value has a special meaning. See :ocv:func:`createMorphologyFilter` for details.
The function dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken:
.. math::

    \texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne 0 } \texttt{src} (x+x',y+y')
@ -871,7 +871,7 @@ erode
---------
Erodes an image by using a specific structuring element.
.. ocv:function:: void erode( InputArray src, OutputArray dst, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: void erode( InputArray src, OutputArray dst, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:pyfunction:: cv2.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
@ -881,7 +881,7 @@ Erodes an image by using a specific structuring element.
:param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param dst: Destination image of the same size and type as ``src``.
:param element: Structuring element used for erosion. If ``element=Mat()`` , a ``3 x 3`` rectangular structuring element is used.
:param anchor: Position of the anchor within the element. The default value ``(-1, -1)`` means that the anchor is at the element center.
@ -889,9 +889,9 @@ Erodes an image by using a specific structuring element.
:param iterations: Number of times erosion is applied.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
:param borderValue: Border value in case of a constant border. The default value has a special meaning. See :ocv:func:`createMorphologyFilter` for details.
The function erodes the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the minimum is taken:
.. math::

    \texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne 0 } \texttt{src} (x+x',y+y')
@ -916,27 +916,28 @@ Convolves an image with the kernel.
.. ocv:pyfunction:: cv2.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1, -1))
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1,-1) )
.. ocv:pyoldfunction:: cv.Filter2D(src, dst, kernel, anchor=(-1, -1))-> None
:param src: Source image.
:param dst: Destination image of the same size and the same number of channels as ``src`` .
:param ddepth: Desired depth of the destination image. If it is negative, it will be the same as ``src.depth()`` . The following combination of ``src.depth()`` and ``ddepth`` are supported:
* ``src.depth()`` = ``CV_8U``, ``ddepth`` = -1/``CV_16S``/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_16U``/``CV_16S``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_32F``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_64F``, ``ddepth`` = -1/``CV_64F``
when ``ddepth=-1``, the destination image will have the same depth as the source.
:param kernel: Convolution kernel (or rather a correlation kernel), a single-channel floating point matrix. If you want to apply different kernels to different channels, split the image into separate color planes using :ocv:func:`split` and process them individually.
:param anchor: Anchor of the kernel that indicates the relative position of a filtered point within the kernel. The anchor should lie within the kernel. The special default value (-1,-1) means that the anchor is at the kernel center.
:param delta: Optional value added to the filtered pixels before storing them in ``dst`` .
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
The function applies an arbitrary linear filter to an image. In-place operation is supported. When the aperture is partially outside the image, the function interpolates outlier pixel values according to the specified border mode.
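For example, a common 3x3 sharpening kernel applied with ``filter2D`` (the kernel choice is illustrative) ::

    cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
                       0, -1,  0,
                      -1,  5, -1,
                       0, -1,  0);
    cv::Mat sharpened;
    cv::filter2D(src, sharpened, src.depth(), kernel);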
@ -972,13 +973,13 @@ Smoothes an image using a Gaussian filter.
:param src: Source image. The image can have any number of channels, which are processed independently. The depth should be ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param dst: Destination image of the same size and type as ``src`` .
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. Or, they can be zeros and then they are computed from ``sigma*`` .
:param sigmaX: Gaussian kernel standard deviation in X direction.
:param sigmaY: Gaussian kernel standard deviation in Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If both sigmas are zeros, they are computed from ``ksize.width`` and ``ksize.height`` , respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modifications of all this semantics, it is recommended to specify all of ``ksize`` , ``sigmaX`` , and ``sigmaY`` .
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
The function convolves the source image with the specified Gaussian kernel. In-place filtering is supported.
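Following the recommendation above, a sketch that specifies the kernel size and both sigmas explicitly (the values are illustrative) ::

    cv::Mat blurred;
    cv::GaussianBlur(src, blurred, cv::Size(5, 5), 1.5, 1.5);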
@ -1002,9 +1003,9 @@ Returns filter coefficients for computing spatial image derivatives.
.. ocv:pyfunction:: cv2.getDerivKernels(dx, dy, ksize[, kx[, ky[, normalize[, ktype]]]]) -> kx, ky
:param kx: Output matrix of row filter coefficients. It has the type ``ktype`` .
:param ky: Output matrix of column filter coefficients. It has the type ``ktype`` .
:param dx: Derivative order with respect to x.
:param dy: Derivative order with respect to y.
@ -1060,7 +1061,7 @@ Two of such generated kernels can be passed to
:ocv:func:`createSeparableLinearFilter`,
:ocv:func:`getDerivKernels`,
:ocv:func:`getStructuringElement`,
:ocv:func:`GaussianBlur`
@ -1084,7 +1085,7 @@ The function analyzes the kernel coefficients and returns the corresponding kern
* **KERNEL_SMOOTH** All the kernel elements are non-negative and sum to 1. For example, the Gaussian kernel is both smooth and symmetrical, so the function returns ``KERNEL_SMOOTH | KERNEL_SYMMETRICAL`` .
* **KERNEL_INTEGER** All the kernel coefficients are integer numbers. This flag can be combined with ``KERNEL_SYMMETRICAL`` or ``KERNEL_ASYMMETRICAL`` .
getStructuringElement
@ -1095,7 +1096,7 @@ Returns a structuring element of the specified size and shape for morphological
.. ocv:pyfunction:: cv2.getStructuringElement(shape, ksize[, anchor]) -> retval
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchorX, int anchorY, int shape, int* values=NULL )
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchor_x, int anchor_y, int shape, int* values=NULL )
.. ocv:pyoldfunction:: cv.CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)-> kernel
@ -1108,27 +1109,27 @@ Returns a structuring element of the specified size and shape for morphological
E_{ij}=1
* **MORPH_ELLIPSE** - an elliptic structuring element, that is, a filled ellipse inscribed into the rectangle ``Rect(0, 0, esize.width, esize.height)``
* **MORPH_CROSS** - a cross-shaped structuring element:
.. math::
E_{ij} = \fork{1}{if i=\texttt{anchor.y} or j=\texttt{anchor.x}}{0}{otherwise}
* **CV_SHAPE_CUSTOM** - custom structuring element (OpenCV 1.x API)
:param ksize: Size of the structuring element.
:param cols: Width of the structuring element
:param rows: Height of the structuring element
:param anchor: Anchor position within the element. The default value :math:`(-1, -1)` means that the anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just regulates how much the result of the morphological operation is shifted.
:param anchorX: x-coordinate of the anchor
:param anchorY: y-coordinate of the anchor
:param anchor_x: x-coordinate of the anchor
:param anchor_y: y-coordinate of the anchor
:param values: integer array of ``cols``*``rows`` elements that specifies the custom shape of the structuring element, when ``shape=CV_SHAPE_CUSTOM``.
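For example, a sketch that builds a 7x7 elliptic element and uses it for morphological opening (the sizes are illustrative) ::

    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7));
    cv::Mat opened;
    cv::morphologyEx(src, opened, cv::MORPH_OPEN, element); // erosion, then dilation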
The function constructs and returns the structuring element that can be further passed to
@ -1149,9 +1150,9 @@ Smoothes an image using the median filter.
.. ocv:pyfunction:: cv2.medianBlur(src, ksize[, dst]) -> dst
:param src: Source 1-, 3-, or 4-channel image. When ``ksize`` is 3 or 5, the image depth should be ``CV_8U`` , ``CV_16U`` , or ``CV_32F`` . For larger aperture sizes, it can only be ``CV_8U`` .
:param dst: Destination array of the same size and type as ``src`` .
:param ksize: Aperture linear size. It must be odd and greater than 1, for example: 3, 5, 7 ...
The function smoothes an image using the median filter with the
@ -1170,7 +1171,7 @@ morphologyEx
----------------
Performs advanced morphological transformations.
.. ocv:function:: void morphologyEx( InputArray src, OutputArray dst, int op, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: void morphologyEx( InputArray src, OutputArray dst, int op, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:pyfunction:: cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
@ -1180,7 +1181,7 @@ Performs advanced morphological transformations.
:param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param dst: Destination image of the same size and type as ``src`` .
:param element: Structuring element.
:param op: Type of a morphological operation that can be one of the following:
@ -1198,7 +1199,7 @@ Performs advanced morphological transformations.
:param iterations: Number of times erosion and dilation are applied.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
:param borderValue: Border value in case of a constant border. The default value has a special meaning. See :ocv:func:`createMorphologyFilter` for details.
The function can perform advanced morphological transformations using an erosion and dilation as basic operations.
@ -1242,7 +1243,6 @@ Any of the operations can be done in-place. In case of multi-channel images, eac
:ocv:func:`createMorphologyFilter`
Laplacian
-------------
Calculates the Laplacian of an image.
@ -1251,14 +1251,14 @@ Calculates the Laplacian of an image.
.. ocv:pyfunction:: cv2.Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int ksize=3)
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Laplace(src, dst, ksize=3)-> None
.. ocv:pyoldfunction:: cv.Laplace(src, dst, apertureSize=3) -> None
:param src: Source image.
:param dst: Destination image of the same size and the same number of channels as ``src`` .
:param ddepth: Desired depth of the destination image.
:param ksize: Aperture size used to compute the second-derivative filters. See :ocv:func:`getDerivKernels` for details. The size must be positive and odd.
@ -1266,7 +1266,7 @@ Calculates the Laplacian of an image.
:param scale: Optional scale factor for the computed Laplacian values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
:param delta: Optional delta value that is added to the results prior to storing them in ``dst`` .
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
The function calculates the Laplacian of the source image by adding up the second x and y derivatives calculated using the Sobel operator:
@ -1293,9 +1293,9 @@ pyrDown
-----------
Smoothes an image and downsamples it.
.. ocv:function:: void pyrDown( InputArray src, OutputArray dst, const Size& dstsize=Size())
.. ocv:function:: void pyrDown( InputArray src, OutputArray dst, const Size& dstsize=Size(), int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.pyrDown(src[, dst[, dstsize]]) -> dst
.. ocv:pyfunction:: cv2.pyrDown(src[, dst[, dstsize[, borderType]]]) -> dst
.. ocv:cfunction:: void cvPyrDown( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
@ -1304,7 +1304,7 @@ Smoothes an image and downsamples it.
:param src: Source image.
:param dst: Destination image. It has the specified size and the same type as ``src`` .
:param dstsize: Size of the destination image. By default, it is computed as ``Size((src.cols+1)/2, (src.rows+1)/2)`` . But in any case, the following conditions should be satisfied:
.. math::

    \begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}
@ -1326,9 +1326,9 @@ pyrUp
---------
Upsamples an image and then smoothes it.
.. ocv:function:: void pyrUp( InputArray src, OutputArray dst, const Size& dstsize=Size())
.. ocv:function:: void pyrUp( InputArray src, OutputArray dst, const Size& dstsize=Size(), int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.pyrUp(src[, dst[, dstsize]]) -> dst
.. ocv:pyfunction:: cv2.pyrUp(src[, dst[, dstsize[, borderType]]]) -> dst
.. ocv:cfunction:: cvPyrUp( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
@ -1337,7 +1337,7 @@ Upsamples an image and then smoothes it.
:param src: Source image.
:param dst: Destination image. It has the specified size and the same type as ``src`` .
:param dstsize: Size of the destination image. By default, it is computed as ``Size(src.cols*2, src.rows*2)`` . But in any case, the following conditions should be satisfied:
.. math::

    \begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}
@ -1353,46 +1353,46 @@ pyrMeanShiftFiltering
---------------------
Performs initial step of meanshift segmentation of an image.
.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) )
.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) )
.. ocv:pyfunction:: cv2.pyrMeanShiftFiltering(src, sp, sr[, dst[, maxLevel[, termcrit]]]) -> dst
.. ocv:cfunction:: void cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst, double sp, double sr, int max_level=1, CvTermCriteria termcrit= cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1))
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, maxLevel=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1))-> None
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, max_level=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1)) -> None
:param src: The source 8-bit, 3-channel image.
:param dst: The destination image of the same format and the same size as the source.
:param sp: The spatial window radius.
:param sr: The color window radius.
:param maxLevel: Maximum level of the pyramid for the segmentation.
:param termcrit: Termination criteria: when to stop meanshift iterations.
The function implements the filtering stage of meanshift segmentation, that is, the output of the function is the filtered "posterized" image with color gradients and fine-grain texture flattened. At every pixel
``(X,Y)`` of the input image (or down-sized input image, see below) the function executes meanshift
iterations, that is, the pixel ``(X,Y)`` neighborhood in the joint space-color hyperspace is considered:
.. math::
(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}
where ``(R,G,B)`` and ``(r,g,b)`` are the vectors of color components at ``(X,Y)`` and ``(x,y)``, respectively (though, the algorithm does not depend on the color space used, so any 3-component color space can be used instead). Over the neighborhood the average spatial value ``(X',Y')`` and average color vector ``(R',G',B')`` are found and they act as the neighborhood center on the next iteration:
.. math::
(X,Y)~(X',Y'), (R,G,B)~(R',G',B').
After the iterations are over, the color components of the initial pixel (that is, the pixel from where the iterations started) are set to the final value (average color at the last iteration):
.. math::
I(X,Y) <- (R*,G*,B*)
When ``maxLevel > 0``, the gaussian pyramid of ``maxLevel+1`` levels is built, and the above procedure is run on the smallest layer first. After that, the results are propagated to the larger layer and the iterations are run again only on those pixels where the layer colors differ by more than ``sr`` from the lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the results will be actually different from the ones obtained by running the meanshift procedure on the whole original image (i.e. when ``maxLevel==0``).
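A minimal sketch (the window radii and pyramid depth are illustrative; the input must be an 8-bit, 3-channel image) ::

    cv::Mat posterized;
    cv::pyrMeanShiftFiltering(src, posterized, 20, 40, 2); // sp=20, sr=40, maxLevel=2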
@ -1402,25 +1402,25 @@ sepFilter2D
---------------
Applies a separable linear filter to an image.
.. ocv:function:: void sepFilter2D( InputArray src, OutputArray dst, int ddepth, InputArray kernelX, InputArray kernelY, Point anchor=Point(-1,-1), double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.sepFilter2D(src, ddepth, kernelX, kernelY[, dst[, anchor[, delta[, borderType]]]]) -> dst
:param src: Source image.
:param dst: Destination image of the same size and the same number of channels as ``src`` .
:param ddepth: Destination image depth. The following combination of ``src.depth()`` and ``ddepth`` are supported:
* ``src.depth()`` = ``CV_8U``, ``ddepth`` = -1/``CV_16S``/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_16U``/``CV_16S``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_32F``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_64F``, ``ddepth`` = -1/``CV_64F``
when ``ddepth=-1``, the destination image will have the same depth as the source.
:param kernelX: Coefficients for filtering each row.
:param kernelY: Coefficients for filtering each column.
:param anchor: Anchor position within the kernel. The default value :math:`(-1,-1)` means that the anchor is at the kernel center.
@ -1428,7 +1428,7 @@ Applies a separable linear filter to an image.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
The function applies a separable linear filter to the image. That is, first, every row of ``src`` is filtered with the 1D kernel ``kernelX`` . Then, every column of the result is filtered with the 1D kernel ``kernelY`` . The final result shifted by ``delta`` is stored in ``dst`` .
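For example, a separable 5x5 box filter can be applied with the same 1D kernel for rows and columns (a hedged Python sketch; the file name is arbitrary): ::
    import cv2
    import numpy as np
    img = cv2.imread('image.jpg')
    k = np.ones(5, np.float32) / 5            # normalized 1D box kernel
    blurred = cv2.sepFilter2D(img, -1, k, k)  # ddepth=-1 keeps the source depth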
.. seealso::
@ -1437,50 +1437,50 @@ The function applies a separable linear filter to the image. That is, first, eve
:ocv:func:`Sobel`,
:ocv:func:`GaussianBlur`,
:ocv:func:`boxFilter`,
:ocv:func:`blur`
Smooth
------
Smooths the image in one of several ways.
.. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int size1=3, int size2=0, double sigma1=0, double sigma2=0 )
.. ocv:pyoldfunction:: cv.Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)-> None
:param src: The source image
:param dst: The destination image
:param smoothtype: Type of the smoothing:
* **CV_BLUR_NO_SCALE** linear convolution with :math:`\texttt{size1}\times\texttt{size2}` box kernel (all 1's). If you want to smooth different pixels with different-size box kernels, you can use the integral image that is computed using :ocv:func:`integral`
* **CV_BLUR** linear convolution with :math:`\texttt{size1}\times\texttt{size2}` box kernel (all 1's) with subsequent scaling by :math:`1/(\texttt{size1}\cdot\texttt{size2})`
* **CV_GAUSSIAN** linear convolution with a :math:`\texttt{size1}\times\texttt{size2}` Gaussian kernel
* **CV_MEDIAN** median filter with a :math:`\texttt{size1}\times\texttt{size1}` square aperture
* **CV_BILATERAL** bilateral filter with a :math:`\texttt{size1}\times\texttt{size1}` square aperture, color sigma= ``sigma1`` and spatial sigma= ``sigma2`` . If ``size1=0`` , the aperture square side is set to ``cvRound(sigma2*1.5)*2+1`` . Information about bilateral filtering can be found at http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.html
:param size1: The first parameter of the smoothing operation, the aperture width. Must be a positive odd number (1, 3, 5, ...)
:param size2: The second parameter of the smoothing operation, the aperture height. Ignored by ``CV_MEDIAN`` and ``CV_BILATERAL`` methods. In the case of simple scaled/non-scaled and Gaussian blur if ``size2`` is zero, it is set to ``size1`` . Otherwise it must be a positive odd number.
:param sigma1: In the case of Gaussian smoothing, this parameter may specify the Gaussian :math:`\sigma` (standard deviation). If it is zero, it is calculated from the kernel size:
.. math::
\sigma = 0.3 (n/2 - 1) + 0.8 \quad \text{where} \quad n= \begin{array}{l l} \mbox{\texttt{size1} for horizontal kernel} \\ \mbox{\texttt{size2} for vertical kernel} \end{array}
Using the standard sigma for small kernels ( :math:`3\times 3` to :math:`7\times 7` ) gives better speed. If ``sigma1`` is not zero while ``size1`` and ``size2`` are zero, the kernel size is calculated from the sigma (to provide an accurate enough operation).
The function smooths an image using one of several methods. Each of the methods has some features and restrictions, listed below:
* Blur with no scaling works with single-channel images only and supports accumulation of 8-bit to 16-bit format (similar to :ocv:func:`Sobel` and :ocv:func:`Laplacian`) and 32-bit floating point to 32-bit floating-point format.
@ -1496,23 +1496,24 @@ Sobel
---------
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
.. ocv:function:: void Sobel( InputArray src, OutputArray dst, int ddepth, int dx, int dy, int ksize=3, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
.. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Sobel(src, dst, xorder, yorder, apertureSize=3)-> None
:param src: Source image.
:param dst: Destination image of the same size and the same number of channels as ``src`` .
:param ddepth: Destination image depth. The following combination of ``src.depth()`` and ``ddepth`` are supported:
* ``src.depth()`` = ``CV_8U``, ``ddepth`` = -1/``CV_16S``/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_16U``/``CV_16S``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_32F``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
* ``src.depth()`` = ``CV_64F``, ``ddepth`` = -1/``CV_64F``
when ``ddepth=-1``, the destination image will have the same depth as the source. In the case of 8-bit input images it will result in truncated derivatives.
:param dx: Order of the derivative in x.
@ -1524,7 +1525,7 @@ Calculates the first, second, third, or mixed image derivatives using an extende
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
:param delta: Optional delta value that is added to the results prior to storing them in ``dst`` .
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
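A minimal Python sketch (assuming an 8-bit grayscale input; the ``CV_16S`` output depth avoids the truncation of negative derivatives mentioned above): ::
    import cv2
    img = cv2.imread('image.jpg', 0)                # 8-bit grayscale
    dx = cv2.Sobel(img, cv2.CV_16S, 1, 0, ksize=3)  # first derivative in x
    dy = cv2.Sobel(img, cv2.CV_16S, 0, 1, ksize=3)  # first derivative in y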
In all cases except one, the
@ -1538,7 +1539,7 @@ derivative. When
There is also the special value ``ksize = CV_SCHARR`` (-1) that corresponds to the
:math:`3\times3` Scharr
filter that may give more accurate results than the
:math:`3\times3` Sobel. The Scharr aperture is
.. math::
@ -1582,37 +1583,37 @@ Scharr
----------
Calculates the first x- or y- image derivative using Scharr operator.
.. ocv:function:: void Scharr( InputArray src, OutputArray dst, int ddepth, int dx, int dy, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst
:param src: Source image.
:param dst: Destination image of the same size and the same number of channels as ``src``.
:param ddepth: Destination image depth. See :ocv:func:`Sobel` for the list of supported combination of ``src.depth()`` and ``ddepth``.
:param dx: Order of the derivative in x.
:param dy: Order of the derivative in y.
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
:param delta: Optional delta value that is added to the results prior to storing them in ``dst``.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
The function computes the first x- or y- spatial image derivative using the Scharr operator. The call
.. math::
\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}
is equivalent to
.. math::
\texttt{Sobel(src, dst, ddepth, dx, dy, CV\_SCHARR, scale, delta, borderType)} .
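The equivalence can be checked from Python, where ``ksize=-1`` (``CV_SCHARR``) selects the Scharr kernel in :ocv:func:`Sobel` (a hedged sketch): ::
    import cv2
    img = cv2.imread('image.jpg', 0)
    a = cv2.Scharr(img, cv2.CV_32F, 1, 0)
    b = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=-1)  # -1 == CV_SCHARR
    # a and b hold identical derivative images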
.. seealso::

@ -41,15 +41,15 @@ Converts image transformation maps from one representation to another.
.. ocv:pyfunction:: cv2.convertMaps(map1, map2, dstmap1type[, dstmap1[, dstmap2[, nninterpolation]]]) -> dstmap1, dstmap2
:param map1: The first input map of type ``CV_16SC2`` , ``CV_32FC1`` , or ``CV_32FC2`` .
:param map2: The second input map of type ``CV_16UC1`` , ``CV_32FC1`` , or none (empty matrix), respectively.
:param dstmap1: The first output map that has the type ``dstmap1type`` and the same size as ``src`` .
:param dstmap2: The second output map.
:param dstmap1type: Type of the first output map that should be ``CV_16SC2`` , ``CV_32FC1`` , or ``CV_32FC2`` .
:param nninterpolation: Flag indicating whether the fixed-point maps are used for the nearest-neighbor or for a more complex interpolation.
The function converts a pair of maps for
@ -77,11 +77,13 @@ getAffineTransform
----------------------
Calculates an affine transform from three pairs of the corresponding points.
.. ocv:function:: Mat getAffineTransform( InputArray src, InputArray dst )
.. ocv:function:: Mat getAffineTransform( const Point2f src[], const Point2f dst[] )
.. ocv:pyfunction:: cv2.getAffineTransform(src, dst) -> retval
.. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f * src, const CvPoint2D32f * dst, CvMat * map_matrix )
.. ocv:pyoldfunction:: cv.GetAffineTransform(src, dst, mapMatrix)-> None
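A short Python sketch (the three point pairs are made up): ::
    import cv2
    import numpy as np
    img = cv2.imread('image.jpg')
    src_pts = np.float32([[0, 0], [100, 0], [0, 100]])
    dst_pts = np.float32([[10, 20], [110, 5], [5, 120]])
    M = cv2.getAffineTransform(src_pts, dst_pts)   # 2x3 affine matrix
    warped = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))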
@ -114,11 +116,13 @@ getPerspectiveTransform
---------------------------
Calculates a perspective transform from four pairs of the corresponding points.
.. ocv:function:: Mat getPerspectiveTransform( InputArray src, InputArray dst )
.. ocv:function:: Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
.. ocv:pyfunction:: cv2.getPerspectiveTransform(src, dst) -> retval
.. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetPerspectiveTransform(src, dst, mapMatrix)-> None
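Analogously, with four point pairs (made-up coordinates): ::
    import cv2
    import numpy as np
    img = cv2.imread('image.jpg')
    src_q = np.float32([[0, 0], [300, 0], [300, 200], [0, 200]])
    dst_q = np.float32([[20, 10], [280, 25], [310, 190], [5, 210]])
    M = cv2.getPerspectiveTransform(src_q, dst_q)  # 3x3 homography
    warped = cv2.warpPerspective(img, M, (320, 240))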
@ -151,7 +155,7 @@ getRectSubPix
-----------------
Retrieves a pixel rectangle from an image with sub-pixel accuracy.
.. ocv:function:: void getRectSubPix( InputArray image, Size patchSize, Point2f center, OutputArray patch, int patchType=-1 )
.. ocv:pyfunction:: cv2.getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch
@ -165,7 +169,7 @@ Retrieves a pixel rectangle from an image with sub-pixel accuracy.
:param center: Floating point coordinates of the center of the extracted rectangle within the source image. The center must be inside the image.
:param patch: Extracted patch that has the size ``patchSize`` and the same number of channels as ``src`` .
:param patchType: Depth of the extracted pixels. By default, they have the same depth as ``src`` .
The function ``getRectSubPix`` extracts pixels from ``src`` :
@ -196,7 +200,7 @@ Calculates an affine matrix of 2D rotation.
.. ocv:pyfunction:: cv2.getRotationMatrix2D(center, angle, scale) -> retval
.. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetRotationMatrix2D(center, angle, scale, mapMatrix)-> None
@ -206,7 +210,7 @@ Calculates an affine matrix of 2D rotation.
:param scale: Isotropic scale factor.
:param map_matrix: The output affine transformation, 2x3 floating-point matrix.
The function calculates the following matrix:
@ -262,19 +266,19 @@ Remaps an image to log-polar space.
.. ocv:pyoldfunction:: cv.LogPolar(src, dst, center, M, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS)-> None
:param src: Source image
:param dst: Destination image
:param center: The transformation center; where the output precision is maximal
:param M: Magnitude scale parameter. See below
:param flags: A combination of interpolation methods and the following optional flags:
* **CV_WARP_FILL_OUTLIERS** fills all of the destination image pixels. If some of them correspond to outliers in the source image, they are set to zero
* **CV_WARP_INVERSE_MAP** See below
The function ``cvLogPolar`` transforms the source image using the following transformation:
@ -283,7 +287,7 @@ The function ``cvLogPolar`` transforms the source image using the following tran
.. math::
dst( \phi , \rho ) = src(x,y)
*
@ -291,14 +295,14 @@ The function ``cvLogPolar`` transforms the source image using the following tran
.. math::
dst(x,y) = src( \phi , \rho )
where
.. math::
\rho = M \cdot \log{\sqrt{x^2 + y^2}} , \quad \phi = \mathrm{atan}(y/x)
The function emulates the human "foveal" vision and can be used for fast scale and rotation-invariant template matching, for object tracking and so forth. The function cannot operate in-place.
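A short sketch with the legacy Python bindings (assuming the old ``cv`` module is available as ``cv2.cv``; the magnitude scale ``M=40`` is arbitrary): ::
    import cv2.cv as cv
    src = cv.LoadImage('image.jpg')
    dst = cv.CreateImage(cv.GetSize(src), src.depth, src.nChannels)
    center = (src.width / 2, src.height / 2)
    cv.LogPolar(src, dst, center, 40,
                cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)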
@ -372,7 +376,7 @@ Resizes an image.
\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}
Either ``dsize`` or both ``fx`` and ``fy`` must be non-zero.
:param fx: Scale factor along the horizontal axis. When it is 0, it is computed as
@ -417,7 +421,7 @@ To shrink an image, it will generally look best with CV_INTER_AREA interpolation
:ocv:func:`warpAffine`,
:ocv:func:`warpPerspective`,
:ocv:func:`remap`
warpAffine
@ -428,16 +432,18 @@ Applies an affine transformation to an image.
.. ocv:pyfunction:: cv2.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
.. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.WarpAffine(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
.. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetQuadrangleSubPix(src, dst, mapMatrix)-> None
:param src: Source image.
:param dst: Destination image that has the size ``dsize`` and the same type as ``src`` .
:param M: :math:`2\times 3` transformation matrix.
:param dsize: Size of the destination image.
@ -477,13 +483,14 @@ Applies a perspective transformation to an image.
.. ocv:pyfunction:: cv2.warpPerspective(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
.. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.WarpPerspective(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
:param src: Source image.
:param dst: Destination image that has the size ``dsize`` and the same type as ``src`` .
:param M: :math:`3\times 3` transformation matrix.
:param dsize: Size of the destination image.
@ -524,24 +531,24 @@ Computes the undistortion and rectification transformation map.
.. ocv:pyfunction:: cv2.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type[, map1[, map2]]) -> map1, map2
.. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat * R, const CvMat* new_camera_matrix, CvArr* mapx, CvArr* mapy )
.. ocv:cfunction:: void cvInitUndistortMap( const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvArr* mapx, CvArr* mapy )
.. ocv:pyoldfunction:: cv.InitUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, map1, map2)-> None
.. ocv:pyoldfunction:: cv.InitUndistortMap(cameraMatrix, distCoeffs, map1, map2)-> None
:param cameraMatrix: Input camera matrix :math:`A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param R: Optional rectification transformation in the object space (3x3 matrix). ``R1`` or ``R2`` , computed by :ocv:func:`stereoRectify` can be passed here. If the matrix is empty, the identity transformation is assumed. In ``cvInitUndistortMap`` R is assumed to be an identity matrix.
:param newCameraMatrix: New camera matrix :math:`A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}` .
:param size: Undistorted image size.
:param m1type: Type of the first output map that can be ``CV_32FC1`` or ``CV_16SC2`` . See :ocv:func:`convertMaps` for details.
:param map1: The first output map.
:param map2: The second output map.
@ -606,7 +613,7 @@ where
:math:`(0,0)` and
:math:`(1,1)` elements of ``cameraMatrix`` , respectively.
By default, the undistortion functions in OpenCV (see
:ocv:func:`initUndistortRectifyMap`,
:ocv:func:`undistort`) do not move the principal point. However, when you work with stereo, it is important to move the principal points in both views to the same y-coordinate (which is required by most stereo correspondence algorithms), and maybe to the same x-coordinate too. So, you can form the new camera matrix for each view where the principal points are located at the center.
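When many frames from the same camera must be corrected, the maps can be computed once and reused with :ocv:func:`remap`; a hedged Python sketch with made-up intrinsics (the ``CV_16SC2`` constant may live in a different namespace depending on the bindings version): ::
    import cv2
    import numpy as np
    img = cv2.imread('frame.jpg')
    h, w = img.shape[:2]
    K = np.array([[500., 0., w / 2.], [0., 500., h / 2.], [0., 0., 1.]])
    dist = np.array([-0.2, 0.05, 0., 0.])      # made-up k1, k2, p1, p2
    map1, map2 = cv2.initUndistortRectifyMap(K, dist, None, K, (w, h), cv2.CV_16SC2)
    undistorted = cv2.remap(img, map1, map2, cv2.INTER_LINEAR)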
@ -621,16 +628,16 @@ Transforms an image to compensate for lens distortion.
.. ocv:pyfunction:: cv2.undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]) -> dst
.. ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* camera_matrix, const CvMat* distortion_coeffs, const CvMat* new_camera_matrix=0 )
.. ocv:pyoldfunction:: cv.Undistort2(src, dst, cameraMatrix, distCoeffs)-> None
:param src: Input (distorted) image.
:param dst: Output (corrected) image that has the same size and type as ``src`` .
:param cameraMatrix: Input camera matrix :math:`A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param newCameraMatrix: Camera matrix of the distorted image. By default, it is the same as ``cameraMatrix`` but you may additionally scale and shift the result by using a different matrix.
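A one-off correction can be done with a single call (a hedged sketch; the intrinsics and distortion coefficients are made up): ::
    import cv2
    import numpy as np
    img = cv2.imread('distorted.jpg')
    K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    dist = np.array([-0.2, 0.05, 0., 0.])   # made-up k1, k2, p1, p2
    corrected = cv2.undistort(img, K, dist)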
@ -660,7 +667,7 @@ Computes the ideal point coordinates from the observed point coordinates.
.. ocv:function:: void undistortPoints( InputArray src, OutputArray dst, InputArray cameraMatrix, InputArray distCoeffs, InputArray R=noArray(), InputArray P=noArray())
.. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat* R=0, const CvMat* P=0 )
.. ocv:pyoldfunction:: cv.UndistortPoints(src, dst, cameraMatrix, distCoeffs, R=None, P=None)-> None
:param src: Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
@ -668,7 +675,7 @@ Computes the ideal point coordinates from the observed point coordinates.
:param dst: Output ideal point coordinates after undistortion and reverse perspective transformation. If matrix ``P`` is identity or omitted, ``dst`` will contain normalized point coordinates.
:param cameraMatrix: Camera matrix :math:`\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param R: Rectification transformation in the object space (3x3 matrix). ``R1`` or ``R2`` computed by :ocv:func:`stereoRectify` can be passed here. If the matrix is empty, the identity transformation is used.
@ -696,4 +703,4 @@ where ``undistort()`` is an approximate iterative algorithm that estimates the n
The function can be used for both a stereo camera head and a monocular camera (when R is empty).
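A hedged Python sketch (made-up intrinsics; with ``P`` omitted the result is in normalized coordinates): ::
    import cv2
    import numpy as np
    K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    dist = np.array([-0.2, 0.05, 0., 0.])
    pts = np.array([[[320., 240.]], [[100., 50.]]], np.float32)  # Nx1x2 observed pixels
    ideal = cv2.undistortPoints(pts, K, dist)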

@ -9,22 +9,22 @@ calcHist
------------
Calculates a histogram of a set of arrays.
.. ocv:function:: void calcHist( const Mat* images, int nimages, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
.. ocv:function:: void calcHist( const Mat* images, int nimages, const int* channels, InputArray mask, SparseMat& hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
.. ocv:pyfunction:: cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) -> hist
.. ocv:cfunction:: void cvCalcHist( IplImage** image, CvHistogram* hist, int accumulate=0, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.CalcHist(image, hist, accumulate=0, mask=None)-> None
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
:param nimages: Number of source images.
:param channels: List of the ``dims`` channels used to compute the histogram. The first array channels are numbered from 0 to ``images[0].channels()-1`` , the second array channels are counted from ``images[0].channels()`` to ``images[0].channels() + images[1].channels()-1``, and so on.
:param mask: Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as ``images[i]`` . The non-zero mask elements mark the array elements counted in the histogram.
:param hist: Output histogram, which is a dense or sparse ``dims`` -dimensional array.
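For example, a 256-bin grayscale histogram in Python: ::
    import cv2
    img = cv2.imread('image.jpg', 0)    # 8-bit grayscale
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])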
@ -106,27 +106,27 @@ calcBackProject
-------------------
Calculates the back projection of a histogram.
.. ocv:function:: void calcBackProject( const Mat* images, int nimages, const int* channels, InputArray hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
.. ocv:function:: void calcBackProject( const Mat* images, int nimages, const int* channels, const SparseMat& hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
.. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges, scale[, dst]) -> dst
.. ocv:cfunction:: void cvCalcBackProject( IplImage** image, CvArr* backProject, const CvHistogram* hist )
.. ocv:pyoldfunction:: cv.CalcBackProject(image, back_project, hist) -> None
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
:param nimages: Number of source images.
:param channels: The list of channels used to compute the back projection. The number of channels must match the histogram dimensionality. The first array channels are numbered from 0 to ``images[0].channels()-1`` , the second array channels are counted from ``images[0].channels()`` to ``images[0].channels() + images[1].channels()-1``, and so on.
:param hist: Input histogram that can be dense or sparse.
:param backProject: Destination back projection array that is a single-channel array of the same size and depth as ``images[0]`` .
:param ranges: Array of arrays of the histogram bin boundaries in each dimension. See :ocv:func:`calcHist` .
:param scale: Optional scale factor for the output back projection.
:param uniform: Flag indicating whether the histogram is uniform or not (see above).
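A typical hue back projection in Python (file names are arbitrary; the histogram is normalized so the back projection fits the 8-bit range): ::
    import cv2
    roi = cv2.imread('patch.jpg')       # model patch
    target = cv2.imread('scene.jpg')    # image to search
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    target_hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([roi_hsv], [0], None, [180], [0, 180])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    backproj = cv2.calcBackProject([target_hsv], [0], hist, [0, 180], 1)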
@ -164,7 +164,7 @@ Compares two histograms.
:param H1: First compared histogram.
:param H2: Second compared histogram of the same size as ``H1`` .
:param method: Comparison method that could be one of the following:
* **CV_COMP_CORREL** Correlation
@ -225,8 +225,9 @@ Computes the "minimal work" distance between two weighted point configurations.
.. ocv:function:: float EMD( InputArray signature1, InputArray signature2, int distType, InputArray cost=noArray(), float* lowerBound=0, OutputArray flow=noArray() )
.. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distance_type, CvDistanceFunction distance_func=NULL, const CvArr* cost_matrix=NULL, CvArr* flow=NULL, float* lower_bound=NULL, void* userdata=NULL )
.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distance_type, distance_func=None, cost_matrix=None, flow=None, lower_bound=None, userdata=None) -> float
:param signature1: First signature, a :math:`\texttt{size1}\times \texttt{dims}+1` floating-point matrix. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a single column (weights only) if the user-defined cost matrix is used.
@ -234,19 +235,19 @@ Computes the "minimal work" distance between two weighted point configurations.
:param distType: Used metric. ``CV_DIST_L1, CV_DIST_L2`` , and ``CV_DIST_C`` stand for one of the standard metrics. ``CV_DIST_USER`` means that a pre-calculated cost matrix ``cost`` is used.
:param distance_func: Custom distance function supported by the old interface. ``CvDistanceFunction`` is defined as: ::
typedef float (CV_CDECL * CvDistanceFunction)( const float* a,
const float* b, void* userdata );
where ``a`` and ``b`` are point coordinates and ``userdata`` is the same as the last parameter.
:param cost: User-defined :math:`\texttt{size1}\times \texttt{size2}` cost matrix. Also, if a cost matrix is used, lower boundary ``lowerBound`` cannot be calculated because it needs a metric function.
:param lowerBound: Optional input/output parameter: lower boundary of a distance between the two signatures that is a distance between mass centers. The lower boundary may not be calculated if the user-defined cost matrix is used, the total weights of point configurations are not equal, or if the signatures consist of weights only (the signature matrices have a single column). You **must** initialize ``*lowerBound`` . If the calculated distance between mass centers is greater or equal to ``*lowerBound`` (it means that the signatures are far enough), the function does not calculate EMD. In any case ``*lowerBound`` is set to the calculated distance between mass centers on return. Thus, if you want to calculate both distance between mass centers and EMD, ``*lowerBound`` should be set to 0.
:param flow: Resultant :math:`\texttt{size1} \times \texttt{size2}` flow matrix: :math:`\texttt{flow}_{i,j}` is a flow from :math:`i` -th point of ``signature1`` to :math:`j` -th point of ``signature2`` .
:param userdata: Optional pointer directly passed to the custom distance function.
The function computes the earth mover distance and/or a lower boundary of the distance between the two weighted point configurations. One of the applications described in [RubnerSept98]_ is multi-dimensional histogram comparison for image retrieval. EMD is a transportation problem that is solved using some modification of a simplex algorithm; thus, the complexity is exponential in the worst case, though on average it is much faster. In the case of a real metric, the lower boundary can be calculated even faster (using a linear-time algorithm), and it can be used to determine roughly whether the two signatures are far enough apart that they cannot relate to the same object.
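A tiny 1D illustration with the legacy bindings (two half-weight points at 0 and 1 versus one unit-weight point at 0.5; the expected distance is 0.5): ::
    import numpy as np
    import cv2.cv as cv
    # each signature row: point weight followed by point coordinates
    sig1 = cv.fromarray(np.array([[0.5, 0.0], [0.5, 1.0]], np.float32))
    sig2 = cv.fromarray(np.array([[1.0, 0.5]], np.float32))
    dist = cv.CalcEMD2(sig1, sig2, cv.CV_DIST_L2)   # -> 0.5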
@ -301,20 +302,20 @@ Locates a template within an image by using a histogram comparison.
.. ocv:cfunction:: void cvCalcBackProjectPatch( IplImage** images, CvArr* dst, CvSize patch_size, CvHistogram* hist, int method, double factor )
.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patch_size, hist, method, factor)-> None
:param images: Source images (though you may pass ``CvMat**`` as well).
:param dst: Destination image.
:param patch_size: Size of the patch slid through the source image.
:param hist: Histogram.
:param method: Comparison method passed to :ocv:cfunc:`CompareHist` (see the function description).
:param factor: Normalization factor for histograms that affects the normalization scale of the destination image. Pass 1 if not sure.
The function calculates the back projection by comparing histograms of the source image patches with the given histogram. The function is similar to :ocv:func:`matchTemplate`, but instead of comparing the raster patch with all its possible positions within the search window, the function ``CalcBackProjectPatch`` compares histograms. See the algorithm diagram below:
.. image:: pics/backprojectpatch.png
@ -324,23 +325,23 @@ CalcProbDensity
---------------
Divides one histogram by another.
.. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dst_hist, double scale=255 )
.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dst_hist, scale=255) -> None
:param hist1: First histogram (the divisor).
:param hist2: Second histogram.
:param dst_hist: Destination histogram.
:param scale: Scale factor for the destination histogram.
The function calculates the object probability density from two histograms as:
.. math::
\texttt{dst\_hist} (I)= \forkthree{0}{if $\texttt{hist1}(I)=0$}{\texttt{scale}}{if $\texttt{hist1}(I) \ne 0$ and $\texttt{hist2}(I) > \texttt{hist1}(I)$}{\frac{\texttt{hist2}(I) \cdot \texttt{scale}}{\texttt{hist1}(I)}}{if $\texttt{hist1}(I) \ne 0$ and $\texttt{hist2}(I) \le \texttt{hist1}(I)$}
ClearHist
@ -350,7 +351,7 @@ Clears the histogram.
.. ocv:cfunction:: void cvClearHist( CvHistogram* hist )
.. ocv:pyoldfunction:: cv.ClearHist(hist)-> None
:param hist: Histogram.
The function sets all of the histogram bins to 0 in case of a dense histogram and removes all histogram bins in case of a sparse array.
@ -361,10 +362,10 @@ Copies a histogram.
.. ocv:cfunction:: void cvCopyHist( const CvHistogram* src, CvHistogram** dst )
:param src: Source histogram.
:param dst: Pointer to the destination histogram.
The function makes a copy of the histogram. If the second histogram pointer ``*dst`` is NULL, a new histogram of the same size as ``src`` is created. Otherwise, both histograms must have equal types and sizes. Then the function copies the bin values of the source histogram to the destination histogram and sets the same bin value ranges as in ``src``.
.. _createhist:
@ -375,92 +376,54 @@ Creates a histogram.
.. ocv:cfunction:: CvHistogram* cvCreateHist( int dims, int* sizes, int type, float** ranges=NULL, int uniform=1 )
.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges=None, uniform=1) -> hist
:param dims: Number of histogram dimensions.
:param sizes: Array of the histogram dimension sizes.
:param type: Histogram representation format. ``CV_HIST_ARRAY`` means that the histogram data is represented as a multi-dimensional dense array CvMatND. ``CV_HIST_SPARSE`` means that histogram data is represented as a multi-dimensional sparse array ``CvSparseMat``.
:param ranges: Array of ranges for the histogram bins. Its meaning depends on the ``uniform`` parameter value. The ranges are used when the histogram is calculated or backprojected to determine which histogram bin corresponds to which value/tuple of values from the input image(s).
:param uniform: Uniformity flag. If not zero, the histogram has evenly spaced bins and for every :math:`0<=i<cDims` ``ranges[i]`` is an array of two numbers: lower and upper boundaries for the i-th histogram dimension. The whole range [lower,upper] is then split into ``dims[i]`` equal parts to determine the ``i``-th input tuple value ranges for every histogram bin. And if ``uniform=0`` , then the ``i``-th element of the ``ranges`` array contains ``dims[i]+1`` elements: :math:`\texttt{lower}_0, \texttt{upper}_0, \texttt{lower}_1, \texttt{upper}_1 = \texttt{lower}_2, ... \texttt{upper}_{dims[i]-1}` where :math:`\texttt{lower}_j` and :math:`\texttt{upper}_j` are lower and upper boundaries of the ``i``-th input tuple value for the ``j``-th bin, respectively. In either case, the input values that are beyond the specified range for a histogram bin are not counted by :ocv:cfunc:`CalcHist` and filled with 0 by :ocv:cfunc:`CalcBackProject`.
The function creates a histogram of the specified size and returns a pointer to the created histogram. If the array ``ranges`` is 0, the histogram bin ranges must be specified later via the function :ocv:cfunc:`SetHistBinRanges`. Though :ocv:cfunc:`CalcHist` and :ocv:cfunc:`CalcBackProject` may process 8-bit images without setting bin ranges, they assume they are equally spaced in 0 to 255 bins.
GetHistValue\_?D
----------------
Returns a pointer to the histogram bin.
.. ocv:cfunction:: float cvGetHistValue_1D(CvHistogram hist, int idx0)
.. ocv:cfunction:: float cvGetHistValue_2D(CvHistogram hist, int idx0, int idx1)
.. ocv:cfunction:: float cvGetHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvGetHistValue_nD(CvHistogram hist, int idx)
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
The macros ``GetHistValue`` return a pointer to the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function creates a new bin and sets it to 0, unless it exists already.
GetMinMaxHistValue
------------------
Finds the minimum and maximum histogram bins.
.. ocv:cfunction:: void cvGetMinMaxHistValue( const CvHistogram* hist, float* min_value, float* max_value, int* min_idx=NULL, int* max_idx=NULL )
.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (min_value, max_value, min_idx, max_idx)
:param hist: Histogram.
:param min_value: Pointer to the minimum value of the histogram.
:param max_value: Pointer to the maximum value of the histogram.
:param min_idx: Pointer to the array of coordinates for the minimum.
:param max_idx: Pointer to the array of coordinates for the maximum.
The function finds the minimum and maximum histogram bins and their positions. All of the output arguments are optional. In case of several maximums or minimums, the earliest in the lexicographical order (extrema locations) is returned.
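A small sketch with the legacy bindings tying :ocv:cfunc:`CreateHist`, :ocv:cfunc:`CalcHist`, and :ocv:cfunc:`GetMinMaxHistValue` together: ::
    import cv2.cv as cv
    img = cv.LoadImage('image.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
    hist = cv.CreateHist([32], cv.CV_HIST_ARRAY, [[0, 256]], 1)  # 32 uniform bins
    cv.CalcHist([img], hist)
    min_val, max_val, min_idx, max_idx = cv.GetMinMaxHistValue(hist)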
@ -469,19 +432,19 @@ MakeHistHeaderForArray
Makes a histogram out of an array.
.. ocv:cfunction:: CvHistogram* cvMakeHistHeaderForArray( int dims, int* sizes, CvHistogram* hist, float* data, float** ranges=NULL, int uniform=1 )
:param dims: Number of the histogram dimensions.
:param sizes: Array of the histogram dimension sizes.
:param hist: Histogram header initialized by the function.
:param data: Array used to store histogram bins.
:param ranges: Histogram bin ranges. See :ocv:cfunc:`CreateHist` for details.
:param uniform: Uniformity flag. See :ocv:cfunc:`CreateHist` for details.
The function initializes the histogram, whose header and bins are allocated by the user. :ocv:cfunc:`ReleaseHist` does not need to be called afterwards. Only dense histograms can be initialized this way. The function returns ``hist``.
NormalizeHist
@ -490,48 +453,22 @@ Normalizes the histogram.
.. ocv:cfunction:: void cvNormalizeHist( CvHistogram* hist, double factor )
.. ocv:pyoldfunction:: cv.NormalizeHist(hist, factor)-> None
:param hist: Pointer to the histogram.
:param factor: Normalization factor.
The function normalizes the histogram bins by scaling them so that the sum of the bins becomes equal to ``factor``.
QueryHistValue*D
----------------
Queries the value of the histogram bin.
.. ocv:cfunction:: float cvQueryHistValue_1D(CvHistogram hist, int idx0)
.. ocv:cfunction:: float cvQueryHistValue_2D(CvHistogram hist, int idx0, int idx1)
.. ocv:cfunction:: float cvQueryHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvQueryHistValue_nD(CvHistogram hist, const int* idx)
.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float
.. ocv:pyoldfunction:: cv.QueryHistValueND(hist, idx) -> float
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
The macros return the value of the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function returns 0. If the bin is not present in the histogram, no new bin is created.
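For example, with the legacy bindings: ::
    import cv2.cv as cv
    hist = cv.CreateHist([32], cv.CV_HIST_ARRAY, [[0, 256]], 1)
    cv.ClearHist(hist)                    # zero all bins
    v = cv.QueryHistValue_1D(hist, 3)     # value of bin 3, here 0.0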
ReleaseHist
-----------
Releases the histogram.
.. ocv:cfunction:: void cvReleaseHist( CvHistogram** hist )
:param hist: Double pointer to the released histogram.
The function releases the histogram (header and the data). The pointer to the histogram is cleared by the function. If ``*hist`` pointer is already ``NULL``, the function does nothing.
@ -541,12 +478,12 @@ Sets the bounds of the histogram bins.
.. ocv:cfunction:: void cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform=1 )
:param hist: Histogram.
:param ranges: Array of bin ranges arrays. See :ocv:cfunc:`CreateHist` for details.
:param uniform: Uniformity flag. See :ocv:cfunc:`CreateHist` for details.
This is a standalone function for setting bin ranges in the histogram. For a more detailed description of the parameters ``ranges`` and ``uniform``, see the :ocv:cfunc:`CalcHist` function that can initialize the ranges as well. Ranges for the histogram bins must be set before the histogram is calculated or the backproject of the histogram is calculated.
@ -555,37 +492,13 @@ ThreshHist
Thresholds the histogram.
.. ocv:cfunction:: void cvThreshHist( CvHistogram* hist, double threshold )
.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold) -> None
:param hist: Pointer to the histogram.
:param threshold: Threshold level.
The function clears histogram bins that are below the specified threshold.
CalcPGH
-------
Calculates a pair-wise geometrical histogram for a contour.
.. ocv:cfunction:: void cvCalcPGH( const CvSeq* contour, CvHistogram* hist )
.. ocv:pyoldfunction:: cv.CalcPGH(contour, hist)-> None
:param contour: Input contour. Currently, only integer point coordinates are allowed.
:param hist: Calculated histogram. It must be two-dimensional.
The function calculates a 2D pair-wise geometrical histogram (PGH), described in [Iivarinen97]_ for the contour. The algorithm considers every pair of contour
edges. The angle between the edges and the minimum/maximum distances
are determined for every pair. To do this, each of the edges in turn
is taken as the base, while the function loops through all the other
edges. When the base edge and any other edge are considered, the minimum
and maximum distances from the points on the non-base edge and line of
the base edge are selected. The angle between the edges defines the row
of the histogram in which all the bins that correspond to the distance
between the calculated minimum and maximum distances are incremented
(that is, the histogram is transposed relative to the definition in the original paper). The histogram can be used for contour matching.
.. [RubnerSept98] Y. Rubner. C. Tomasi, L.J. Guibas. *The Earth Mover’s Distance as a Metric for Image Retrieval*. Technical Report STAN-CS-TN-98-86, Department of Computer Science, Stanford University, September 1998.
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Särelä, and Ari Visa. *Comparison of Combined Shape Descriptors for Irregular Objects*, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html

@ -12,19 +12,20 @@ Applies an adaptive threshold to an array.
.. ocv:pyfunction:: cv2.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst
.. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, int adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, int threshold_type=CV_THRESH_BINARY, int block_size=3, double param1=5 )
.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None
:param src: Source 8-bit single-channel image.
:param dst: Destination image of the same size and the same type as ``src`` .
:param maxValue: Non-zero value assigned to the pixels for which the condition is satisfied. See the details below.
:param adaptiveMethod: Adaptive thresholding algorithm to use, ``ADAPTIVE_THRESH_MEAN_C`` or ``ADAPTIVE_THRESH_GAUSSIAN_C`` . See the details below.
:param thresholdType: Thresholding type that must be either ``THRESH_BINARY`` or ``THRESH_BINARY_INV`` .
:param blockSize: Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
:param C: Constant subtracted from the mean or weighted mean (see the details below). Normally, it is positive but may be zero or negative as well.
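A minimal Python sketch (block size and constant are typical values, not prescriptions): ::
    import cv2
    gray = cv2.imread('document.png', 0)
    binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 11, 2)  # blockSize=11, C=2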
@ -83,7 +84,7 @@ Converts an image from one color space to another.
:param src: Source image: 8-bit unsigned, 16-bit unsigned ( ``CV_16UC...`` ), or single-precision floating-point.
:param dst: Destination image of the same size and depth as ``src`` .
:param code: Color space conversion code. See the description below.
:param dstCn: Number of channels in the destination image. If the parameter is 0, the number of the channels is derived automatically from ``src`` and ``code`` .
@ -98,7 +99,7 @@ The conventional ranges for R, G, and B channel values are:
0 to 255 for ``CV_8U`` images
*
0 to 65535 for ``CV_16U`` images
*
0 to 1 for ``CV_32F`` images
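Two common conversions in Python: ::
    import cv2
    img = cv2.imread('image.jpg')                  # 8-bit BGR
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)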
@ -414,22 +415,22 @@ Calculates the distance to the closest zero pixel for each pixel of the source i
.. ocv:function:: void distanceTransform( InputArray src, OutputArray dst, OutputArray labels, int distanceType, int maskSize, int labelType=DIST_LABEL_CCOMP )
.. ocv:pyfunction:: cv2.distanceTransform(src, distanceType, maskSize[, dst]) -> dst
.. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distance_type=CV_DIST_L2, int mask_size=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP )
.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distance_type=CV_DIST_L2, mask_size=3, mask=None, labels=None) -> None
:param src: 8-bit, single-channel (binary) source image.
:param dst: Output image with calculated distances. It is a 32-bit floating-point, single-channel image of the same size as ``src`` .
:param distanceType: Type of distance. It can be ``CV_DIST_L1, CV_DIST_L2`` , or ``CV_DIST_C`` .
:param maskSize: Size of the distance transform mask. It can be 3, 5, or ``CV_DIST_MASK_PRECISE`` (the latter option is only supported by the first function). In case of the ``CV_DIST_L1`` or ``CV_DIST_C`` distance type, the parameter is forced to 3 because a :math:`3\times 3` mask gives the same result as :math:`5\times 5` or any larger aperture.
:param labels: Optional output 2D array of labels (the discrete Voronoi diagram). It has the type ``CV_32SC1`` and the same size as ``src`` . See the details below.
:param labelType: Type of the label array to build. If ``labelType==DIST_LABEL_CCOMP`` then each connected component of zeros in ``src`` (as well as all the non-zero pixels closest to the connected component) will be assigned the same label. If ``labelType==DIST_LABEL_PIXEL`` then each zero pixel (and all the non-zero pixels closest to it) gets its own label.
The functions ``distanceTransform`` calculate the approximate or precise distance from every binary image pixel to the nearest zero pixel.
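For instance, a hedged C++ sketch (``edges`` is an assumed 8-bit binary image, e.g. an inverted edge map): ::

    Mat dist;
    distanceTransform(edges, dist, CV_DIST_L2, CV_DIST_MASK_PRECISE);
    // scale to 0..1 for visualization purposes only
    normalize(dist, dist, 0, 1., NORM_MINMAX);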
@ -483,22 +484,22 @@ floodFill
-------------
Fills a connected component with the given color.
.. ocv:function:: int floodFill( InputOutputArray image, Point seedPoint, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
.. ocv:function:: int floodFill( InputOutputArray image, InputOutputArray mask, Point seedPoint, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
.. ocv:pyfunction:: cv2.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]]) -> retval, rect
.. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seed_point, CvScalar new_val, CvScalar lo_diff=cvScalarAll(0), CvScalar up_diff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.FloodFill(image, seed_point, new_val, lo_diff=(0, 0, 0, 0), up_diff=(0, 0, 0, 0), flags=4, mask=None)-> comp
:param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below.
:param mask: (For the second function only) Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function uses and updates the mask, so you take responsibility of initializing the ``mask`` content. Flood-filling cannot go across non-zero pixels in the mask. For example, an edge detector output can be used as a mask to stop filling at edges. It is possible to use the same mask in multiple calls to the function to make sure the filled area does not overlap.
.. note:: Since the mask is larger than the filled image, a pixel :math:`(x, y)` in ``image`` corresponds to the pixel :math:`(x+1, y+1)` in the ``mask`` .
:param seedPoint: Starting point.
:param newVal: New value of the repainted domain pixels.
@ -517,18 +518,18 @@ Fills a connected component with the given color.
The functions ``floodFill`` fill a connected component starting from the seed point with the specified color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The pixel at
:math:`(x,y)` is considered to belong to the repainted domain if:
*
.. math::
\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}
in case of a grayscale image and floating range
*
.. math::
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}
in case of a grayscale image and fixed range
@ -541,7 +542,7 @@ The functions ``floodFill`` fill a connected component starting from the seed po
.. math::
\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g
and
.. math::
@ -555,17 +556,17 @@ The functions ``floodFill`` fill a connected component starting from the seed po
.. math::
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,
.. math::
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g
and
.. math::
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b
in case of a color image and fixed range
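A hedged usage sketch of the first variant (the file name and seed point are assumptions): ::

    Mat img = imread("fruits.jpg");
    Rect region;
    // repaint the component around (50,50) in red, accepting neighbors
    // that differ by at most 20 per channel (floating range)
    int area = floodFill(img, Point(50,50), Scalar(0,0,255), &region,
                         Scalar(20,20,20), Scalar(20,20,20), 4);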
@ -588,11 +589,11 @@ integral
------------
Calculates the integral of an image.
.. ocv:function:: void integral( InputArray src, OutputArray sum, int sdepth=-1 )
.. ocv:function:: void integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth=-1 )
.. ocv:function:: void integral( InputArray src, OutputArray sum, OutputArray sqsum, OutputArray tilted, int sdepth=-1 )
.. ocv:pyfunction:: cv2.integral(src[, sum[, sdepth]]) -> sum
@ -600,7 +601,8 @@ Calculates the integral of an image.
.. ocv:pyfunction:: cv2.integral3(src[, sum[, sqsum[, tilted[, sdepth]]]]) -> sum, sqsum, tilted
.. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tilted_sum=NULL )
.. ocv:pyoldfunction:: cv.Integral(image, sum, sqsum=None, tiltedSum=None)-> None
:param image: Source image as :math:`W \times H` , 8-bit or floating-point (32f or 64f).
@ -610,7 +612,7 @@ Calculates the integral of an image.
:param sqsum: Integral image for squared pixel values. It is :math:`(W+1)\times (H+1)`, double-precision floating-point (64f) array.
:param tilted: Integral for the image rotated by 45 degrees. It is :math:`(W+1)\times (H+1)` array with the same data type as ``sum``.
:param sdepth: Desired depth of the integral and the tilted integral images, ``CV_32S``, ``CV_32F``, or ``CV_64F``.
The functions calculate one or more integral images for the source image as follows:
@ -647,22 +649,23 @@ threshold
-------------
Applies a fixed-level threshold to each array element.
.. ocv:function:: double threshold( InputArray src, OutputArray dst, double thresh, double maxval, int type )
.. ocv:pyfunction:: cv2.threshold(src, thresh, maxval, type[, dst]) -> retval, dst
.. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double max_value, int threshold_type )
.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None
:param src: Source array (single-channel, 8-bit or 32-bit floating point).
:param dst: Destination array of the same size and type as ``src`` .
:param thresh: Threshold value.
:param maxval: Maximum value to use with the ``THRESH_BINARY`` and ``THRESH_BINARY_INV`` thresholding types.
:param type: Thresholding type (see the details below).
The function applies fixed-level thresholding
to a single-channel array. The function is typically used to get a
@ -670,19 +673,19 @@ bi-level (binary) image out of a grayscale image (
:ocv:func:`compare` could
be also used for this purpose) or for removing a noise, that is, filtering
out pixels with too small or too large values. There are several
types of thresholding supported by the function. They are determined by ``type`` :
* **THRESH_BINARY**
.. math::
\texttt{dst} (x,y) = \fork{\texttt{maxval}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
* **THRESH_BINARY_INV**
.. math::
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{maxval}}{otherwise}
* **THRESH_TRUNC**
@ -706,7 +709,7 @@ Also, the special value ``THRESH_OTSU`` may be combined with
one of the above values. In this case, the function determines the optimal threshold
value using Otsu's algorithm and uses it instead of the specified ``thresh`` .
The function returns the computed threshold value.
Currently, Otsu's method is implemented only for 8-bit images.
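For example, a minimal sketch of Otsu binarization (``gray`` is an assumed 8-bit single-channel image): ::

    Mat bw;
    // thresh=0 is ignored when THRESH_OTSU is set;
    // the value computed by Otsu's method is returned
    double otsu = threshold(gray, bw, 0, 255, THRESH_BINARY | THRESH_OTSU);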
.. image:: pics/threshold.png
@ -748,11 +751,11 @@ grabCut
-------
Runs the GrabCut algorithm.
.. ocv:function:: void grabCut( InputArray img, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel, int iterCount, int mode=GC_EVAL )
.. ocv:pyfunction:: cv2.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount[, mode]) -> None
:param img: Input 8-bit 3-channel image.
:param mask: Input/output 8-bit single-channel mask. The mask is initialized by the function when ``mode`` is set to ``GC_INIT_WITH_RECT``. Its elements may have one of the following values:
@ -765,13 +768,13 @@ Runs the GrabCut algorithm.
* **GC_PR_BGD** defines a possible background pixel.
:param rect: ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when ``mode==GC_INIT_WITH_RECT`` .
:param bgdModel: Temporary array for the background model. Do not modify it while you are processing the same image.
:param fgdModel: Temporary array for the foreground model. Do not modify it while you are processing the same image.
:param iterCount: Number of iterations the algorithm should make before returning the result. Note that the result can be refined with further calls with ``mode==GC_INIT_WITH_MASK`` or ``mode==GC_EVAL`` .
:param mode: Operation mode that can be one of the following (see the sketch after this list):
* **GC_INIT_WITH_RECT** The function initializes the state and the mask using the provided rectangle. After that it runs ``iterCount`` iterations of the algorithm.
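As a hedged illustration of the rectangle-initialized mode (the file name and ROI are assumptions, not part of the original documentation): ::

    Mat img = imread("person.jpg");
    Mat mask, bgdModel, fgdModel;
    Rect roi(50, 50, 200, 300);   // rough box around the object
    grabCut(img, mask, roi, bgdModel, fgdModel, 5, GC_INIT_WITH_RECT);
    // keep the pixels labeled as certain or probable foreground
    Mat fg = (mask == GC_FGD) | (mask == GC_PR_FGD);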

@ -11,8 +11,9 @@ Adds an image to the accumulator.
.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> None
.. ocv:cfunction:: void cvAcc( const CvArr* image, CvArr* sum, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Acc(image, sum, mask=None) -> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
@ -46,8 +47,9 @@ Adds the square of a source image to the accumulator.
.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> None
.. ocv:cfunction:: void cvSquareAcc( const CvArr* image, CvArr* sqsum, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.SquareAcc(image, sqsum, mask=None) -> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
@ -79,13 +81,14 @@ Adds the per-element product of two input images to the accumulator.
.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> None
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MultiplyAcc(image1, image2, acc, mask=None)-> None
:param src1: First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
:param src2: Second input image of the same type and the same size as ``src1`` .
:param dst: Accumulator with the same number of channels as input images, 32-bit or 64-bit floating-point.
:param mask: Optional operation mask.
@ -114,8 +117,8 @@ Updates a running average.
.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> None
.. ocv:cfunction:: void cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.RunningAvg(image, acc, alpha, mask=None)-> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
@ -138,7 +141,7 @@ The function supports multi-channel images. Each channel is processed independen
:ocv:func:`accumulate`,
:ocv:func:`accumulateSquare`,
:ocv:func:`accumulateProduct`
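For instance, a running-average background model can be maintained as in the following hedged sketch (the camera index is an assumption; the accumulator must be 32-bit or 64-bit floating point): ::

    VideoCapture cap(0);
    Mat frame, avg;
    cap >> frame;
    frame.convertTo(avg, CV_32F);   // initialize the accumulator
    for(;;)
    {
        cap >> frame;
        if( frame.empty() ) break;
        // avg = (1 - 0.05)*avg + 0.05*frame
        accumulateWeighted(frame, avg, 0.05);
    }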
@ -206,7 +209,7 @@ This function computes a Hanning window coefficients in two dimensions. See http
An example is shown below: ::
// create hanning window of size 100x100 and type CV_32F
Mat hann;
createHanningWindow(hann, Size(100, 100), CV_32F);

@ -7,7 +7,7 @@ matchTemplate
-----------------
Compares a template against overlapped image regions.
.. ocv:function:: void matchTemplate( InputArray image, InputArray templ, OutputArray result, int method )
.. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result
@ -19,7 +19,7 @@ Compares a template against overlapped image regions.
:param templ: Searched template. It must not be greater than the source image and must have the same data type.
:param result: Map of comparison results. It must be single-channel 32-bit floating-point. If ``image`` is :math:`W \times H` and ``templ`` is :math:`w \times h` , then ``result`` is :math:`(W-w+1) \times (H-h+1)` .
:param method: Parameter specifying the comparison method (see below).
The function slides through ``image`` , compares the overlapped patches of size :math:`w \times h` against ``templ`` using the specified method, and stores the comparison results in ``result`` .
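A typical follow-up locates the best match with :ocv:func:`minMaxLoc`, as in this hedged sketch (the file names are placeholders): ::

    Mat img = imread("scene.png", 0), templ = imread("patch.png", 0);
    Mat result;
    matchTemplate(img, templ, result, CV_TM_CCOEFF_NORMED);
    double maxVal; Point maxLoc;
    minMaxLoc(result, 0, &maxVal, 0, &maxLoc);
    Rect match(maxLoc, templ.size());   // location of the best match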

@ -11,13 +11,14 @@ Calculates all of the moments up to the third order of a polygon or rasterized s
.. ocv:pyfunction:: cv2.moments(array[, binaryImage]) -> retval
.. ocv:cfunction:: void cvMoments( const CvArr* arr, CvMoments* moments, int binary=0 )
.. ocv:pyoldfunction:: cv.Moments(arr, binary=0) -> moments
:param array: Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( :math:`1 \times N` or :math:`N \times 1` ) of 2D points (``Point`` or ``Point2f`` ).
:param binaryImage: If it is true, all non-zero image pixels are treated as 1's. The parameter is used for images only.
:param moments: Output moments.
The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The results are returned in the structure ``Moments`` defined as: ::
@ -30,7 +31,7 @@ The function computes moments, up to the 3rd order, of a vector shape or a raste
double m02, double m30, double m21, double m12, double m03 );
Moments( const CvMoments& moments );
operator CvMoments() const;
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
@ -69,7 +70,7 @@ The normalized central moments
.. note::
:math:`\texttt{mu}_{00}=\texttt{m}_{00}`,
:math:`\texttt{nu}_{00}=1`
:math:`\texttt{nu}_{10}=\texttt{mu}_{10}=\texttt{mu}_{01}=\texttt{nu}_{01}=0` , hence the values are not stored.
The moments of a contour are defined in the same way but computed using the Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to a limited raster resolution, the moments computed for a contour are slightly different from the moments computed for the same rasterized contour.
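For instance, the centroid of a shape follows directly from the spatial moments, :math:`\bar{x} = m_{10}/m_{00}` and :math:`\bar{y} = m_{01}/m_{00}` (a hedged sketch; ``contour`` is assumed to be filled, e.g. by :ocv:func:`findContours`): ::

    vector<Point> contour;   // assumed to be filled elsewhere
    Moments m = moments(contour);
    Point2f centroid((float)(m.m10/m.m00), (float)(m.m01/m.m00));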
@ -89,11 +90,13 @@ HuMoments
-------------
Calculates seven Hu invariants.
.. ocv:function:: void HuMoments( const Moments& m, OutputArray hu )
.. ocv:function:: void HuMoments( const Moments& moments, double hu[7] )
.. ocv:pyfunction:: cv2.HuMoments(m[, hu]) -> hu
.. ocv:cfunction:: void cvGetHuMoments( CvMoments* moments, CvHuMoments* hu_moments )
.. ocv:pyoldfunction:: cv.GetHuMoments(moments) -> hu
@ -126,9 +129,9 @@ Finds contours in a binary image.
.. ocv:pyfunction:: cv2.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) )
.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> contours
:param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours.
@ -163,87 +166,6 @@ The function retrieves contours from the binary image using the algorithm
.. note:: If you use the new Python interface then the ``CV_`` prefix has to be omitted in contour retrieval mode and contour approximation method parameters (for example, use ``cv2.RETR_LIST`` and ``cv2.CHAIN_APPROX_NONE`` parameters). If you use the old Python interface then these parameters have the ``CV_`` prefix (for example, use ``cv.CV_RETR_LIST`` and ``cv.CV_CHAIN_APPROX_NONE``).
drawContours
----------------
Draws contour outlines or filled contours.
.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() )
.. ocv:pyfunction:: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> None
.. ocv:cfunction:: void cvDrawContours( CvArr *img, CvSeq* contour, CvScalar externalColor, CvScalar holeColor, int maxLevel, int thickness=1, int lineType=8 )
.. ocv:pyoldfunction:: cv.DrawContours(img, contour, externalColor, holeColor, maxLevel, thickness=1, lineType=8, offset=(0, 0))-> None
:param image: Destination image.
:param contours: All the input contours. Each contour is stored as a point vector.
:param contourIdx: Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
:param color: Color of the contours.
:param thickness: Thickness of lines the contours are drawn with. If it is negative (for example, ``thickness=CV_FILLED`` ), the contour interiors are
drawn.
:param lineType: Line connectivity. See :ocv:func:`line` for details.
:param hierarchy: Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see ``maxLevel`` ).
:param maxLevel: Maximal level for drawn contours. If it is 0, only
the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is ``hierarchy`` available.
:param offset: Optional contour shift parameter. Shift all the drawn contours by the specified :math:`\texttt{offset}=(dx,dy)` .
:param contour: Pointer to the first contour.
:param externalColor: Color of external contours.
:param holeColor: Color of internal contours (holes).
The function draws contour outlines in the image if
:math:`\texttt{thickness} \ge 0` or fills the area bounded by the contours if
:math:`\texttt{thickness}<0` . The example below shows how to retrieve connected components from the binary image and label them: ::
    #include "cv.h"
    #include "highgui.h"

    using namespace cv;

    int main( int argc, char** argv )
    {
        Mat src;
        // the first command-line parameter must be a filename of the binary
        // (black-n-white) image
        if( argc != 2 || !(src=imread(argv[1], 0)).data)
            return -1;

        Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);

        src = src > 1;
        namedWindow( "Source", 1 );
        imshow( "Source", src );

        vector<vector<Point> > contours;
        vector<Vec4i> hierarchy;

        findContours( src, contours, hierarchy,
            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

        // iterate through all the top-level contours,
        // draw each connected component with its own random color
        int idx = 0;
        for( ; idx >= 0; idx = hierarchy[idx][0] )
        {
            Scalar color( rand()&255, rand()&255, rand()&255 );
            drawContours( dst, contours, idx, color, CV_FILLED, 8, hierarchy );
        }

        namedWindow( "Components", 1 );
        imshow( "Components", dst );
        waitKey(0);
    }
approxPolyDP
----------------
@ -253,28 +175,28 @@ Approximates a polygonal curve(s) with the specified precision.
.. ocv:pyfunction:: cv2.approxPolyDP(curve, epsilon, closed[, approxCurve]) -> approxCurve
.. ocv:cfunction:: CvSeq* cvApproxPoly( const void* src_seq, int header_size, CvMemStorage* storage, int method, double eps, int recursive=0 )
:param curve: Input vector of 2D points, stored in:
* ``std::vector`` or ``Mat`` (C++ interface)
* ``Nx2`` numpy array (Python interface)
* ``CvSeq`` or ``CvMat`` (C interface)
:param approxCurve: Result of the approximation. The type should match the type of the input curve. In case of C interface the approximated curve is stored in the memory storage and pointer to it is returned.
:param epsilon: Parameter specifying the approximation accuracy. This is the maximum distance between the original curve and its approximation.
:param closed: If true, the approximated curve is closed (its first and last vertices are connected). Otherwise, it is not closed.
:param header_size: Header size of the approximated curve. Normally, ``sizeof(CvContour)`` is used.
:param storage: Memory storage where the approximated curve is stored.
:param method: Contour approximation algorithm. Only ``CV_POLY_APPROX_DP`` is supported.
:param recursive: Recursion flag. If it is non-zero and ``curve`` is ``CvSeq*``, the function ``cvApproxPoly`` approximates all the contours accessible from ``curve`` by ``h_next`` and ``v_next`` links.
The functions ``approxPolyDP`` approximate a curve or a polygon with another curve/polygon with fewer vertices so that the distance between them is less than or equal to the specified precision. It uses the Douglas-Peucker algorithm
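A common heuristic ties the tolerance to the contour perimeter, as in this hedged sketch (``curve`` is an assumed input contour): ::

    vector<Point> curve;     // assumed to be filled elsewhere
    vector<Point> approx;
    double eps = 0.02 * arcLength(curve, true);   // 2% of the perimeter
    approxPolyDP(curve, approx, eps, true);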
@ -287,22 +209,22 @@ ApproxChains
-------------
Approximates Freeman chain(s) with a polygonal curve.
.. ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* src_seq, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimal_perimeter=0, int recursive=0 )
.. ocv:pyoldfunction:: cv.ApproxChains(src_seq, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimal_perimeter=0, recursive=0)-> contours
:param src_seq: Pointer to the approximated Freeman chain that can refer to other chains.
:param storage: Storage location for the resulting polylines.
:param method: Approximation method (see the description of the function :ocv:cfunc:`FindContours` ).
:param parameter: Method parameter (not used now).
:param minimal_perimeter: Approximates only those contours whose perimeters are not less than ``minimal_perimeter`` . Other chains are removed from the resulting structure.
:param recursive: Recursion flag. If it is non-zero, the function approximates all chains that can be obtained from ``src_seq`` by using the ``h_next`` or ``v_next`` links. Otherwise, the single input chain is approximated.
This is a standalone contour approximation routine, not represented in the new interface. When :ocv:cfunc:`FindContours` retrieves contours as Freeman chains, it calls the function to get approximated contours, represented as polygons.
@ -314,8 +236,9 @@ Calculates a contour perimeter or a curve length.
.. ocv:pyfunction:: cv2.arcLength(curve, closed) -> retval
.. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int is_closed=-1 )
.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1) -> float
:param curve: Input vector of 2D points, stored in ``std::vector`` or ``Mat``.
@ -351,9 +274,9 @@ Calculates a contour area.
.. ocv:pyfunction:: cv2.contourArea(contour[, oriented]) -> retval
.. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ, int oriented=0 )
.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ) -> float
:param contour: Input vector of 2D points (contour vertices), stored in ``std::vector`` or ``Mat``.
@ -390,22 +313,22 @@ Finds the convex hull of a point set.
.. ocv:function:: void convexHull( InputArray points, OutputArray hull, bool clockwise=false, bool returnPoints=true )
.. ocv:pyfunction:: cv2.convexHull(points[, hull[, clockwise[, returnPoints]]]) -> hull
.. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0 )
.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, return_points=0) -> convexHull
:param points: Input 2D point set, stored in ``std::vector`` or ``Mat``.
:param hull: Output convex hull. It is either an integer vector of indices or vector of points. In the first case, the ``hull`` elements are 0-based indices of the convex hull points in the original array (since the set of convex hull points is a subset of the original point set). In the second case, ``hull`` elements are the convex hull points themselves.
:param hull_storage: Output memory storage in the old API (``cvConvexHull2`` returns a sequence containing the convex hull points or their indices).
:param clockwise: Orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The usual screen coordinate system is assumed so that the origin is at the top-left corner, x axis is oriented to the right, and y axis is oriented downwards.
:param orientation: Convex hull orientation parameter in the old API, ``CV_CLOCKWISE`` or ``CV_COUNTERCLOCKWISE``.
:param returnPoints: Operation flag. In case of a matrix, when the flag is true, the function returns convex hull points. Otherwise, it returns indices of the convex hull points. When the output array is ``std::vector``, the flag is ignored, and the output depends on the type of the vector: ``std::vector<int>`` implies ``returnPoints=false``, ``std::vector<Point>`` implies ``returnPoints=true``.
The functions find the convex hull of a 2D point set using Sklansky's algorithm
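Both output forms can be requested, as in this hedged sketch (``points`` is an assumed point set): ::

    vector<Point> points;    // assumed to be filled elsewhere
    vector<int> hullIdx;
    convexHull(points, hullIdx);      // std::vector<int> yields indices
    vector<Point> hullPts;
    convexHull(points, hullPts);      // std::vector<Point> yields the points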
@ -420,20 +343,20 @@ Finds the convexity defects of a contour.
.. ocv:function:: void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects )
.. ocv:pyfunction:: cv2.convexityDefects(contour, convexhull[, convexityDefects]) -> convexityDefects
.. ocv:cfunction:: CvSeq* cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, CvMemStorage* storage=NULL )
.. ocv:pyoldfunction:: cv.ConvexityDefects(contour, convexhull, storage)-> convexityDefects
:param contour: Input contour.
:param convexhull: Convex hull obtained using :ocv:func:`convexHull` that should contain indices of the contour points that make the hull.
:param convexityDefects: The output vector of convexity defects. In C++ and the new Python/Java interface each convexity defect is represented as a 4-element integer vector (a.k.a. ``cv::Vec4i``): ``(start_index, end_index, farthest_pt_index, fixpt_depth)``, where indices are 0-based indices in the original contour of the convexity defect beginning, end and the farthest point, and ``fixpt_depth`` is a fixed-point approximation (with 8 fractional bits) of the distance between the farthest contour point and the hull. That is, the floating-point value of the depth is ``fixpt_depth/256.0``. In the C interface, a convexity defect is represented by the ``CvConvexityDefect`` structure (see below).
:param storage: Container for the output sequence of convexity defects. If it is NULL, the contour or hull (in that order) storage is used.
The function finds all convexity defects of the input contour and returns a sequence of the ``CvConvexityDefect`` structures, where ``CvConvexityDefect`` is defined as: ::
struct CvConvexityDefect
@ -460,7 +383,7 @@ Fits an ellipse around a set of 2D points.
.. ocv:pyoldfunction:: cv.FitEllipse2(points)-> Box2D
:param points: Input 2D point set, stored in:
* ``std::vector<>`` or ``Mat`` (C++ interface)
* ``CvSeq*`` or ``CvMat*`` (C interface)
@ -475,10 +398,11 @@ Fits a line to a 2D or 3D point set.
.. ocv:function:: void fitLine( InputArray points, OutputArray line, int distType, double param, double reps, double aeps )
.. ocv:pyfunction:: cv2.fitLine(points, distType, param, reps, aeps[, line]) -> line
.. ocv:cfunction:: void cvFitLine( const CvArr* points, int dist_type, double param, double reps, double aeps, float* line )
.. ocv:pyoldfunction:: cv.FitLine(points, dist_type, param, reps, aeps) -> line
:param points: Input vector of 2D or 3D points, stored in ``std::vector<>`` or ``Mat``.
@ -489,7 +413,7 @@ Fits a line to a 2D or 3D point set.
:param param: Numerical parameter ( ``C`` ) for some types of distances. If it is 0, an optimal value is chosen.
:param reps: Sufficient accuracy for the radius (distance between the coordinate origin and the line).
:param aeps: Sufficient accuracy for the angle. 0.01 would be a good default value for ``reps`` and ``aeps``.
The function ``fitLine`` fits a line to a 2D or 3D point set by minimizing :math:`\sum_i \rho(r_i)` , where :math:`r_i` is the distance between the :math:`i^{th}` point and the line, and :math:`\rho(r)` is a distance function.
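A minimal sketch for the 2D case (``pts`` is an assumed point set): ::

    vector<Point2f> pts;   // assumed to be filled elsewhere
    Vec4f line;            // (vx, vy, x0, y0): direction and a point on the line
    fitLine(pts, line, CV_DIST_L2, 0, 0.01, 0.01);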
@ -554,7 +478,7 @@ Tests a contour convexity.
.. ocv:pyoldfunction:: cv.CheckContourConvexity(contour)-> int
:param contour: Input vector of 2D points, stored in:
* ``std::vector<>`` or ``Mat`` (C++ interface)
* ``CvSeq*`` or ``CvMat*`` (C interface)
@ -575,14 +499,14 @@ Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
.. ocv:cfunction:: CvBox2D cvMinAreaRect2( const CvArr* points, CvMemStorage* storage=NULL )
.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None) -> Box2D
:param points: Input vector of 2D points, stored in:
* ``std::vector<>`` or ``Mat`` (C++ interface)
* ``CvSeq*`` or ``CvMat*`` (C interface)
* Nx2 numpy array (Python interface)
The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a specified point set. See the OpenCV sample ``minarea.cpp`` .
@ -595,18 +519,18 @@ Finds a circle of the minimum area enclosing a 2D point set.
.. ocv:function:: void minEnclosingCircle( InputArray points, Point2f& center, float& radius )
.. ocv:pyfunction:: cv2.minEnclosingCircle(points) -> center, radius
.. ocv:cfunction:: int cvMinEnclosingCircle( const CvArr* points, CvPoint2D32f* center, float* radius )
.. ocv:pyoldfunction:: cv.MinEnclosingCircle(points)-> (int, center, radius)
:param points: Input vector of 2D points, stored in:
* ``std::vector<>`` or ``Mat`` (C++ interface)
* ``CvSeq*`` or ``CvMat*`` (C interface)
* Nx2 numpy array (Python interface)
:param center: Output center of the circle.
@ -621,12 +545,12 @@ matchShapes
---------------
Compares two shapes.
.. ocv:function:: double matchShapes( InputArray contour1, InputArray contour2, int method, double parameter )
.. ocv:pyfunction:: cv2.matchShapes(contour1, contour2, method, parameter) -> retval
.. ocv:cfunction:: double cvMatchShapes( const void* object1, const void* object2, int method, double parameter=0 )
.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0) -> float
:param object1: First contour or grayscale image.
@ -680,8 +604,8 @@ Performs a point-in-contour test.
.. ocv:pyfunction:: cv2.pointPolygonTest(contour, pt, measureDist) -> retval
.. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measure_dist )
.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measure_dist) -> float
:param contour: Input contour.
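A hedged sketch of the signed-distance query (``contour`` is an assumed polygon): ::

    vector<Point2f> contour;   // assumed to be filled elsewhere
    double d = pointPolygonTest(contour, Point2f(10, 10), true);
    // d > 0: inside; d < 0: outside; d == 0: on an edge.
    // With measureDist=true, |d| is the distance to the nearest edge.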

@ -60,20 +60,20 @@ namespace cv
//! various border interpolation methods
enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,
BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP,
BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101,
BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT,
BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };
//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType );
/*!
The Base Class for 1D or Row-wise Filters
This is the base class for linear or non-linear filters that process 1D data.
In particular, such filters are used for the "horizontal" filtering parts in separable filters.
Several functions in OpenCV return Ptr<BaseRowFilter> for the specific types of filters,
and those pointers can be used directly or within cv::FilterEngine.
*/
@ -93,17 +93,17 @@ public:
/*!
The Base Class for Column-wise Filters
This is the base class for linear or non-linear filters that process columns of 2D arrays.
Such filters are used for the "vertical" filtering parts in separable filters.
Several functions in OpenCV return Ptr<BaseColumnFilter> for the specific types of filters,
and those pointers can be used directly or within cv::FilterEngine.
Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information,
e.g. the box filter keeps the sliding sum of elements. To reset the state, BaseColumnFilter::reset()
must be called (e.g. the method is called by cv::FilterEngine)
*/
class CV_EXPORTS BaseColumnFilter
{
public:
@ -121,15 +121,15 @@ public:
/*!
The Base Class for Non-Separable 2D Filters.
This is the base class for linear or non-linear 2D filters.
Several functions in OpenCV return Ptr<BaseFilter> for the specific types of filters,
and those pointers can be used directly or within cv::FilterEngine.
Similar to cv::BaseColumnFilter, the class may have some context information,
that should be reset using BaseFilter::reset() method before processing the new array.
*/
class CV_EXPORTS BaseFilter
{
public:
@ -148,7 +148,7 @@ public:
/*!
The Main Class for Image Filtering.
The class can be used to apply an arbitrary filtering operation to an image.
It contains all the necessary intermediate buffers, and it computes extrapolated values
of the "virtual" pixels outside of the image, etc.
@ -156,45 +156,45 @@ public:
are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(),
cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(),
cv::createBoxFilter() and cv::createMorphologyFilter().
Using the class you can process large images by parts and build complex pipelines
that include filtering as some of the stages. If all you need is to apply some pre-defined
filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc.
functions that create FilterEngine internally.
Here is an example of how to use the class to implement the Laplacian operator, which is the sum of
second-order derivatives. A more complex variant for different types is implemented in cv::Laplacian().
\code
void laplace_f(const Mat& src, Mat& dst)
{
    CV_Assert( src.type() == CV_32F );
    // make sure the destination array has the proper size and type
    dst.create(src.size(), src.type());

    // get the derivative and smooth kernels for d2I/dx2.
    // for d2I/dy2 we could use the same kernels, just swapped
    Mat kd, ks;
    getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );

    // let's process 10 source rows at once
    int DELTA = std::min(10, src.rows);
    Ptr<FilterEngine> Fxx = createSeparableLinearFilter(src.type(),
        dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
    Ptr<FilterEngine> Fyy = createSeparableLinearFilter(src.type(),
        dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );

    int y = Fxx->start(src), dsty = 0, dy = 0;
    Fyy->start(src);
    const uchar* sptr = src.data + y*src.step;

    // allocate the buffers for the spatial image derivatives;
    // the buffers need to have more than DELTA rows, because at the
    // last iteration the output may take max(kd.rows-1,ks.rows-1)
    // rows more than the input.
    Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() );
    Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );

    // inside the loop we always pass DELTA rows to the filter
    // (note that the "proceed" method takes care of possible overflow, since
    // it was given the actual image height in the "start" method)
@ -241,7 +241,7 @@ public:
int srcType, int dstType, int bufType,
int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,
const Scalar& _borderValue=Scalar());
//! starts filtering of the specified ROI of an image of size wholeSize.
virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);
//! starts filtering of the specified ROI of the specified image.
virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),
@ -256,10 +256,10 @@ public:
bool isolated=false);
//! returns true if the filter is separable
bool isSeparable() const { return (const BaseFilter*)filter2D == 0; }
//! returns the number
int remainingInputRows() const;
int remainingOutputRows() const;
int srcType, dstType, bufType;
Size ksize;
Point anchor;
@ -276,7 +276,7 @@ public:
vector<uchar> constBorderRow;
int bufStep, startY, startY0, endY, rowCount, dstY;
vector<uchar*> rows;
Ptr<BaseFilter> filter2D;
Ptr<BaseRowFilter> rowFilter;
Ptr<BaseColumnFilter> columnFilter;
@ -309,16 +309,16 @@ CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
//! returns the separable linear filter engine
CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
InputArray rowKernel, InputArray columnKernel,
Point anchor=Point(-1,-1), double delta=0,
int rowBorderType=BORDER_DEFAULT,
int columnBorderType=-1,
const Scalar& borderValue=Scalar());
//! returns the non-separable linear filter engine
CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
InputArray kernel, Point _anchor=Point(-1,-1),
double delta=0, int rowBorderType=BORDER_DEFAULT,
int columnBorderType=-1, const Scalar& borderValue=Scalar());
//! returns the Gaussian kernel with the specified parameters
CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );
@ -335,7 +335,7 @@ CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky,
CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,
int dx, int dy, int ksize,
int borderType=BORDER_DEFAULT );
//! returns horizontal 1D box filter
CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,
int ksize, int anchor=-1);
//! returns vertical 1D box filter
@ -347,11 +347,11 @@ CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksi
Point anchor=Point(-1,-1),
bool normalize=true,
int borderType=BORDER_DEFAULT);
//! returns the Gabor kernel with the specified parameters
CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd,
double gamma, double psi=CV_PI*0.5, int ktype=CV_64F );
//! type of morphological operation
enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE,
MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE,
@ -365,15 +365,15 @@ CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int
//! returns 2D morphological filter
CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray kernel,
Point anchor=Point(-1,-1));
//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation.
static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }
//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray kernel,
Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT,
int columnBorderType=-1,
const Scalar& borderValue=morphologyDefaultBorderValue());
//! shape of the structuring element
enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };
@ -382,7 +382,7 @@ CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point
template<> CV_EXPORTS void Ptr<IplConvKernel>::delete_obj();
//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst,
int top, int bottom, int left, int right,
int borderType, const Scalar& value=Scalar() );
@ -392,7 +392,7 @@ CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );
//! smooths the image using Gaussian filter.
CV_EXPORTS_W void GaussianBlur( InputArray src,
OutputArray dst, Size ksize,
double sigmaX, double sigmaY=0,
int borderType=BORDER_DEFAULT );
//! smooths the image using bilateral filter
CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
@ -418,7 +418,7 @@ CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,
InputArray kernelX, InputArray kernelY,
Point anchor=Point(-1,-1),
double delta=0, int borderType=BORDER_DEFAULT );
//! applies generalized Sobel operator to the image
CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,
int dx, int dy, int ksize=3,
@ -452,7 +452,7 @@ CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
// low-level function for computing eigenvalues and eigenvectors of 2x2 matrices
CV_EXPORTS void eigen2x2( const float* a, float* e, int n );
//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariance matrix at each pixel. The output is stored as 6-channel matrix.
CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst,
int blockSize, int ksize,
@ -483,7 +483,7 @@ CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,
double rho, double theta, int threshold,
double minLineLength=0, double maxLineGap=0 );
//! finds circles in the grayscale image using 2+1 gradient Hough transform
CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles,
int method, double dp, double minDist,
double param1=100, double param2=100,
@ -494,13 +494,13 @@ CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel,
Point anchor=Point(-1,-1), int iterations=1,
int borderType=BORDER_CONSTANT,
const Scalar& borderValue=morphologyDefaultBorderValue() );
//! dilates the image (applies the local maximum operator)
CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel,
Point anchor=Point(-1,-1), int iterations=1,
int borderType=BORDER_CONSTANT,
const Scalar& borderValue=morphologyDefaultBorderValue() );
//! applies an advanced morphological operation to the image
CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst,
int op, InputArray kernel,
@ -531,7 +531,7 @@ CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst,
int flags=INTER_LINEAR,
int borderMode=BORDER_CONSTANT,
const Scalar& borderValue=Scalar());
//! warps the image using perspective transformation
CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst,
InputArray M, Size dsize,
@ -556,7 +556,7 @@ CV_EXPORTS_W void remap( InputArray src, OutputArray dst,
CV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2,
OutputArray dstmap1, OutputArray dstmap2,
int dstmap1type, bool nninterpolation=false );
//! returns 2x3 affine transformation matrix for the planar rotation.
CV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale );
//! returns 3x3 perspective transformation for the corresponding 4 point pairs.
@ -597,12 +597,12 @@ CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
double alpha, InputArray mask=noArray() );
//! computes PSNR image/video quality metric
CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2, InputArray window = noArray());
CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type);
//! type of the threshold operation
enum { THRESH_BINARY=CV_THRESH_BINARY, THRESH_BINARY_INV=CV_THRESH_BINARY_INV,
THRESH_TRUNC=CV_THRESH_TRUNC, THRESH_TOZERO=CV_THRESH_TOZERO,
@ -637,7 +637,7 @@ CV_EXPORTS_W void undistort( InputArray src, OutputArray dst,
InputArray cameraMatrix,
InputArray distCoeffs,
InputArray newCameraMatrix=noArray() );
//! initializes maps for cv::remap() to correct lens distortion and optionally rectify the image
CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs,
InputArray R, InputArray newCameraMatrix,
@ -647,25 +647,25 @@ enum
{
PROJ_SPHERICAL_ORTHO = 0,
PROJ_SPHERICAL_EQRECT = 1
};
//! initializes maps for cv::remap() for wide-angle
CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, int destImageWidth,
int m1type, OutputArray map1, OutputArray map2,
int projType=PROJ_SPHERICAL_EQRECT, double alpha=0);
//! returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPrincipalPoint=true)
CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize=Size(),
bool centerPrincipalPoint=false );
//! returns points' coordinates after lens distortion correction
CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
InputArray cameraMatrix, InputArray distCoeffs,
InputArray R=noArray(), InputArray P=noArray());
template<> CV_EXPORTS void Ptr<CvHistogram>::delete_obj();
//! computes the joint dense histogram for a set of images.
CV_EXPORTS void calcHist( const Mat* images, int nimages,
const int* channels, InputArray mask,
@ -678,7 +678,7 @@ CV_EXPORTS void calcHist( const Mat* images, int nimages,
SparseMat& hist, int dims,
const int* histSize, const float** ranges,
bool uniform=true, bool accumulate=false );
CV_EXPORTS_W void calcHist( InputArrayOfArrays images,
const vector<int>& channels,
InputArray mask, OutputArray hist,
@ -694,7 +694,7 @@ CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
//! computes back projection for the set of images
CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
const int* channels, const SparseMat& hist,
OutputArray backProject, const float** ranges,
double scale=1, bool uniform=true );
@ -705,8 +705,8 @@ CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector<int>&
/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels,
InputArray hist, OutputArray dst, Size patchSize,
int method, double factor=1 );
CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const vector<int>& channels,
InputArray hist, OutputArray dst, Size patchSize,
int method, double factor=1 );*/
@ -719,7 +719,7 @@ CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int met
//! normalizes the grayscale image brightness and contrast by normalizing its histogram
CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
int distType, InputArray cost=noArray(),
float* lowerBound=0, OutputArray flow=noArray() );
@ -739,7 +739,7 @@ enum
GC_BGD = 0, //!< background
GC_FGD = 1, //!< foreground
GC_PR_BGD = 2, //!< most probably background
GC_PR_FGD = 3 //!< most probably foreground
};
//! GrabCut algorithm flags
@ -751,7 +751,7 @@ enum
};
//! segments the image using GrabCut algorithm
CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
InputOutputArray bgdModel, InputOutputArray fgdModel,
int iterCount, int mode = GC_EVAL );
@ -760,7 +760,7 @@ enum
DIST_LABEL_CCOMP = 0,
DIST_LABEL_PIXEL = 1
};
//! builds the discrete Voronoi diagram
CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
OutputArray labels, int distanceType, int maskSize,
@ -784,27 +784,27 @@ CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask,
Scalar loDiff=Scalar(), Scalar upDiff=Scalar(),
int flags=4 );
enum
{
COLOR_BGR2BGRA =0,
COLOR_RGB2RGBA =COLOR_BGR2BGRA,
COLOR_BGRA2BGR =1,
COLOR_RGBA2RGB =COLOR_BGRA2BGR,
COLOR_BGR2RGBA =2,
COLOR_RGB2BGRA =COLOR_BGR2RGBA,
COLOR_RGBA2BGR =3,
COLOR_BGRA2RGB =COLOR_RGBA2BGR,
COLOR_BGR2RGB =4,
COLOR_RGB2BGR =COLOR_BGR2RGB,
COLOR_BGRA2RGBA =5,
COLOR_RGBA2BGRA =COLOR_BGRA2RGBA,
COLOR_BGR2GRAY =6,
COLOR_RGB2GRAY =7,
COLOR_GRAY2BGR =8,
@ -813,7 +813,7 @@ enum
COLOR_GRAY2RGBA =COLOR_GRAY2BGRA,
COLOR_BGRA2GRAY =10,
COLOR_RGBA2GRAY =11,
COLOR_BGR2BGR565 =12,
COLOR_RGB2BGR565 =13,
COLOR_BGR5652BGR =14,
@ -822,10 +822,10 @@ enum
COLOR_RGBA2BGR565 =17,
COLOR_BGR5652BGRA =18,
COLOR_BGR5652RGBA =19,
COLOR_GRAY2BGR565 =20,
COLOR_BGR5652GRAY =21,
COLOR_BGR2BGR555 =22,
COLOR_RGB2BGR555 =23,
COLOR_BGR5552BGR =24,
@ -834,86 +834,86 @@ enum
COLOR_RGBA2BGR555 =27,
COLOR_BGR5552BGRA =28,
COLOR_BGR5552RGBA =29,
COLOR_GRAY2BGR555 =30,
COLOR_BGR5552GRAY =31,
COLOR_BGR2XYZ =32,
COLOR_RGB2XYZ =33,
COLOR_XYZ2BGR =34,
COLOR_XYZ2RGB =35,
COLOR_BGR2YCrCb =36,
COLOR_RGB2YCrCb =37,
COLOR_YCrCb2BGR =38,
COLOR_YCrCb2RGB =39,
COLOR_BGR2HSV =40,
COLOR_RGB2HSV =41,
COLOR_BGR2Lab =44,
COLOR_RGB2Lab =45,
COLOR_BayerBG2BGR =46,
COLOR_BayerGB2BGR =47,
COLOR_BayerRG2BGR =48,
COLOR_BayerGR2BGR =49,
COLOR_BayerBG2RGB =COLOR_BayerRG2BGR,
COLOR_BayerGB2RGB =COLOR_BayerGR2BGR,
COLOR_BayerRG2RGB =COLOR_BayerBG2BGR,
COLOR_BayerGR2RGB =COLOR_BayerGB2BGR,
COLOR_BGR2Luv =50,
COLOR_RGB2Luv =51,
COLOR_BGR2HLS =52,
COLOR_RGB2HLS =53,
COLOR_HSV2BGR =54,
COLOR_HSV2RGB =55,
COLOR_Lab2BGR =56,
COLOR_Lab2RGB =57,
COLOR_Luv2BGR =58,
COLOR_Luv2RGB =59,
COLOR_HLS2BGR =60,
COLOR_HLS2RGB =61,
COLOR_BayerBG2BGR_VNG =62,
COLOR_BayerGB2BGR_VNG =63,
COLOR_BayerRG2BGR_VNG =64,
COLOR_BayerGR2BGR_VNG =65,
COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG,
COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG,
COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG,
COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG,
COLOR_BGR2HSV_FULL = 66,
COLOR_RGB2HSV_FULL = 67,
COLOR_BGR2HLS_FULL = 68,
COLOR_RGB2HLS_FULL = 69,
COLOR_HSV2BGR_FULL = 70,
COLOR_HSV2RGB_FULL = 71,
COLOR_HLS2BGR_FULL = 72,
COLOR_HLS2RGB_FULL = 73,
COLOR_LBGR2Lab = 74,
COLOR_LRGB2Lab = 75,
COLOR_LBGR2Luv = 76,
COLOR_LRGB2Luv = 77,
COLOR_Lab2LBGR = 78,
COLOR_Lab2LRGB = 79,
COLOR_Luv2LBGR = 80,
COLOR_Luv2LRGB = 81,
COLOR_BGR2YUV = 82,
COLOR_RGB2YUV = 83,
COLOR_YUV2BGR = 84,
COLOR_YUV2RGB = 85,
COLOR_BayerBG2GRAY = 86,
COLOR_BayerGB2GRAY = 87,
COLOR_BayerRG2GRAY = 88,
@ -921,7 +921,7 @@ enum
//YUV 4:2:0 formats family
COLOR_YUV2RGB_NV12 = 90,
COLOR_YUV2BGR_NV12 = 91,
COLOR_YUV2RGB_NV21 = 92,
COLOR_YUV2BGR_NV21 = 93,
COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
@ -933,7 +933,7 @@ enum
COLOR_YUV2BGRA_NV21 = 97,
COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
COLOR_YUV2RGB_YV12 = 98,
COLOR_YUV2BGR_YV12 = 99,
COLOR_YUV2RGB_IYUV = 100,
@ -942,7 +942,7 @@ enum
COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
COLOR_YUV2RGBA_YV12 = 102,
COLOR_YUV2BGRA_YV12 = 103,
COLOR_YUV2RGBA_IYUV = 104,
@ -951,7 +951,7 @@ enum
COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
COLOR_YUV2GRAY_420 = 106,
COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
@ -960,7 +960,7 @@ enum
COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
//YUV 4:2:2 formats family
COLOR_YUV2RGB_UYVY = 107,
COLOR_YUV2BGR_UYVY = 108,
@ -970,7 +970,7 @@ enum
COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
COLOR_YUV2RGBA_UYVY = 111,
COLOR_YUV2BGRA_UYVY = 112,
//COLOR_YUV2RGBA_VYUY = 113,
@ -979,7 +979,7 @@ enum
COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
COLOR_YUV2RGB_YUY2 = 115,
COLOR_YUV2BGR_YUY2 = 116,
COLOR_YUV2RGB_YVYU = 117,
@ -988,7 +988,7 @@ enum
COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
COLOR_YUV2RGBA_YUY2 = 119,
COLOR_YUV2BGRA_YUY2 = 120,
COLOR_YUV2RGBA_YVYU = 121,
@ -997,7 +997,7 @@ enum
COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
COLOR_YUV2GRAY_UYVY = 123,
COLOR_YUV2GRAY_YUY2 = 124,
//COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY,
@ -1006,11 +1006,11 @@ enum
COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
COLOR_COLORCVT_MAX = 125
};
//! converts image from one color space to another
CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 );
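A short usage sketch of ``cvtColor`` with the constants above (the input file name is hypothetical; ``dstCn=0`` lets the function derive the channel count from the code): ::

    cv::Mat bgr = cv::imread("input.png");        // imread returns BGR channel order
    cv::Mat gray, hsv;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
    cv::cvtColor(bgr, hsv,  cv::COLOR_BGR2HSV);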
@ -1027,7 +1027,7 @@ public:
Moments( const CvMoments& moments );
//! the conversion to CvMoments
operator CvMoments() const;
//! spatial moments
CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
//! central moments
@ -1100,7 +1100,7 @@ CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false );
CV_EXPORTS_W RotatedRect minAreaRect( InputArray points );
//! computes the minimal enclosing circle for a set of points
CV_EXPORTS_W void minEnclosingCircle( InputArray points,
CV_OUT Point2f& center, CV_OUT float& radius );
//! matches two contours using one of the available algorithms
CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2,
int method, double parameter );
@ -1125,7 +1125,7 @@ CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType,
double param, double reps, double aeps );
//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary
CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist );
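A sketch exercising two of the shape-analysis functions above on a hand-made point set (coordinates are arbitrary): ::

    std::vector<cv::Point2f> pts;
    pts.push_back(cv::Point2f(10, 10));
    pts.push_back(cv::Point2f(60, 20));
    pts.push_back(cv::Point2f(40, 70));

    cv::Point2f center;
    float radius;
    cv::minEnclosingCircle(pts, center, radius);

    // signed distance from a query point to the contour (measureDist=true)
    double d = cv::pointPolygonTest(pts, cv::Point2f(30, 30), true);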
class CV_EXPORTS_W Subdiv2D
{
@ -1138,7 +1138,7 @@ public:
PTLOC_VERTEX = 1,
PTLOC_ON_EDGE = 2
};
enum
{
NEXT_AROUND_ORG = 0x00,
@ -1150,30 +1150,30 @@ public:
PREV_AROUND_LEFT = 0x20,
PREV_AROUND_RIGHT = 0x02
};
CV_WRAP Subdiv2D();
CV_WRAP Subdiv2D(Rect rect);
CV_WRAP void initDelaunay(Rect rect);
CV_WRAP int insert(Point2f pt);
CV_WRAP void insert(const vector<Point2f>& ptvec);
CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex);
CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0);
CV_WRAP void getEdgeList(CV_OUT vector<Vec4f>& edgeList) const;
CV_WRAP void getTriangleList(CV_OUT vector<Vec6f>& triangleList) const;
CV_WRAP void getVoronoiFacetList(const vector<int>& idx, CV_OUT vector<vector<Point2f> >& facetList,
CV_OUT vector<Point2f>& facetCenters);
CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const;
CV_WRAP int getEdge( int edge, int nextEdgeType ) const;
CV_WRAP int nextEdge(int edge) const;
CV_WRAP int rotateEdge(int edge, int rotate) const;
CV_WRAP int symEdge(int edge) const;
CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const;
CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const;
protected:
int newEdge();
void deleteEdge(int edge);
@ -1187,7 +1187,7 @@ protected:
void calcVoronoi();
void clearVoronoi();
void checkSubdiv() const;
struct CV_EXPORTS Vertex
{
Vertex();
@ -1206,13 +1206,13 @@ protected:
int next[4];
int pt[4];
};
vector<Vertex> vtx;
vector<QuadEdge> qedges;
int freeQEdge;
int freePoint;
bool validGeometry;
int recentEdge;
Point2f topLeft;
Point2f bottomRight;
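The ``Subdiv2D`` interface above can be exercised as follows (a minimal sketch; the bounding rectangle and point coordinates are arbitrary): ::

    cv::Rect bounds(0, 0, 640, 480);   // must contain all inserted points
    cv::Subdiv2D subdiv(bounds);
    subdiv.insert(cv::Point2f(100.f, 100.f));
    subdiv.insert(cv::Point2f(300.f, 200.f));
    subdiv.insert(cv::Point2f(250.f, 400.f));

    std::vector<cv::Vec6f> triangles;  // each Vec6f packs (x1,y1, x2,y2, x3,y3)
    subdiv.getTriangleList(triangles);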

@ -351,8 +351,8 @@ CVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader );
a tree of polygonal curves (contours) */
CVAPI(CvSeq*) cvApproxPoly( const void* src_seq,
int header_size, CvMemStorage* storage,
int method, double parameter,
int parameter2 CV_DEFAULT(0));
int method, double eps,
int recursive CV_DEFAULT(0));
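The C++ counterpart of ``cvApproxPoly`` is ``cv::approxPolyDP``; a sketch with an ad-hoc polygonal contour (the 1% epsilon is a common heuristic, not a prescribed value): ::

    std::vector<cv::Point> contour;
    contour.push_back(cv::Point(0, 0));
    contour.push_back(cv::Point(100, 2));   // slightly off a straight line
    contour.push_back(cv::Point(200, 0));
    contour.push_back(cv::Point(200, 200));
    contour.push_back(cv::Point(0, 200));

    std::vector<cv::Point> approx;
    double eps = 0.01 * cv::arcLength(contour, true);  // 1% of the perimeter
    cv::approxPolyDP(contour, approx, eps, true);      // closed=true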
/* Calculates perimeter of a contour or length of a part of contour */
CVAPI(double) cvArcLength( const void* curve,

@ -182,37 +182,37 @@ enum
CV_BayerGB2BGR_VNG =63,
CV_BayerRG2BGR_VNG =64,
CV_BayerGR2BGR_VNG =65,
CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG,
CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG,
CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG,
CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG,
CV_BGR2HSV_FULL = 66,
CV_RGB2HSV_FULL = 67,
CV_BGR2HLS_FULL = 68,
CV_RGB2HLS_FULL = 69,
CV_HSV2BGR_FULL = 70,
CV_HSV2RGB_FULL = 71,
CV_HLS2BGR_FULL = 72,
CV_HLS2RGB_FULL = 73,
CV_LBGR2Lab = 74,
CV_LRGB2Lab = 75,
CV_LBGR2Luv = 76,
CV_LRGB2Luv = 77,
CV_Lab2LBGR = 78,
CV_Lab2LRGB = 79,
CV_Luv2LBGR = 80,
CV_Luv2LRGB = 81,
CV_BGR2YUV = 82,
CV_RGB2YUV = 83,
CV_YUV2BGR = 84,
CV_YUV2RGB = 85,
CV_BayerBG2GRAY = 86,
CV_BayerGB2GRAY = 87,
CV_BayerRG2GRAY = 88,
@ -220,7 +220,7 @@ enum
//YUV 4:2:0 formats family
CV_YUV2RGB_NV12 = 90,
CV_YUV2BGR_NV12 = 91,
CV_YUV2RGB_NV21 = 92,
CV_YUV2BGR_NV21 = 93,
CV_YUV420sp2RGB = CV_YUV2RGB_NV21,
@ -232,7 +232,7 @@ enum
CV_YUV2BGRA_NV21 = 97,
CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21,
CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21,
CV_YUV2RGB_YV12 = 98,
CV_YUV2BGR_YV12 = 99,
CV_YUV2RGB_IYUV = 100,
@ -241,7 +241,7 @@ enum
CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV,
CV_YUV420p2RGB = CV_YUV2RGB_YV12,
CV_YUV420p2BGR = CV_YUV2BGR_YV12,
CV_YUV2RGBA_YV12 = 102,
CV_YUV2BGRA_YV12 = 103,
CV_YUV2RGBA_IYUV = 104,
@ -250,7 +250,7 @@ enum
CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV,
CV_YUV420p2RGBA = CV_YUV2RGBA_YV12,
CV_YUV420p2BGRA = CV_YUV2BGRA_YV12,
CV_YUV2GRAY_420 = 106,
CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420,
CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420,
@ -259,7 +259,7 @@ enum
CV_YUV2GRAY_I420 = CV_YUV2GRAY_420,
CV_YUV420sp2GRAY = CV_YUV2GRAY_420,
CV_YUV420p2GRAY = CV_YUV2GRAY_420,
//YUV 4:2:2 formats family
CV_YUV2RGB_UYVY = 107,
CV_YUV2BGR_UYVY = 108,
@ -269,7 +269,7 @@ enum
CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY,
CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY,
CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY,
CV_YUV2RGBA_UYVY = 111,
CV_YUV2BGRA_UYVY = 112,
//CV_YUV2RGBA_VYUY = 113,
@ -278,7 +278,7 @@ enum
CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY,
CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY,
CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY,
CV_YUV2RGB_YUY2 = 115,
CV_YUV2BGR_YUY2 = 116,
CV_YUV2RGB_YVYU = 117,
@ -287,7 +287,7 @@ enum
CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2,
CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2,
CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2,
CV_YUV2RGBA_YUY2 = 119,
CV_YUV2BGRA_YUY2 = 120,
CV_YUV2RGBA_YVYU = 121,
@ -296,7 +296,7 @@ enum
CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2,
CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2,
CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2,
CV_YUV2GRAY_UYVY = 123,
CV_YUV2GRAY_YUY2 = 124,
//CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY,
@ -305,7 +305,7 @@ enum
CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2,
CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2,
CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2,
CV_COLORCVT_MAX = 125
};

@ -57,8 +57,6 @@ int icvIntersectLines( double x1, double dx1, double y1, double dy1,
double* t2 );
void icvCreateCenterNormalLine( CvSubdiv2DEdge edge, double* a, double* b, double* c );
void icvIntersectLines3( double* a0, double* b0, double* c0,
double* a1, double* b1, double* c1,
CvPoint2D32f* point );

@ -70,15 +70,15 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
CvChainPtReader reader;
CvSeqWriter writer;
CvPoint pt = chain->origin;
CV_Assert( CV_IS_SEQ_CHAIN_CONTOUR( chain ));
CV_Assert( header_size >= (int)sizeof(CvContour) );
cvStartWriteSeq( (chain->flags & ~CV_SEQ_ELTYPE_MASK) | CV_SEQ_ELTYPE_POINT,
header_size, sizeof( CvPoint ), storage, &writer );
if( chain->total == 0 )
{
CV_WRITE_SEQ_ELEM( pt, writer );
return cvEndWriteSeq( &writer );
}
@ -380,13 +380,13 @@ CV_IMPL CvSeq*
cvApproxChains( CvSeq* src_seq,
CvMemStorage* storage,
int method,
double /*parameter*/,
int minimal_perimeter,
int recursive )
{
CvSeq *prev_contour = 0, *parent = 0;
CvSeq *dst_seq = 0;
if( !src_seq || !storage )
CV_Error( CV_StsNullPtr, "" );
if( method > CV_CHAIN_APPROX_TC89_KCOS || method <= 0 || minimal_perimeter < 0 )
@ -399,7 +399,7 @@ cvApproxChains( CvSeq* src_seq,
if( len >= minimal_perimeter )
{
CvSeq *contour = 0;
switch( method )
{
case CV_CHAIN_APPROX_NONE:
@ -471,7 +471,7 @@ cvApproxChains( CvSeq* src_seq,
/* the version for integer point coordinates */
template<typename T> static CvSeq*
icvApproxPolyDP( CvSeq* src_contour, int header_size,
CvMemStorage* storage, double eps )
{
typedef cv::Point_<T> PT;
@ -486,7 +486,7 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
CvMemStorage* temp_storage = 0;
CvSeq* stack = 0;
CvSeq* dst_contour;
assert( CV_SEQ_ELTYPE(src_contour) == cv::DataType<PT>::type );
cvStartWriteSeq( src_contour->flags, header_size, sizeof(pt), storage, &writer );
@ -518,7 +518,7 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
init_iters = 1;
}
}
if( is_closed )
{
/* 1. Find approximately two farthest points of the contour */
@ -629,10 +629,10 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
CV_WRITE_SEQ_ELEM( end_pt, writer );
dst_contour = cvEndWriteSeq( &writer );
// last stage: do final clean-up of the approximated contour -
// remove extra points on the [almost] straight lines.
cvStartReadSeq( dst_contour, &reader, is_closed );
CV_READ_SEQ_ELEM( start_pt, reader );
@ -675,7 +675,7 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
CV_IMPL CvSeq*
cvApproxPoly( const void* array, int header_size,
CvMemStorage* storage, int method,
double parameter, int parameter2 )
{
CvSeq* dst_seq = 0;

@ -159,7 +159,7 @@ typedef struct _CvContourScanner
external contours and holes),
3 - full hierarchy;
4 - connected components of a multi-level image
*/
int subst_flag;
int seq_type1; /* type of fetched contours */
int header_size1; /* hdr size of fetched contours */
@ -190,7 +190,7 @@ cvStartFindContours( void* _img, CvMemStorage* storage,
if( CV_MAT_TYPE(mat->type) == CV_32SC1 && mode == CV_RETR_CCOMP )
mode = CV_RETR_FLOODFILL;
if( !((CV_IS_MASK_ARR( mat ) && mode < CV_RETR_FLOODFILL) ||
(CV_MAT_TYPE(mat->type) == CV_32SC1 && mode == CV_RETR_FLOODFILL)) )
CV_Error( CV_StsUnsupportedFormat, "[Start]FindContours support only 8uC1 and 32sC1 images" );
@ -207,7 +207,7 @@ cvStartFindContours( void* _img, CvMemStorage* storage,
CvContourScanner scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
memset( scanner, 0, sizeof(*scanner) );
scanner->storage1 = scanner->storage2 = storage;
scanner->img0 = (schar *) img;
scanner->img = (schar *) (img + step);
@ -805,13 +805,13 @@ icvTraceContour_32s( int *ptr, int step, int *stop_ptr, int is_hole )
const int new_flag = (int)((unsigned)INT_MIN >> 1);
const int value_mask = ~(right_flag | new_flag);
const int ccomp_val = *i0 & value_mask;
/* initialize local state */
CV_INIT_3X3_DELTAS( deltas, step, 1 );
memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
s_end = s = is_hole ? 0 : 4;
do
{
s = (s - 1) & 7;
@ -820,9 +820,9 @@ icvTraceContour_32s( int *ptr, int step, int *stop_ptr, int is_hole )
break;
}
while( s != s_end );
i3 = i0;
/* check single pixel domain */
if( s != s_end )
{
@ -830,17 +830,17 @@ icvTraceContour_32s( int *ptr, int step, int *stop_ptr, int is_hole )
for( ;; )
{
s_end = s;
for( ;; )
{
i4 = i3 + deltas[++s];
if( (*i4 & value_mask) == ccomp_val )
break;
}
if( i3 == stop_ptr || (i4 == i0 && i3 == i1) )
break;
i3 = i4;
s = (s + 4) & 7;
} /* end of border following loop */
@ -869,24 +869,24 @@ icvFetchContourEx_32s( int* ptr,
const int ccomp_val = *i0 & value_mask;
const int nbd0 = ccomp_val | new_flag;
const int nbd1 = nbd0 | right_flag;
assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE );
/* initialize local state */
CV_INIT_3X3_DELTAS( deltas, step, 1 );
memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
/* initialize writer */
cvStartAppendToSeq( contour, &writer );
if( method < 0 )
((CvChain *)contour)->origin = pt;
rect.x = rect.width = pt.x;
rect.y = rect.height = pt.y;
s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4;
do
{
s = (s - 1) & 7;
@ -895,7 +895,7 @@ icvFetchContourEx_32s( int* ptr,
break;
}
while( s != s_end );
if( s == s_end ) /* single pixel domain */
{
*i0 = nbd1;
@ -908,12 +908,12 @@ icvFetchContourEx_32s( int* ptr,
{
i3 = i0;
prev_s = s ^ 4;
/* follow border */
for( ;; )
{
s_end = s;
for( ;; )
{
i4 = i3 + deltas[++s];
@ -921,7 +921,7 @@ icvFetchContourEx_32s( int* ptr,
break;
}
s &= 7;
/* check "right" bound */
if( (unsigned) (s - 1) < (unsigned) s_end )
{
@ -931,7 +931,7 @@ icvFetchContourEx_32s( int* ptr,
{
*i3 = nbd0;
}
if( method < 0 )
{
schar _s = (schar) s;
@ -941,7 +941,7 @@ icvFetchContourEx_32s( int* ptr,
{
CV_WRITE_SEQ_ELEM( pt, writer );
}
if( s != prev_s )
{
/* update bounds */
@ -949,37 +949,37 @@ icvFetchContourEx_32s( int* ptr,
rect.x = pt.x;
else if( pt.x > rect.width )
rect.width = pt.x;
if( pt.y < rect.y )
rect.y = pt.y;
else if( pt.y > rect.height )
rect.height = pt.y;
}
prev_s = s;
pt.x += icvCodeDeltas[s].x;
pt.y += icvCodeDeltas[s].y;
if( i4 == i0 && i3 == i1 ) break;
i3 = i4;
s = (s + 4) & 7;
} /* end of border following loop */
}
rect.width -= rect.x - 1;
rect.height -= rect.y - 1;
cvEndWriteSeq( &writer );
if( _method != CV_CHAIN_CODE )
((CvContour*)contour)->rect = rect;
assert( (writer.seq->total == 0 && writer.seq->first == 0) ||
writer.seq->total > writer.seq->first->count ||
(writer.seq->first->prev == writer.seq->first &&
writer.seq->first->next == writer.seq->first) );
if( _rect ) *_rect = rect;
}
@ -1005,7 +1005,7 @@ cvFindNextContour( CvContourScanner scanner )
int nbd = scanner->nbd;
int prev = img[x - 1];
int new_mask = -2;
if( mode == CV_RETR_FLOODFILL )
{
prev = ((int*)img)[x - 1];
@ -1017,13 +1017,13 @@ cvFindNextContour( CvContourScanner scanner )
int* img0_i = 0;
int* img_i = 0;
int p = 0;
if( mode == CV_RETR_FLOODFILL )
{
img0_i = (int*)img0;
img_i = (int*)img;
}
for( ; x < width; x++ )
{
if( img_i )
@ -1036,10 +1036,10 @@ cvFindNextContour( CvContourScanner scanner )
for( ; x < width && (p = img[x]) == prev; x++ )
;
}
if( x >= width )
break;
{
_CvContourInfo *par_info = 0;
_CvContourInfo *l_cinfo = 0;
@ -1053,7 +1053,7 @@ cvFindNextContour( CvContourScanner scanner )
{
/* check hole */
if( (!img_i && (p != 0 || prev < 1)) ||
(img_i && ((prev & new_mask) != 0 || (p & new_mask) != 0)))
goto resume_scan;
if( prev & new_mask )
@ -1219,7 +1219,7 @@ cvFindNextContour( CvContourScanner scanner )
return l_cinfo->contour;
resume_scan:
prev = p;
/* update lnbd */
if( prev & -2 )
@ -1663,7 +1663,7 @@ cvFindContours( void* img, CvMemStorage* storage,
if( !firstContour )
CV_Error( CV_StsNullPtr, "NULL double CvSeq pointer" );
*firstContour = 0;
if( method == CV_LINK_RUNS )
@ -1733,7 +1733,7 @@ void cv::findContours( InputOutputArray _image, OutputArrayOfArrays _contours,
{
_hierarchy.create(1, total, CV_32SC4, -1, true);
Vec4i* hierarchy = _hierarchy.getMat().ptr<Vec4i>();
it = all_contours.begin();
for( i = 0; i < total; i++, ++it )
{
@ -1934,7 +1934,7 @@ double cv::matchShapes( InputArray _contour1,
CV_Assert(contour1.checkVector(2) >= 0 && contour2.checkVector(2) >= 0 &&
(contour1.depth() == CV_32F || contour1.depth() == CV_32S) &&
contour1.depth() == contour2.depth());
CvMat c1 = Mat(contour1), c2 = Mat(contour2);
return cvMatchShapes(&c1, &c2, method, parameter);
}
@ -1945,13 +1945,13 @@ void cv::convexHull( InputArray _points, OutputArray _hull, bool clockwise, bool
Mat points = _points.getMat();
int nelems = points.checkVector(2), depth = points.depth();
CV_Assert(nelems >= 0 && (depth == CV_32F || depth == CV_32S));
if( nelems == 0 )
{
_hull.release();
return;
}
returnPoints = !_hull.fixedType() ? returnPoints : _hull.type() != CV_32S;
Mat hull(nelems, 1, returnPoints ? CV_MAKETYPE(depth, 2) : CV_32S);
CvMat _cpoints = points, _chull = hull;
@ -1970,23 +1970,23 @@ void cv::convexityDefects( InputArray _points, InputArray _hull, OutputArray _de
Mat hull = _hull.getMat();
CV_Assert( hull.checkVector(1, CV_32S) > 2 );
Ptr<CvMemStorage> storage = cvCreateMemStorage();
CvMat c_points = points, c_hull = hull;
CvSeq* seq = cvConvexityDefects(&c_points, &c_hull, storage);
int i, n = seq->total;
if( n == 0 )
{
_defects.release();
return;
}
_defects.create(n, 1, CV_32SC4);
Mat defects = _defects.getMat();
SeqIterator<CvConvexityDefect> it = Seq<CvConvexityDefect>(seq).begin();
CvPoint* ptorg = (CvPoint*)points.data;
for( i = 0; i < n; i++, ++it )
{
CvConvexityDefect& d = *it;
@ -2034,9 +2034,9 @@ void cv::fitLine( InputArray _points, OutputArray _line, int distType,
CvMat _cpoints = points.reshape(2 + (int)is3d);
float line[6];
cvFitLine(&_cpoints, distType, param, reps, aeps, &line[0]);
int out_size = (is2d)?( (is3d)? (points.channels() * points.rows * 2) : 4 ): 6;
_line.create(out_size, 1, CV_32F, -1, true);
Mat l = _line.getMat();
CV_Assert( l.isContinuous() );
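The wrappers touched above (``findContours`` with hierarchy output, ``convexHull``, ``convexityDefects``) chain together naturally; a minimal sketch on a synthetic binary image (note that ``findContours`` modifies its input and ``convexityDefects`` expects the hull as ``CV_32S`` indices): ::

    cv::Mat bin = cv::Mat::zeros(200, 200, CV_8UC1);
    cv::rectangle(bin, cv::Rect(50, 50, 100, 100), cv::Scalar(255), -1);

    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(bin, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

    for (size_t i = 0; i < contours.size(); i++)
    {
        std::vector<int> hull;                   // vertex indices, not points
        cv::convexHull(contours[i], hull, false);
        if (hull.size() > 2)
        {
            std::vector<cv::Vec4i> defects;
            cv::convexityDefects(contours[i], hull, defects);
        }
    }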

@ -589,9 +589,8 @@ class ClassInfo(object):
self.jname = m[1:]
self.base = ''
if decl[1]:
self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
#self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
self.base = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "")
class ArgInfo(object):
def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ]

@ -3,6 +3,7 @@ allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d"
verbose = False
show_warnings = True
show_errors = True
show_critical_errors = True
params_blacklist = {
"fromarray" : ("object", "allowND"), # python only function
@ -14,6 +15,18 @@ params_blacklist = {
"gpu::swapChannels" : ("dstOrder") # parameter is not parsed correctly by the hdr_parser
}
ERROR_001_SECTIONFAILURE = 1
WARNING_002_HDRWHITESPACE = 2
ERROR_003_PARENTHESES = 3
WARNING_004_TABS = 4
ERROR_005_REDEFENITIONPARAM = 5
ERROR_006_REDEFENITIONFUNC = 6
WARNING_007_UNDOCUMENTEDPARAM = 7
WARNING_008_MISSINGPARAM = 8
WARNING_009_HDRMISMATCH = 9
ERROR_010_NOMODULE = 10
ERROR_011_EOLEXPECTED = 11
params_mapping = {
"composeRT" : {
"dr3dr1" : "d*d*",
@ -66,7 +79,7 @@ class DeclarationParser(object):
if line.startswith(".. ocv:jfunction::"):
return "Java"
return None
def hasDeclaration(self, line):
return self.getLang(line) is not None
@ -97,7 +110,7 @@ class ParamParser(object):
self.comment += "\n" + line.lstrip()
else:
self.active = False
def hasDeclaration(self, line):
return line.lstrip().startswith(":param")
@ -115,21 +128,22 @@ class RstParser(object):
doclist = glob.glob(os.path.join(module_path,"doc/*.rst"))
for doc in doclist:
self.parse_rst_file(module_name, doc)
def parse_section_safe(self, module_name, section_name, file_name, lineno, lines):
try:
self.parse_section(module_name, section_name, file_name, lineno, lines)
except AssertionError, args:
if show_errors:
print >> sys.stderr, "RST parser error: assertion in \"%s\" File: %s (line %s)" % (section_name, file_name, lineno)
print >> sys.stderr, "RST parser error E%03d: assertion in \"%s\" at %s:%s" % (ERROR_001_SECTIONFAILURE, section_name, file_name, lineno)
print >> sys.stderr, " Details: %s" % args
def parse_section(self, module_name, section_name, file_name, lineno, lines):
self.sections_total += 1
# skip sections having whitespace in name
if section_name.find(" ") >= 0 and section_name.find("::operator") < 0:
#if section_name.find(" ") >= 0 and section_name.find("::operator") < 0:
if section_name.find(" ") >= 0 and not bool(re.match(r"(\w+::)*operator\s*(\w+|>>|<<|\(\)|->|\+\+|--|=|==|\+=|-=)", section_name)):
if show_errors:
print "SKIPPED: \"%s\" File: %s (line %s)" % (section_name, file_name, lineno)
print >> sys.stderr, "RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno)
self.sections_skipped += 1
return
@ -184,16 +198,16 @@ class RstParser(object):
continue
else:
skip_code_lines = False
if ll.startswith(".. code-block::") or ll.startswith(".. image::"):
skip_code_lines = True
continue
# todo: parse structure members; skip them for now
if ll.startswith(".. ocv:member::"):
skip_code_lines = True
continue
#ignore references (todo: collect them)
if l.startswith(".. ["):
continue
@ -204,7 +218,7 @@ class RstParser(object):
# turn on line-skipping mode for code fragments
skip_code_lines = True
ll = ll[:len(ll)-2]
# continue param parsing (process params after processing .. at the beginning of the line and :: at the end)
if pdecl.active:
pdecl.append(l)
@ -269,10 +283,10 @@ class RstParser(object):
if skip_code_lines:
func["long"] = func.get("long", "") + "\n"
# endfor l in lines
if fdecl.balance != 0:
if show_errors:
print >> sys.stderr, "RST parser error: invalid parentheses balance in \"%s\" File: %s (line %s)" % (section_name, file_name, lineno)
if show_critical_errors:
print >> sys.stderr, "RST parser error E%03d: invalid parentheses balance in \"%s\" at %s:%s" % (ERROR_003_PARENTHESES, section_name, file_name, lineno)
return
# save last parameter if needed
@ -295,7 +309,7 @@ class RstParser(object):
lineno = 0
whitespace_warnings = 0
max_whitespace_warnings = 10
lines = []
flineno = 0
fname = ""
@ -308,9 +322,9 @@ class RstParser(object):
if l.find("\t") >= 0:
whitespace_warnings += 1
if whitespace_warnings <= max_whitespace_warnings and show_warnings:
print >> sys.stderr, "RST parser warning: tab symbol instead of space is used at file %s (line %s)" % (doc, lineno)
print >> sys.stderr, "RST parser warning W%03d: tab symbol instead of space is used at %s:%s" % (WARNING_004_TABS, doc, lineno)
l = l.replace("\t", " ")
# handle first line
if prev_line == None:
prev_line = l.rstrip()
@ -325,7 +339,7 @@ class RstParser(object):
flineno = lineno-1
fname = prev_line.strip()
elif flineno > 0:
lines.append(ll)
prev_line = ll
df.close()
@ -343,12 +357,15 @@ class RstParser(object):
return section_name
def add_new_fdecl(self, func, decl):
if decl.fdecl.endswith(";"):
print >> sys.stderr, "RST parser error E%03d: unexpected semicolon at the end of declaration in \"%s\" at %s:%s" \
% (ERROR_011_EOLEXPECTED, func["name"], func["file"], func["line"])
decls = func.get("decls",[])
if (decl.lang == "C++" or decl.lang == "C"):
rst_decl = self.cpp_parser.parse_func_decl_no_wrap(decl.fdecl)
decls.append( (decl.lang, decl.fdecl, rst_decl) )
decls.append( [decl.lang, decl.fdecl, rst_decl] )
else:
decls.append( (decl.lang, decl.fdecl) )
decls.append( [decl.lang, decl.fdecl] )
func["decls"] = decls
def add_new_pdecl(self, func, decl):
@ -357,8 +374,8 @@ class RstParser(object):
if show_errors:
#check black_list
if decl.name not in params_blacklist.get(func["name"], []):
print >> sys.stderr, "RST parser error: redefinition of parameter \"%s\" in \"%s\" File: %s (line %s)" \
% (decl.name, func["name"], func["file"], func["line"])
print >> sys.stderr, "RST parser error E%03d: redefinition of parameter \"%s\" in \"%s\" at %s:%s" \
% (ERROR_005_REDEFENITIONPARAM, decl.name, func["name"], func["file"], func["line"])
else:
params[decl.name] = decl.comment
func["params"] = params
@ -368,7 +385,7 @@ class RstParser(object):
if skipped:
print >> out, "SKIPPED DEFINITION:"
print >> out, "name: %s" % (func.get("name","~empty~"))
print >> out, "file: %s (line %s)" % (func.get("file","~empty~"), func.get("line","~empty~"))
print >> out, "file: %s:%s" % (func.get("file","~empty~"), func.get("line","~empty~"))
print >> out, "is class: %s" % func.get("isclass",False)
print >> out, "is struct: %s" % func.get("isstruct",False)
print >> out, "module: %s" % func.get("module","~unknown~")
@ -395,15 +412,15 @@ class RstParser(object):
return False
if func["name"] in self.definitions:
if show_errors:
print >> sys.stderr, "RST parser error: \"%s\" from file: %s (line %s) is already documented in file: %s (line %s)" \
% (func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"])
print >> sys.stderr, "RST parser error E%03d: \"%s\" from: %s:%s is already documented at %s:%s" \
% (ERROR_006_REDEFENITIONFUNC, func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"])
return False
return self.validateParams(func)
def validateParams(self, func):
documentedParams = func.get("params",{}).keys()
params = []
for decl in func.get("decls", []):
if len(decl) > 2:
args = decl[2][3] # decl[2] -> [ funcname, return_ctype, [modifiers], [args] ]
@ -416,13 +433,13 @@ class RstParser(object):
# 1. all params are documented
for p in params:
if p not in documentedParams and show_warnings:
print >> sys.stderr, "RST parser warning: parameter \"%s\" of \"%s\" is undocumented. File: %s (line %s)" % (p, func["name"], func["file"], func["line"])
print >> sys.stderr, "RST parser warning W%03d: parameter \"%s\" of \"%s\" is undocumented. %s:%s" % (WARNING_007_UNDOCUMENTEDPARAM, p, func["name"], func["file"], func["line"])
# 2. only real params are documented
for p in documentedParams:
if p not in params and show_warnings:
if p not in params_blacklist.get(func["name"], []):
print >> sys.stderr, "RST parser warning: unexisting parameter \"%s\" of \"%s\" is documented. File: %s (line %s)" % (p, func["name"], func["file"], func["line"])
print >> sys.stderr, "RST parser warning W%03d: unexisting parameter \"%s\" of \"%s\" is documented at %s:%s" % (WARNING_008_MISSINGPARAM, p, func["name"], func["file"], func["line"])
return True
def normalize(self, func):
@ -476,7 +493,7 @@ class RstParser(object):
return func
def fixOldCFunctionName(self, func):
if not "decls" in func:
if not "decls" in func:
return
fname = None
for decl in func["decls"]:
@ -489,13 +506,13 @@ class RstParser(object):
fname = fname.replace(".", "::")
if fname.startswith("cv::cv"):
if fname[6:] == func.get("name", ""):
if fname[6:] == func.get("name", "").replace("*", "_n"):
func["name"] = fname[4:]
func["method"] = fname[4:]
elif show_warnings:
print >> sys.stderr, "\"%s\" - section name is \"%s\" instead of \"%s\". File: %s (line %s)" % (fname, func["name"], fname[6:], func["file"], func["line"])
print >> sys.stderr, "RST parser warning W%03d: \"%s\" - section name is \"%s\" instead of \"%s\" at %s:%s" % (WARNING_009_HDRMISMATCH, fname, func["name"], fname[6:], func["file"], func["line"])
#self.print_info(func)
def normalizeText(self, s):
if s is None:
return s
@ -503,7 +520,7 @@ class RstParser(object):
s = re.sub(r"\.\. math::[ \r]*\n+((.|\n)*?)(\n[ \r]*\n|$)", mathReplace2, s)
s = re.sub(r":math:`([^`]+?)`", mathReplace, s)
s = re.sub(r" *:sup:", "^", s)
s = s.replace(":ocv:class:", "")
s = s.replace(":ocv:struct:", "")
s = s.replace(":ocv:func:", "")
@ -526,7 +543,7 @@ class RstParser(object):
s = re.sub(r"`([^`<]+ )<(https?://[^>]+)>`_", "\\1(\\2)", s)
# remove tailing ::
s = re.sub(r"::(\n|$)", "\\1", s)
# normalize line endings
s = re.sub(r"\r\n", "\n", s)
# remove extra line breaks before/after _ or ,
@ -554,7 +571,7 @@ class RstParser(object):
#s = re.sub(r"\.\. \[", "[", s)
# unescape
s = re.sub(r"\\(.)", "\\1", s)
# remove whitespace before .
s = re.sub(r"[ ]+\.", ".", s)
# remove tailing whitespace
@ -582,7 +599,7 @@ class RstParser(object):
s = s.strip()
return s
def printSummary(self):
print
print "RST Parser Summary:"
@ -623,7 +640,7 @@ def matrixReplace(match):
m = match.group(2)
m = re.sub(r" *& *", " ", m)
return m
def mathReplace(match):
m = match.group(1)
@ -645,7 +662,7 @@ def mathReplace(match):
m = re.sub(r"\\begin{(?P<gtype>array|bmatrix)}(?:{[\|lcr\. ]+})? *(.*?)\\end{(?P=gtype)}", matrixReplace, m)
m = re.sub(r"\\hdotsfor{(\d+)}", hdotsforReplace, m)
m = re.sub(r"\\vecthreethree{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}", "<BR>|\\1 \\2 \\3|<BR>|\\4 \\5 \\6|<BR>|\\7 \\8 \\9|<BR>", m)
m = re.sub(r"\\left[ ]*\\lfloor[ ]*", "[", m)
m = re.sub(r"[ ]*\\right[ ]*\\rfloor", "]", m)
m = re.sub(r"\\left[ ]*\([ ]*", "(", m)
@ -696,7 +713,7 @@ if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:\n", os.path.basename(sys.argv[0]), " <module path>"
exit(0)
if len(sys.argv) >= 3:
if sys.argv[2].lower() == "verbose":
verbose = True
@ -710,11 +727,11 @@ if __name__ == "__main__":
module = sys.argv[1]
if module != "all" and not os.path.isdir(os.path.join(rst_parser_dir, "../" + module)):
print "Module \"" + module + "\" could not be found."
print "RST parser error E%03d: module \"%s\" could not be found." % (ERROR_010_NOMODULE, module)
exit(1)
parser = RstParser(hdr_parser.CppHeaderParser())
if module == "all":
for m in allmodules:
parser.parse(m, os.path.join(rst_parser_dir, "../" + m))

@ -13,7 +13,7 @@ descriptor extractors inherit the
CalonderDescriptorExtractor
---------------------------
.. ocv:class:: CalonderDescriptorExtractor
.. ocv:class:: CalonderDescriptorExtractor : public DescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`RTreeClassifier` class. ::

@ -5,7 +5,7 @@ Common Interfaces of Generic Descriptor Matchers
OneWayDescriptorMatcher
-----------------------
.. ocv:class:: OneWayDescriptorMatcher
.. ocv:class:: OneWayDescriptorMatcher : public GenericDescriptorMatcher
Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`OneWayDescriptorBase` class. ::
@ -64,7 +64,7 @@ Wrapping class for computing, matching, and classifying descriptors using the
FernDescriptorMatcher
---------------------
.. ocv:class:: FernDescriptorMatcher
.. ocv:class:: FernDescriptorMatcher : public GenericDescriptorMatcher
Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`FernClassifier` class. ::

@ -8,7 +8,7 @@ This section describes obsolete ``C`` interface of EM algorithm. Details of the
CvEMParams
----------
.. ocv:class:: CvEMParams
.. ocv:struct:: CvEMParams
Parameters of the EM algorithm. All parameters are public. You can initialize them by a constructor and then override some of them directly if you want.
@ -18,10 +18,10 @@ The constructors
.. ocv:function:: CvEMParams::CvEMParams()
.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=CvEM::COV_MAT_DIAGONAL, int start_step=CvEM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )
.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=EM::COV_MAT_DIAGONAL, int start_step=EM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )
:param nclusters: The number of mixture components in the Gaussian mixture model. Some EM implementations can determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
:param cov_mat_type: Constraint on covariance matrices that defines their type. Possible values are:
* **CvEM::COV_MAT_SPHERICAL** A scaled identity matrix :math:`\mu_k * I`. There is only one parameter :math:`\mu_k` to be estimated for each matrix. The option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (for example, when the data is preprocessed with PCA). The results of such preliminary estimation may be passed again to the optimization procedure, this time with ``cov_mat_type=CvEM::COV_MAT_DIAGONAL``.
@ -30,7 +30,7 @@ The constructors
* **CvEM::COV_MAT_GENERIC** A symmetric positive definite matrix. The number of free parameters in each matrix is about :math:`d^2/2`. It is not recommended to use this option, unless there is a fairly accurate initial estimate of the parameters and/or a huge number of training samples.
:param start_step: The start step of the EM algorithm:
* **CvEM::START_E_STEP** Start with Expectation step. You need to provide means :math:`a_k` of mixture components to use this option. Optionally you can pass weights :math:`\pi_k` and covariance matrices :math:`S_k` of mixture components.
* **CvEM::START_M_STEP** Start with Maximization step. You need to provide initial probabilities :math:`p_{i,k}` to use this option.
@ -40,7 +40,7 @@ The constructors
:param probs: Initial probabilities :math:`p_{i,k}` of sample :math:`i` to belong to mixture component :math:`k`. It is a floating-point matrix of :math:`nsamples \times nclusters` size. It is used and must be not NULL only when ``start_step=CvEM::START_M_STEP``.
:param weights: Initial weights :math:`\pi_k` of mixture components. It is a floating-point vector with :math:`nclusters` elements. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.
:param means: Initial means :math:`a_k` of mixture components. It is a floating-point matrix of :math:`nclusters \times dims` size. It is used and must be non-NULL only when ``start_step=CvEM::START_E_STEP``.
@ -62,7 +62,7 @@ With another constructor it is possible to override a variety of parameters from
CvEM
----
.. ocv:class:: CvEM
.. ocv:class:: CvEM : public CvStatModel
The class implements the EM algorithm as described in the beginning of the section :ref:`ML_Expectation Maximization`.
@ -71,15 +71,13 @@ CvEM::train
-----------
Estimates the Gaussian mixture parameters from a sample set.
.. ocv:function:: void CvEM::train( const Mat& samples, const Mat& sample_idx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )
.. ocv:function:: bool CvEM::train( const Mat& samples, const Mat& sampleIdx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )
.. ocv:function:: bool CvEM::train( const CvMat* samples, const CvMat* sampleIdx=0, CvEMParams params=CvEMParams(), CvMat* labels=0 )
.. ocv:pyfunction:: cv2.EM.train(samples[, sampleIdx[, params]]) -> retval, labels
:param samples: Samples from which the Gaussian mixture model will be estimated.
:param sample_idx: Mask of samples to use. All samples are used by default.
:param sampleIdx: Mask of samples to use. All samples are used by default.
:param params: Parameters of the EM algorithm.
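A minimal training sketch against this legacy interface (assumes the legacy/ml headers are available; random data stands in for real samples): ::

    cv::Mat samples(100, 2, CV_32FC1);
    cv::randu(samples, cv::Scalar(0), cv::Scalar(10));  // toy data

    CvEMParams params;
    params.nclusters    = 2;
    params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
    params.start_step   = CvEM::START_AUTO_STEP;

    CvEM em;
    cv::Mat labels;
    em.train(samples, cv::Mat(), params, &labels);  // labels: component index per sample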
@ -107,8 +105,6 @@ Returns a mixture component index of a sample.
.. ocv:function:: float CvEM::predict( const CvMat* sample, CvMat* probs ) const
.. ocv:pyfunction:: cv2.EM.predict(sample) -> retval, probs
:param sample: A sample for classification.
:param probs: If it is not null then the method will write posterior probabilities of each component given the sample data to this parameter.
@ -122,8 +118,6 @@ Returns the number of mixture components :math:`M` in the Gaussian mixture model
.. ocv:function:: int CvEM::get_nclusters() const
.. ocv:pyfunction:: cv2.EM.getNClusters() -> retval
CvEM::getMeans
------------------
@ -133,8 +127,6 @@ Returns mixture means :math:`a_k`.
.. ocv:function:: const CvMat* CvEM::get_means() const
.. ocv:pyfunction:: cv2.EM.getMeans() -> means
CvEM::getCovs
-------------
@ -144,8 +136,6 @@ Returns mixture covariance matrices :math:`S_k`.
.. ocv:function:: const CvMat** CvEM::get_covs() const
.. ocv:pyfunction:: cv2.EM.getCovs([covs]) -> covs
CvEM::getWeights
----------------
@ -155,8 +145,6 @@ Returns mixture weights :math:`\pi_k`.
.. ocv:function:: const CvMat* CvEM::get_weights() const
.. ocv:pyfunction:: cv2.EM.getWeights() -> weights
CvEM::getProbs
--------------
@ -166,8 +154,6 @@ Returns vectors of probabilities for each training sample.
.. ocv:function:: const CvMat* CvEM::get_probs() const
.. ocv:pyfunction:: cv2.EM.getProbs() -> probs
For each training sample :math:`i` (that have been passed to the constructor or to :ocv:func:`CvEM::train`) returns probabilities :math:`p_{i,k}` to belong to a mixture component :math:`k`.
@ -179,8 +165,6 @@ Returns logarithm of likelihood.
.. ocv:function:: double CvEM::get_log_likelihood() const
.. ocv:pyfunction:: cv2.EM.getLikelihood() -> likelihood
CvEM::write
-----------

@ -81,22 +81,22 @@ RandomizedTree::train
-------------------------
Trains a randomized tree using an input set of keypoints.
.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
.. ocv:function:: void RandomizedTree::train( vector<BaseKeypoint> const& base_set, RNG & rng, int depth, int views, size_t reduced_num_dim, int num_quant_bits )
.. ocv:function:: void RandomizedTree::train( vector<BaseKeypoint> const& base_set, RNG & rng, PatchGenerator & make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits )
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
:param rng: Random-number generator used for training.
:param make_patch: Patch generator used for training.
:param depth: Maximum tree depth.
:param views: Number of random views of each keypoint neighborhood to generate.
:param reduced_num_dim: Number of dimensions used in the compressed signature.
:param num_quant_bits: Number of bits used for quantization.
@ -105,9 +105,9 @@ RandomizedTree::read
------------------------
Reads a pre-saved randomized tree from a file or stream.
.. ocv:function:: read(const char* file_name, int num_quant_bits)
.. ocv:function:: RandomizedTree::read(const char* file_name, int num_quant_bits)
.. ocv:function:: read(std::istream &is, int num_quant_bits)
.. ocv:function:: RandomizedTree::read(std::istream &is, int num_quant_bits)
:param file_name: Name of the file that contains randomized tree data.
@ -121,9 +121,9 @@ RandomizedTree::write
-------------------------
Writes the current randomized tree to a file or stream.
.. ocv:function:: void write(const char* file_name) const
.. ocv:function:: void RandomizedTree::write(const char* file_name) const
.. ocv:function:: void write(std::ostream &os) const
.. ocv:function:: void RandomizedTree::write(std::ostream &os) const
:param file_name: Name of the file where randomized tree data is stored.
@ -133,7 +133,7 @@ Writes the current randomized tree to a file or stream.
RandomizedTree::applyQuantization
-------------------------------------
.. ocv:function:: void applyQuantization(int num_quant_bits)
.. ocv:function:: void RandomizedTree::applyQuantization(int num_quant_bits)
Applies quantization to the current randomized tree.
@ -142,7 +142,7 @@ RandomizedTree::applyQuantization
RTreeNode
---------
.. ocv:class:: RTreeNode
.. ocv:struct:: RTreeNode
Class containing a base structure for ``RandomizedTree``. ::
@ -240,37 +240,34 @@ RTreeClassifier::train
--------------------------
Trains a randomized tree classifier using an input set of keypoints.
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
.. ocv:function:: void RTreeClassifier::train( vector<BaseKeypoint> const& base_set, RNG & rng, int num_trees=RTreeClassifier::DEFAULT_TREES, int depth=RandomizedTree::DEFAULT_DEPTH, int views=RandomizedTree::DEFAULT_VIEWS, size_t reduced_num_dim=RandomizedTree::DEFAULT_REDUCED_NUM_DIM, int num_quant_bits=DEFAULT_NUM_QUANT_BITS )
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
.. ocv:function:: void RTreeClassifier::train( vector<BaseKeypoint> const& base_set, RNG & rng, PatchGenerator & make_patch, int num_trees=RTreeClassifier::DEFAULT_TREES, int depth=RandomizedTree::DEFAULT_DEPTH, int views=RandomizedTree::DEFAULT_VIEWS, size_t reduced_num_dim=RandomizedTree::DEFAULT_REDUCED_NUM_DIM, int num_quant_bits=DEFAULT_NUM_QUANT_BITS )
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
:param rng: Random-number generator used for training.
:param make_patch: Patch generator used for training.
:param num_trees: Number of randomized trees used in ``RTreeClassifier``.
:param depth: Maximum tree depth.
:param views: Number of random views of each keypoint neighborhood to generate.
:param reduced_num_dim: Number of dimensions used in the compressed signature.
:param num_quant_bits: Number of bits used for quantization.
:param print_status: Current status of training printed on the console.
RTreeClassifier::getSignature
---------------------------------
Returns a signature for an image patch.
.. ocv:function:: void getSignature(IplImage *patch, uchar *sig)
.. ocv:function:: void RTreeClassifier::getSignature(IplImage *patch, uchar *sig)
.. ocv:function:: void getSignature(IplImage *patch, float *sig)
.. ocv:function:: void RTreeClassifier::getSignature(IplImage *patch, float *sig)
:param patch: Image patch to calculate the signature for.
:param sig: Output signature (array dimension is ``reduced_num_dim``).
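A heavily hedged sketch of the workflow (filling ``base_set`` and extracting the keypoint patch are application-specific and elided here; the ``DEFAULT_*`` constants are those referenced in the declarations above, and ``4`` is an assumed quantization default): ::

    std::vector<cv::BaseKeypoint> base_set;   // fill with keypoints of a training image
    cv::RNG rng;
    cv::RTreeClassifier classifier;
    if (!base_set.empty())
        classifier.train(base_set, rng,
                         cv::RTreeClassifier::DEFAULT_TREES,
                         cv::RandomizedTree::DEFAULT_DEPTH,
                         cv::RandomizedTree::DEFAULT_VIEWS,
                         cv::RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
                         4 /* num_quant_bits, assumed default */);

    IplImage* patch = 0;   // must point to a patch around a keypoint (elided)
    std::vector<float> sig(cv::RandomizedTree::DEFAULT_REDUCED_NUM_DIM);
    if (patch)
        classifier.getSignature(patch, &sig[0]);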
@ -278,15 +275,15 @@ Returns a signature for an image patch.
RTreeClassifier::getSparseSignature
---------------------------------------
Returns a sparse signature for an image patch
.. ocv:function:: void getSparseSignature(IplImage *patch, float *sig, float thresh)
.. ocv:function:: void RTreeClassifier::getSparseSignature(IplImage *patch, float *sig, float thresh)
:param patch: Image patch to calculate the signature for.
:param sig: Output signature (array dimension is ``reduced_num_dim``).
:param thresh: Threshold used for compressing the signature.
Returns a signature for an image patch similarly to ``getSignature`` but uses a threshold to remove all signature elements below it, so that the signature is compressed.
@ -296,7 +293,7 @@ RTreeClassifier::countNonZeroElements
-----------------------------------------
Returns the number of non-zero elements in an input array.
.. ocv:function:: static int countNonZeroElements(float *vec, int n, double tol=1e-10)
.. ocv:function:: static int RTreeClassifier::countNonZeroElements(float *vec, int n, double tol=1e-10)
:param vec: Input vector containing float elements.
@ -310,9 +307,9 @@ RTreeClassifier::read
-------------------------
Reads a pre-saved ``RTreeClassifier`` from a file or stream.
.. ocv:function:: read(const char* file_name)
.. ocv:function:: void RTreeClassifier::read(const char* file_name)
.. ocv:function:: read(std::istream& is)
.. ocv:function:: void RTreeClassifier::read( std::istream & is )
:param file_name: Name of the file that contains randomized tree data.
@ -324,9 +321,9 @@ RTreeClassifier::write
--------------------------
Writes the current ``RTreeClassifier`` to a file or stream.
.. ocv:function:: void write(const char* file_name) const
.. ocv:function:: void RTreeClassifier::write(const char* file_name) const
.. ocv:function:: void write(std::ostream &os) const
.. ocv:function:: void RTreeClassifier::write(std::ostream &os) const
:param file_name: Name of the file where randomized tree data is stored.
@ -338,7 +335,7 @@ RTreeClassifier::setQuantization
------------------------------------
Applies quantization to the current randomized tree.
.. ocv:function:: void setQuantization(int num_quant_bits)
.. ocv:function:: void RTreeClassifier::setQuantization(int num_quant_bits)
:param num_quant_bits: Number of bits used for quantization.

@ -0,0 +1,95 @@
Histograms
==========
.. highlight:: cpp
CalcPGH
-------
Calculates a pair-wise geometrical histogram for a contour.
.. ocv:cfunction:: void cvCalcPGH( const CvSeq* contour, CvHistogram* hist )
:param contour: Input contour. Currently, only integer point coordinates are allowed.
:param hist: Calculated histogram. It must be two-dimensional.
The function calculates a 2D pair-wise geometrical histogram (PGH), described in [Iivarinen97]_ for the contour. The algorithm considers every pair of contour
edges. The angle between the edges and the minimum/maximum distances
are determined for every pair. To do this, each of the edges in turn
is taken as the base, while the function loops through all the other
edges. When the base edge and any other edge are considered, the minimum
and maximum distances from the points on the non-base edge to the line of
the base edge are selected.
of the histogram in which all the bins that correspond to the distance
between the calculated minimum and maximum distances are incremented
(that is, the histogram is transposed relative to the definition in the original paper). The histogram can be used for contour matching.
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Särelä, and Ari Visa. *Comparison of Combined Shape Descriptors for Irregular Objects*, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html
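A sketch of building and normalizing a PGH for a contour of a synthetic binary image (bin counts and image contents are arbitrary): ::

    IplImage* binary = cvCreateImage(cvSize(200, 200), IPL_DEPTH_8U, 1);
    cvZero(binary);
    cvRectangle(binary, cvPoint(50, 50), cvPoint(150, 150), cvScalar(255), -1);

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    cvFindContours(binary, storage, &contours, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

    int dims[] = { 8, 8 };                        // angle x distance bins
    CvHistogram* hist = cvCreateHist(2, dims, CV_HIST_ARRAY);
    if (contours)
        cvCalcPGH(contours, hist);
    cvNormalizeHist(hist, 1.0);                   // comparable across contours
    // release hist/storage/image with cvReleaseHist/cvReleaseMemStorage/cvReleaseImage when done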
QueryHistValue*D
----------------
Queries the value of the histogram bin.
.. ocv:cfunction:: float cvQueryHistValue_1D(CvHistogram hist, int idx0)
.. ocv:cfunction:: float cvQueryHistValue_2D(CvHistogram hist, int idx0, int idx1)
.. ocv:cfunction:: float cvQueryHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvQueryHistValue_nD(CvHistogram hist, const int* idx)
.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_nD(hist, idx) -> float
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
The macros return the value of the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function returns 0. If the bin is not present in the histogram, no new bin is created.
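For instance, reading bins of a 2D histogram (``hist`` is a ``CvHistogram*`` such as the PGH built above): ::

    float v     = cvQueryHistValue_2D(hist, 0, 0);  // bin (0, 0)
    int   idx[] = { 0, 0 };
    float same  = cvQueryHistValue_nD(hist, idx);   // equivalent N-dimensional form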
GetHistValue\_?D
----------------
Returns a pointer to the histogram bin.
.. ocv:cfunction:: float* cvGetHistValue_1D(CvHistogram hist, int idx0)
.. ocv:cfunction:: float* cvGetHistValue_2D(CvHistogram hist, int idx0, int idx1)
.. ocv:cfunction:: float* cvGetHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float* cvGetHistValue_nD(CvHistogram hist, const int* idx)
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
::
#define cvGetHistValue_1D( hist, idx0 )
((float*)(cvPtr1D( (hist)->bins, (idx0), 0 )))
#define cvGetHistValue_2D( hist, idx0, idx1 )
((float*)(cvPtr2D( (hist)->bins, (idx0), (idx1), 0 )))
#define cvGetHistValue_3D( hist, idx0, idx1, idx2 )
((float*)(cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0 )))
#define cvGetHistValue_nD( hist, idx )
((float*)(cvPtrND( (hist)->bins, (idx), 0 )))
..
The macros ``GetHistValue`` return a pointer to the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function creates a new bin and sets it to 0, unless it exists already.
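In contrast to the query macros, the returned pointer can be used to update a bin in place: ::

    float* bin = cvGetHistValue_2D(hist, 0, 0);
    *bin += 1.f;   // for sparse histograms the bin is created on demand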

@ -9,6 +9,7 @@ legacy. Deprecated stuff
motion_analysis
expectation_maximization
histograms
planar_subdivisions
feature_detection_and_description
common_interfaces_of_descriptor_extractors

@ -8,58 +8,58 @@ CalcOpticalFlowBM
-----------------
Calculates the optical flow for two images by using the block matching method.
.. ocv:cfunction:: void cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, CvSize blockSize, CvSize shiftSize, CvSize maxRange, int usePrevious, CvArr* velx, CvArr* vely )
.. ocv:cfunction:: void cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, CvSize block_size, CvSize shift_size, CvSize max_range, int use_previous, CvArr* velx, CvArr* vely )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowBM(prev, curr, blockSize, shiftSize, maxRange, usePrevious, velx, vely)-> None
.. ocv:pyoldfunction:: cv.CalcOpticalFlowBM(prev, curr, blockSize, shiftSize, max_range, usePrevious, velx, vely)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel
:param blockSize: Size of basic blocks that are compared
:param block_size: Size of basic blocks that are compared
:param shiftSize: Block coordinate increments
:param shift_size: Block coordinate increments
:param maxRange: Size of the scanned neighborhood in pixels around the block
:param max_range: Size of the scanned neighborhood in pixels around the block
:param usePrevious: Flag that specifies whether to use the input velocity as initial approximations or not.
:param use_previous: Flag that specifies whether to use the input velocity as initial approximations or not.
:param velx: Horizontal component of the optical flow of
.. math::
\left \lfloor \frac{\texttt{prev->width} - \texttt{blockSize.width}}{\texttt{shiftSize.width}} \right \rfloor \times \left \lfloor \frac{\texttt{prev->height} - \texttt{blockSize.height}}{\texttt{shiftSize.height}} \right \rfloor
\left \lfloor \frac{\texttt{prev->width} - \texttt{block_size.width}}{\texttt{shift_size.width}} \right \rfloor \times \left \lfloor \frac{\texttt{prev->height} - \texttt{block_size.height}}{\texttt{shift_size.height}} \right \rfloor
size, 32-bit floating-point, single-channel
:param vely: Vertical component of the optical flow of the same size as ``velx`` , 32-bit floating-point, single-channel
The function calculates the optical flow for overlapped blocks ``blockSize.width x blockSize.height`` pixels each, thus the velocity fields are smaller than the original images. For every block in ``prev``
the function tries to find a similar block in ``curr`` in some neighborhood of the original block, or shifted by ``(velx(x0,y0), vely(x0,y0))`` as calculated by the previous function call (if ``usePrevious=1``)
The function calculates the optical flow for overlapped blocks ``block_size.width x block_size.height`` pixels each, thus the velocity fields are smaller than the original images. For every block in ``prev``
the function tries to find a similar block in ``curr`` in some neighborhood of the original block, or shifted by ``(velx(x0,y0), vely(x0,y0))`` as calculated by the previous function call (if ``use_previous=1``)
CalcOpticalFlowHS
-----------------
Calculates the optical flow for two images using Horn-Schunck algorithm.
.. ocv:cfunction:: void cvCalcOpticalFlowHS(const CvArr* prev, const CvArr* curr, int usePrevious, CvArr* velx, CvArr* vely, double lambda, CvTermCriteria criteria)
.. ocv:cfunction:: void cvCalcOpticalFlowHS(const CvArr* prev, const CvArr* curr, int use_previous, CvArr* velx, CvArr* vely, double lambda, CvTermCriteria criteria)
.. ocv:pyoldfunction:: cv.CalcOpticalFlowHS(prev, curr, usePrevious, velx, vely, lambda, criteria)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel
:param usePrevious: Flag that specifies whether to use the input velocity as initial approximations or not.
:param use_previous: Flag that specifies whether to use the input velocity as initial approximations or not.
:param velx: Horizontal component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
:param vely: Vertical component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
:param lambda: Smoothness weight. The larger it is, the smoother the optical flow map you get.
:param criteria: Criteria of termination of velocity computing
The function computes the flow for every pixel of the first input image using the Horn and Schunck algorithm [Horn81]_. The function is obsolete. To track sparse features, use :ocv:func:`calcOpticalFlowPyrLK`. To track all the pixels, use :ocv:func:`calcOpticalFlowFarneback`.
@ -69,19 +69,19 @@ CalcOpticalFlowLK
Calculates the optical flow for two images using Lucas-Kanade algorithm.
.. ocv:cfunction:: void cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, CvSize winSize, CvArr* velx, CvArr* vely )
.. ocv:cfunction:: void cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, CvSize win_size, CvArr* velx, CvArr* vely )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowLK(prev, curr, winSize, velx, vely)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel
:param winSize: Size of the averaging window used for grouping pixels
:param win_size: Size of the averaging window used for grouping pixels
:param velx: Horizontal component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
:param vely: Vertical component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
The function computes the flow for every pixel of the first input image using the Lucas and Kanade algorithm [Lucas81]_. The function is obsolete. To track sparse features, use :ocv:func:`calcOpticalFlowPyrLK`. To track all the pixels, use :ocv:func:`calcOpticalFlowFarneback`.
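As both sections advise, dense flow is better computed with :ocv:func:`calcOpticalFlowFarneback`; a sketch with commonly used (assumed) parameter values: ::

    cv::Mat prevGray, currGray;   // two consecutive 8-bit grayscale frames (elided)
    cv::Mat flow;                 // output: CV_32FC2, per-pixel (dx, dy)
    cv::calcOpticalFlowFarneback(prevGray, currGray, flow,
                                 0.5,  // pyr_scale
                                 3,    // levels
                                 15,   // winsize
                                 3,    // iterations
                                 5,    // poly_n
                                 1.2,  // poly_sigma
                                 0);   // flags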

@ -19,7 +19,7 @@ Planar subdivision.
CvSubdiv2DEdge recent_edge; \
CvPoint2D32f topleft; \
CvPoint2D32f bottomright;
typedef struct CvSubdiv2D
{
CV_SUBDIV2D_FIELDS()
@ -64,13 +64,13 @@ Quad-edge of a planar subdivision.
/* one of edges within quad-edge, lower 2 bits is index (0..3)
and upper bits are quad-edge pointer */
typedef long CvSubdiv2DEdge;
/* quad-edge structure fields */
#define CV_QUADEDGE2D_FIELDS() \
int flags; \
struct CvSubdiv2DPoint* pt[4]; \
CvSubdiv2DEdge next[4];
typedef struct CvQuadEdge2D
{
CV_QUADEDGE2D_FIELDS()
@ -97,9 +97,9 @@ Point of an original or dual subdivision.
CvSubdiv2DEdge first; \
CvPoint2D32f pt; \
int id;
#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
typedef struct CvSubdiv2DPoint
{
CV_SUBDIV2D_POINT_FIELDS()
@ -135,7 +135,7 @@ Removes all virtual points.
:param subdiv: Delaunay subdivision.
The function removes all of the virtual points. It is called internally in :ocv:cfunc:`CalcSubdivVoronoi2D` if the subdivision was modified after the previous call to the function.
@ -145,7 +145,7 @@ CreateSubdivDelaunay2D
Creates an empty Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage) -> CvSubdiv2D
:param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision.
@ -157,7 +157,7 @@ subdivision where 2D points can be added using the function
. All of the points to be added must be within
the specified rectangle, otherwise a runtime error is raised.
Note that the triangulation is a single large triangle that covers the given rectangle. Hence the three vertices of this triangle are outside the rectangle ``rect``.
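A minimal usage sketch with the C API (the rectangle and the point are arbitrary): ::

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSubdiv2D* subdiv = cvCreateSubdivDelaunay2D( cvRect(0, 0, 600, 600), storage );
    // every inserted point must fall inside the rectangle passed above
    cvSubdivDelaunay2DInsert( subdiv, cvPoint2D32f(100, 50) );
    cvReleaseMemStorage( &storage );   // the subdivision lives on the storage, so this frees it too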
@ -192,7 +192,7 @@ Returns the edge destination.
The function returns the edge destination. The returned pointer may be NULL if the edge is from a dual subdivision and the virtual point coordinates are not calculated yet. The virtual points can be calculated using the function :ocv:cfunc:`CalcSubdivVoronoi2D`.
Subdiv2DGetEdge
@ -235,9 +235,9 @@ Returns next edge around the edge origin.
:param edge: Subdivision edge (not a quad-edge).
The function returns the next edge around the edge origin (``eOnext`` in the picture above if ``e`` is the input edge).
@ -259,33 +259,33 @@ Returns the location of a point within a Delaunay triangulation.
The function locates the input point within the subdivision. There are five cases:
* The point falls into some facet. The function returns ``CV_PTLOC_INSIDE``, and ``*edge`` will contain one of the edges of the facet.

* The point falls onto the edge. The function returns ``CV_PTLOC_ON_EDGE``, and ``*edge`` will contain this edge.

* The point coincides with one of the subdivision vertices. The function returns ``CV_PTLOC_VERTEX``, and ``*vertex`` will contain a pointer to the vertex.

* The point is outside the subdivision reference rectangle. The function returns ``CV_PTLOC_OUTSIDE_RECT`` and no pointers are filled.

* One of the input arguments is invalid. A runtime error is raised or, if silent or "parent" error processing mode is selected, ``CV_PTLOC_ERROR`` is returned.
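For example, a hedged sketch of handling the first case (``subdiv`` stands for an existing subdivision): ::

    CvSubdiv2DEdge edge = 0;
    CvSubdiv2DPoint* vertex = 0;
    CvSubdiv2DPointLocation loc =
        cvSubdiv2DLocate( subdiv, cvPoint2D32f(200, 200), &edge, &vertex );
    if( loc == CV_PTLOC_INSIDE )
    {
        // edge is one of the edges of the facet that contains the point
    }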


@ -65,10 +65,21 @@ training examples are recomputed at each training iteration. Examples deleted at
CvBoostParams
-------------
.. ocv:struct:: CvBoostParams : public CvDTreeParams
Boosting training parameters.
There is one structure member that you can set directly:
.. ocv:member:: int split_criteria

    Splitting criteria used to choose optimal splits during a weak tree construction. Possible values are:

    * **CvBoost::DEFAULT** Use the default for the particular boosting method, see below.
    * **CvBoost::GINI** Use the Gini index. This is the default option for Real AdaBoost; it may also be used for Discrete AdaBoost.
    * **CvBoost::MISCLASS** Use the misclassification rate. This is the default option for Discrete AdaBoost; it may also be used for Real AdaBoost.
    * **CvBoost::SQERR** Use the least-squares criterion. This is the default and the only option for LogitBoost and Gentle AdaBoost.
The structure is derived from :ocv:class:`CvDTreeParams` but not all of the decision tree parameters are supported. In particular, cross-validation is not supported.
All parameters are public. You can initialize them with a constructor and then override some of them directly, if you want.
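For illustration, a minimal sketch (the parameter values are examples, not recommendations): ::

    // Real AdaBoost, 100 weak trees of depth up to 5, weight trimming at 0.95
    CvBoostParams params( CvBoost::REAL, 100, 0.95, 5, false, 0 );
    params.split_criteria = CvBoost::GINI;   // a member overridden directly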
@ -82,13 +93,13 @@ The constructors.
.. ocv:function:: CvBoostParams::CvBoostParams( int boost_type, int weak_count, double weight_trim_rate, int max_depth, bool use_surrogates, const float* priors )
:param boost_type: Type of the boosting algorithm. Possible values are:
* **CvBoost::DISCRETE** Discrete AdaBoost.
* **CvBoost::REAL** Real AdaBoost. It is a technique that utilizes confidence-rated predictions and works well with categorical data.
* **CvBoost::LOGIT** LogitBoost. It can produce good regression fits.
* **CvBoost::GENTLE** Gentle AdaBoost. It puts less weight on outlier data points and for that reason is often good with regression data.

Gentle AdaBoost and Real AdaBoost are often the preferable choices.
:param weak_count: The number of weak classifiers.
@ -96,17 +107,6 @@ The constructors.
See :ocv:func:`CvDTreeParams::CvDTreeParams` for description of other parameters.
Default parameters are:
::
@ -122,7 +122,7 @@ Default parameters are:
CvBoostTree
-----------
.. ocv:class:: CvBoostTree : public CvDTree
The weak tree classifier, a component of the boosted tree classifier :ocv:class:`CvBoost`, is a derivative of :ocv:class:`CvDTree`. Normally, there is no need to use the weak classifiers directly. However, they can be accessed as elements of the sequence :ocv:member:`CvBoost::weak`, retrieved by :ocv:func:`CvBoost::get_weak_predictors`.
@ -130,7 +130,7 @@ The weak tree classifier, a component of the boosted tree classifier :ocv:class:
CvBoost
-------
.. ocv:class:: CvBoost : public CvStatModel
Boosted tree classifier derived from :ocv:class:`CvStatModel`.
@ -144,7 +144,7 @@ Default and training constructors.
.. ocv:function:: CvBoost::CvBoost( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvBoostParams params=CvBoostParams() )
.. ocv:pyfunction:: cv2.Boost([trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]]) -> <Boost object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameters descriptions.
@ -181,10 +181,10 @@ Predicts a response for an input sample.
:param weak_responses: Optional output parameter, a floating-point vector with responses of each individual weak classifier. The number of elements in the vector must be equal to the slice length.
:param slice: Continuous subset of the sequence of weak classifiers to be used for prediction. By default, all the weak classifiers are used.
:param raw_mode: Normally, it should be set to ``false``.
:param return_sum: If ``true`` then return sum of votes instead of the class label.
The method runs the sample through the trees in the ensemble and returns the output class label based on the weighted voting.
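A hedged C++ usage sketch (``trainData``, ``responses``, and ``sample`` stand for user-supplied matrices): ::

    CvBoost boost;
    boost.train( trainData, CV_ROW_SAMPLE, responses );    // default CvBoostParams
    float label = boost.predict( sample );                 // voted class label
    float sum = boost.predict( sample, cv::Mat(),
                               cv::Range::all(), false, true );   // sum of votes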
@ -199,7 +199,7 @@ Removes the specified weak classifiers.
:param slice: Continuous subset of the sequence of weak classifiers to be removed.
The method removes the specified weak classifiers from the sequence.
.. note:: Do not confuse this method with the pruning of individual decision trees, which is currently not supported.

@ -20,9 +20,9 @@ child node as the next observed node) or to the right based on the
value of a certain variable whose index is stored in the observed
node. The following variables are possible:
* **Ordered variables.** The variable value is compared with a threshold that is also stored in the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the left, else to the right.

* **Categorical variables.** A discrete variable value is tested to see whether it belongs to a certain subset of values (also stored in the node) from a limited set of values the variable could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For example, if the color is green or red, go to the left, else to the right.
So, in each node, a pair of entities (``variable_index`` , ``decision_rule
@ -57,95 +57,95 @@ Importance of each variable is computed over all the splits on this variable in
CvDTreeSplit
------------
.. ocv:struct:: CvDTreeSplit

The structure represents a possible decision tree node split. It has public members:

.. ocv:member:: int var_idx

    Index of the variable on which the split is created.

.. ocv:member:: int inversed

    If it is not null then the inverse split rule is used, that is, the left and right branches are exchanged in the rule expressions below.

.. ocv:member:: float quality

    The split quality, a positive number. It is used to choose the best primary split, then to choose and sort the surrogate splits. After the tree is constructed, it is also used to compute variable importance.

.. ocv:member:: CvDTreeSplit* next

    Pointer to the next split in the node list of splits.

.. ocv:member:: int[] subset

    Bit array indicating the value subset in case of split on a categorical variable. The rule is: ::

        if var_value in subset
          then next_node <- left
          else next_node <- right

.. ocv:member:: float ord::c

    The threshold value in case of split on an ordered variable. The rule is: ::

        if var_value < ord.c
          then next_node <- left
          else next_node <- right

.. ocv:member:: int ord::split_point

    Used internally by the training algorithm.
CvDTreeNode
-----------
.. ocv:struct:: CvDTreeNode

The structure represents a node in a decision tree. It has public members:

.. ocv:member:: int class_idx

    Class index normalized to the 0..class_count-1 range and assigned to the node. It is used internally in classification trees and tree ensembles.

.. ocv:member:: int Tn

    Tree index in an ordered sequence of pruned trees. The indices are used during and after the pruning procedure. The root node has the maximum value ``Tn`` of the whole tree, child nodes have ``Tn`` less than or equal to the parent's ``Tn``, and nodes with :math:`Tn \leq CvDTree::pruned\_tree\_idx` are not used at the prediction stage (the corresponding branches are considered as cut-off), even if they have not been physically deleted from the tree at the pruning stage.

.. ocv:member:: double value

    Value at the node: a class label in case of classification or an estimated function value in case of regression.

.. ocv:member:: CvDTreeNode* parent

    Pointer to the parent node.

.. ocv:member:: CvDTreeNode* left

    Pointer to the left child node.

.. ocv:member:: CvDTreeNode* right

    Pointer to the right child node.

.. ocv:member:: CvDTreeSplit* split

    Pointer to the first (primary) split in the node list of splits.

.. ocv:member:: int sample_count

    The number of samples that fall into the node at the training stage. It is used to resolve difficult cases, when the variable for the primary split is missing and all the variables for the other surrogate splits are missing too. In this case the sample is directed to the left if ``left->sample_count > right->sample_count`` and to the right otherwise.

.. ocv:member:: int depth

    Depth of the node. The root node depth is 0, and a child node's depth is its parent's depth plus 1.
Other numerous fields of ``CvDTreeNode`` are used internally at the training stage.
CvDTreeParams
-------------
.. ocv:struct:: CvDTreeParams

The structure contains all the decision tree training parameters. You can initialize it by the default constructor and then override any parameters directly before training, or the structure may be fully initialized using the advanced variant of the constructor.
@ -153,19 +153,19 @@ CvDTreeParams::CvDTreeParams
----------------------------
The constructors.
.. ocv:function:: CvDTreeParams::CvDTreeParams()
.. ocv:function:: CvDTreeParams::CvDTreeParams( int max_depth, int min_sample_count, float regression_accuracy, bool use_surrogates, int max_categories, int cv_folds, bool use_1se_rule, bool truncate_pruned_tree, const float* priors )
:param max_depth: The maximum possible depth of the tree. That is, the training algorithm attempts to split a node while its depth is less than ``max_depth``. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned.
:param min_sample_count: If the number of samples in a node is less than this parameter then the node will not be split.
:param regression_accuracy: Termination criteria for regression trees. If all absolute differences between an estimated value in a node and values of train samples in this node are less than this parameter then the node will not be split.
:param use_surrogates: If true then surrogate splits will be built. These splits make it possible to work with missing data and to compute variable importance correctly.
:param max_categories: Cluster possible values of a categorical variable into ``K`` :math:`\leq` ``max_categories`` clusters to find a suboptimal split. If a discrete variable, on which the training procedure tries to make a split, takes more than ``max_categories`` values, the precise best subset estimation may take a very long time because the algorithm is exponential. Instead, many decision tree engines (including ML) try to find a suboptimal split in this case by clustering all the samples into ``max_categories`` clusters, that is, some categories are merged together. The clustering is applied only in ``n``>2-class classification problems for categorical variables with ``N > max_categories`` possible values. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
:param cv_folds: If ``cv_folds > 1`` then prune a tree with ``K``-fold cross-validation where ``K`` is equal to ``cv_folds``.
@ -184,10 +184,10 @@ The default constructor initializes all the parameters with the default values t
truncate_pruned_tree(true), regression_accuracy(0.01f), priors(0)
{}
CvDTreeTrainData
----------------
.. ocv:struct:: CvDTreeTrainData
Decision tree training data and shared data for tree ensembles. The structure is mostly used internally for storing both standalone trees and tree ensembles efficiently. Basically, it contains the following types of information:
@ -212,7 +212,7 @@ There are two ways of using this structure. In simple cases (for example, a stan
CvDTree
-------
.. ocv:class:: CvDTree : public CvStatModel
The class implements a decision tree as described in the beginning of this section.
@ -221,7 +221,7 @@ CvDTree::train
--------------
Trains a decision tree.
.. ocv:function:: bool CvDTree::train( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvDTreeParams params=CvDTreeParams() )
.. ocv:function:: bool CvDTree::train( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvDTreeParams params=CvDTreeParams() )
@ -258,7 +258,7 @@ Returns the leaf node of a decision tree corresponding to the input vector.
:param missingDataMask: Optional input missing measurement mask.
:param preprocessedInput: This parameter is normally set to ``false``, implying a regular input. If it is ``true``, the method assumes that all the values of the discrete input variables have been already normalized to :math:`0` to :math:`num\_of\_categories_i-1` ranges since the decision tree uses such normalized representation internally. It is useful for faster prediction with tree ensembles. For ordered input variables, the flag is not used.
The method traverses the decision tree and returns the reached leaf node as output. The prediction result, either the class label or the estimated function value, may be retrieved as the ``value`` field of the :ocv:class:`CvDTreeNode` structure, for example: ``dtree->predict(sample,mask)->value``.
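For illustration, a sketch (``trainData``, ``responses``, and ``sample`` are placeholders): ::

    CvDTree dtree;
    dtree.train( trainData, CV_ROW_SAMPLE, responses );
    CvDTreeNode* node = dtree.predict( sample );
    double result = node->value;   // class label or estimated function value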
@ -270,7 +270,7 @@ Returns error of the decision tree.
.. ocv:function:: float CvDTree::calc_error( CvMLData* trainData, int type, std::vector<float> *resp = 0 )
:param trainData: Data for the decision tree.
:param type: Type of error. Possible values are:
* **CV_TRAIN_ERROR** Error on train samples.
@ -290,7 +290,7 @@ Returns the variable importance array.
.. ocv:function:: const CvMat* CvDTree::get_var_importance()
.. ocv:pyfunction:: cv2.DTree.getVarImportance() -> retval
CvDTree::get_root
-----------------
@ -311,7 +311,7 @@ CvDTree::get_data
-----------------
Returns used train data of the decision tree.
.. ocv:function:: CvDTreeTrainData* CvDTree::get_data() const
Example: building a tree for classifying mushrooms. See the ``mushroom.cpp`` sample that demonstrates how to build and use the
decision tree.

@ -10,6 +10,6 @@ Extremely randomized trees have been introduced by Pierre Geurts, Damien Ernst a
CvERTrees
----------
.. ocv:class:: CvERTrees : public CvRTrees

The class implements the Extremely randomized trees algorithm. ``CvERTrees`` is inherited from :ocv:class:`CvRTrees` and has the same interface, so see the description of the :ocv:class:`CvRTrees` class for details. To set the training parameters of Extremely randomized trees, the same class :ocv:struct:`CvRTParams` is used.

@ -60,7 +60,7 @@ At the second step (Maximization step or M-step), the mixture parameter estimate
.. math::
\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}
Alternatively, the algorithm may start with the M-step when the initial values for
:math:`p_{i,k}` can be provided. Another alternative when
@ -91,7 +91,7 @@ already a good enough approximation).
EM
--
.. ocv:class:: EM : public Algorithm
The class implements the EM algorithm as described in the beginning of this section. It is inherited from :ocv:class:`Algorithm`.
@ -102,9 +102,10 @@ The constructor of the class
.. ocv:function:: EM::EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON) )
.. ocv:pyfunction:: cv2.EM([nclusters[, covMatType[, termCrit]]]) -> <EM object>
:param nclusters: The number of mixture components in the Gaussian mixture model. The default value of the parameter is ``EM::DEFAULT_NCLUSTERS=5``. Some EM implementations could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
:param covMatType: Constraint on covariance matrices which defines type of matrices. Possible values are:
* **EM::COV_MAT_SPHERICAL** A scaled identity matrix :math:`\mu_k * I`. There is the only parameter :math:`\mu_k` to be estimated for each matrix. The option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (for example in case when the data is preprocessed with PCA). The results of such preliminary estimation may be passed again to the optimization procedure, this time with ``covMatType=EM::COV_MAT_DIAGONAL``.
@ -112,7 +113,7 @@ The constructor of the class
* **EM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of free parameters is ``d`` for each matrix. This is the most commonly used option, yielding good estimation results.

* **EM::COV_MAT_GENERIC** A symmetric positive-definite matrix. The number of free parameters in each matrix is about :math:`d^2/2`. It is not recommended to use this option unless there is a pretty accurate initial estimation of the parameters and/or a huge number of training samples.
:param termCrit: The termination criteria of the EM algorithm. The EM algorithm can be terminated by the number of iterations ``termCrit.maxCount`` (number of M-steps) or when relative change of likelihood logarithm is less than ``termCrit.epsilon``. Default maximum number of iterations is ``EM::DEFAULT_MAX_ITERS=100``.
EM::train
@ -122,23 +123,29 @@ Estimates the Gaussian mixture parameters from a samples set.
.. ocv:function:: bool EM::train(InputArray samples, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:function:: bool EM::trainE(InputArray samples, InputArray means0, InputArray covs0=noArray(), InputArray weights0=noArray(), OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:function:: bool EM::trainM(InputArray samples, InputArray probs0, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:pyfunction:: cv2.EM.train(samples[, logLikelihoods[, labels[, probs]]]) -> retval, logLikelihoods, labels, probs
.. ocv:pyfunction:: cv2.EM.trainE(samples, means0[, covs0[, weights0[, logLikelihoods[, labels[, probs]]]]]) -> retval, logLikelihoods, labels, probs
.. ocv:pyfunction:: cv2.EM.trainM(samples, probs0[, logLikelihoods[, labels[, probs]]]) -> retval, logLikelihoods, labels, probs
:param samples: Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have the ``CV_64F`` type, it will be converted to an internal matrix of that type for further computation.

:param means0: Initial means :math:`a_k` of mixture components. It is a one-channel matrix of :math:`nclusters \times dims` size. If the matrix does not have the ``CV_64F`` type, it will be converted to an internal matrix of that type for further computation.

:param covs0: The vector of initial covariance matrices :math:`S_k` of mixture components. Each of the covariance matrices is a one-channel matrix of :math:`dims \times dims` size. If the matrices do not have the ``CV_64F`` type, they will be converted to internal matrices of that type for further computation.

:param weights0: Initial weights :math:`\pi_k` of mixture components. It should be a one-channel floating-point matrix with :math:`1 \times nclusters` or :math:`nclusters \times 1` size.

:param probs0: Initial probabilities :math:`p_{i,k}` of sample :math:`i` belonging to mixture component :math:`k`. It is a one-channel floating-point matrix of :math:`nsamples \times nclusters` size.
:param logLikelihoods: The optional output matrix that contains a likelihood logarithm value for each sample. It has :math:`nsamples \times 1` size and ``CV_64FC1`` type.
:param labels: The optional output "class label" for each sample: :math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample). It has :math:`nsamples \times 1` size and ``CV_32SC1`` type.
:param probs: The optional output matrix that contains posterior probabilities of each Gaussian mixture component given each sample. It has :math:`nsamples \times nclusters` size and ``CV_64FC1`` type.
Three versions of training method differ in the initialization of Gaussian mixture model parameters and start step:
@ -166,8 +173,10 @@ EM::predict
-----------
Returns a likelihood logarithm value and an index of the most probable mixture component for the given sample.
.. ocv:function:: Vec2d EM::predict(InputArray sample, OutputArray probs=noArray()) const
.. ocv:pyfunction:: cv2.EM.predict(sample[, probs]) -> retval, probs
:param sample: A sample for classification. It should be a one-channel matrix of :math:`1 \times dims` or :math:`dims \times 1` size.
:param probs: Optional output matrix that contains posterior probabilities of each component given the sample. It has :math:`1 \times nclusters` size and ``CV_64FC1`` type.
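Putting training and prediction together, a minimal sketch on toy data (the sizes and values are illustrative): ::

    cv::Mat samples( 100, 2, CV_64F );
    cv::randu( samples, 0, 10 );                   // toy data: one sample per row
    cv::EM em( 3 );                                // 3 components, diagonal covariances
    cv::Mat logLikelihoods, labels, probs;
    em.train( samples, logLikelihoods, labels, probs );
    cv::Vec2d res = em.predict( samples.row(0) );  // res[0]: log-likelihood, res[1]: component index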
@ -180,6 +189,8 @@ Returns ``true`` if the Gaussian mixture model was trained.
.. ocv:function:: bool EM::isTrained() const
.. ocv:pyfunction:: cv2.EM.isTrained() -> retval
EM::read, EM::write
-------------------
See :ocv:func:`Algorithm::read` and :ocv:func:`Algorithm::write`.
@ -195,4 +206,5 @@ See :ocv:func:`Algorithm::get` and :ocv:func:`Algorithm::set`. The following par
* ``"weights"`` *(read-only)*
* ``"means"`` *(read-only)*
* ``"covs"`` *(read-only)*
..

@ -104,7 +104,7 @@ For classification problems, the result is :math:`\arg\max_{i=1..K}(f_i(x))`.
CvGBTreesParams
---------------
.. ocv:struct:: CvGBTreesParams : public CvDTreeParams
GBT training parameters.
@ -149,7 +149,7 @@ By default the following constructor is used:
CvGBTrees
---------
.. ocv:class:: CvGBTrees : public CvStatModel
The class implements the Gradient boosted tree model as described in the beginning of this section.

@ -7,7 +7,7 @@ The algorithm caches all training samples and predicts the response for a new sa
CvKNearest
----------
.. ocv:class:: CvKNearest : public CvStatModel
The class implements K-Nearest Neighbors model as described in the beginning of this section.

@ -237,7 +237,7 @@ The method returns a map that converts string class labels to the numerical clas
CvTrainTestSplit
----------------
.. ocv:struct:: CvTrainTestSplit
Structure setting the split of a data set read by :ocv:class:`CvMLData`.
::

@ -8,7 +8,7 @@ ML implements feed-forward artificial neural networks or, more particularly, mul
.. image:: pics/mlp.png
All the neurons in MLP are similar. Each of them has several input links (it takes the output values from several neurons in the previous layer as input) and several output links (it passes the response to several neurons in the next layer). The values retrieved from the previous layer are summed up with certain weights, individual for each neuron, plus the bias term. The sum is transformed using the activation function
:math:`f` that may also be different for different neurons.
.. image:: pics/neuron_model.png
@ -45,7 +45,7 @@ Different activation functions may be used. ML implements three standard functio
In ML, all the neurons have the same activation functions, with the same free parameters (
:math:`\alpha, \beta` ) that are specified by the user and are not altered by the training algorithms.
So, the whole trained network works as follows:
#. Take the feature vector as input. The vector size is equal to the size of the input layer.
@ -93,45 +93,45 @@ The second (default) one is a batch RPROP algorithm.
.. [LeCun98] Y. LeCun, L. Bottou, G.B. Orr and K.-R. Muller, *Efficient backprop*, in Neural Networks---Tricks of the Trade, Springer Lecture Notes in Computer Sciences 1524, pp.5-50, 1998.
.. [RPROP93] M. Riedmiller and H. Braun, *A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm*, Proc. ICNN, San Francisco (1993).
CvANN_MLP_TrainParams
---------------------
.. ocv:struct:: CvANN_MLP_TrainParams

Parameters of the MLP training algorithm. You can initialize the structure by a constructor, or the individual parameters can be adjusted after the structure is created.

The back-propagation algorithm parameters:

.. ocv:member:: double bp_dw_scale

    Strength of the weight gradient term. The recommended value is about 0.1.

.. ocv:member:: double bp_moment_scale

    Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. A value of about 0.1 is good enough.

The RPROP algorithm parameters (see [RPROP93]_ for details):

.. ocv:member:: double rp_dw0

    Initial value :math:`\Delta_0` of update-values :math:`\Delta_{ij}`.

.. ocv:member:: double rp_dw_plus

    Increase factor :math:`\eta^+`. It must be >1.

.. ocv:member:: double rp_dw_minus

    Decrease factor :math:`\eta^-`. It must be <1.

.. ocv:member:: double rp_dw_min

    Update-values lower limit :math:`\Delta_{min}`. It must be positive.

.. ocv:member:: double rp_dw_max

    Update-values upper limit :math:`\Delta_{max}`. It must be >1.
CvANN_MLP_TrainParams::CvANN_MLP_TrainParams
@ -169,9 +169,9 @@ By default the RPROP algorithm is used:
CvANN_MLP
---------
.. ocv:class:: CvANN_MLP : public CvStatModel

MLP model.
Unlike many other models in ML that are constructed and trained at once, in the MLP model these steps are separated. First, a network with the specified topology is created using the non-default constructor or the method :ocv:func:`CvANN_MLP::create`. All the weights are set to zeros. Then, the network is trained using a set of input and output vectors. The training procedure can be repeated more than once, that is, the weights can be adjusted based on the new training data.
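A short sketch of this two-step workflow (the layer sizes and the training matrices ``inputs``/``outputs`` are placeholders): ::

    int sz[] = { 2, 10, 1 };                  // input, hidden, and output layer sizes
    cv::Mat layerSizes( 1, 3, CV_32S, sz );
    CvANN_MLP mlp;
    mlp.create( layerSizes, CvANN_MLP::SIGMOID_SYM );
    // inputs: one sample per row; outputs: one response vector per row
    mlp.train( inputs, outputs, cv::Mat() );  // can be called again to update the weights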
@ -184,7 +184,7 @@ The constructors.
.. ocv:function:: CvANN_MLP::CvANN_MLP( const CvMat* layerSizes, int activateFunc=CvANN_MLP::SIGMOID_SYM, double fparam1=0, double fparam2=0 )
.. ocv:pyfunction:: cv2.ANN_MLP([layerSizes[, activateFunc[, fparam1[, fparam2]]]]) -> <ANN_MLP object>
The advanced constructor allows you to create an MLP with the specified topology. See :ocv:func:`CvANN_MLP::create` for details.
@ -203,7 +203,7 @@ Constructs MLP with the specified topology.
:param activateFunc: Parameter specifying the activation function for each neuron: one of ``CvANN_MLP::IDENTITY``, ``CvANN_MLP::SIGMOID_SYM``, and ``CvANN_MLP::GAUSSIAN``.
:param fparam1: Free parameter of the activation function, :math:`\alpha`. See the formulas in the introduction section.
:param fparam2: Free parameter of the activation function, :math:`\beta`. See the formulas in the introduction section.
The method creates an MLP network with the specified topology and assigns the same activation function to all the neurons.
@ -216,7 +216,7 @@ Trains/updates MLP.
.. ocv:function:: int CvANN_MLP::train( const CvMat* inputs, const CvMat* outputs, const CvMat* sampleWeights, const CvMat* sampleIdx=0, CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags=0 )
.. ocv:pyfunction:: cv2.ANN_MLP.train(inputs, outputs, sampleWeights[, sampleIdx[, params[, flags]]]) -> retval
:param inputs: Floating-point matrix of input vectors, one vector per row.
@ -249,7 +249,7 @@ Predicts responses for input samples.
.. ocv:function:: float CvANN_MLP::predict( const CvMat* inputs, CvMat* outputs ) const
.. ocv:pyfunction:: cv2.ANN_MLP.predict(inputs[, outputs]) -> retval, outputs
:param inputs: Input samples.
@ -278,4 +278,4 @@ Returns neurons weights of the particular layer.
.. ocv:function:: double* CvANN_MLP::get_weights(int layer)
:param layer: Index of the particular layer.

@ -11,7 +11,7 @@ This simple classification model assumes that feature vectors from each class ar
CvNormalBayesClassifier
-----------------------
.. ocv:class:: CvNormalBayesClassifier : public CvStatModel
Bayes classifier for normally distributed data.
@ -25,7 +25,7 @@ Default and training constructors.
.. ocv:function:: CvNormalBayesClassifier::CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0 )
.. ocv:pyfunction:: cv2.NormalBayesClassifier([trainData, responses[, varIdx[, sampleIdx]]]) -> <NormalBayesClassifier object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameters descriptions.
@ -41,7 +41,7 @@ Trains the model.
:param update: Identifies whether the model should be trained from scratch (``update=false``) or should be updated using the new training data (``update=true``).
The method trains the Normal Bayes classifier. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations:
* Only ``CV_ROW_SAMPLE`` data layout is supported.
* Input variables are all ordered.

@ -42,7 +42,7 @@ For the random trees usage example, please, see letter_recog.cpp sample in OpenC
CvRTParams
----------
.. ocv:struct:: CvRTParams : public CvDTreeParams
Training parameters of random trees.
@ -53,7 +53,7 @@ CvRTParams::CvRTParams:
-----------------------
The constructors.
.. ocv:function:: CvRTParams::CvRTParams()
.. ocv:function:: CvRTParams::CvRTParams( int max_depth, int min_sample_count, float regression_accuracy, bool use_surrogates, int max_categories, const float* priors, bool calc_var_importance, int nactive_vars, int max_num_of_trees_in_the_forest, float forest_accuracy, int termcrit_type )
@ -72,9 +72,9 @@ The constructors.
:param forest_accuracy: Sufficient accuracy (OOB error).
:param termcrit_type: The type of the termination criteria:
* **CV_TERMCRIT_ITER** Terminate learning when the forest reaches ``max_num_of_trees_in_the_forest`` trees;
* **CV_TERMCRIT_EPS** Terminate learning when the OOB error becomes smaller than ``forest_accuracy``;
* **CV_TERMCRIT_ITER | CV_TERMCRIT_EPS** Use both termination criteria.
@ -94,7 +94,7 @@ The default constructor sets all parameters to default values which are differen
CvRTrees
--------
.. ocv:class:: CvRTrees : public CvStatModel
The class implements the random forest predictor as described in the beginning of this section.
@ -118,7 +118,7 @@ CvRTrees::predict
-----------------
Predicts the output for an input sample.
.. ocv:function:: float CvRTrees::predict( const Mat& sample, const Mat& missing=Mat() ) const
.. ocv:function:: float CvRTrees::predict( const CvMat* sample, const CvMat* missing = 0 ) const
@ -156,7 +156,7 @@ Returns the variable importance array.
.. ocv:function:: const CvMat* CvRTrees::get_var_importance()
.. ocv:pyfunction:: cv2.RTrees.getVarImportance() -> retval
The method returns the variable importance vector, computed at the training stage when ``CvRTParams::calc_var_importance`` is set to true. If this flag was set to false, the ``NULL`` pointer is returned. This differs from the decision trees where variable importance can be computed anytime after the training.
@ -181,7 +181,7 @@ CvRTrees::calc_error
--------------------
Returns error of the random forest.
.. ocv:function:: float CvRTrees::calc_error( CvMLData* data, int type, std::vector<float>* resp=0 )
The method is identical to :ocv:func:`CvDTree::calc_error` but uses the random forest as predictor.

@ -52,7 +52,7 @@ CvStatModel::CvStatModel(...)
-----------------------------
The training constructor.
.. ocv:function:: CvStatModel::CvStatModel()
Most ML classes provide a single-step constructor and train constructors. This constructor is equivalent to the default constructor, followed by the :ocv:func:`CvStatModel::train` method with the parameters that are passed to the constructor.

@ -16,21 +16,21 @@ SVM implementation in OpenCV is based on [LibSVM]_.
CvParamGrid
-----------
.. ocv:struct:: CvParamGrid

The structure represents the logarithmic grid range of statmodel parameters. It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate being computed by cross-validation.

.. ocv:member:: double CvParamGrid::min_val

    Minimum value of the statmodel parameter.

.. ocv:member:: double CvParamGrid::max_val

    Maximum value of the statmodel parameter.

.. ocv:member:: double CvParamGrid::step

    Logarithmic step for iterating the statmodel parameter.
The grid determines the following iteration sequence of the statmodel parameter values:
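The sequence is a geometric progression: ``min_val``, ``min_val*step``, ``min_val*step^2``, and so on, while the values stay strictly below ``max_val``. For example (illustrative values): ::

    CvParamGrid Cgrid( 0.1, 1000, 10 );   // iterates the parameter over 0.1, 1, 10, 100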
@ -77,7 +77,7 @@ Returns ``true`` if the grid is valid and ``false`` otherwise. The grid is valid
CvSVMParams
-----------
.. ocv:struct:: CvSVMParams
SVM training parameters.
@ -114,7 +114,7 @@ The constructors.
* **CvSVM::RBF** Radial basis function (RBF), a good choice in most cases. :math:`K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0`.
* **CvSVM::SIGMOID** Sigmoid kernel: :math:`K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)`.
:param degree: Parameter ``degree`` of a kernel function (POLY).
:param gamma: Parameter :math:`\gamma` of a kernel function (POLY / RBF / SIGMOID).
@ -146,7 +146,7 @@ The default constructor initialize the structure with following values:
CvSVM
-----
.. ocv:class:: CvSVM : public CvStatModel
Support Vector Machines.
@ -160,7 +160,7 @@ Default and training constructors.
.. ocv:function:: CvSVM::CvSVM( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, CvSVMParams params=CvSVMParams() )
.. ocv:pyfunction:: cv2.SVM([trainData, responses[, varIdx[, sampleIdx[, params]]]]) -> <SVM object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameters descriptions.
@ -174,7 +174,7 @@ Trains an SVM.
.. ocv:pyfunction:: cv2.SVM.train(trainData, responses[, varIdx[, sampleIdx[, params]]]) -> retval
The method trains the SVM model. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations:
* Only the ``CV_ROW_SAMPLE`` data layout is supported.
@ -199,7 +199,7 @@ Trains an SVM with optimal parameters.
.. ocv:pyfunction:: cv2.SVM.train_auto(trainData, responses, varIdx, sampleIdx, params[, k_fold[, Cgrid[, gammaGrid[, pGrid[, nuGrid[, coeffGrid[, degreeGrid[, balanced]]]]]]]]) -> retval
:param k_fold: Cross-validation parameter. The training set is divided into ``k_fold`` subsets. One subset is used to train the model, the others form the test set. So, the SVM algorithm is executed ``k_fold`` times.
:param \*Grid: Iteration grid for the corresponding SVM parameter.
:param balanced: If ``true`` and the problem is 2-class classification then the method creates more balanced cross-validation subsets, that is, the proportions between classes in the subsets are close to the proportions in the whole training dataset.
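A hedged usage sketch (``trainData`` and ``responses`` are placeholders; all grids keep their defaults): ::

    CvSVMParams params;
    params.svm_type    = CvSVM::C_SVC;
    params.kernel_type = CvSVM::RBF;
    CvSVM svm;
    // the optimal C and gamma are selected by 10-fold cross-validation
    svm.train_auto( trainData, responses, cv::Mat(), cv::Mat(), params, 10 );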
@ -285,7 +285,7 @@ Retrieves a number of support vectors and the particular vector.
.. ocv:function:: const float* CvSVM::get_support_vector(int i) const
.. ocv:pyfunction:: cv2.SVM.get_support_vector_count() -> retval
:param i: Index of the particular support vector.
@ -297,4 +297,4 @@ Returns the number of used features (variables count).
.. ocv:function:: int CvSVM::get_var_count() const
.. ocv:pyfunction:: cv2.SVM.get_var_count() -> retval

@ -170,10 +170,10 @@ struct CV_EXPORTS_W_MAP CvParamGrid
min_val = max_val = step = 0;
}
CvParamGrid( double min_val, double max_val, double log_step )
{
    this->min_val = min_val;
    this->max_val = max_val;
    step = log_step;
}
//CvParamGrid( int param_id );
@ -291,10 +291,10 @@ protected:
struct CV_EXPORTS_W_MAP CvSVMParams
{
CvSVMParams();
CvSVMParams( int svm_type, int kernel_type,
             double degree, double gamma, double coef0,
             double Cvalue, double nu, double p,
             CvMat* class_weights, CvTermCriteria term_crit );
CV_PROP_RW int svm_type;
CV_PROP_RW int kernel_type;
@ -569,8 +569,7 @@ public:
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
virtual ~EM();
@ -1026,7 +1025,7 @@ public:
virtual float get_proximity( const CvMat* sample1, const CvMat* sample2,
const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const;
virtual float calc_error( CvMLData* data, int type , std::vector<float>* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
virtual float get_train_error();

@ -3,7 +3,7 @@ Feature Detection and Description
SIFT
----
.. ocv:class:: SIFT : public Feature2D
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D. Lowe [Lowe04]_.
@ -17,11 +17,11 @@ The SIFT constructors.
.. ocv:function:: SIFT::SIFT( int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
:param nfeatures: The number of best features to retain. The features are ranked by their scores (measured in the SIFT algorithm as the local contrast).

:param nOctaveLayers: The number of layers in each octave. 3 is the value used in D. Lowe's paper. The number of octaves is computed automatically from the image resolution.

:param contrastThreshold: The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the fewer features are produced by the detector.

:param edgeThreshold: The threshold used to filter out edge-like features. Note that its meaning is different from that of ``contrastThreshold``: the larger the ``edgeThreshold``, the fewer features are filtered out (more features are retained).

:param sigma: The sigma of the Gaussian applied to the input image at octave #0. If your image is captured with a weak camera with soft lenses, you might want to reduce this number.
@ -31,9 +31,9 @@ SIFT::operator ()
-----------------
Extracts features and computes their descriptors using the SIFT algorithm
.. ocv:function:: void SIFT::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)

:param img: Input 8-bit grayscale image
:param mask: Optional input mask that marks the regions where we should detect features.
@ -43,34 +43,34 @@ Extract features and computes their descriptors using SIFT algorithm
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
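A brief sketch of detecting keypoints and computing descriptors in one call (the image path is a placeholder, and the header location assumes the 2.4 ``nonfree`` module layout): ::

    #include <opencv2/nonfree/nonfree.hpp>   // SIFT/SURF live in the nonfree module

    cv::Mat img = cv::imread( "scene.png", 0 );   // 8-bit grayscale
    cv::SIFT sift;                                // default parameters
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    sift( img, cv::noArray(), keypoints, descriptors );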
SURF
----
.. ocv:class:: SURF : public Feature2D
Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from the ``CvSURFParams`` structure, which specifies the algorithm parameters:

.. ocv:member:: int extended

    * 0 means that the basic descriptors (64 elements each) shall be computed
    * 1 means that the extended descriptors (128 elements each) shall be computed

.. ocv:member:: int upright

    * 0 means that the detector computes the orientation of each feature.
    * 1 means that the orientation is not computed (which is much, much faster). For example, if you match images from a stereo pair, or do image stitching, the matched features likely have very similar angles, and you can speed up feature extraction by setting ``upright=1``.

.. ocv:member:: double hessianThreshold

    Threshold for the keypoint detector. Only features whose hessian is larger than ``hessianThreshold`` are retained by the detector. Therefore, the larger the value, the fewer keypoints you will get. A good default value could be from 300 to 500, depending on the image contrast.

.. ocv:member:: int nOctaves

    The number of Gaussian pyramid octaves that the detector uses. It is set to 4 by default. If you want to get very large features, use a larger value. If you want just small features, decrease it.

.. ocv:member:: int nOctaveLayers

    The number of images within each octave of a Gaussian pyramid. It is set to 2 by default.
.. [Bay06] Bay, H. and Tuytelaars, T. and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
@ -82,18 +82,18 @@ The SURF extractor constructors.
.. ocv:function:: SURF::SURF()
.. ocv:function:: SURF::SURF( double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=true, bool upright=false )
.. ocv:pyfunction:: cv2.SURF([hessianThreshold[, nOctaves[, nOctaveLayers[, extended[, upright]]]]]) -> <SURF object>
:param hessianThreshold: Threshold for hessian keypoint detector used in SURF.
:param nOctaves: Number of pyramid octaves the keypoint detector will use.
:param nOctaveLayers: Number of octave layers within each octave.
:param extended: Extended descriptor flag (true - use extended 128-element descriptors; false - use 64-element descriptors).
:param upright: Up-right or rotated features flag (true - do not compute orientation of features; false - compute orientation).
@ -101,28 +101,28 @@ SURF::operator()
----------------
Detects keypoints and computes SURF descriptors for them.
.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints) const

.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:pyfunction:: cv2.SURF.detect(img, mask) -> keypoints
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, useProvidedKeypoints]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
.. ocv:pyoldfunction:: cv.ExtractSURF(image, mask, storage, params)-> (keypoints, descriptors)
:param image: Input 8-bit grayscale image.
:param mask: Optional input mask that marks the regions where features should be detected.
:param keypoints: The input/output vector of keypoints.
:param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
:param storage: Memory storage for the output keypoints and descriptors in OpenCV 1.x API.
:param params: SURF algorithm parameters in OpenCV 1.x API.
The function is parallelized with the TBB library.
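For example, a short sketch of computing descriptors for already detected keypoints (``surf``, ``img``, and ``keypoints`` are assumed to exist, e.g. from the constructor example above)::

    // detect only
    surf(img, noArray(), keypoints);

    // compute descriptors for the keypoints found above,
    // skipping the detection stage
    Mat descriptors;
    surf(img, noArray(), keypoints, descriptors, true);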
@@ -58,9 +58,9 @@ namespace cv
class CV_EXPORTS_W SIFT : public Feature2D
{
public:
explicit SIFT( int _nfeatures=0, int _nOctaveLayers=3,
double _contrastThreshold=0.04, double _edgeThreshold=10,
double _sigma=1.6);
explicit SIFT( int nfeatures=0, int nOctaveLayers=3,
double contrastThreshold=0.04, double edgeThreshold=10,
double sigma=1.6);
//! returns the descriptor size in floats (128)
int descriptorSize() const;
@@ -108,23 +108,23 @@ class CV_EXPORTS_W SURF : public Feature2D
{
public:
//! the default constructor
SURF();
CV_WRAP SURF();
//! the full constructor taking all the necessary parameters
explicit SURF(double _hessianThreshold,
int _nOctaves=4, int _nOctaveLayers=2,
bool _extended=true, bool _upright=false);
explicit CV_WRAP SURF(double hessianThreshold,
int nOctaves=4, int nOctaveLayers=2,
bool extended=true, bool upright=false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
CV_WRAP int descriptorSize() const;
//! returns the descriptor type
int descriptorType() const;
CV_WRAP int descriptorType() const;
//! finds the keypoints using fast hessian detector used in SURF
void operator()(InputArray img, InputArray mask,
CV_WRAP_AS(detect) void operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
CV_WRAP_AS(detect) void operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false) const;
@@ -131,7 +131,7 @@ FeatureEvaluator::create
----------------------------
Constructs the feature evaluator.
.. ocv:function:: static Ptr<FeatureEvaluator> FeatureEvaluator::create(int type)
.. ocv:function:: Ptr<FeatureEvaluator> FeatureEvaluator::create(int type)
:param type: Type of features evaluated by the cascade (``HAAR`` or ``LBP`` for now).
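A one-line usage sketch::

    // evaluator for Haar-like features; use FeatureEvaluator::LBP for LBP cascades
    Ptr<FeatureEvaluator> feval = FeatureEvaluator::create(FeatureEvaluator::HAAR);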
@@ -148,7 +148,7 @@ Loads a classifier from a file.
.. ocv:function:: CascadeClassifier::CascadeClassifier(const string& filename)
.. ocv:pyfunction:: cv2.CascadeClassifier(filename) -> <CascadeClassifier object>
.. ocv:pyfunction:: cv2.CascadeClassifier([filename]) -> <CascadeClassifier object>
:param filename: Name of the file from which the classifier is loaded.
@@ -193,9 +193,9 @@ Detects objects of different sizes in the input image. The detected objects are
.. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize]]]]]) -> objects
.. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image, rejectLevels, levelWeights[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize[, outputRejectLevels]]]]]]) -> objects
.. ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scaleFactor=1.1, int minNeighbors=3, int flags=0, CvSize minSize=cvSize(0, 0), CvSize maxSize=cvSize(0, 0) )
.. ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scale_factor=1.1, int min_neighbors=3, int flags=0, CvSize min_size=cvSize(0,0), CvSize max_size=cvSize(0,0) )
.. ocv:pyoldfunction:: cv.HaarDetectObjects(image, cascade, storage, scaleFactor=1.1, minNeighbors=3, flags=0, minSize=(0, 0))-> detectedObjects
.. ocv:pyoldfunction:: cv.HaarDetectObjects(image, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, min_size=(0, 0)) -> detectedObjects
:param cascade: Haar classifier cascade (OpenCV 1.x API only). It can be loaded from an XML or YAML file using :ocv:cfunc:`Load`. When the cascade is no longer needed, release it using ``cvReleaseHaarClassifierCascade(&cascade)``.
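For the C++ API, a minimal detection sketch (file names are hypothetical)::

    CascadeClassifier cascade;
    if (cascade.load("haarcascade_frontalface_alt.xml"))
    {
        Mat gray = imread("people.jpg", 0);
        equalizeHist(gray, gray); // equalization usually improves detection

        vector<Rect> objects;
        cascade.detectMultiScale(gray, objects, 1.1, 3, 0, Size(30, 30));
    }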
@@ -222,7 +222,7 @@ Sets an image for detection.
.. ocv:function:: bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& feval, const Mat& image )
.. ocv:cfunction:: void cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, const CvArr* sum, const CvArr* sqsum, const CvArr* tiltedSum, double scale )
.. ocv:cfunction:: void cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, const CvArr* sum, const CvArr* sqsum, const CvArr* tilted_sum, double scale )
:param cascade: Haar classifier cascade (OpenCV 1.x API only). See :ocv:func:`CascadeClassifier::detectMultiScale` for more information.
@@ -239,9 +239,9 @@ CascadeClassifier::runAt
----------------------------
Runs the detector at the specified point.
.. ocv:function:: int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& feval, Point pt )
.. ocv:function:: int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& feval, Point pt, double& weight )
.. ocv:cfunction:: int cvRunHaarClassifierCascade( CvHaarClassifierCascade* cascade, CvPoint pt, int startStage=0 )
.. ocv:cfunction:: int cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, CvPoint pt, int start_stage=0 )
:param cascade: Haar classifier cascade (OpenCV 1.x API only). See :ocv:func:`CascadeClassifier::detectMultiScale` for more information.
@@ -27,13 +27,13 @@ model at a particular position and scale is the maximum over
components, of the score of that component model at the given
location.
In OpenCV there are a C implementation of Latent SVM and a C++ wrapper of it.
The C version is the structure :ocv:struct:`CvObjectDetection` and a set of functions
working with this structure (see :ocv:func:`cvLoadLatentSvmDetector`,
:ocv:func:`cvReleaseLatentSvmDetector`, :ocv:func:`cvLatentSvmDetectObjects`).
The C++ version is the class :ocv:class:`LatentSvmDetector`, which has slightly different
functionality in contrast with the C version: it supports loading and detection
of several models.
There are two examples of Latent SVM usage: ``samples/c/latentsvmdetect.cpp``
and ``samples/cpp/latentsvm_multidetect.cpp``.
@@ -45,97 +45,97 @@ CvLSVMFilterPosition
--------------------
.. ocv:struct:: CvLSVMFilterPosition
The structure describes the position of a filter in the feature pyramid.
.. ocv:member:: unsigned int l
level in the feature pyramid
.. ocv:member:: unsigned int x
x-coordinate in level l
.. ocv:member:: unsigned int y
y-coordinate in level l
CvLSVMFilterObject
------------------
.. ocv:struct:: CvLSVMFilterObject
Description of a filter that corresponds to a part of the object.
.. ocv:member:: CvLSVMFilterPosition V
ideal (penalty = 0) position of the part filter relative to the root filter position (V_i in the paper)
.. ocv:member:: float fineFunction[4]
vector that describes the penalty function (d_i in the paper): pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2
.. ocv:member:: int sizeX
.. ocv:member:: int sizeY
rectangular map (sizeX x sizeY); every cell stores a feature vector (dimension = p)
.. ocv:member:: int numFeatures
number of features
.. ocv:member:: float *H
matrix of feature vectors; to get or set the feature vector of cell (i, j), use the formula H[(j * sizeX + i) * p + k], where k is the component index within the feature vector of cell (i, j)
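As a sketch of the indexing formula above (``filter`` is a hypothetical, already initialized ``CvLSVMFilterObject*``; ``i``, ``j``, ``k`` are valid indices)::

    // component k of the feature vector stored in cell (i, j)
    int p = filter->numFeatures;
    float component = filter->H[(j * filter->sizeX + i) * p + k];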
CvLatentSvmDetector
-------------------
.. ocv:struct:: CvLatentSvmDetector
The structure contains the internal representation of a trained Latent SVM detector.
.. ocv:member:: int num_filters
total number of filters (root plus part) in the model
.. ocv:member:: int num_components
number of components in the model
.. ocv:member:: int* num_part_filters
array containing the number of part filters for each component
.. ocv:member:: CvLSVMFilterObject** filters
root and part filters for all model components
.. ocv:member:: float* b
biases for all model components
.. ocv:member:: float score_threshold
confidence level threshold
CvObjectDetection
-----------------
.. ocv:struct:: CvObjectDetection
The structure contains the bounding box and confidence level for a detected object.
.. ocv:member:: CvRect rect
bounding box for a detected object
.. ocv:member:: float score
confidence level
cvLoadLatentSvmDetector
@@ -145,7 +145,7 @@ Loads trained detector from a file.
.. ocv:function:: CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename)
:param filename: Name of the file containing the description of a trained detector.
cvReleaseLatentSvmDetector
--------------------------
@@ -158,46 +158,46 @@ Release memory allocated for CvLatentSvmDetector structure.
cvLatentSvmDetectObjects
------------------------
Find rectangular regions in the given image that are likely to contain objects
and corresponding confidence levels.
.. ocv:function:: CvSeq* cvLatentSvmDetectObjects(IplImage* image, CvLatentSvmDetector* detector, CvMemStorage* storage, float overlap_threshold, int numThreads)
.. ocv:function:: CvSeq* cvLatentSvmDetectObjects( IplImage* image, CvLatentSvmDetector* detector, CvMemStorage* storage, float overlap_threshold=0.5f, int numThreads=-1 )
:param image: Image to detect objects in
:param detector: LatentSVM detector in internal representation
:param storage: Memory storage to store the resultant sequence of the object candidate rectangles
:param overlap_threshold: Threshold for the non-maximum suppression algorithm
:param numThreads: Number of threads used in the parallel version of the algorithm
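A minimal usage sketch of the C API (file names are hypothetical; see also ``samples/c/latentsvmdetect.cpp``)::

    IplImage* image = cvLoadImage("cars.jpg");
    CvLatentSvmDetector* detector = cvLoadLatentSvmDetector("car.xml");
    CvMemStorage* storage = cvCreateMemStorage(0);

    CvSeq* detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, -1);
    for (int i = 0; i < detections->total; i++)
    {
        // each element is a CvObjectDetection (bounding box + score)
        CvObjectDetection det = *(CvObjectDetection*)cvGetSeqElem(detections, i);
        cvRectangle(image, cvPoint(det.rect.x, det.rect.y),
                    cvPoint(det.rect.x + det.rect.width, det.rect.y + det.rect.height),
                    CV_RGB(255, 0, 0), 2);
    }

    cvReleaseMemStorage(&storage);
    cvReleaseLatentSvmDetector(&detector);
    cvReleaseImage(&image);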
.. highlight:: cpp
LatentSvmDetector
-----------------
.. ocv:class:: LatentSvmDetector
This is a C++ wrapper class for Latent SVM. It contains the internal representation of several
trained Latent SVM detectors (models) and a set of methods to load the detectors and detect objects
using them.
LatentSvmDetector::ObjectDetection
----------------------------------
.. ocv:class:: LatentSvmDetector::ObjectDetection
.. ocv:struct:: LatentSvmDetector::ObjectDetection
The structure contains the detection information.
.. ocv:member:: Rect rect
bounding box for a detected object
.. ocv:member:: float score
confidence level
.. ocv:member:: int classID
class (model or detector) ID that detected the object
LatentSvmDetector::LatentSvmDetector
------------------------------------
Two types of constructors.
@@ -208,8 +208,8 @@ Two types of constructors.
:param filenames: A set of filenames storing the trained detectors (models). Each file contains one model. See examples of such files here: /opencv_extra/testdata/cv/latentsvmdetector/models_VOC2007/.
:param classNames: A set of trained model names. If it is empty, the name of each model is constructed from the name of the file containing the model, e.g. the model stored in "/home/user/cat.xml" will get the name "cat".
LatentSvmDetector::~LatentSvmDetector
@@ -228,10 +228,10 @@ LatentSvmDetector::load
-----------------------
Load the trained models from the given ``.xml`` files and return ``true`` if at least one model was loaded.
.. ocv:function:: bool LatentSvmDetector::load(const vector<string>& filenames, const vector<string>& classNames)
.. ocv:function:: bool LatentSvmDetector::load( const vector<string>& filenames, const vector<string>& classNames=vector<string>() )
:param filenames: A set of filenames storing the trained detectors (models). Each file contains one model. See examples of such files here: /opencv_extra/testdata/cv/latentsvmdetector/models_VOC2007/.
:param classNames: A set of trained model names. If it is empty, the name of each model is constructed from the name of the file containing the model, e.g. the model stored in "/home/user/cat.xml" will get the name "cat".
LatentSvmDetector::detect
@@ -239,13 +239,13 @@ LatentSvmDetector::detect
Find rectangular regions in the given image that are likely to contain objects of loaded classes (models)
and corresponding confidence levels.
.. ocv:function:: void LatentSvmDetector::detect( const Mat& image, vector<ObjectDetection>& objectDetections, float overlapThreshold=0.5, int numThreads=-1 )
.. ocv:function:: void LatentSvmDetector::detect( const Mat& image, vector<ObjectDetection>& objectDetections, float overlapThreshold=0.5f, int numThreads=-1 )
:param image: An image.
:param objectDetections: The detections: rectangles, scores, and class IDs.
:param overlapThreshold: Threshold for the non-maximum suppression algorithm.
:param numThreads: Number of threads used in the parallel version of the algorithm.
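For example, a minimal sketch (model and image file names are hypothetical)::

    vector<string> filenames;
    filenames.push_back("car.xml");
    filenames.push_back("person.xml");

    LatentSvmDetector detector(filenames);
    Mat image = imread("street.jpg");

    vector<LatentSvmDetector::ObjectDetection> detections;
    detector.detect(image, detections, 0.5f, -1);

    for (size_t i = 0; i < detections.size(); i++)
    {
        // classID indexes the names returned by getClassNames()
        rectangle(image, detections[i].rect, Scalar(0, 255, 0), 2);
    }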
LatentSvmDetector::getClassNames
--------------------------------
Return the class (model) names that were passed to the constructor or the ``load`` method, or extracted from the model filenames in those methods.
@@ -256,9 +256,9 @@ LatentSvmDetector::getClassCount
--------------------------------
Return the number of loaded models (classes).
.. ocv:function:: size_t getClassCount() const
.. ocv:function:: size_t LatentSvmDetector::getClassCount() const
.. [Felzenszwalb2010] Felzenszwalb, P. F. and Girshick, R. B. and McAllester, D. and Ramanan, D. *Object Detection with Discriminatively Trained Part Based Models*. PAMI, vol. 32, no. 9, pp. 1627-1645, September 2010
@@ -137,7 +137,7 @@ CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );
CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
double scale_factor CV_DEFAULT(1.1),
int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)));
@@ -160,7 +160,7 @@ CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,
// Structure describes the position of the filter in the feature pyramid
// l - level in the feature pyramid
// (x, y) - coordinate in level l
typedef struct
typedef struct CvLSVMFilterPosition
{
int x;
int y;
@@ -174,14 +174,14 @@ typedef struct
// penaltyFunction - vector describes penalty function (d_i in the paper)
// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2
// FILTER DESCRIPTION
// Rectangular map (sizeX x sizeY),
// every cell stores feature vector (dimension = p)
// H - matrix of feature vectors
// to set and get feature vectors (i,j)
// used formula H[(j * sizeX + i) * p + k], where
// k - component of feature vector in cell (i, j)
// END OF FILTER DESCRIPTION
typedef struct{
typedef struct CvLSVMFilterObject{
CvLSVMFilterPosition V;
float fineFunction[4];
int sizeX;
@@ -192,7 +192,7 @@ typedef struct{
// data type: STRUCT CvLatentSvmDetector
// structure contains internal representation of trained Latent SVM detector
// num_filters - total number of filters (root plus part) in model
// num_components - number of components in model
// num_part_filters - array containing number of part filters for each component
// filters - root and part filters for all model components
@@ -210,9 +210,9 @@ typedef struct CvLatentSvmDetector
CvLatentSvmDetector;
// data type: STRUCT CvObjectDetection
// structure contains the bounding box and confidence level for detected object
// rect - bounding box for a detected object
// score - confidence level
typedef struct CvObjectDetection
{
CvRect rect;
@@ -247,28 +247,28 @@ CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename);
CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
/*
// find rectangular regions in the given image that are likely
// to contain objects and corresponding confidence levels
//
// API
// CvSeq* cvLatentSvmDetectObjects(const IplImage* image,
// CvLatentSvmDetector* detector,
// CvMemStorage* storage,
// float overlap_threshold = 0.5f,
// int numThreads = -1);
// INPUT
// image - image to detect objects in
// detector - Latent SVM detector in internal representation
// storage - memory storage to store the resultant sequence
// of the object candidate rectangles
// overlap_threshold - threshold for the non-maximum suppression algorithm
= 0.5f [here will be the reference to original paper]
// OUTPUT
// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures)
*/
CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image,
CvLatentSvmDetector* detector,
CvMemStorage* storage,
float overlap_threshold CV_DEFAULT(0.5f),
int numThreads CV_DEFAULT(-1));
@@ -285,7 +285,7 @@ CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image,
namespace cv
{
///////////////////////////// Object Detection ////////////////////////////
/*
@@ -330,23 +330,23 @@ private:
CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT vector<Rect>& rectList, int groupThreshold, double eps=0.2);
CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT vector<Rect>& rectList, CV_OUT vector<int>& weights, int groupThreshold, double eps=0.2);
CV_EXPORTS void groupRectangles( vector<Rect>& rectList, int groupThreshold, double eps, vector<int>* weights, vector<double>* levelWeights );
CV_EXPORTS void groupRectangles(vector<Rect>& rectList, vector<int>& rejectLevels,
vector<double>& levelWeights, int groupThreshold, double eps=0.2);
CV_EXPORTS void groupRectangles_meanshift(vector<Rect>& rectList, vector<double>& foundWeights, vector<double>& foundScales,
double detectThreshold = 0.0, Size winDetSize = Size(64, 128));
class CV_EXPORTS FeatureEvaluator
{
public:
enum { HAAR = 0, LBP = 1, HOG = 2 };
virtual ~FeatureEvaluator();
virtual bool read(const FileNode& node);
virtual Ptr<FeatureEvaluator> clone() const;
virtual int getFeatureType() const;
virtual bool setImage(const Mat&, Size origWinSize);
virtual bool setImage(const Mat& img, Size origWinSize);
virtual bool setWindow(Point p);
virtual double calcOrd(int featureIdx) const;
@@ -371,7 +371,7 @@ public:
CV_WRAP CascadeClassifier();
CV_WRAP CascadeClassifier( const string& filename );
virtual ~CascadeClassifier();
CV_WRAP virtual bool empty() const;
CV_WRAP bool load( const string& filename );
virtual bool read( const FileNode& node );
@@ -425,8 +425,8 @@ protected:
template<class FEval>
friend int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
bool setImage( Ptr<FeatureEvaluator>&, const Mat& );
virtual int runAt( Ptr<FeatureEvaluator>&, Point, double& weight );
bool setImage( Ptr<FeatureEvaluator>& feval, const Mat& image);
virtual int runAt( Ptr<FeatureEvaluator>& feval, Point pt, double& weight );
class Data
{
@@ -475,7 +475,7 @@ public:
class CV_EXPORTS MaskGenerator
{
public:
virtual ~MaskGenerator() {}
virtual cv::Mat generateMask(const cv::Mat& src)=0;
virtual void initializeMask(const cv::Mat& /*src*/) {};
};
@@ -488,7 +488,7 @@ protected:
Ptr<MaskGenerator> maskGenerator;
};
//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
struct CV_EXPORTS_W HOGDescriptor
@@ -496,13 +496,13 @@ struct CV_EXPORTS_W HOGDescriptor
public:
enum { L2Hys=0 };
enum { DEFAULT_NLEVELS=64 };
CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),
nlevels(HOGDescriptor::DEFAULT_NLEVELS)
{}
CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,
Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,
int _histogramNormType=HOGDescriptor::L2Hys,
@@ -513,28 +513,28 @@ public:
histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),
gammaCorrection(_gammaCorrection), nlevels(_nlevels)
{}
CV_WRAP HOGDescriptor(const String& filename)
{
load(filename);
}
HOGDescriptor(const HOGDescriptor& d)
{
d.copyTo(*this);
}
virtual ~HOGDescriptor() {}
CV_WRAP size_t getDescriptorSize() const;
CV_WRAP bool checkDetectorSize() const;
CV_WRAP double getWinSigma() const;
CV_WRAP virtual void setSVMDetector(InputArray _svmdetector);
virtual bool read(FileNode& fn);
virtual void write(FileStorage& fs, const String& objname) const;
CV_WRAP virtual bool load(const String& filename, const String& objname=String());
CV_WRAP virtual void save(const String& filename, const String& objname=String()) const;
virtual void copyTo(HOGDescriptor& c) const;
@@ -544,9 +544,9 @@ public:
Size winStride=Size(), Size padding=Size(),
const vector<Point>& locations=vector<Point>()) const;
//with found weights output
CV_WRAP virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
CV_OUT vector<double>& weights,
double hitThreshold=0, Size winStride=Size(),
Size padding=Size(),
const vector<Point>& searchLocations=vector<Point>()) const;
//without found weights output
@@ -555,22 +555,22 @@ public:
Size padding=Size(),
const vector<Point>& searchLocations=vector<Point>()) const;
//with result weights output
CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
CV_OUT vector<double>& foundWeights, double hitThreshold=0,
Size winStride=Size(), Size padding=Size(), double scale=1.05,
double finalThreshold=2.0,bool useMeanshiftGrouping = false) const;
//without found weights output
virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
double hitThreshold=0, Size winStride=Size(),
Size padding=Size(), double scale=1.05,
double finalThreshold=2.0, bool useMeanshiftGrouping = false) const;
CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
Size paddingTL=Size(), Size paddingBR=Size()) const;
CV_WRAP static vector<float> getDefaultPeopleDetector();
CV_WRAP static vector<float> getDaimlerPeopleDetector();
CV_PROP Size winSize;
CV_PROP Size blockSize;
CV_PROP Size blockStride;