From bb704fc3af3bbaf386cd315190b1a4813a926f3e Mon Sep 17 00:00:00 2001 From: "dmitriy.anisimov" Date: Fri, 29 Aug 2014 12:36:34 +0400 Subject: [PATCH] removed generated document --- .../datasetstools/doc/html/_static/basic.css | 537 ------------------ .../doc/html/_static/default.css | 256 --------- .../datasetstools/doc/html/datasetstools.html | 431 -------------- 3 files changed, 1224 deletions(-) delete mode 100644 modules/datasetstools/doc/html/_static/basic.css delete mode 100644 modules/datasetstools/doc/html/_static/default.css delete mode 100644 modules/datasetstools/doc/html/datasetstools.html diff --git a/modules/datasetstools/doc/html/_static/basic.css b/modules/datasetstools/doc/html/_static/basic.css deleted file mode 100644 index 967e36ce0..000000000 --- a/modules/datasetstools/doc/html/_static/basic.css +++ /dev/null @@ -1,537 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - width: 30px; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable dl, table.indextable dd { - margin-top: 0; - margin-bottom: 0; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: 
#f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- general body styles --------------------------------------------------- */ - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.field-list ul { - padding-left: 1em; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.field-list td, table.field-list th { - border: 0 !important; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, .highlighted { - background-color: 
#fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.optional { - font-size: 1.3em; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -tt.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -tt.descclassname { - background-color: transparent; -} - -tt.xref, a tt { - background-color: transparent; - font-weight: bold; -} - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/modules/datasetstools/doc/html/_static/default.css b/modules/datasetstools/doc/html/_static/default.css deleted file mode 100644 index 5f1399abd..000000000 --- a/modules/datasetstools/doc/html/_static/default.css +++ /dev/null @@ -1,256 +0,0 @@ -/* - * default.css_t - * ~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- default theme. - * - * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: sans-serif; - font-size: 100%; - background-color: #11303d; - color: #000; - margin: 0; - padding: 0; -} - -div.document { - background-color: #1c4e63; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 230px; -} - -div.body { - background-color: #ffffff; - color: #000000; - padding: 0 20px 30px 20px; -} - -div.footer { - color: #ffffff; - width: 100%; - padding: 9px 0 9px 0; - text-align: center; - font-size: 75%; -} - -div.footer a { - color: #ffffff; - text-decoration: underline; -} - -div.related { - background-color: #133f52; - line-height: 30px; - color: #ffffff; -} - -div.related a { - color: #ffffff; -} - -div.sphinxsidebar { -} - -div.sphinxsidebar h3 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.4em; - font-weight: normal; - margin: 0; - padding: 0; -} - -div.sphinxsidebar h3 a { - color: #ffffff; -} - -div.sphinxsidebar h4 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.3em; - font-weight: normal; - margin: 5px 0 0 0; - padding: 0; -} - -div.sphinxsidebar p { - color: #ffffff; -} - -div.sphinxsidebar p.topless { - margin: 5px 10px 10px 10px; -} - -div.sphinxsidebar ul { - margin: 10px; - padding: 0; - color: #ffffff; -} - -div.sphinxsidebar a { - color: #98dbcc; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - - - -/* -- hyperlink styles ------------------------------------------------------ */ - -a { - color: #355f7c; - text-decoration: none; -} - -a:visited { - color: #355f7c; - text-decoration: none; -} - -a:hover { - text-decoration: underline; -} - - - -/* -- body styles ----------------------------------------------------------- */ - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: 'Trebuchet MS', sans-serif; - background-color: #f2f2f2; - font-weight: normal; - color: #20435c; - border-bottom: 1px solid #ccc; - margin: 20px -20px 10px -20px; - padding: 3px 0 3px 10px; -} - -div.body h1 { margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 160%; } -div.body h3 { font-size: 140%; } -div.body h4 { font-size: 120%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #c60f0f; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - background-color: #c60f0f; - color: white; -} - -div.body p, div.body dd, div.body li { - text-align: justify; - line-height: 130%; -} - -div.admonition p.admonition-title + p { - display: inline; -} - -div.admonition p { - margin-bottom: 5px; -} - -div.admonition pre { - margin-bottom: 5px; -} - -div.admonition ul, div.admonition ol { - margin-bottom: 5px; -} - -div.note { - background-color: #eee; - border: 1px solid #ccc; -} - -div.seealso { - background-color: #ffc; - border: 1px solid #ff6; -} - -div.topic { - background-color: #eee; -} - -div.warning { - background-color: #ffe4e4; - border: 1px solid #f66; -} - -p.admonition-title { - display: inline; -} - -p.admonition-title:after { - content: ":"; -} - -pre { - padding: 5px; - background-color: #eeffcc; - color: #333333; - line-height: 120%; - border: 1px solid #ac9; - border-left: none; - border-right: none; -} - -tt { - background-color: #ecf0f3; - padding: 0 1px 0 1px; - font-size: 0.95em; -} - -th { - background-color: #ede; -} - 
-.warning tt { - background: #efc2c2; -} - -.note tt { - background: #d6d6d6; -} - -.viewcode-back { - font-family: sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} \ No newline at end of file diff --git a/modules/datasetstools/doc/html/datasetstools.html b/modules/datasetstools/doc/html/datasetstools.html deleted file mode 100644 index 9f137ad2e..000000000 --- a/modules/datasetstools/doc/html/datasetstools.html +++ /dev/null @@ -1,431 +0,0 @@ - - - - - - - - datasetstools. Tools for working with different datasets. — OpenCV datasetstools 3.0 documentation - - - - - - - - - - - - - -
datasetstools. Tools for working with different datasets.

The datasetstools module includes classes for working with different datasets.

The first version of this module was implemented for the Fall 2014 OpenCV Challenge.
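
All of the tools below share one calling pattern: construct the class for the chosen dataset, then point its loader at the unpacked data, which every example tool receives through the -p parameter. Below is a minimal sketch of that pattern, shown for ar_hmdb; the header location, the cv::datasetstools namespace and the create()/load() pair are assumptions (borrowed from the cv::datasets module this code later became), not interfaces confirmed by this patch:

    // Minimal sketch of the loading pattern shared by the tools below.
    // Header path, namespace and the create()/load() pair are assumed.
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/ar_hmdb.hpp> // assumed header location

    #include <cstdio>
    #include <string>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        // Every example tool takes the unpacked dataset location as -p=<path>.
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<AR_hmdb> dataset = AR_hmdb::create(); // assumed factory
        dataset->load(path);                      // parse the files under path

        printf("dataset loaded from %s\n", path.c_str());
        return 0;
    }

Swapping AR_hmdb for any of the classes documented below should be the only change needed; the sections differ mainly in where the data comes from and how it has to be unpacked.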

Action Recognition

ar_hmdb

Implements loading of the dataset:

“HMDB: A Large Human Motion Database”: http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/

Usage:

1. From the link above, download the dataset files: hmdb51_org.rar & test_train_splits.rar.
2. Unpack them.
3. To load the data, run (see the sketch below): ./opencv/build/bin/example_datasetstools_ar_hmdb -p=/home/user/path_to_unpacked_folders/
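
HMDB is distributed with three predefined train/test splits over its 51 action classes, so the loaded data is naturally consumed per split. A minimal sketch, assuming the getNumSplits()/getTrain()/getTest() accessors of the later cv::datasets interface:

    // Sketch: enumerate the HMDB train/test splits (accessor names assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/ar_hmdb.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<AR_hmdb> dataset = AR_hmdb::create();
        dataset->load(path);

        for (int split = 0; split < dataset->getNumSplits(); ++split)
        {
            std::vector< Ptr<Object> > &train = dataset->getTrain(split);
            std::vector< Ptr<Object> > &test  = dataset->getTest(split);
            printf("split %d: %u train / %u test videos\n", split,
                   (unsigned)train.size(), (unsigned)test.size());
        }
        return 0;
    }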

ar_sports

Implements loading of the dataset:

“Sports-1M Dataset”: http://cs.stanford.edu/people/karpathy/deepvideo/

Usage:

1. From the link above, download the dataset files (git clone https://code.google.com/p/sports-1m-dataset/).
2. To load the data, run: ./opencv/build/bin/example_datasetstools_ar_sports -p=/home/user/path_to_downloaded_folders/

Face Recognition

fr_lfw

Implements loading of the dataset:

“Labeled Faces in the Wild-a”: http://www.openu.ac.il/home/hassner/data/lfwa/

Usage:

1. From the link above, download the dataset file: lfwa.tar.gz.
2. Unpack it.
3. To load the data, run (see the sketch below): ./opencv/build/bin/example_datasetstools_fr_lfw -p=/home/user/path_to_unpacked_folder/lfw2/
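
LFW is commonly evaluated on pairs of face images labelled as showing the same or a different person. A minimal sketch of tallying those pairs; FR_lfwObj and its image1/image2/same members are hypothetical names modelled on the later cv::datasets module:

    // Sketch: count matched vs. mismatched LFW face pairs (fields assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/fr_lfw.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<FR_lfw> dataset = FR_lfw::create();
        dataset->load(path);

        unsigned samePairs = 0, diffPairs = 0;
        std::vector< Ptr<Object> > &pairs = dataset->getTrain();
        for (size_t i = 0; i < pairs.size(); ++i)
        {
            // image1, image2 and same are hypothetical FR_lfwObj members.
            FR_lfwObj *pair = static_cast<FR_lfwObj *>(pairs[i].get());
            if (pair->same) ++samePairs; else ++diffPairs;
        }
        printf("%u same-person pairs, %u different-person pairs\n",
               samePairs, diffPairs);
        return 0;
    }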

Gesture Recognition

gr_chalearn

Implements loading of the dataset:

“ChaLearn Looking at People”: http://gesture.chalearn.org/

Usage:

1. Follow the instructions on the site above to download the files for the “Track 3: Gesture Recognition” dataset: Train1.zip-Train5.zip and Validation1.zip-Validation3.zip. (Register on www.codalab.org and accept the terms and conditions of the competition: https://www.codalab.org/competitions/991#learn_the_details. Three mirrors host the dataset files; at the time of writing, only the “Universitat Oberta de Catalunya” mirror worked.)
2. Unpack the train archives Train1.zip-Train5.zip into one folder (loading of the validation files is not implemented yet).
3. To load the data, run (see the sketch below): ./opencv/build/bin/example_datasetstools_gr_chalearn -p=/home/user/path_to_unpacked_folder/
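
Each ChaLearn sample is a short multi-modal recording (color, depth and user-mask videos) annotated with the gestures it contains, which is what the loader has to surface. A minimal sketch of inspecting a few loaded samples; GR_chalearnObj and its name/numFrames/fps/groundTruths members are hypothetical names:

    // Sketch: inspect loaded ChaLearn samples (member names assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/gr_chalearn.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<GR_chalearn> dataset = GR_chalearn::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &train = dataset->getTrain();
        for (size_t i = 0; i < train.size() && i < 3; ++i)
        {
            // name, numFrames, fps and groundTruths are hypothetical members.
            GR_chalearnObj *s = static_cast<GR_chalearnObj *>(train[i].get());
            printf("%s: %d frames at %d fps, %u annotated gestures\n",
                   s->name.c_str(), s->numFrames, s->fps,
                   (unsigned)s->groundTruths.size());
        }
        return 0;
    }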

gr_skig

Implements loading of the dataset:

“Sheffield Kinect Gesture Dataset”: http://lshao.staff.shef.ac.uk/data/SheffieldKinectGesture.htm

Usage:

1. From the link above, download the dataset files: subject1_dep.7z-subject6_dep.7z and subject1_rgb.7z-subject6_rgb.7z.
2. Unpack them.
3. To load the data, run: ./opencv/build/bin/example_datasetstools_gr_skig -p=/home/user/path_to_unpacked_folders/

Human Pose Estimation

hpe_parse

Implements loading of the dataset:

“PARSE Dataset”: http://www.ics.uci.edu/~dramanan/papers/parse/

Usage:

1. From the link above, download the dataset file: people.zip.
2. Unpack it.
3. To load the data, run: ./opencv/build/bin/example_datasetstools_hpe_parse -p=/home/user/path_to_unpacked_folder/people_all/

Image Registration

ir_affine

Implements loading of the dataset:

“Affine Covariant Regions Datasets”: http://www.robots.ox.ac.uk/~vgg/data/data-aff.html

Usage:

1. From the link above, download the dataset archives: bark.tar.gz, bikes.tar.gz, boat.tar.gz, graf.tar.gz, leuven.tar.gz, trees.tar.gz, ubc.tar.gz and wall.tar.gz.
2. Unpack them.
3. To load the data, for example for “bark”, run (see the sketch below): ./opencv/build/bin/example_datasetstools_ir_affine -p=/home/user/path_to_unpacked_folder/bark/
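
Each scene in this dataset pairs six images with the homographies H1to2p..H1to6p that map the first image onto the others, so a loaded entry should expose an image plus its transform. A minimal sketch; IR_affineObj with an imageName string and a 3x3 mat member is an assumed layout:

    // Sketch: print each image with its homography (member names assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/ir_affine.hpp> // assumed header

    #include <iostream>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<IR_affine> dataset = IR_affine::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &scene = dataset->getTrain();
        for (size_t i = 0; i < scene.size(); ++i)
        {
            // imageName and the 3x3 transform mat are assumed members.
            IR_affineObj *item = static_cast<IR_affineObj *>(scene[i].get());
            std::cout << item->imageName << "\n" << Mat(item->mat) << "\n";
        }
        return 0;
    }

Comparing the printed matrices against the H1toNp files shipped with each scene is a quick sanity check that the archive was unpacked into the expected layout.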

ir_robot

Implements loading of the dataset:

“Robot Data Set”: http://roboimagedata.compute.dtu.dk/?page_id=24

Usage:

1. From the link above, download the files for the “Point Feature Data Set – 2010”: SET001_6.tar.gz-SET055_60.tar.gz. (Two variants are available: full-resolution images (1200×1600), ~500 GB, and half-size images (600×800), ~115 GB.)
2. Unpack them into one folder.
3. To load the data, run: ./opencv/build/bin/example_datasetstools_ir_robot -p=/home/user/path_to_unpacked_folder/

Image Segmentation

is_bsds

Implements loading of the dataset:

“The Berkeley Segmentation Dataset and Benchmark”: https://www.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/

Usage:

1. From the link above, download the dataset files: BSDS300-human.tgz & BSDS300-images.tgz.
2. Unpack them.
3. To load the data, run: ./opencv/build/bin/example_datasetstools_is_bsds -p=/home/user/path_to_unpacked_folder/BSDS300/

is_weizmann

Implements loading of the dataset:

“Weizmann Segmentation Evaluation Database”: http://www.wisdom.weizmann.ac.il/~vision/Seg_Evaluation_DB/

Usage:

1. From the link above, download the dataset files: Weizmann_Seg_DB_1obj.ZIP & Weizmann_Seg_DB_2obj.ZIP.
2. Unpack them.
3. To load the data, for example for the single-object dataset, run: ./opencv/build/bin/example_datasetstools_is_weizmann -p=/home/user/path_to_unpacked_folder/1obj/

Multiview Stereo Matching

msm_epfl

Implements loading of the dataset:

“EPFL Multi-View Stereo”: http://cvlabwww.epfl.ch/~strecha/multiview/denseMVS.html

Usage:

1. From the link above, download, for each object (castle_dense, castle_dense_large, castle_entry, fountain, herzjesu_dense, herzjesu_dense_large), its bounding, cameras, images and p archives (*.tar.gz).
2. Unpack them into a separate folder for each object. For example, for “fountain”, unpack into the folder fountain/: fountain_dense_bounding.tar.gz -> bounding/, fountain_dense_cameras.tar.gz -> camera/, fountain_dense_images.tar.gz -> png/, fountain_dense_p.tar.gz -> P/.
3. To load the data, for example for “fountain”, run (see the sketch below): ./opencv/build/bin/example_datasetstools_msm_epfl -p=/home/user/path_to_unpacked_folder/fountain/
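
Each unpacked object directory carries, per view, an image plus its calibration: the camera parameters from cameras/ and the 3x4 projection matrix from P/. A minimal sketch of reading one back; MSM_epflObj and its imageName/p members are hypothetical names:

    // Sketch: print each view's projection matrix (member names assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/msm_epfl.hpp> // assumed header

    #include <iostream>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<MSM_epfl> dataset = MSM_epfl::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &views = dataset->getTrain();
        for (size_t i = 0; i < views.size() && i < 2; ++i)
        {
            // imageName and the 3x4 projection matrix p are hypothetical members.
            MSM_epflObj *view = static_cast<MSM_epflObj *>(views[i].get());
            std::cout << view->imageName << "\nP =\n" << Mat(view->p) << "\n";
        }
        return 0;
    }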

msm_middlebury

Implements loading of the dataset:

“Stereo – Middlebury Computer Vision”: http://vision.middlebury.edu/mview/

Usage:

1. From the link above, download the dataset archives: dino.zip, dinoRing.zip, dinoSparseRing.zip, temple.zip, templeRing.zip and templeSparseRing.zip.
2. Unpack them.
3. To load the data, for example for the “temple” dataset, run: ./opencv/build/bin/example_datasetstools_msm_middlebury -p=/home/user/path_to_unpacked_folder/temple/

Object Recognition

or_imagenet

Implements loading of the dataset:

“ImageNet”: http://www.image-net.org/

Currently, loading the full list of image URLs is implemented. Loading the ILSVRC challenge subset is planned.

Usage:

1. From the link above, download the dataset file: imagenet_fall11_urls.tgz.
2. Unpack it.
3. To load the data, run (see the sketch below): ./opencv/build/bin/example_datasetstools_or_imagenet -p=/home/user/path_to_unpacked_file/
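
The fall11 URL list pairs an image identifier with a download URL on each line, and the loader only has to surface those records. A minimal sketch of peeking at the first few; OR_imagenetObj and its id/imageUrl members are hypothetical names:

    // Sketch: peek at the first loaded URL records (member names assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/or_imagenet.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<OR_imagenet> dataset = OR_imagenet::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &records = dataset->getTrain();
        printf("%u image records loaded\n", (unsigned)records.size());
        for (size_t i = 0; i < records.size() && i < 5; ++i)
        {
            // id and imageUrl are hypothetical OR_imagenetObj members.
            OR_imagenetObj *rec = static_cast<OR_imagenetObj *>(records[i].get());
            printf("%s -> %s\n", rec->id.c_str(), rec->imageUrl.c_str());
        }
        return 0;
    }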

or_sun

Implements loading of the dataset:

“SUN Database”: http://sun.cs.princeton.edu/

Currently, loading of the “Scene Recognition Benchmark. SUN397” is implemented. Loading of the “Object Detection Benchmark. SUN2012” is planned as well.

Usage:

1. From the link above, download the dataset file: SUN397.tar.
2. Unpack it.
3. To load the data, run: ./opencv/build/bin/example_datasetstools_or_sun -p=/home/user/path_to_unpacked_folder/SUN397/

SLAM

slam_kitti

Implements loading of the dataset:

“KITTI Vision Benchmark”: http://www.cvlibs.net/datasets/kitti/eval_odometry.php

Usage:

1. From the link above, download the “Odometry” dataset archives: data_odometry_gray.zip, data_odometry_color.zip, data_odometry_velodyne.zip, data_odometry_poses.zip and data_odometry_calib.zip.
2. Unpack data_odometry_poses.zip; it creates the folder dataset/poses/. Then unpack data_odometry_gray.zip, data_odometry_color.zip and data_odometry_velodyne.zip, which fill the folder dataset/sequences/ with the folders 00/..21/, each containing image_0/, image_1/, image_2/, image_3/, velodyne/ and the files calib.txt & times.txt. Finally, unpack data_odometry_calib.zip, which replaces those two files.
3. To load the data, run (see the sketch below): ./opencv/build/bin/example_datasetstools_slam_kitti -p=/home/user/path_to_unpacked_folder/dataset/
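
After unpacking, each of the 22 sequences combines up to four camera streams, velodyne scans, per-frame timestamps and, for sequences 00-10, ground-truth poses. A minimal sketch of summarising the loaded sequences; SLAM_kittiObj and its name/times/posesArray members are hypothetical names:

    // Sketch: summarise loaded KITTI odometry sequences (members assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/slam_kitti.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<SLAM_kitti> dataset = SLAM_kitti::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &sequences = dataset->getTrain();
        for (size_t i = 0; i < sequences.size(); ++i)
        {
            // name, times and posesArray are hypothetical members.
            SLAM_kittiObj *seq = static_cast<SLAM_kittiObj *>(sequences[i].get());
            printf("sequence %s: %u timestamps, %u ground-truth poses\n",
                   seq->name.c_str(), (unsigned)seq->times.size(),
                   (unsigned)seq->posesArray.size());
        }
        return 0;
    }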

slam_tumindoor

Implements loading of the dataset:

“TUMindoor Dataset”: http://www.navvis.lmt.ei.tum.de/dataset/

Usage:

1. From the link above, download the dslr.tar.bz2, info.tar.bz2, ladybug.tar.bz2 and pointcloud.tar.bz2 archives for each dataset: 11-11-28 (1st floor), 11-12-13 (1st floor N1), 11-12-17a (4th floor), 11-12-17b (3rd floor), 11-12-17c (Ground I), 11-12-18a (Ground II) and 11-12-18b (2nd floor).
2. Unpack them into a separate folder for each dataset: dslr.tar.bz2 -> dslr/, info.tar.bz2 -> info/, ladybug.tar.bz2 -> ladybug/, pointcloud.tar.bz2 -> pointcloud/.
3. To load each dataset, run: ./opencv/build/bin/example_datasetstools_slam_tumindoor -p=/home/user/path_to_unpacked_folders/

Text Recognition

tr_chars

Implements loading of the dataset:

“The Chars74K Dataset”: http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/

Usage:

1. From the link above, download the dataset archives: EnglishFnt.tgz, EnglishHnd.tgz, EnglishImg.tgz, KannadaHnd.tgz, KannadaImg.tgz and ListsTXT.tgz.
2. Unpack them.
3. Move the *.m files from the ListsTXT/ folder into the matching dataset folder; for example, list_English_Img.m goes into English/ for EnglishImg.tgz.
4. To load the data, for example for “EnglishImg”, run (see the sketch below): ./opencv/build/bin/example_datasetstools_tr_chars -p=/home/user/path_to_unpacked_folder/English/
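
Chars74K samples boil down to an image (or glyph) and its character class, split into train and test sets by the *.m list files moved in step 3. A minimal sketch; TR_charsObj and its imgName/label members are hypothetical names:

    // Sketch: inspect loaded character samples (member names assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/tr_chars.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<TR_chars> dataset = TR_chars::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &train = dataset->getTrain();
        std::vector< Ptr<Object> > &test  = dataset->getTest();
        printf("%u train / %u test samples\n",
               (unsigned)train.size(), (unsigned)test.size());
        if (!train.empty())
        {
            // imgName and label are hypothetical TR_charsObj members.
            TR_charsObj *s = static_cast<TR_charsObj *>(train[0].get());
            printf("first sample: %s -> class %d\n", s->imgName.c_str(), s->label);
        }
        return 0;
    }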

tr_svt

Implements loading of the dataset:

“The Street View Text Dataset”: http://vision.ucsd.edu/~kai/svt/

Usage:

1. From the link above, download the dataset file: svt.zip.
2. Unpack it.
3. To load the data, run (see the sketch below): ./opencv/build/bin/example_datasetstools_tr_svt -p=/home/user/path_to_unpacked_folder/svt/svt1/
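
SVT annotates each street-view image with a per-image lexicon and the words actually visible in it, so a loaded entry should carry the image name, its lexicon and the annotated words. A minimal sketch; TR_svtObj with fileName/lex/tags members, and tags carrying a word value plus its x/y position, are hypothetical layouts:

    // Sketch: dump lexicon sizes and annotated words (members assumed).
    #include <opencv2/core.hpp>
    #include <opencv2/datasetstools/tr_svt.hpp> // assumed header

    #include <cstdio>
    #include <string>
    #include <vector>

    using namespace cv;
    using namespace cv::datasetstools; // assumed namespace

    int main(int argc, char *argv[])
    {
        CommandLineParser parser(argc, argv, "{ path p | | path to dataset }");
        std::string path(parser.get<std::string>("path"));

        Ptr<TR_svt> dataset = TR_svt::create();
        dataset->load(path);

        std::vector< Ptr<Object> > &images = dataset->getTrain();
        for (size_t i = 0; i < images.size() && i < 2; ++i)
        {
            // fileName, lex and tags are hypothetical TR_svtObj members.
            TR_svtObj *img = static_cast<TR_svtObj *>(images[i].get());
            printf("%s: lexicon of %u words, %u annotated words\n",
                   img->fileName.c_str(), (unsigned)img->lex.size(),
                   (unsigned)img->tags.size());
            for (size_t t = 0; t < img->tags.size(); ++t)
                printf("  \"%s\" at (%d, %d)\n", img->tags[t].value.c_str(),
                       img->tags[t].x, img->tags[t].y);
        }
        return 0;
    }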