From dfb9832a25e8d1852e158bbcead941d3a5941343 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 15 Apr 2019 22:28:33 +0000
Subject: [PATCH 001/152] cmake(protobuf): ensure use of own headers

---
 3rdparty/protobuf/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/3rdparty/protobuf/CMakeLists.txt b/3rdparty/protobuf/CMakeLists.txt
index ada9891a7b..7e0ea7df70 100644
--- a/3rdparty/protobuf/CMakeLists.txt
+++ b/3rdparty/protobuf/CMakeLists.txt
@@ -139,6 +139,7 @@ append_if_exist(Protobuf_SRCS
   ${PROTOBUF_ROOT}/src/google/protobuf/wrappers.pb.cc
 )
 
+include_directories(BEFORE "${PROTOBUF_ROOT}/src")  # ensure use of own headers: https://github.com/opencv/opencv/issues/13328
 add_library(libprotobuf STATIC ${Protobuf_SRCS})
 target_include_directories(libprotobuf SYSTEM PUBLIC $<BUILD_INTERFACE:${PROTOBUF_ROOT}/src>)
 set_target_properties(libprotobuf

From da555a2c9bdce45a944ff4c85a2981b79d763724 Mon Sep 17 00:00:00 2001
From: Clement Courbet <courbet@google.com>
Date: Thu, 20 Aug 2020 14:12:33 +0200
Subject: [PATCH 002/152] Optimize OpenCV DFT by vectorizing radix-2 and radix-3.

This is useful for non-power-of-two sizes when WITH_IPP is not an option.

This shows a consistent improvement on the OpenCV benchmarks, and we
measure even larger improvements on our internal workloads.

For example, for 320x480, `32FC*`, we see a ~5% improvement, as
`320=2^6*5` and `480=2^5*3*5`, so the improved radix-3 version is used.
`64FC*` is flat as expected, since we do not specialize the functors for
`double` in this change.
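
For illustration, a minimal sketch that exercises the improved path
(hypothetical standalone example; assumes only a standard OpenCV build
with this change):

```
#include <opencv2/core.hpp>

int main()
{
    // 320 = 2^6 * 5 and 480 = 2^5 * 3 * 5, so the radix-2, radix-3 and
    // radix-5 stages all run; with SSE3 the new vectorized radix-2 and
    // radix-3 functors are selected for float input.
    cv::Mat src(480, 320, CV_32FC1), dst;
    cv::randu(src, cv::Scalar::all(0), cv::Scalar::all(1));
    cv::dft(src, dst, cv::DFT_COMPLEX_OUTPUT);
    return 0;
}
```

Each row of the benchmark below lists the time before the change, the
time after, and their ratio (values above 1.00 are speedups).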

```
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, 0, false)                                1.239  1.153     1.07
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, 0, true)                                 0.991  0.926     1.07
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_COMPLEX_OUTPUT, false)               1.367  1.281     1.07
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_COMPLEX_OUTPUT, true)                1.114  1.049     1.06
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE, false)                      1.313  1.254     1.05
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE, true)                       1.027  0.977     1.05
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false)   1.296  1.217     1.06
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)    1.039  0.963     1.08
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_ROWS, false)                         0.542  0.524     1.04
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_ROWS, true)                          0.293  0.277     1.06
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_SCALE, false)                        1.265  1.175     1.08
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_SCALE, true)                         1.004  0.942     1.07
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, 0, false)                                1.292  1.280     1.01
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, 0, true)                                 1.038  1.030     1.01
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_COMPLEX_OUTPUT, false)               1.484  1.488     1.00
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_COMPLEX_OUTPUT, true)                1.222  1.224     1.00
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE, false)                      1.380  1.355     1.02
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE, true)                       1.117  1.133     0.99
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false)   1.372  1.383     0.99
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)    1.117  1.127     0.99
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_ROWS, false)                         0.546  0.539     1.01
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_ROWS, true)                          0.293  0.299     0.98
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_SCALE, false)                        1.351  1.339     1.01
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_SCALE, true)                         1.099  1.092     1.01
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, 0, false)                                2.235  2.123     1.05
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, 0, true)                                 1.843  1.727     1.07
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_COMPLEX_OUTPUT, false)               2.189  2.109     1.04
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_COMPLEX_OUTPUT, true)                1.827  1.754     1.04
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE, false)                      2.392  2.309     1.04
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE, true)                       1.951  1.865     1.05
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false)   2.391  2.293     1.04
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)    1.954  1.882     1.04
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_ROWS, false)                         0.811  0.815     0.99
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_ROWS, true)                          0.426  0.437     0.98
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_SCALE, false)                        2.268  2.152     1.05
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_SCALE, true)                         1.893  1.788     1.06
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, 0, false)                                4.546  4.395     1.03
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, 0, true)                                 3.616  3.426     1.06
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_COMPLEX_OUTPUT, false)               4.843  4.668     1.04
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_COMPLEX_OUTPUT, true)                3.825  3.748     1.02
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE, false)                      4.720  4.525     1.04
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE, true)                       3.743  3.601     1.04
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false)   4.755  4.527     1.05
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)    3.744  3.586     1.04
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_ROWS, false)                         1.992  2.012     0.99
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_ROWS, true)                          1.048  1.048     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_SCALE, false)                        4.625  4.451     1.04
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_SCALE, true)                         3.643  3.491     1.04
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, 0, false)                                4.499  4.488     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, 0, true)                                 3.559  3.555     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_COMPLEX_OUTPUT, false)               5.155  5.165     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_COMPLEX_OUTPUT, true)                4.103  4.101     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE, false)                      5.484  5.474     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE, true)                       4.617  4.518     1.02
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false)   5.547  5.509     1.01
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)    4.553  4.554     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_ROWS, false)                         2.067  2.018     1.02
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_ROWS, true)                          1.104  1.079     1.02
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_SCALE, false)                        4.665  4.619     1.01
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_SCALE, true)                         3.698  3.681     1.00
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, 0, false)                                8.774  8.275     1.06
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, 0, true)                                 6.975  6.527     1.07
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_COMPLEX_OUTPUT, false)               8.720  8.270     1.05
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_COMPLEX_OUTPUT, true)                6.928  6.532     1.06
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE, false)                      9.272  8.862     1.05
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE, true)                       7.323  6.946     1.05
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false)   9.262  8.768     1.06
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)    7.298  6.871     1.06
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_ROWS, false)                         3.766  3.639     1.03
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_ROWS, true)                          1.932  1.889     1.02
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_SCALE, false)                        8.865  8.417     1.05
dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_SCALE, true)                         7.067  6.643     1.06
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, 0, false)                              10.014 10.141    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, 0, true)                               7.600  7.632     1.00
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_COMPLEX_OUTPUT, false)             11.059 11.283    0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_COMPLEX_OUTPUT, true)              8.475  8.552     0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE, false)                    12.678 12.789    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE, true)                     10.445 10.359    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 12.626 12.925    0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  10.538 10.553    1.00
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_ROWS, false)                       5.041  5.084     0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_ROWS, true)                        2.595  2.607     1.00
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_SCALE, false)                      10.231 10.330    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_SCALE, true)                       7.786  7.815     1.00
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, 0, false)                              13.597 13.302    1.02
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, 0, true)                               10.377 10.207    1.02
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_COMPLEX_OUTPUT, false)             15.940 15.545    1.03
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_COMPLEX_OUTPUT, true)              12.299 12.230    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE, false)                    15.270 15.181    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE, true)                     12.757 12.339    1.03
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 15.512 15.157    1.02
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  12.505 12.635    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_ROWS, false)                       6.359  6.255     1.02
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_ROWS, true)                        3.314  3.248     1.02
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_SCALE, false)                      13.937 13.733    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_SCALE, true)                       10.782 10.495    1.03
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, 0, false)                              18.985 18.926    1.00
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, 0, true)                               14.256 14.509    0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_COMPLEX_OUTPUT, false)             18.696 19.021    0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_COMPLEX_OUTPUT, true)              14.290 14.429    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE, false)                    20.135 20.296    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE, true)                     15.390 15.512    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 20.121 20.354    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  15.341 15.605    0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_ROWS, false)                       8.932  9.084     0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_ROWS, true)                        4.539  4.649     0.98
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_SCALE, false)                      19.137 19.303    0.99
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_SCALE, true)                       14.565 14.808    0.98
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, 0, false)                              22.553 21.171    1.07
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, 0, true)                               17.850 16.390    1.09
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_COMPLEX_OUTPUT, false)             24.062 22.634    1.06
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_COMPLEX_OUTPUT, true)              19.342 17.932    1.08
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE, false)                    28.609 27.326    1.05
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE, true)                     24.591 23.289    1.06
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 28.667 27.467    1.04
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  24.671 23.309    1.06
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_ROWS, false)                       9.458  9.077     1.04
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_ROWS, true)                        4.709  4.566     1.03
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_SCALE, false)                      22.791 21.583    1.06
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_SCALE, true)                       18.029 16.691    1.08
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, 0, false)                              25.238 24.427    1.03
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, 0, true)                               19.636 19.270    1.02
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_COMPLEX_OUTPUT, false)             28.342 27.957    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_COMPLEX_OUTPUT, true)              22.413 22.477    1.00
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE, false)                    26.465 26.085    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE, true)                     21.972 21.704    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 26.497 26.127    1.01
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  22.010 21.523    1.02
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_ROWS, false)                       11.188 10.774    1.04
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_ROWS, true)                        6.094  5.916     1.03
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_SCALE, false)                      25.728 24.934    1.03
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_SCALE, true)                       20.077 19.653    1.02
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, 0, false)                              43.834 40.726    1.08
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, 0, true)                               35.198 32.218    1.09
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_COMPLEX_OUTPUT, false)             43.743 40.897    1.07
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_COMPLEX_OUTPUT, true)              35.240 32.226    1.09
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE, false)                    46.022 42.612    1.08
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE, true)                     36.779 33.961    1.08
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 46.396 42.723    1.09
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  37.025 33.874    1.09
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_ROWS, false)                       17.334 16.832    1.03
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_ROWS, true)                        9.212  8.970     1.03
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_SCALE, false)                      44.190 41.211    1.07
dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_SCALE, true)                       35.900 32.888    1.09
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, 0, false)                              40.948 38.256    1.07
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, 0, true)                               33.825 30.759    1.10
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_COMPLEX_OUTPUT, false)             53.210 53.584    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_COMPLEX_OUTPUT, true)              46.356 46.712    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE, false)                    47.471 47.213    1.01
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE, true)                     40.491 41.363    0.98
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 46.724 47.049    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  40.834 41.381    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_ROWS, false)                       14.508 14.490    1.00
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_ROWS, true)                        7.832  7.828     1.00
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_SCALE, false)                      41.491 38.341    1.08
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_SCALE, true)                       34.587 31.208    1.11
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, 0, false)                              65.155 63.173    1.03
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, 0, true)                               56.091 54.752    1.02
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_COMPLEX_OUTPUT, false)             71.549 70.626    1.01
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_COMPLEX_OUTPUT, true)              62.319 61.437    1.01
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE, false)                    61.480 59.540    1.03
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE, true)                     54.047 52.650    1.03
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 61.752 61.366    1.01
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  54.400 53.665    1.01
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_ROWS, false)                       20.219 19.704    1.03
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_ROWS, true)                        11.145 10.868    1.03
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_SCALE, false)                      66.220 64.525    1.03
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_SCALE, true)                       57.389 56.114    1.02
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, 0, false)                              86.761 88.128    0.98
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, 0, true)                               75.528 76.725    0.98
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_COMPLEX_OUTPUT, false)             86.750 88.223    0.98
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_COMPLEX_OUTPUT, true)              75.830 76.809    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE, false)                    91.728 92.161    1.00
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE, true)                     78.797 79.876    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 92.163 92.177    1.00
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true)  78.957 79.863    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_ROWS, false)                       24.781 25.576    0.97
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_ROWS, true)                        13.226 13.695    0.97
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_SCALE, false)                      87.990 89.324    0.99
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_SCALE, true)                       76.732 77.869    0.99
```
---
 modules/core/src/dxt.cpp | 389 ++++++++++++++++++++++++++++-----------
 1 file changed, 280 insertions(+), 109 deletions(-)

diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp
index bfa61d0502..b307703a32 100644
--- a/modules/core/src/dxt.cpp
+++ b/modules/core/src/dxt.cpp
@@ -122,6 +122,33 @@ static const double DFTTab[][2] =
 { 1.00000000000000000, 0.00000000292583616 }
 };
 
+namespace {
+template <typename T>
+struct Constants {
+    static const T sin_120;
+    static const T fft5_2;
+    static const T fft5_3;
+    static const T fft5_4;
+    static const T fft5_5;
+};
+
+template <typename T>
+const T Constants<T>::sin_120 = (T)0.86602540378443864676372317075294;
+
+template <typename T>
+const T Constants<T>::fft5_2 = (T)0.559016994374947424102293417182819;
+
+template <typename T>
+const T Constants<T>::fft5_3 = (T)-0.951056516295153572116439333379382;
+
+template <typename T>
+const T Constants<T>::fft5_4 = (T)-1.538841768587626701285145288018455;
+
+template <typename T>
+const T Constants<T>::fft5_5 = (T)0.363271264002680442947733378740309;
+
+}  // namespace
+
 #define BitRev(i,shift) \
    ((int)((((unsigned)bitrevTab[(i)&255] << 24)+ \
            ((unsigned)bitrevTab[((i)>> 8)&255] << 16)+ \
@@ -372,6 +399,149 @@ DFTInit( int n0, int nf, const int* factors, int* itab, int elem_size, void* _wa
     }
 }
 
+// Reference radix-2 implementation.
+template<typename T> struct DFT_R2
+{
+    void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
+        const int nx = n/2;
+        for(int i = 0 ; i < c_n; i += n)
+        {
+            Complex<T>* v = dst + i;
+            T r0 = v[0].re + v[nx].re;
+            T i0 = v[0].im + v[nx].im;
+            T r1 = v[0].re - v[nx].re;
+            T i1 = v[0].im - v[nx].im;
+            v[0].re = r0; v[0].im = i0;
+            v[nx].re = r1; v[nx].im = i1;
+
+            for( int j = 1, dw = dw0; j < nx; j++, dw += dw0 )
+            {
+                v = dst + i + j;
+                r1 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im;
+                i1 = v[nx].im*wave[dw].re + v[nx].re*wave[dw].im;
+                r0 = v[0].re; i0 = v[0].im;
+
+                v[0].re = r0 + r1; v[0].im = i0 + i1;
+                v[nx].re = r0 - r1; v[nx].im = i0 - i1;
+            }
+        }
+    }
+};
+
+// Reference radix-3 implementation.
+template<typename T> struct DFT_R3
+{
+    void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
+        const int nx = n / 3;
+        for(int i = 0; i < c_n; i += n )
+        {
+            {
+                Complex<T>* v = dst + i;
+                T r1 = v[nx].re + v[nx*2].re;
+                T i1 = v[nx].im + v[nx*2].im;
+                T r0 = v[0].re;
+                T i0 = v[0].im;
+                T r2 = Constants<T>::sin_120*(v[nx].im - v[nx*2].im);
+                T i2 = Constants<T>::sin_120*(v[nx*2].re - v[nx].re);
+                v[0].re = r0 + r1; v[0].im = i0 + i1;
+                r0 -= (T)0.5*r1; i0 -= (T)0.5*i1;
+                v[nx].re = r0 + r2; v[nx].im = i0 + i2;
+                v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2;
+            }
+
+            for(int j = 1, dw = dw0; j < nx; j++, dw += dw0 )
+            {
+                Complex<T>* v = dst + i + j;
+                T r0 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im;
+                T i0 = v[nx].re*wave[dw].im + v[nx].im*wave[dw].re;
+                T i2 = v[nx*2].re*wave[dw*2].re - v[nx*2].im*wave[dw*2].im;
+                T r2 = v[nx*2].re*wave[dw*2].im + v[nx*2].im*wave[dw*2].re;
+                T r1 = r0 + i2; T i1 = i0 + r2;
+
+                r2 = Constants<T>::sin_120*(i0 - r2); i2 = Constants<T>::sin_120*(i2 - r0);
+                r0 = v[0].re; i0 = v[0].im;
+                v[0].re = r0 + r1; v[0].im = i0 + i1;
+                r0 -= (T)0.5*r1; i0 -= (T)0.5*i1;
+                v[nx].re = r0 + r2; v[nx].im = i0 + i2;
+                v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2;
+            }
+        }
+    }
+};
+
+// Reference radix-5 implementation.
+template<typename T> struct DFT_R5
+{
+    void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
+        const int nx = n / 5;
+        for(int i = 0; i < c_n; i += n )
+        {
+            for(int j = 0, dw = 0; j < nx; j++, dw += dw0 )
+            {
+                Complex<T>* v0 = dst + i + j;
+                Complex<T>* v1 = v0 + nx*2;
+                Complex<T>* v2 = v1 + nx*2;
+
+                T r0, i0, r1, i1, r2, i2, r3, i3, r4, i4, r5, i5;
+
+                r3 = v0[nx].re*wave[dw].re - v0[nx].im*wave[dw].im;
+                i3 = v0[nx].re*wave[dw].im + v0[nx].im*wave[dw].re;
+                r2 = v2[0].re*wave[dw*4].re - v2[0].im*wave[dw*4].im;
+                i2 = v2[0].re*wave[dw*4].im + v2[0].im*wave[dw*4].re;
+
+                r1 = r3 + r2; i1 = i3 + i2;
+                r3 -= r2; i3 -= i2;
+
+                r4 = v1[nx].re*wave[dw*3].re - v1[nx].im*wave[dw*3].im;
+                i4 = v1[nx].re*wave[dw*3].im + v1[nx].im*wave[dw*3].re;
+                r0 = v1[0].re*wave[dw*2].re - v1[0].im*wave[dw*2].im;
+                i0 = v1[0].re*wave[dw*2].im + v1[0].im*wave[dw*2].re;
+
+                r2 = r4 + r0; i2 = i4 + i0;
+                r4 -= r0; i4 -= i0;
+
+                r0 = v0[0].re; i0 = v0[0].im;
+                r5 = r1 + r2; i5 = i1 + i2;
+
+                v0[0].re = r0 + r5; v0[0].im = i0 + i5;
+
+                r0 -= (T)0.25*r5; i0 -= (T)0.25*i5;
+                r1 = Constants<T>::fft5_2*(r1 - r2); i1 = Constants<T>::fft5_2*(i1 - i2);
+                r2 = -Constants<T>::fft5_3*(i3 + i4); i2 = Constants<T>::fft5_3*(r3 + r4);
+
+                i3 *= -Constants<T>::fft5_5; r3 *= Constants<T>::fft5_5;
+                i4 *= -Constants<T>::fft5_4; r4 *= Constants<T>::fft5_4;
+
+                r5 = r2 + i3; i5 = i2 + r3;
+                r2 -= i4; i2 -= r4;
+
+                r3 = r0 + r1; i3 = i0 + i1;
+                r0 -= r1; i0 -= i1;
+
+                v0[nx].re = r3 + r2; v0[nx].im = i3 + i2;
+                v2[0].re = r3 - r2; v2[0].im = i3 - i2;
+
+                v1[0].re = r0 + r5; v1[0].im = i0 + i5;
+                v1[nx].re = r0 - r5; v1[nx].im = i0 - i5;
+            }
+        }
+    }
+};
+
+template<typename T> struct DFT_VecR2
+{
+    void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
+        return DFT_R2<T>()(dst, c_n, n, dw0, wave);
+    }
+};
+
+template<typename T> struct DFT_VecR3
+{
+    void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
+        return DFT_R3<T>()(dst, c_n, n, dw0, wave);
+    }
+};
+
 template<typename T> struct DFT_VecR4
 {
     int operator()(Complex<T>*, int, int, int&, const Complex<T>*) const { return 1; }
@@ -379,6 +549,98 @@ template<typename T> struct DFT_VecR4
 
 #if CV_SSE3
 
+// multiplies *a and *b:
+//  r_re + i*r_im = (a_re + i*a_im)*(b_re + i*b_im)
+// r_re and r_im are placed respectively in bits 31:0 and 63:32 of the resulting
+// vector register.
+inline __m128 complexMul(const Complex<float>* const a, const Complex<float>* const b) {
+    const __m128 z = _mm_setzero_ps();
+    const __m128 neg_elem0 = _mm_set_ps(0.0f,0.0f,0.0f,-0.0f);
+    // v_a[31:0] is a->re and v_a[63:32] is a->im.
+    const __m128 v_a = _mm_loadl_pi(z, (const __m64*)a);
+    const __m128 v_b = _mm_loadl_pi(z, (const __m64*)b);
+    // x_1 = v[nx] * wave[dw].
+    const __m128 v_a_riri = _mm_shuffle_ps(v_a, v_a, _MM_SHUFFLE(0, 1, 0, 1));
+    const __m128 v_b_irri = _mm_shuffle_ps(v_b, v_b, _MM_SHUFFLE(1, 0, 0, 1));
+    const __m128 mul = _mm_mul_ps(v_a_riri, v_b_irri);
+    const __m128 xored = _mm_xor_ps(mul, neg_elem0);
+    return _mm_hadd_ps(xored, z);
+}
+
+// optimized radix-2 transform
+template<> struct DFT_VecR2<float> {
+    void operator()(Complex<float>* dst, const int c_n, const int n, const int dw0, const Complex<float>* wave) const {
+        const __m128 z = _mm_setzero_ps();
+        const int nx = n/2;
+        for(int i = 0 ; i < c_n; i += n)
+        {
+            {
+                Complex<float>* v = dst + i;
+                float r0 = v[0].re + v[nx].re;
+                float i0 = v[0].im + v[nx].im;
+                float r1 = v[0].re - v[nx].re;
+                float i1 = v[0].im - v[nx].im;
+                v[0].re = r0; v[0].im = i0;
+                v[nx].re = r1; v[nx].im = i1;
+            }
+
+            for( int j = 1, dw = dw0; j < nx; j++, dw += dw0 )
+            {
+                Complex<float>* v = dst + i + j;
+                const __m128 x_1 = complexMul(&v[nx], &wave[dw]);
+                const __m128 v_0 = _mm_loadl_pi(z, (const __m64*)&v[0]);
+                _mm_storel_pi((__m64*)&v[0], _mm_add_ps(v_0, x_1));
+                _mm_storel_pi((__m64*)&v[nx], _mm_sub_ps(v_0, x_1));
+            }
+        }
+    }
+};
+
+// Optimized radix-3 implementation.
+template<> struct DFT_VecR3<float> {
+    void operator()(Complex<float>* dst, const int c_n, const int n, const int dw0, const Complex<float>* wave) const {
+        const int nx = n / 3;
+        const __m128 z = _mm_setzero_ps();
+        const __m128 neg_elem1 = _mm_set_ps(0.0f,0.0f,-0.0f,0.0f);
+        const __m128 sin_120 = _mm_set1_ps(Constants<float>::sin_120);
+        const __m128 one_half = _mm_set1_ps(0.5f);
+        for(int i = 0; i < c_n; i += n )
+        {
+            {
+                Complex<float>* v = dst + i;
+
+                float r1 = v[nx].re + v[nx*2].re;
+                float i1 = v[nx].im + v[nx*2].im;
+                float r0 = v[0].re;
+                float i0 = v[0].im;
+                float r2 = Constants<float>::sin_120*(v[nx].im - v[nx*2].im);
+                float i2 = Constants<float>::sin_120*(v[nx*2].re - v[nx].re);
+                v[0].re = r0 + r1; v[0].im = i0 + i1;
+                r0 -= (float)0.5*r1; i0 -= (float)0.5*i1;
+                v[nx].re = r0 + r2; v[nx].im = i0 + i2;
+                v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2;
+            }
+
+            for(int j = 1, dw = dw0; j < nx; j++, dw += dw0 )
+            {
+                Complex<float>* v = dst + i + j;
+                const __m128 x_0 = complexMul(&v[nx], &wave[dw]);
+                const __m128 x_2 = complexMul(&v[nx*2], &wave[dw*2]);
+                const __m128 x_1 = _mm_add_ps(x_0, x_2);
+
+                const __m128 v_0 = _mm_loadl_pi(z, (const __m64*)&v[0]);
+                _mm_storel_pi((__m64*)&v[0], _mm_add_ps(v_0, x_1));
+
+                const __m128 x_3 = _mm_mul_ps(sin_120, _mm_xor_ps(_mm_sub_ps(x_2, x_0), neg_elem1));
+                const __m128 x_3s = _mm_shuffle_ps(x_3, x_3, _MM_SHUFFLE(0, 1, 0, 1));
+                const __m128 x_4 = _mm_sub_ps(v_0, _mm_mul_ps(one_half, x_1));
+                _mm_storel_pi((__m64*)&v[nx], _mm_add_ps(x_4, x_3s));
+                _mm_storel_pi((__m64*)&v[nx*2], _mm_sub_ps(x_4, x_3s));
+            }
+        }
+    }
+};
+
 // optimized radix-4 transform
 template<> struct DFT_VecR4<float>
 {
@@ -573,12 +835,6 @@ struct OcvDftOptions {
 template<typename T> static void
 DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
 {
-    static const T sin_120 = (T)0.86602540378443864676372317075294;
-    static const T fft5_2 = (T)0.559016994374947424102293417182819;
-    static const T fft5_3 = (T)-0.951056516295153572116439333379382;
-    static const T fft5_4 = (T)-1.538841768587626701285145288018455;
-    static const T fft5_5 = (T)0.363271264002680442947733378740309;
-
     const Complex<T>* wave = (Complex<T>*)c.wave;
     const int * itab = c.itab;
 
@@ -775,30 +1031,18 @@ DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
         for( ; n < c.factors[0]; )
         {
             // do the remaining radix-2 transform
-            nx = n;
             n *= 2;
             dw0 /= 2;
 
-            for( i = 0; i < c.n; i += n )
+            if(c.haveSSE3)
             {
-                Complex<T>* v = dst + i;
-                T r0 = v[0].re + v[nx].re;
-                T i0 = v[0].im + v[nx].im;
-                T r1 = v[0].re - v[nx].re;
-                T i1 = v[0].im - v[nx].im;
-                v[0].re = r0; v[0].im = i0;
-                v[nx].re = r1; v[nx].im = i1;
-
-                for( j = 1, dw = dw0; j < nx; j++, dw += dw0 )
-                {
-                    v = dst + i + j;
-                    r1 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im;
-                    i1 = v[nx].im*wave[dw].re + v[nx].re*wave[dw].im;
-                    r0 = v[0].re; i0 = v[0].im;
-
-                    v[0].re = r0 + r1; v[0].im = i0 + i1;
-                    v[nx].re = r0 - r1; v[nx].im = i0 - i1;
-                }
+                DFT_VecR2<T> vr2;
+                vr2(dst, c.n, n, dw0, wave);
+            }
+            else
+            {
+                DFT_R2<T> vr2;
+                vr2(dst, c.n, n, dw0, wave);
             }
         }
     }
@@ -813,94 +1057,21 @@ DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
 
         if( factor == 3 )
         {
-            // radix-3
-            for( i = 0; i < c.n; i += n )
+            if(c.haveSSE3)
             {
-                Complex<T>* v = dst + i;
-
-                T r1 = v[nx].re + v[nx*2].re;
-                T i1 = v[nx].im + v[nx*2].im;
-                T r0 = v[0].re;
-                T i0 = v[0].im;
-                T r2 = sin_120*(v[nx].im - v[nx*2].im);
-                T i2 = sin_120*(v[nx*2].re - v[nx].re);
-                v[0].re = r0 + r1; v[0].im = i0 + i1;
-                r0 -= (T)0.5*r1; i0 -= (T)0.5*i1;
-                v[nx].re = r0 + r2; v[nx].im = i0 + i2;
-                v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2;
-
-                for( j = 1, dw = dw0; j < nx; j++, dw += dw0 )
-                {
-                    v = dst + i + j;
-                    r0 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im;
-                    i0 = v[nx].re*wave[dw].im + v[nx].im*wave[dw].re;
-                    i2 = v[nx*2].re*wave[dw*2].re - v[nx*2].im*wave[dw*2].im;
-                    r2 = v[nx*2].re*wave[dw*2].im + v[nx*2].im*wave[dw*2].re;
-                    r1 = r0 + i2; i1 = i0 + r2;
-
-                    r2 = sin_120*(i0 - r2); i2 = sin_120*(i2 - r0);
-                    r0 = v[0].re; i0 = v[0].im;
-                    v[0].re = r0 + r1; v[0].im = i0 + i1;
-                    r0 -= (T)0.5*r1; i0 -= (T)0.5*i1;
-                    v[nx].re = r0 + r2; v[nx].im = i0 + i2;
-                    v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2;
-                }
+                DFT_VecR3<T> vr3;
+                vr3(dst, c.n, n, dw0, wave);
+            }
+            else
+            {
+                DFT_R3<T> vr3;
+                vr3(dst, c.n, n, dw0, wave);
             }
         }
         else if( factor == 5 )
         {
-            // radix-5
-            for( i = 0; i < c.n; i += n )
-            {
-                for( j = 0, dw = 0; j < nx; j++, dw += dw0 )
-                {
-                    Complex<T>* v0 = dst + i + j;
-                    Complex<T>* v1 = v0 + nx*2;
-                    Complex<T>* v2 = v1 + nx*2;
-
-                    T r0, i0, r1, i1, r2, i2, r3, i3, r4, i4, r5, i5;
-
-                    r3 = v0[nx].re*wave[dw].re - v0[nx].im*wave[dw].im;
-                    i3 = v0[nx].re*wave[dw].im + v0[nx].im*wave[dw].re;
-                    r2 = v2[0].re*wave[dw*4].re - v2[0].im*wave[dw*4].im;
-                    i2 = v2[0].re*wave[dw*4].im + v2[0].im*wave[dw*4].re;
-
-                    r1 = r3 + r2; i1 = i3 + i2;
-                    r3 -= r2; i3 -= i2;
-
-                    r4 = v1[nx].re*wave[dw*3].re - v1[nx].im*wave[dw*3].im;
-                    i4 = v1[nx].re*wave[dw*3].im + v1[nx].im*wave[dw*3].re;
-                    r0 = v1[0].re*wave[dw*2].re - v1[0].im*wave[dw*2].im;
-                    i0 = v1[0].re*wave[dw*2].im + v1[0].im*wave[dw*2].re;
-
-                    r2 = r4 + r0; i2 = i4 + i0;
-                    r4 -= r0; i4 -= i0;
-
-                    r0 = v0[0].re; i0 = v0[0].im;
-                    r5 = r1 + r2; i5 = i1 + i2;
-
-                    v0[0].re = r0 + r5; v0[0].im = i0 + i5;
-
-                    r0 -= (T)0.25*r5; i0 -= (T)0.25*i5;
-                    r1 = fft5_2*(r1 - r2); i1 = fft5_2*(i1 - i2);
-                    r2 = -fft5_3*(i3 + i4); i2 = fft5_3*(r3 + r4);
-
-                    i3 *= -fft5_5; r3 *= fft5_5;
-                    i4 *= -fft5_4; r4 *= fft5_4;
-
-                    r5 = r2 + i3; i5 = i2 + r3;
-                    r2 -= i4; i2 -= r4;
-
-                    r3 = r0 + r1; i3 = i0 + i1;
-                    r0 -= r1; i0 -= i1;
-
-                    v0[nx].re = r3 + r2; v0[nx].im = i3 + i2;
-                    v2[0].re = r3 - r2; v2[0].im = i3 - i2;
-
-                    v1[0].re = r0 + r5; v1[0].im = i0 + i5;
-                    v1[nx].re = r0 - r5; v1[nx].im = i0 - i5;
-                }
-            }
+            DFT_R5<T> vr5;
+            vr5(dst, c.n, n, dw0, wave);
         }
         else
         {

From e3da18121f836a3ef361a8f1847f6ae30d536770 Mon Sep 17 00:00:00 2001
From: Michael Gruner <michael.gruner@ridgerun.com>
Date: Thu, 1 Oct 2020 19:48:15 -0600
Subject: [PATCH 003/152] Enable a GMainLoop when capturing using GStreamer

A running GMainLoop processes many events in the GLib/GStreamer
world. While some things may work without it, many others won't.
Examples are signals, timers, and other event sources. The
problem is compounded by the fact that some GStreamer elements
rely on signals to work.

This commit lets the user opt in to starting a main loop via the
OPENCV_VIDEOIO_GSTREAMER_START_MAINLOOP configuration option.
Since the loop blocks, it is run in a separate thread.
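
For reference, a minimal sketch of the pattern (hypothetical
standalone example using the GLib API directly; error handling
omitted):

```
#include <glib.h>
#include <thread>

int main()
{
    GMainLoop* loop = g_main_loop_new(NULL, FALSE);  // default main context
    std::thread worker([loop]() {
        g_main_loop_run(loop);  // blocks, dispatching signals/timers/sources
    });
    // ... run the GStreamer pipeline; signal-based elements now work ...
    g_main_loop_quit(loop);  // may be called from another thread
    worker.join();
    g_main_loop_unref(loop);
    return 0;
}
```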
---
 modules/videoio/src/cap_gstreamer.cpp | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/modules/videoio/src/cap_gstreamer.cpp b/modules/videoio/src/cap_gstreamer.cpp
index 4d9330daf8..67c119cd36 100644
--- a/modules/videoio/src/cap_gstreamer.cpp
+++ b/modules/videoio/src/cap_gstreamer.cpp
@@ -54,6 +54,7 @@
 
 #include <iostream>
 #include <string.h>
+#include <thread>
 
 #include <gst/gst.h>
 #include <gst/gstbuffer.h>
@@ -107,6 +108,7 @@ template<> inline void GSafePtr_release<GstBuffer>(GstBuffer** pPtr) { if (pPtr)
 template<> inline void GSafePtr_release<GstSample>(GstSample** pPtr) { if (pPtr) { gst_sample_unref(*pPtr); *pPtr = NULL; } }
 template<> inline void GSafePtr_release<GstBus>(GstBus** pPtr) { if (pPtr) { gst_object_unref(G_OBJECT(*pPtr)); *pPtr = NULL; } }
 template<> inline void GSafePtr_release<GstMessage>(GstMessage** pPtr) { if (pPtr) { gst_message_unref(*pPtr); *pPtr = NULL; } }
+template<> inline void GSafePtr_release<GMainLoop>(GMainLoop** pPtr) { if (pPtr) { g_main_loop_unref(*pPtr); *pPtr = NULL; } }
 
 template<> inline void GSafePtr_release<GstEncodingVideoProfile>(GstEncodingVideoProfile** pPtr) { if (pPtr) { gst_encoding_profile_unref(*pPtr); *pPtr = NULL; } }
 template<> inline void GSafePtr_release<GstEncodingContainerProfile>(GstEncodingContainerProfile** pPtr) { if (pPtr) { gst_object_unref(G_OBJECT(*pPtr)); *pPtr = NULL; } }
@@ -194,10 +196,15 @@ public:
 private:
     bool isFailed;
     bool call_deinit;
+    bool start_loop;
+    GSafePtr<GMainLoop> loop;
+    std::thread thread;
+
     gst_initializer() :
         isFailed(false)
     {
         call_deinit = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_GSTREAMER_CALL_DEINIT", false);
+        start_loop = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_GSTREAMER_START_MAINLOOP", false);
 
         GSafePtr<GError> err;
         gst_init_check(NULL, NULL, err.getRef());
@@ -215,6 +222,14 @@ private:
             isFailed = true;
             return;
         }
+
+        if (start_loop)
+        {
+            loop.attach(g_main_loop_new (NULL, FALSE));
+            thread = std::thread([this](){
+                g_main_loop_run (loop);
+            });
+        }
     }
     ~gst_initializer()
     {
@@ -223,6 +238,12 @@ private:
             // Debug leaks: GST_LEAKS_TRACER_STACK_TRACE=1 GST_DEBUG="GST_TRACER:7" GST_TRACERS="leaks"
             gst_deinit();
         }
+
+        if (start_loop)
+        {
+            g_main_loop_quit(loop);
+            thread.join();
+        }
     }
 };
 

From 1546b9bf994258a6aeea7b0cf3f7675d3b1c0514 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 6 Oct 2020 00:12:58 +0300
Subject: [PATCH 004/152] build: winpack_dldt with dldt 2021.1.0

---
 .../20201005-dldt-fix-cldnn-compilation.patch |  12 ++
 platforms/winpack_dldt/2020.4/patch.config.py |   1 +
 .../2021.1/20200413-dldt-pdb.patch            |  14 ++
 .../20200604-dldt-disable-multidevice.patch   |  13 ++
 ...20201005-dldt-disable-unused-targets.patch | 178 ++++++++++++++++++
 platforms/winpack_dldt/2021.1/patch.config.py |   3 +
 .../winpack_dldt/2021.1/sysroot.config.py     |  56 ++++++
 platforms/winpack_dldt/build_package.py       |   4 +-
 8 files changed, 279 insertions(+), 2 deletions(-)
 create mode 100644 platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch
 create mode 100644 platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch
 create mode 100644 platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch
 create mode 100644 platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch
 create mode 100644 platforms/winpack_dldt/2021.1/patch.config.py
 create mode 100644 platforms/winpack_dldt/2021.1/sysroot.config.py

diff --git a/platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch b/platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch
new file mode 100644
index 0000000000..152af26c6f
--- /dev/null
+++ b/platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch
@@ -0,0 +1,12 @@
+diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h b/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
+index 3dbdfd0b..6b04b910 100644
+--- a/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
++++ b/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
+@@ -15,6 +15,7 @@
+ 
+ #pragma once
+ 
++#include <stdexcept>
+ #include "common_types.h"
+ #include "common_tools.h"
+ #include <vector>
diff --git a/platforms/winpack_dldt/2020.4/patch.config.py b/platforms/winpack_dldt/2020.4/patch.config.py
index 496f383800..6fe3e6e1c1 100644
--- a/platforms/winpack_dldt/2020.4/patch.config.py
+++ b/platforms/winpack_dldt/2020.4/patch.config.py
@@ -1,3 +1,4 @@
 applyPatch('20200701-dldt-disable-unused-targets.patch')
 applyPatch('20200413-dldt-pdb.patch')
 applyPatch('20200604-dldt-disable-multidevice.patch')
+applyPatch('20201005-dldt-fix-cldnn-compilation.patch')
diff --git a/platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch b/platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch
new file mode 100644
index 0000000000..081c3c04f6
--- /dev/null
+++ b/platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch
@@ -0,0 +1,14 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 1f981ed2..90eb500a 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -22,6 +22,9 @@ endif()
+ 
+ project(OpenVINO)
+ 
++set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Zi /FS")
++set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DEBUG /OPT:REF /OPT:ICF")
++
+ set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+ set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
+ list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake")
diff --git a/platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch b/platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch
new file mode 100644
index 0000000000..b4d1ef9bfe
--- /dev/null
+++ b/platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch
@@ -0,0 +1,13 @@
+diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt
+index 0ba0dd78..7d34e7cb 100644
+--- a/inference-engine/src/CMakeLists.txt
++++ b/inference-engine/src/CMakeLists.txt
+@@ -26,7 +26,7 @@ endif()
+ 
+ add_subdirectory(hetero_plugin)
+ 
+-add_subdirectory(multi_device)
++#add_subdirectory(multi_device)
+ 
+ add_subdirectory(transformations)
+ 
diff --git a/platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch b/platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch
new file mode 100644
index 0000000000..0f56717ee4
--- /dev/null
+++ b/platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch
@@ -0,0 +1,178 @@
+diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt
+index 7f45ab02..a7bac7e9 100644
+--- a/inference-engine/CMakeLists.txt
++++ b/inference-engine/CMakeLists.txt
+@@ -70,7 +70,7 @@ if(ENABLE_TESTS)
+     add_subdirectory(tests)
+ endif()
+ 
+-add_subdirectory(tools)
++#add_subdirectory(tools)
+ 
+ function(ie_build_samples)
+     # samples should be build with the same flags as from OpenVINO package,
+@@ -89,7 +89,7 @@ endfunction()
+ 
+ # gflags and format_reader targets are kept inside of samples directory and
+ # they must be built even if samples build is disabled (required for tests and tools).
+-ie_build_samples()
++#ie_build_samples()
+ 
+ file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
+ add_cpplint_target(sample_cpplint
+@@ -180,7 +180,7 @@ endif()
+ # Developer package
+ #
+ 
+-ie_developer_export_targets(format_reader)
++#ie_developer_export_targets(format_reader)
+ ie_developer_export_targets(${NGRAPH_LIBRARIES})
+ 
+ # for Template plugin
+@@ -188,7 +188,7 @@ if(NGRAPH_INTERPRETER_ENABLE)
+     ie_developer_export_targets(ngraph_backend interpreter_backend)
+ endif()
+ 
+-ie_developer_export()
++#ie_developer_export()
+ 
+ configure_file(
+     "${IE_MAIN_SOURCE_DIR}/cmake/developer_package_config.cmake.in"
+diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt
+index 9ab88898..8badb591 100644
+--- a/inference-engine/src/inference_engine/CMakeLists.txt
++++ b/inference-engine/src/inference_engine/CMakeLists.txt
+@@ -118,7 +118,7 @@ add_cpplint_target(${TARGET_NAME}_plugin_api_cpplint FOR_SOURCES ${plugin_api_sr
+ 
+ # Create common base object library
+ 
+-add_library(${TARGET_NAME}_common_obj OBJECT
++add_library(${TARGET_NAME}_common_obj OBJECT EXCLUDE_FROM_ALL
+             ${IE_BASE_SOURCE_FILES})
+ 
+ target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
+@@ -132,7 +132,7 @@ target_include_directories(${TARGET_NAME}_common_obj SYSTEM PRIVATE
+ 
+ # Create object library
+ 
+-add_library(${TARGET_NAME}_obj OBJECT
++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
+             ${LIBRARY_SRC}
+             ${LIBRARY_HEADERS}
+             ${PUBLIC_HEADERS})
+@@ -183,7 +183,7 @@ ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
+ 
+ # Static library used for unit tests which are always built
+ 
+-add_library(${TARGET_NAME}_s STATIC
++add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
+             $<TARGET_OBJECTS:${TARGET_NAME}_obj>
+             $<TARGET_OBJECTS:${TARGET_NAME}_common_obj>
+             $<TARGET_OBJECTS:${TARGET_NAME}_legacy_obj>
+diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt
+index ed87a073..b30e6671 100644
+--- a/inference-engine/src/legacy_api/CMakeLists.txt
++++ b/inference-engine/src/legacy_api/CMakeLists.txt
+@@ -26,7 +26,7 @@ endif()
+ 
+ # Create object library
+ 
+-add_library(${TARGET_NAME}_obj OBJECT
++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
+             ${LIBRARY_SRC}
+             ${PUBLIC_HEADERS})
+ 
+diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+index 166818cd..6c1e8e36 100644
+--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt
++++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+@@ -193,7 +193,7 @@ cross_compiled_file(${TARGET_NAME}
+ 
+ #  add test object library
+ 
+-add_library(${TARGET_NAME}_obj OBJECT ${SOURCES} ${HEADERS})
++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL ${SOURCES} ${HEADERS})
+ 
+ target_include_directories(${TARGET_NAME}_obj PRIVATE $<TARGET_PROPERTY:inference_engine_preproc_s,INTERFACE_INCLUDE_DIRECTORIES>
+                                                       $<TARGET_PROPERTY:inference_engine_lp_transformations,INTERFACE_INCLUDE_DIRECTORIES>
+diff --git a/inference-engine/src/preprocessing/CMakeLists.txt b/inference-engine/src/preprocessing/CMakeLists.txt
+index f4fed72a..9cedd6b5 100644
+--- a/inference-engine/src/preprocessing/CMakeLists.txt
++++ b/inference-engine/src/preprocessing/CMakeLists.txt
+@@ -124,7 +124,7 @@ endif()
+ 
+ # Create object library
+ 
+-add_library(${TARGET_NAME}_obj OBJECT
++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
+             ${LIBRARY_SRC}
+             ${LIBRARY_HEADERS})
+ 
+@@ -175,7 +175,7 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}
+ 
+ # Static library used for unit tests which are always built
+ 
+-add_library(${TARGET_NAME}_s STATIC
++add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
+             $<TARGET_OBJECTS:${TARGET_NAME}_obj>)
+ 
+ set_ie_threading_interface_for(${TARGET_NAME}_s)
+diff --git a/inference-engine/src/vpu/common/CMakeLists.txt b/inference-engine/src/vpu/common/CMakeLists.txt
+index b291d5b4..74ab8287 100644
+--- a/inference-engine/src/vpu/common/CMakeLists.txt
++++ b/inference-engine/src/vpu/common/CMakeLists.txt
+@@ -57,7 +57,7 @@ add_common_target("vpu_common_lib" FALSE)
+ 
+ # Unit tests support for graph transformer
+ if(WIN32)
+-    add_common_target("vpu_common_lib_test_static" TRUE)
++    #add_common_target("vpu_common_lib_test_static" TRUE)
+ else()
+     add_library("vpu_common_lib_test_static" ALIAS "vpu_common_lib")
+ endif()
+diff --git a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
+index a4543745..807b8e36 100644
+--- a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
++++ b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
+@@ -65,7 +65,7 @@ add_graph_transformer_target("vpu_graph_transformer" FALSE)
+ 
+ # Unit tests support for graph transformer
+ if(WIN32)
+-    add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE)
++    #add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE)
+ else()
+     add_library("vpu_graph_transformer_test_static" ALIAS "vpu_graph_transformer")
+ endif()
+diff --git a/inference-engine/thirdparty/CMakeLists.txt b/inference-engine/thirdparty/CMakeLists.txt
+index a2550bfa..10ce316f 100644
+--- a/inference-engine/thirdparty/CMakeLists.txt
++++ b/inference-engine/thirdparty/CMakeLists.txt
+@@ -56,13 +56,13 @@ function(build_with_lto)
+     endfunction()
+ 
+     ie_build_pugixml()
+-    add_subdirectory(stb_lib)
++    #add_subdirectory(stb_lib)
+     add_subdirectory(ade)
+     add_subdirectory(fluid/modules/gapi)
+ 
+     target_include_directories(pugixml INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/pugixml/src>")
+ 
+-    set_target_properties(pugixml ade fluid stb_image
++    set_target_properties(pugixml ade fluid
+                           PROPERTIES FOLDER thirdparty)
+ 
+     # developer package
+diff --git a/inference-engine/thirdparty/pugixml/CMakeLists.txt b/inference-engine/thirdparty/pugixml/CMakeLists.txt
+index 8bcb2801..380fb468 100644
+--- a/inference-engine/thirdparty/pugixml/CMakeLists.txt
++++ b/inference-engine/thirdparty/pugixml/CMakeLists.txt
+@@ -41,7 +41,7 @@ if(BUILD_SHARED_LIBS)
+ else()
+ 	add_library(pugixml STATIC ${SOURCES})
+ 	if (MSVC)
+-		add_library(pugixml_mt STATIC ${SOURCES})
++               #add_library(pugixml_mt STATIC ${SOURCES})
+ 		#if (WIN32)
+ 		#	set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
+ 		#	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
diff --git a/platforms/winpack_dldt/2021.1/patch.config.py b/platforms/winpack_dldt/2021.1/patch.config.py
new file mode 100644
index 0000000000..8c890159e6
--- /dev/null
+++ b/platforms/winpack_dldt/2021.1/patch.config.py
@@ -0,0 +1,3 @@
+applyPatch('20201005-dldt-disable-unused-targets.patch')
+applyPatch('20200413-dldt-pdb.patch')
+applyPatch('20200604-dldt-disable-multidevice.patch')
diff --git a/platforms/winpack_dldt/2021.1/sysroot.config.py b/platforms/winpack_dldt/2021.1/sysroot.config.py
new file mode 100644
index 0000000000..fc8dffd32a
--- /dev/null
+++ b/platforms/winpack_dldt/2021.1/sysroot.config.py
@@ -0,0 +1,56 @@
+sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin')
+copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph')
+#rm_one(self.sysrootdir / 'ngraph' / 'lib' / 'ngraph.dll')
+
+build_config = 'Release' if not self.config.build_debug else 'Debug'
+build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config
+
+def copy_bin(name):
+    global build_bin_dir, sysroot_bin_dir
+    copytree(build_bin_dir / name, sysroot_bin_dir / name)
+
+dll_suffix = 'd' if self.config.build_debug else ''
+def copy_dll(name):
+    global copy_bin, dll_suffix
+    copy_bin(name + dll_suffix + '.dll')
+    copy_bin(name + dll_suffix + '.pdb')
+
+copy_bin('cache.json')
+copy_dll('clDNNPlugin')
+copy_dll('HeteroPlugin')
+copy_dll('inference_engine')
+copy_dll('inference_engine_ir_reader')
+copy_dll('inference_engine_legacy')
+copy_dll('inference_engine_transformations')  # runtime
+copy_dll('inference_engine_lp_transformations')  # runtime
+copy_dll('MKLDNNPlugin')  # runtime
+copy_dll('myriadPlugin')  # runtime
+#copy_dll('MultiDevicePlugin')  # runtime, not used
+copy_dll('ngraph')
+copy_bin('plugins.xml')
+copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf')
+copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd')
+copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd')
+
+copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir)
+copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb')
+
+sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine')
+sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64')
+
+copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include')
+if not self.config.build_debug:
+    copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib')
+    copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib')
+    copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib')
+    copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib')
+else:
+    copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib')
+    copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib')
+    copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib')
+    copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib')
+
+sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses')
+copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE')
+copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE')
+copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE')
diff --git a/platforms/winpack_dldt/build_package.py b/platforms/winpack_dldt/build_package.py
index c33e07026b..8618e11cbf 100644
--- a/platforms/winpack_dldt/build_package.py
+++ b/platforms/winpack_dldt/build_package.py
@@ -443,8 +443,8 @@ class Builder:
 def main():
 
     dldt_src_url = 'https://github.com/openvinotoolkit/openvino'
-    dldt_src_commit = '2020.4'
-    dldt_release = '2020040000'
+    dldt_src_commit = '2021.1'
+    dldt_release = '2021010000'
 
     build_cache_dir_default = os.environ.get('BUILD_CACHE_DIR', '.build_cache')
     build_subst_drive = os.environ.get('BUILD_SUBST_DRIVE', None)

From d9ea9bedb20bb55a06439f7f013046025f7ff2a7 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Wed, 7 Oct 2020 20:16:40 +0000
Subject: [PATCH 005/152] doxygen: backport style changes

---
 doc/stylesheet.css | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/doc/stylesheet.css b/doc/stylesheet.css
index 806e03bfd4..eae05885f7 100644
--- a/doc/stylesheet.css
+++ b/doc/stylesheet.css
@@ -6,12 +6,11 @@ body, table, div, p, dl {
 }
 
 code {
-    font: 12px Consolas, "Liberation Mono", Courier, monospace;
     font-size: 85%;
+    font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace;
     white-space: pre-wrap;
     padding: 1px 5px;
-    padding: 0;
-    background-color: #ddd;
+    background-color: rgb(223, 229, 241);
     vertical-align: baseline;
 }
 
@@ -20,6 +19,16 @@ body {
     margin: 0 auto;
 }
 
+div.fragment {
+    padding: 3px;
+    padding-bottom: 0px;
+}
+
+div.line {
+    padding-bottom: 3px;
+    font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace;
+}
+
 div.contents {
     width: 980px;
     margin: 0 auto;
@@ -35,3 +44,11 @@ span.arrow {
 div.image img{
     max-width: 900px;
 }
+
+#projectlogo
+{
+    text-align: center;
+    vertical-align: middle;
+    border-collapse: separate;
+    padding-left: 0.5em;
+}

From af2f8c69f03ae5ee381d60f2f44bad849bd05e06 Mon Sep 17 00:00:00 2001
From: "Anastasiya(Asya) Pronina" <anastasiya.pronina@intel.com>
Date: Thu, 8 Oct 2020 00:48:49 +0300
Subject: [PATCH 006/152] Merge pull request #18496 from
 AsyaPronina:comp_args_serialization

Serialization && deserialization for compile arguments

* Initial stub

* Add test on serialization of a custom type

* Namespaces rework

* Fix isSupported in test struct

* Fix clang lookup issue

* Initial implementation

* Drop the isSupported flag

* Initial implementation

* Removed internal header inclusion

* Switched to public API

* Implemented serialization

* Adding deserialize: WIP

* Fixed merge errors

* Implemented

* Final polishing

* Addressed review comments and added debug throw

* Added FluidROI test

* Polishing

* Polishing

* Polishing

* Polishing

* Polishing

* Updated CMakeLists.txt

* Fixed comments

* Addressed review comments

* Removed decay from deserialize_arg

* Addressed review comments

* Removed extra inclusion

* Fixed Win64 warning

* Update gcommon.hpp

* Update serialization.cpp

* Update gcommon.hpp

* gapi: drop GAPI_EXPORTS_W_SIMPLE from GCompileArg

Co-authored-by: Smirnov Alexey <alexey.smirnov@intel.com>
Co-authored-by: AsyaPronina <155jj@mail.ru>
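
For reference, a minimal usage sketch of the round trip added by this patch
(`MyParam`, its tag string, and its field are illustrative placeholders, not
part of this change):

```
struct MyParam { int value; };

namespace cv {
namespace detail {
// Every compile argument type needs a tag so it can be matched on deserialization.
template<> struct CompileArgTag<MyParam> {
    static const char* tag() { return "org.example.myparam"; }
};
} // namespace detail
namespace gapi {
namespace s11n {
namespace detail {
// User-provided S11N specialization; the default one asserts (see s11n/base.hpp).
template<> struct S11N<MyParam> {
    static void serialize(IOStream &os, const MyParam &p) { os << p.value; }
    static MyParam deserialize(IIStream &is) { MyParam p; is >> p.value; return p; }
};
} // namespace detail
} // namespace s11n
} // namespace gapi
} // namespace cv

// Round trip: the deserialize<> call lists the types the byte stream may contain.
std::vector<char> bytes = cv::gapi::serialize(cv::compile_args(MyParam{42}));
cv::GCompileArgs args   = cv::gapi::deserialize<cv::GCompileArgs, MyParam>(bytes);
MyParam restored        = cv::gapi::getCompileArg<MyParam>(args).value();
```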
---
 modules/gapi/CMakeLists.txt                   |  1 +
 modules/gapi/include/opencv2/gapi/gcommon.hpp | 32 ++++++++-
 modules/gapi/include/opencv2/gapi/s11n.hpp    | 67 +++++++++++++++----
 .../gapi/include/opencv2/gapi/s11n/base.hpp   | 36 ++++++++++
 modules/gapi/misc/python/shadow_gapi.hpp      |  2 +
 modules/gapi/src/api/s11n.cpp                 |  7 ++
 .../src/backends/common/serialization.cpp     | 16 ++++-
 .../src/backends/common/serialization.hpp     |  7 ++
 .../cpu/gapi_ocv_stateful_kernel_tests.cpp    |  8 +--
 modules/gapi/test/s11n/gapi_s11n_tests.cpp    | 22 ++++++
 10 files changed, 179 insertions(+), 19 deletions(-)
 create mode 100644 modules/gapi/include/opencv2/gapi/s11n/base.hpp

diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index 0278d9326a..88ddeead16 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -49,6 +49,7 @@ file(GLOB gapi_ext_hdrs
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/ocl/*.hpp"
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/own/*.hpp"
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/render/*.hpp"
+    "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/s11n/*.hpp"
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp"
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp"
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp"
diff --git a/modules/gapi/include/opencv2/gapi/gcommon.hpp b/modules/gapi/include/opencv2/gapi/gcommon.hpp
index e008fe4bf1..2b260ed07c 100644
--- a/modules/gapi/include/opencv2/gapi/gcommon.hpp
+++ b/modules/gapi/include/opencv2/gapi/gcommon.hpp
@@ -19,6 +19,7 @@
 #include <opencv2/gapi/own/exports.hpp>
 #include <opencv2/gapi/own/assert.hpp>
 #include <opencv2/gapi/render/render_types.hpp>
+#include <opencv2/gapi/s11n/base.hpp>
 
 namespace cv {
 
@@ -94,6 +95,15 @@ enum class GShape: int
     GFRAME,
 };
 
+namespace gapi {
+namespace s11n {
+namespace detail {
+template<typename T> struct wrap_serialize;
+} // namespace detail
+} // namespace s11n
+} // namespace gapi
+
+
 struct GCompileArg;
 
 namespace detail {
@@ -139,7 +149,7 @@ namespace detail {
  * passed in (a variadic template parameter pack) into a vector of
  * cv::GCompileArg objects.
  */
-struct GAPI_EXPORTS_W_SIMPLE GCompileArg
+struct GCompileArg
 {
 public:
     // NB: Required for python bindings
@@ -151,6 +161,7 @@ public:
     template<typename T, typename std::enable_if<!detail::is_compile_arg<T>::value, int>::type = 0>
     explicit GCompileArg(T &&t)
         : tag(detail::CompileArgTag<typename std::decay<T>::type>::tag())
+        , serializeF(&cv::gapi::s11n::detail::wrap_serialize<T>::serialize)
         , arg(t)
     {
     }
@@ -165,7 +176,13 @@ public:
         return util::any_cast<T>(arg);
     }
 
+    void serialize(cv::gapi::s11n::IOStream& os) const
+    {
+        serializeF(os, *this);
+    }
+
 private:
+    std::function<void(cv::gapi::s11n::IOStream&, const GCompileArg&)> serializeF;
     util::any arg;
 };
 
@@ -198,6 +215,19 @@ inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
     }
     return cv::util::optional<T>();
 }
+
+namespace s11n {
+namespace detail {
+template<typename T> struct wrap_serialize
+{
+    static void serialize(IOStream& os, const GCompileArg& arg)
+    {
+        using decayed_type = typename std::decay<T>::type;
+        S11N<decayed_type>::serialize(os, arg.get<decayed_type>());
+    }
+};
+} // namespace detail
+} // namespace s11n
 } // namespace gapi
 
 /**
diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp
index 0b61304c5c..e8a8dbcab4 100644
--- a/modules/gapi/include/opencv2/gapi/s11n.hpp
+++ b/modules/gapi/include/opencv2/gapi/s11n.hpp
@@ -10,6 +10,7 @@
 #include <vector>
 #include <map>
 #include <unordered_map>
+#include <opencv2/gapi/s11n/base.hpp>
 #include <opencv2/gapi/gcomputation.hpp>
 
 namespace cv {
@@ -17,14 +18,13 @@ namespace gapi {
 
 namespace detail {
     GAPI_EXPORTS cv::GComputation getGraph(const std::vector<char> &p);
-} // namespace detail
 
-namespace detail {
     GAPI_EXPORTS cv::GMetaArgs getMetaArgs(const std::vector<char> &p);
-} // namespace detail
 
-namespace detail {
     GAPI_EXPORTS cv::GRunArgs getRunArgs(const std::vector<char> &p);
+
+    template<typename... Types>
+    cv::GCompileArgs getCompileArgs(const std::vector<char> &p);
 } // namespace detail
 
 GAPI_EXPORTS std::vector<char> serialize(const cv::GComputation &c);
@@ -35,6 +35,7 @@ T deserialize(const std::vector<char> &p);
 
 //} //anonymous namespace
 
+GAPI_EXPORTS std::vector<char> serialize(const cv::GCompileArgs&);
 GAPI_EXPORTS std::vector<char> serialize(const cv::GMetaArgs&);
 GAPI_EXPORTS std::vector<char> serialize(const cv::GRunArgs&);
 
@@ -53,6 +54,11 @@ cv::GRunArgs deserialize(const std::vector<char> &p) {
     return detail::getRunArgs(p);
 }
 
+template<typename T, typename... Types> inline
+typename std::enable_if<std::is_same<T, GCompileArgs>::value, GCompileArgs>::
+type deserialize(const std::vector<char> &p) {
+    return detail::getCompileArgs<Types...>(p);
+}
 } // namespace gapi
 } // namespace cv
 
@@ -91,6 +97,10 @@ struct GAPI_EXPORTS IIStream {
     virtual IIStream& operator>> (std::string &) = 0;
 };
 
+namespace detail {
+GAPI_EXPORTS std::unique_ptr<IIStream> getInStream(const std::vector<char> &p);
+} // namespace detail
+
 ////////////////////////////////////////////////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////////////
 // S11N operators
@@ -174,17 +184,48 @@ IIStream& operator>> (IIStream& is, std::vector<T> &ts) {
     }
     return is;
 }
+} // namespace s11n
 
-namespace detail {
-    // Will be used along with default types if possible in specific cases (compile args, etc)
-    // Note: actual implementation is defined by user
-    template<typename T>
-    struct GAPI_EXPORTS S11N {
-        static void serialize(IOStream &, const T &) {}
-        static T deserialize(IIStream &) { T t; return t; }
-    };
+namespace detail
+{
+template<typename T> struct deserialize_arg;
+
+template<> struct deserialize_arg<std::tuple<>> {
+    static GCompileArg exec(cv::gapi::s11n::IIStream&, const std::string&) {
+        throw std::logic_error("Passed arg can't be deserialized!");
+    }
+};
+
+template<typename T, typename... Types>
+struct deserialize_arg<std::tuple<T, Types...>> {
+    static GCompileArg exec(cv::gapi::s11n::IIStream& is, const std::string& tag) {
+        if (tag == cv::detail::CompileArgTag<T>::tag()) {
+            return GCompileArg {
+                cv::gapi::s11n::detail::S11N<T>::deserialize(is)
+            };
+        }
+
+        return deserialize_arg<std::tuple<Types...>>::exec(is, tag);
+    }
+};
+
+template<typename... Types>
+cv::GCompileArgs getCompileArgs(const std::vector<char> &p) {
+    std::unique_ptr<cv::gapi::s11n::IIStream> pIs = cv::gapi::s11n::detail::getInStream(p);
+    cv::gapi::s11n::IIStream& is = *pIs;
+    cv::GCompileArgs args;
+
+    uint32_t sz = 0;
+    is >> sz;
+    for (uint32_t i = 0; i < sz; ++i) {
+        std::string tag;
+        is >> tag;
+        args.push_back(cv::gapi::detail::deserialize_arg<std::tuple<Types...>>::exec(is, tag));
+    }
+
+    return args;
+}
 } // namespace detail
-} // namespace s11n
 } // namespace gapi
 } // namespace cv
 
diff --git a/modules/gapi/include/opencv2/gapi/s11n/base.hpp b/modules/gapi/include/opencv2/gapi/s11n/base.hpp
new file mode 100644
index 0000000000..6bf5d5fb0f
--- /dev/null
+++ b/modules/gapi/include/opencv2/gapi/s11n/base.hpp
@@ -0,0 +1,36 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#ifndef OPENCV_GAPI_S11N_BASE_HPP
+#define OPENCV_GAPI_S11N_BASE_HPP
+
+#include <opencv2/gapi/own/assert.hpp>
+
+namespace cv {
+namespace gapi {
+namespace s11n {
+struct IOStream;
+struct IIStream;
+
+namespace detail {
+// Will be used along with default types if possible in specific cases (compile args, etc)
+// Note: actual implementation is defined by user
+template<typename T>
+struct S11N {
+    static void serialize(IOStream &, const T &) {
+        GAPI_Assert(false && "No serialization routine is provided!");
+    }
+    static T deserialize(IIStream &) {
+        GAPI_Assert(false && "No deserialization routine is provided!");
+    }
+};
+
+} // namespace detail
+} // namespace s11n
+} // namespace gapi
+} // namespace cv
+
+#endif // OPENCV_GAPI_S11N_BASE_HPP
diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp
index dab083def7..4f988440e8 100644
--- a/modules/gapi/misc/python/shadow_gapi.hpp
+++ b/modules/gapi/misc/python/shadow_gapi.hpp
@@ -3,6 +3,8 @@
 
 namespace cv
 {
+   struct GAPI_EXPORTS_W_SIMPLE GCompileArg { };
+
    GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg);
 
    class GAPI_EXPORTS_W_SIMPLE GProtoArg { };
diff --git a/modules/gapi/src/api/s11n.cpp b/modules/gapi/src/api/s11n.cpp
index 54a0850394..52c276fd5d 100644
--- a/modules/gapi/src/api/s11n.cpp
+++ b/modules/gapi/src/api/s11n.cpp
@@ -44,6 +44,13 @@ std::vector<char> cv::gapi::serialize(const cv::GRunArgs& ra)
     return os.data();
 }
 
+std::vector<char> cv::gapi::serialize(const cv::GCompileArgs& ca)
+{
+    cv::gapi::s11n::ByteMemoryOutStream os;
+    serialize(os, ca);
+    return os.data();
+}
+
 // FIXME: This function should move from S11N to GRunArg-related entities.
 // it has nothing to do with the S11N as it is
 cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results)
diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp
index c0b3281449..ca73d29ffb 100644
--- a/modules/gapi/src/backends/common/serialization.cpp
+++ b/modules/gapi/src/backends/common/serialization.cpp
@@ -329,6 +329,13 @@ IIStream& operator>> (IIStream& is,       cv::gapi::wip::draw::Line &l) {
 
 // G-API types /////////////////////////////////////////////////////////////////
 
+IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg)
+{
+    os << arg.tag;
+    arg.serialize(os);
+    return os;
+}
+
 // Stubs (empty types)
 
 IOStream& operator<< (IOStream& os, cv::util::monostate  ) {return os;}
@@ -865,6 +872,14 @@ IIStream& ByteMemoryInStream::operator>> (std::string& str) {
     return *this;
 }
 
+GAPI_EXPORTS std::unique_ptr<IIStream> detail::getInStream(const std::vector<char> &p) {
+    return std::unique_ptr<ByteMemoryInStream>(new ByteMemoryInStream(p));
+}
+
+GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca) {
+    os << ca;
+}
+
 GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma) {
     os << ma;
 }
@@ -882,7 +897,6 @@ GAPI_EXPORTS GRunArgs run_args_deserialize(IIStream& is) {
     return s;
 }
 
-
 } // namespace s11n
 } // namespace gapi
 } // namespace cv
diff --git a/modules/gapi/src/backends/common/serialization.hpp b/modules/gapi/src/backends/common/serialization.hpp
index 4c60e71d87..e2aa56c45b 100644
--- a/modules/gapi/src/backends/common/serialization.hpp
+++ b/modules/gapi/src/backends/common/serialization.hpp
@@ -40,6 +40,8 @@ struct GSerialized {
 
 // G-API types /////////////////////////////////////////////////////////////////
 
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg);
+
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::util::monostate  );
 GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::util::monostate &);
 
@@ -268,6 +270,11 @@ public:
     virtual IIStream& operator>> (std::string &) override;
 };
 
+namespace detail {
+GAPI_EXPORTS std::unique_ptr<IIStream> getInStream(const std::vector<char> &p);
+} // namespace detail
+
+GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca);
 GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma);
 GAPI_EXPORTS void serialize(IOStream& os, const cv::GRunArgs &ra);
 GAPI_EXPORTS GMetaArgs meta_args_deserialize(IIStream& is);
diff --git a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp
index 75ca7989e0..fe6a1f94af 100644
--- a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp
+++ b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp
@@ -21,7 +21,7 @@ namespace opencv_test
     {
         std::string method;
     };
-}
+} // namespace opencv_test
 
 namespace cv
 {
@@ -31,11 +31,11 @@ namespace cv
         {
             static const char* tag()
             {
-                return "org.opencv.test..background_substractor_state_params";
+                return "org.opencv.test.background_substractor_state_params";
             }
         };
-    }
-}
+    } // namespace detail
+} // namespace cv
 
 namespace opencv_test
 {
diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
index 10fe586188..1a4faec12c 100644
--- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp
+++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
@@ -34,6 +34,17 @@ namespace detail {
 } // namespace gapi
 } // namespace cv
 
+
+namespace cv {
+namespace detail {
+template<> struct CompileArgTag<MyCustomType> {
+    static const char* tag() {
+        return "org.opencv.test.mycustomtype";
+    }
+};
+} // namespace detail
+} // namespace cv
+
 namespace opencv_test {
 
 struct S11N_Basic: public ::testing::Test {
@@ -511,4 +522,15 @@ TEST_F(S11N_Basic, Test_Custom_Type) {
     MyCustomType new_var = cv::gapi::s11n::detail::S11N<MyCustomType>::deserialize(is);
     EXPECT_EQ(var, new_var);
 }
+
+TEST_F(S11N_Basic, Test_Custom_CompileArg) {
+    MyCustomType customVar{1248, "World", {1280, 720, 640, 480}, {{5, 32434142342}, {7, 34242432}}};
+
+    std::vector<char> sArgs = cv::gapi::serialize(cv::compile_args(customVar));
+
+    GCompileArgs dArgs = cv::gapi::deserialize<GCompileArgs, MyCustomType>(sArgs);
+
+    MyCustomType dCustomVar = cv::gapi::getCompileArg<MyCustomType>(dArgs).value();
+    EXPECT_EQ(customVar, dCustomVar);
+}
 } // namespace opencv_test

From ae265a48c75948a2f089c4e3dc4fbc84a9b77ee3 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin <maksim.shabunin@gmail.com>
Date: Thu, 8 Oct 2020 11:50:07 +0300
Subject: [PATCH 007/152] Doc: fixed warnings when CUDA modules are missing

---
 .../config_reference/config_reference.markdown        | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/doc/tutorials/introduction/config_reference/config_reference.markdown b/doc/tutorials/introduction/config_reference/config_reference.markdown
index 5107af6a8a..a6bde80102 100644
--- a/doc/tutorials/introduction/config_reference/config_reference.markdown
+++ b/doc/tutorials/introduction/config_reference/config_reference.markdown
@@ -247,15 +247,18 @@ When `WITH_` option is enabled:
 
 `WITH_CUDA` (default: _OFF_)
 
-Many algorithms have been implemented using CUDA acceleration, these functions are located in separate modules: @ref cuda. CUDA toolkit must be installed from the official NVIDIA site as a prerequisite. For cmake versions older than 3.9 OpenCV uses own `cmake/FindCUDA.cmake` script, for newer versions - the one packaged with CMake. Additional options can be used to control build process, e.g. `CUDA_GENERATION` or `CUDA_ARCH_BIN`. These parameters are not documented yet, please consult with the `cmake/OpenCVDetectCUDA.cmake` script for details.
-
-Some tutorials can be found in the corresponding section: @ref tutorial_table_of_content_gpu
+Many algorithms have been implemented using CUDA acceleration; these functions are located in separate modules. The CUDA toolkit must be installed from the official NVIDIA site as a prerequisite. For CMake versions older than 3.9 OpenCV uses its own `cmake/FindCUDA.cmake` script; for newer versions it uses the one packaged with CMake. Additional options can be used to control the build process, e.g. `CUDA_GENERATION` or `CUDA_ARCH_BIN`. These parameters are not documented yet; please consult the `cmake/OpenCVDetectCUDA.cmake` script for details.
 
 @note Since OpenCV version 4.0 all CUDA-accelerated algorithm implementations have been moved to the _opencv_contrib_ repository. To build _opencv_ and _opencv_contrib_ together check @ref tutorial_config_reference_general_contrib.
 
+@cond CUDA_MODULES
+@note Some tutorials can be found in the corresponding section: @ref tutorial_table_of_content_gpu
+@see @ref cuda
+@endcond
+
 @see https://en.wikipedia.org/wiki/CUDA
 
-TODO: other options: `WITH_CUFFT`, `WITH_CUBLAS`, WITH_NVCUVID`?
+TODO: other options: `WITH_CUFFT`, `WITH_CUBLAS`, `WITH_NVCUVID`?
 
 ### OpenCL support
 

From 6da05f708639bae5ee9b2b338793bfa3be2d764b Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 6 Oct 2020 23:40:27 +0000
Subject: [PATCH 008/152] dnn(test): update tests for OpenVINO 2021.1

---
 modules/dnn/perf/perf_net.cpp              | 18 +++++++++++++++++-
 modules/dnn/test/test_darknet_importer.cpp |  5 +++++
 modules/dnn/test/test_torch_importer.cpp   |  4 ++++
 3 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp
index 23ece025e7..600193915d 100644
--- a/modules/dnn/perf/perf_net.cpp
+++ b/modules/dnn/perf/perf_net.cpp
@@ -111,6 +111,10 @@ PERF_TEST_P_(DNNTestNetwork, ENet)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
         (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021010000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("");
+#endif
     processNet("dnn/Enet-model-best.net", "", "enet.yml",
             Mat(cv::Size(512, 256), CV_32FC3));
 }
@@ -202,6 +206,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
         throw SkipTestException("Test is disabled in OpenVINO 2020.4");
 #endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000)  // nGraph compilation failure
+    if (target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
 
     Mat sample = imread(findDataFile("dnn/dog416.png"));
     cvtColor(sample, sample, COLOR_BGR2RGB);
@@ -214,7 +222,7 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv4)
 {
     if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
-    if (target == DNN_TARGET_MYRIAD)
+    if (target == DNN_TARGET_MYRIAD)  // not enough resources
         throw SkipTestException("");
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)  // nGraph compilation failure
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
@@ -233,6 +241,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv4_tiny)
 {
     if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000)  // nGraph compilation failure
+    if (target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     Mat sample = imread(findDataFile("dnn/dog416.png"));
     cvtColor(sample, sample, COLOR_BGR2RGB);
     Mat inp;
@@ -263,6 +275,10 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_Faster_RCNN)
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         throw SkipTestException("Test is disabled in OpenVINO 2019R2");
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("Test is disabled in OpenVINO 2021.1 / MYRIAD");
 #endif
     if (backend == DNN_BACKEND_HALIDE ||
         (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index 4986e8e399..a47e771084 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -625,6 +625,11 @@ TEST_P(Test_Darknet_nets, YOLOv4_tiny)
         target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB
     );
 
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000)  // nGraph compilation failure
+    if (target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+
     const double confThreshold = 0.5;
     // batchId, classId, confidence, left, top, right, bottom
     const int N0 = 2;
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index 3be22d6d25..9eb89f3d78 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -359,6 +359,10 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
         if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
         throw SkipTestException("");
     }
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021010000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 #endif
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
     {

From e87a0baa4b8e14f0b5534954a191358bea62f650 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Thu, 8 Oct 2020 20:27:03 +0000
Subject: [PATCH 009/152] dnn(test): enable tests from issue 17953

---
 modules/dnn/test/test_layers.cpp | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 085e5a51b8..e61b754b86 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -2349,13 +2349,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
     if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
 
-    // bug: https://github.com/opencv/opencv/issues/17953
-    if (eltwiseOp == "sum" && actType == "ChannelsPReLU" && bias_term == false &&
-        backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-    {
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-    }
-
     // bug: https://github.com/opencv/opencv/issues/17964
     if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
@@ -2442,13 +2435,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
     if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
 
-    // bug: https://github.com/opencv/opencv/issues/17953
-    if (actType == "ChannelsPReLU" && bias_term == false &&
-        backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-    {
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-    }
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayer(activationParams.name, activationParams.type, activationParams);

From 76be3529f491aa5835602021c65ef99975837cac Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Fri, 9 Oct 2020 01:12:25 +0300
Subject: [PATCH 010/152] Merge pull request #18419 from
 TolyaTalamanov:at/generic-inference

[G-API] Introduce generic version for cv::gapi::infer

* Introduce generic infer

* Move Generic to infer.hpp

* Remove num_outs

* Fix windows warnings

* Address review comments

* Fix doxygen

* Add comment

* Address review comments

* Standalone ifdef in ginfer.cpp

* Fix test
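
A minimal usage sketch of the generic (tag-based) inference introduced here,
mirroring the new test; the network tag, file paths, and layer names below are
illustrative:

```
cv::GMat in;
cv::GInferInputs inputs;
inputs["data"] = in;                        // bind graph input to an input layer by name

auto outputs    = cv::gapi::infer<cv::gapi::Generic>("my-net", inputs);
cv::GMat age    = outputs.at("age_conv3");  // outputs are also fetched by layer name
cv::GMat gender = outputs.at("prob");

cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));

// The actual network is supplied at compile time via the tag-matched Params<Generic>.
cv::gapi::ie::Params<cv::gapi::Generic> pp{"my-net", "model.xml", "model.bin", "CPU"};

cv::Mat in_mat(cv::Size(320, 240), CV_8UC3), out_age, out_gender;
comp.apply(cv::gin(in_mat), cv::gout(out_age, out_gender),
           cv::compile_args(cv::gapi::networks(pp)));
```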
---
 modules/gapi/include/opencv2/gapi/gcall.hpp   |  9 ++-
 modules/gapi/include/opencv2/gapi/infer.hpp   | 78 +++++++++++++++++++
 .../gapi/include/opencv2/gapi/infer/ie.hpp    | 32 +++++++-
 modules/gapi/src/api/gcall.cpp                | 10 +++
 modules/gapi/src/api/gcall_priv.hpp           |  3 +-
 modules/gapi/src/api/ginfer.cpp               | 30 +++++++
 modules/gapi/src/backends/ie/giebackend.cpp   | 18 ++++-
 modules/gapi/src/compiler/gmodel.cpp          |  8 +-
 modules/gapi/src/compiler/gmodel.hpp          |  7 +-
 modules/gapi/src/compiler/gmodelbuilder.cpp   |  2 +-
 .../gapi/test/infer/gapi_infer_ie_test.cpp    | 53 +++++++++++++
 11 files changed, 237 insertions(+), 13 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/gcall.hpp b/modules/gapi/include/opencv2/gapi/gcall.hpp
index ed5ba5fde8..511eca1408 100644
--- a/modules/gapi/include/opencv2/gapi/gcall.hpp
+++ b/modules/gapi/include/opencv2/gapi/gcall.hpp
@@ -56,11 +56,16 @@ public:
     Priv& priv();
     const Priv& priv() const;
 
-protected:
-    std::shared_ptr<Priv> m_priv;
+    // GKernel and params can be modified; this is needed for infer<Generic>,
+    // because information about output shapes is not available at compile time
+    GKernel& kernel();
+    cv::util::any& params();
 
     void setArgs(std::vector<GArg> &&args);
 
+protected:
+    std::shared_ptr<Priv> m_priv;
+
     // Public versions return a typed array or opaque, those are implementation details
     detail::GArrayU yieldArray(int output = 0);
     detail::GOpaqueU yieldOpaque(int output = 0);
diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp
index 50086dd848..4fdd2df875 100644
--- a/modules/gapi/include/opencv2/gapi/infer.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer.hpp
@@ -121,6 +121,45 @@ struct GInferBase {
     }
 };
 
+// Struct stores network input/output names.
+// Used by infer<Generic>
+struct InOutInfo
+{
+    std::vector<std::string> in_names;
+    std::vector<std::string> out_names;
+};
+
+/**
+ * @{
+ * @brief G-API object used to collect network inputs
+ */
+class GAPI_EXPORTS GInferInputs
+{
+public:
+    cv::GMat& operator[](const std::string& name);
+    const std::unordered_map<std::string, cv::GMat>& getBlobs() const;
+
+private:
+    std::unordered_map<std::string, cv::GMat> in_blobs;
+};
+/** @} */
+
+/**
+ * @{
+ * @brief G-API object used to collect network outputs
+ */
+struct GAPI_EXPORTS GInferOutputs
+{
+public:
+    GInferOutputs(std::shared_ptr<cv::GCall> call);
+    cv::GMat at(const std::string& name);
+
+private:
+    std::shared_ptr<cv::GCall> m_call;
+    InOutInfo* m_info = nullptr;
+    std::unordered_map<std::string, cv::GMat> out_blobs;
+};
+/** @} */
 
 // Base "Infer list" kernel.
 // All notes from "Infer" kernel apply here as well.
@@ -254,6 +293,45 @@ typename Net::Result infer(Args&&... args) {
     return GInfer<Net>::on(std::forward<Args>(args)...);
 }
 
+/**
+ * @brief Special network type
+ */
+struct Generic { };
+
+/**
+ * @brief Calculates response for generic network
+ *
+ * @param tag a network tag
+ * @param inputs network's inputs
+ * @return a GInferOutputs
+ */
+template<typename T = Generic> GInferOutputs
+infer(const std::string& tag, const GInferInputs& inputs)
+{
+    std::vector<GArg> input_args;
+    std::vector<std::string> input_names;
+
+    const auto& blobs = inputs.getBlobs();
+    for (auto&& p : blobs)
+    {
+        input_names.push_back(p.first);
+        input_args.emplace_back(p.second);
+    }
+
+    GKinds kinds(blobs.size(), cv::detail::OpaqueKind::CV_MAT);
+    auto call = std::make_shared<cv::GCall>(GKernel{
+                GInferBase::id(),
+                tag,
+                GInferBase::getOutMeta,
+                {}, // outShape will be filled later
+                std::move(kinds)
+            });
+
+    call->setArgs(std::move(input_args));
+    call->params() = InOutInfo{input_names, {}};
+
+    return GInferOutputs{std::move(call)};
+}
 
 } // namespace gapi
 } // namespace cv
diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
index c6d7f272a8..8421d9e2c9 100644
--- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
@@ -17,6 +17,7 @@
 
 #include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
 #include <opencv2/gapi/gkernel.hpp> // GKernelPackage
+#include <opencv2/gapi/infer.hpp>   // Generic
 
 namespace cv {
 namespace gapi {
@@ -58,6 +59,8 @@ namespace detail {
         // (e.g. topology's partial execution)
         std::size_t num_in;  // How many inputs are defined in the operation
         std::size_t num_out; // How many outputs are defined in the operation
+
+        bool is_generic;
     };
 } // namespace detail
 
@@ -80,7 +83,7 @@ public:
         : desc{ model, weights, device, {}, {}, {}
               , std::tuple_size<typename Net::InArgs>::value  // num_in
               , std::tuple_size<typename Net::OutArgs>::value // num_out
-              } {
+              , false} {
     };
 
     Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
@@ -107,13 +110,34 @@ public:
     }
 
     // BEGIN(G-API's network parametrization API)
-    GBackend      backend() const { return cv::gapi::ie::backend();  }
-    std::string   tag()     const { return Net::tag(); }
-    cv::util::any params()  const { return { desc }; }
+    GBackend      backend()    const { return cv::gapi::ie::backend();  }
+    std::string   tag()        const { return Net::tag(); }
+    cv::util::any params()     const { return { desc }; }
+    // END(G-API's network parametrization API)
+
+protected:
+    detail::ParamDesc desc;
+};
+
+template<>
+class Params<cv::gapi::Generic> {
+public:
+    Params(const std::string& tag,
+           const std::string &model,
+           const std::string &weights,
+           const std::string &device)
+        : desc{ model, weights, device, {}, {}, {}, 0u, 0u, true}, m_tag(tag) {
+    };
+
+    // BEGIN(G-API's network parametrization API)
+    GBackend      backend()    const { return cv::gapi::ie::backend();  }
+    std::string   tag()        const { return m_tag; }
+    cv::util::any params()     const { return { desc }; }
     // END(G-API's network parametrization API)
 
 protected:
     detail::ParamDesc desc;
+    std::string m_tag;
 };
 
 } // namespace ie
diff --git a/modules/gapi/src/api/gcall.cpp b/modules/gapi/src/api/gcall.cpp
index 6f5f65bbfd..6a2121bd36 100644
--- a/modules/gapi/src/api/gcall.cpp
+++ b/modules/gapi/src/api/gcall.cpp
@@ -78,3 +78,13 @@ const cv::GCall::Priv& cv::GCall::priv() const
 {
     return *m_priv;
 }
+
+cv::GKernel& cv::GCall::kernel()
+{
+    return m_priv->m_k;
+}
+
+cv::util::any& cv::GCall::params()
+{
+    return m_priv->m_params;
+}
diff --git a/modules/gapi/src/api/gcall_priv.hpp b/modules/gapi/src/api/gcall_priv.hpp
index edc2c225dc..b142432c78 100644
--- a/modules/gapi/src/api/gcall_priv.hpp
+++ b/modules/gapi/src/api/gcall_priv.hpp
@@ -42,10 +42,11 @@ class GCall::Priv
 {
 public:
     std::vector<GArg> m_args;
-    const GKernel     m_k;
+    GKernel     m_k;
 
     // TODO: Rename to "constructionNode" or smt to reflect its lifetime
     GNode             m_node;
+    cv::util::any     m_params;
 
     explicit Priv(const GKernel &k);
 };
diff --git a/modules/gapi/src/api/ginfer.cpp b/modules/gapi/src/api/ginfer.cpp
index 98eeef5ab6..31d851b8e6 100644
--- a/modules/gapi/src/api/ginfer.cpp
+++ b/modules/gapi/src/api/ginfer.cpp
@@ -25,3 +25,33 @@ std::vector<cv::gapi::GBackend> cv::gapi::GNetPackage::backends() const {
     for (const auto &nn : networks) unique_set.insert(nn.backend);
     return std::vector<cv::gapi::GBackend>(unique_set.begin(), unique_set.end());
 }
+
+// FIXME: Inference API is currently only available in full mode
+#if !defined(GAPI_STANDALONE)
+
+cv::GMat& cv::GInferInputs::operator[](const std::string& name) {
+    return in_blobs[name];
+}
+
+const std::unordered_map<std::string, cv::GMat>& cv::GInferInputs::getBlobs() const {
+    return in_blobs;
+}
+
+cv::GInferOutputs::GInferOutputs(std::shared_ptr<cv::GCall> call)
+    : m_call(std::move(call)), m_info(cv::util::any_cast<InOutInfo>(&m_call->params()))
+{
+};
+
+cv::GMat cv::GInferOutputs::at(const std::string& name)
+{
+    auto it = out_blobs.find(name);
+    if (it == out_blobs.end()) {
+        // FIXME: Avoid modifying GKernel
+        m_call->kernel().outShapes.push_back(cv::GShape::GMAT);
+        int out_idx = static_cast<int>(out_blobs.size());
+        it = out_blobs.emplace(name, m_call->yield(out_idx)).first;
+        m_info->out_names.push_back(name);
+    }
+    return it->second;
+};
+#endif // GAPI_STANDALONE
diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp
index 1565d03aec..b7bda2fe9f 100644
--- a/modules/gapi/src/backends/ie/giebackend.cpp
+++ b/modules/gapi/src/backends/ie/giebackend.cpp
@@ -721,9 +721,23 @@ namespace {
             // FIXME: Introduce a DNNBackend interface which'd specify
             // the framework for this???
             GIEModel gm(gr);
-            const auto &np = gm.metadata(nh).get<NetworkParams>();
-            const auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
+            auto &np = gm.metadata(nh).get<NetworkParams>();
+            auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
             const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
+
+            GModel::Graph model(gr);
+            auto& op = model.metadata(nh).get<Op>();
+
+            // NB: In case of generic infer, info about in/out names is stored in the operation (op.params)
+            if (pp.is_generic)
+            {
+                auto& info      = cv::util::any_cast<cv::InOutInfo>(op.params);
+                pp.input_names  = info.in_names;
+                pp.output_names = info.out_names;
+                pp.num_in       = info.in_names.size();
+                pp.num_out      = info.out_names.size();
+            }
+
             gm.metadata(nh).set(IEUnit{pp});
             gm.metadata(nh).set(IECallable{ki.run});
             gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
diff --git a/modules/gapi/src/compiler/gmodel.cpp b/modules/gapi/src/compiler/gmodel.cpp
index 39dc1da33b..b5b76fd1c9 100644
--- a/modules/gapi/src/compiler/gmodel.cpp
+++ b/modules/gapi/src/compiler/gmodel.cpp
@@ -23,12 +23,16 @@
 
 namespace cv { namespace gimpl {
 
-ade::NodeHandle GModel::mkOpNode(GModel::Graph &g, const GKernel &k, const std::vector<GArg> &args, const std::string &island)
+ade::NodeHandle GModel::mkOpNode(GModel::Graph &g,
+                                 const GKernel &k,
+                                 const std::vector<GArg> &args,
+                                 const cv::util::any &params,
+                                 const std::string &island)
 {
     ade::NodeHandle op_h = g.createNode();
     g.metadata(op_h).set(NodeType{NodeType::OP});
     //These extra empty {} are to please GCC (-Wmissing-field-initializers)
-    g.metadata(op_h).set(Op{k, args, {}, {}});
+    g.metadata(op_h).set(Op{k, args, {}, {}, params});
     if (!island.empty())
         g.metadata(op_h).set(Island{island});
     return op_h;
diff --git a/modules/gapi/src/compiler/gmodel.hpp b/modules/gapi/src/compiler/gmodel.hpp
index 8f78ba49b7..5f02e58354 100644
--- a/modules/gapi/src/compiler/gmodel.hpp
+++ b/modules/gapi/src/compiler/gmodel.hpp
@@ -61,6 +61,7 @@ struct Op
     std::vector<RcDesc> outs; // TODO: Introduce a new type for resource references
 
     cv::gapi::GBackend  backend;
+    cv::util::any params; // Operation specific information
 };
 
 struct Data
@@ -262,7 +263,11 @@ namespace GModel
     // GAPI_EXPORTS for tests
     GAPI_EXPORTS void init (Graph& g);
 
-    GAPI_EXPORTS ade::NodeHandle mkOpNode(Graph &g, const GKernel &k, const std::vector<GArg>& args, const std::string &island);
+    GAPI_EXPORTS ade::NodeHandle mkOpNode(Graph &g,
+                                          const GKernel &k,
+                                          const std::vector<GArg>& args,
+                                          const cv::util::any& params,
+                                          const std::string &island);
     // Isn't used by the framework or default backends, required for external backend development
     GAPI_EXPORTS ade::NodeHandle mkDataNode(Graph &g, const GShape shape);
 
diff --git a/modules/gapi/src/compiler/gmodelbuilder.cpp b/modules/gapi/src/compiler/gmodelbuilder.cpp
index 87e9ab55b8..80abadd9c6 100644
--- a/modules/gapi/src/compiler/gmodelbuilder.cpp
+++ b/modules/gapi/src/compiler/gmodelbuilder.cpp
@@ -286,7 +286,7 @@ ade::NodeHandle cv::gimpl::GModelBuilder::put_OpNode(const cv::GNode &node)
     {
         GAPI_Assert(node.shape() == GNode::NodeShape::CALL);
         const auto &call_p = node.call().priv();
-        auto nh = cv::gimpl::GModel::mkOpNode(m_gm, call_p.m_k, call_p.m_args, node_p.m_island);
+        auto nh = cv::gimpl::GModel::mkOpNode(m_gm, call_p.m_k, call_p.m_args, call_p.m_params, node_p.m_island);
         m_graph_ops[&node_p] = nh;
         return nh;
     }
diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
index 74d8558909..3125705365 100644
--- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp
+++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
@@ -350,6 +350,59 @@ TEST(DISABLED_TestTwoIENNPipeline, InferBasicImage)
     normAssert(cv::gapi::ie::util::to_ocv(ie_gender2), gapi_gender2, "Test gender output 2");
 }
 
+TEST(TestAgeGenderIE, GenericInfer)
+{
+    initDLDTDataPath();
+
+    cv::gapi::ie::detail::ParamDesc params;
+    params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    params.device_id = "CPU";
+
+    cv::Mat in_mat(cv::Size(320, 240), CV_8UC3);
+    cv::randu(in_mat, 0, 255);
+
+    cv::Mat gapi_age, gapi_gender;
+
+    // Load & run IE network
+    IE::Blob::Ptr ie_age, ie_gender;
+    {
+        auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
+        auto net    = cv::gimpl::ie::wrap::readNetwork(params);
+        setNetParameters(net);
+        auto this_network  = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
+        auto infer_request = this_network.CreateInferRequest();
+        infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
+        infer_request.Infer();
+        ie_age    = infer_request.GetBlob("age_conv3");
+        ie_gender = infer_request.GetBlob("prob");
+    }
+
+    // Configure & run G-API
+    cv::GMat in;
+    GInferInputs inputs;
+    inputs["data"] = in;
+
+    auto outputs = cv::gapi::infer<cv::gapi::Generic>("age-gender-generic", inputs);
+
+    auto age    = outputs.at("age_conv3");
+    auto gender = outputs.at("prob");
+
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    cv::gapi::ie::Params<cv::gapi::Generic> pp{"age-gender-generic",
+                                                params.model_path,
+                                                params.weights_path,
+                                                params.device_id};
+
+    comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender),
+               cv::compile_args(cv::gapi::networks(pp)));
+
+    // Validate with IE itself (avoid DNN module dependency here)
+    normAssert(cv::gapi::ie::util::to_ocv(ie_age),    gapi_age,    "Test age output"   );
+    normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
+}
+
 } // namespace opencv_test
 
 #endif //  HAVE_INF_ENGINE

From 718dd9f170041995fe84ec01a614e9465b9f063f Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 9 Oct 2020 11:57:49 +0000
Subject: [PATCH 011/152] dnn(opencl): bypass unsupported fusion cases

---
 modules/dnn/src/dnn.cpp                      |  4 +++-
 modules/dnn/src/layers/convolution_layer.cpp | 10 ++++++++++
 modules/dnn/test/test_common.impl.hpp        |  4 ++--
 modules/dnn/test/test_layers.cpp             | 18 +++---------------
 4 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index c50dae7967..9ee688f497 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2460,10 +2460,12 @@ struct Net::Impl : public detail::NetImplBase
                                     if( nextData )
                                         nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
 
+                                    Ptr<PowerLayer> activ_power;
                                     if( !nextActivLayer.empty() &&
                                             (!nextData->type.compare("ReLU") ||
                                              !nextData->type.compare("ChannelsPReLU") ||
-                                             !nextData->type.compare("Power")) &&
+                                             (!nextData->type.compare("Power") && (activ_power = nextActivLayer.dynamicCast<PowerLayer>()) && activ_power->scale == 1.0f)
+                                            ) &&
                                             currLayer->setActivation(nextActivLayer) )
                                     {
                                         CV_Assert_N(biasLayerData->outputBlobsWrappers.size() == 1, ld.inputBlobsWrappers.size() == 1);
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index ddc318def2..206ce72fa0 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -46,6 +46,8 @@
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 
+#include <opencv2/core/utils/logger.hpp>
+
 #include "opencv2/core/hal/hal.hpp"
 #include "opencv2/core/hal/intrin.hpp"
 #include <iostream>
@@ -371,6 +373,14 @@ public:
             Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
             if (!activ_power.empty())
             {
+                if (activ_power->scale != 1.0f)  // not supported well by implementation, #17964
+                {
+                    // FIXIT no way to check number of blobs (like, eltwise input)
+                    CV_LOG_INFO(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
+                    activ.release();
+                    newActiv = false;
+                    return false;
+                }
                 if (activ_power->scale != 1.f || activ_power->shift != 0.f)
                 {
                     const int outCh = blobs[0].size[0];
diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp
index 559b74f126..e55e6cb7b3 100644
--- a/modules/dnn/test/test_common.impl.hpp
+++ b/modules/dnn/test/test_common.impl.hpp
@@ -63,10 +63,10 @@ void normAssert(
         double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
 {
     double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
-    EXPECT_LE(normL1, l1) << comment;
+    EXPECT_LE(normL1, l1) << comment << "  |ref| = " << cvtest::norm(ref, cv::NORM_INF);
 
     double normInf = cvtest::norm(ref, test, cv::NORM_INF);
-    EXPECT_LE(normInf, lInf) << comment;
+    EXPECT_LE(normInf, lInf) << comment << "  |ref| = " << cvtest::norm(ref, cv::NORM_INF);
 }
 
 std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index e61b754b86..3872f562ef 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -2219,10 +2219,6 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
     Backend backendId = get<0>(get<2>(GetParam()));
     Target targetId = get<1>(get<2>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayerToPrev(activationParams.name, activationParams.type, activationParams);
@@ -2235,7 +2231,7 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // all activations are fused
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
                 expectedFusedLayers.push_back(activId);
         }
     }
@@ -2349,10 +2345,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
     if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams);
@@ -2369,7 +2361,7 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/)
             {
                 expectedFusedLayers.push_back(eltwiseId);
                 expectedFusedLayers.push_back(activId);
@@ -2431,10 +2423,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayer(activationParams.name, activationParams.type, activationParams);
@@ -2451,7 +2439,7 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // activation fused with convolution
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
                 expectedFusedLayers.push_back(activId); // activation fused with convolution
         }
     }

From cdcf7e62f37f8476eb209439fe94b51e8a93846c Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 9 Oct 2020 16:33:48 +0000
Subject: [PATCH 012/152] dnn(opencl): bypass unsupported fusion cases 2
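
The fusion checks in dnn.cpp are restructured below around a
`while (...) { ... break; }` block used as an `if` with early exits. A minimal,
self-contained sketch of the idiom (the names and checks are illustrative, not
the actual fusion conditions):

```
#include <cstdio>

// Hypothetical stand-ins for the real fusion preconditions.
static bool preconditionA(int x) { return x > 0; }
static bool preconditionB(int x) { return x % 2 == 0; }

static void tryFuse(int candidate)
{
    // 'while' with an unconditional trailing 'break' behaves like 'if',
    // but each failed check can bail out with a plain 'break' instead of
    // nesting ever deeper 'if' blocks.
    while (candidate != 0)  // semantic of 'if'
    {
        if (!preconditionA(candidate))
            break;
        if (!preconditionB(candidate))
            break;
        std::printf("fused %d\n", candidate);  // all checks passed
        break;  // mandatory: run the body at most once
    }
}
```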

---
 modules/dnn/src/dnn.cpp                      | 59 +++++++++++++++-----
 modules/dnn/src/layers/convolution_layer.cpp |  2 +-
 modules/dnn/test/test_layers.cpp             |  8 +--
 3 files changed, 50 insertions(+), 19 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 9ee688f497..c789638793 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2413,14 +2413,42 @@ struct Net::Impl : public detail::NetImplBase
                 }
 
                 // fuse convolution layer followed by eltwise + relu
-                if ( IS_DNN_OPENCL_TARGET(preferableTarget) && ld.layerInstance->type == "Convolution" )
+                while (nextData && IS_DNN_OPENCL_TARGET(preferableTarget) && ld.layerInstance->type == "Convolution")  // semantic of 'if'
                 {
-                    Ptr<EltwiseLayer> nextEltwiseLayer;
-                    if( nextData )
-                        nextEltwiseLayer = nextData->layerInstance.dynamicCast<EltwiseLayer>();
+                    Ptr<EltwiseLayer> nextEltwiseLayer = nextData->layerInstance.dynamicCast<EltwiseLayer>();
+                    if (nextEltwiseLayer.empty())
+                        break;
+
+                    if (pinsToKeep.count(lpNext) != 0)
+                        break;
+                    if (nextData->inputBlobsId.size() != 2)
+                        break;
+
+                    if (!nextData->params.has("operation") || nextData->params.get<String>("operation").toLowerCase() == "sum")
+                    {
+                        if (nextData->params.has("coeff"))
+                        {
+                            DictValue paramCoeff = nextData->params.get("coeff");
+                            int n = paramCoeff.size();
+                            bool isCoeffOneOne = (n == 2);
+                            for (int i = 0; isCoeffOneOne && i < n; i++)
+                            {
+                                float c = paramCoeff.get<float>(i);
+                                isCoeffOneOne &= (c == 1.0f);
+                            }
+                            if (!isCoeffOneOne)
+                            {
+                                CV_LOG_DEBUG(NULL, "DNN/OpenCL: only fusion of 'Sum' without coeffs (or with coeffs {1.0, 1.0}) is supported");
+                                break;
+                            }
+                        }
+                    }
+                    else
+                    {
+                        CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion with eltwise operation is not supported: " << nextData->params.get<String>("operation"));
+                        break;
+                    }
 
-                    if( !nextEltwiseLayer.empty() && pinsToKeep.count(lpNext) == 0 &&
-                        nextData && nextData->inputBlobsId.size() == 2 )
                     {
                         LayerData *eltwiseData = nextData;
 
@@ -2517,6 +2545,8 @@ struct Net::Impl : public detail::NetImplBase
                             }
                         }
                     }
+
+                    break;
                 }
             }
 
@@ -2698,11 +2728,11 @@ struct Net::Impl : public detail::NetImplBase
 
         Ptr<Layer> layer = ld.layerInstance;
 
-        TickMeter tm;
-        tm.start();
-
         if( !ld.skip )
         {
+            TickMeter tm;
+            tm.start();
+
             std::map<int, Ptr<BackendNode> >::iterator it = ld.backendNodes.find(preferableBackend);
             if (preferableBackend == DNN_BACKEND_OPENCV || it == ld.backendNodes.end() || it->second.empty())
             {
@@ -2881,12 +2911,15 @@ struct Net::Impl : public detail::NetImplBase
                     CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
                 }
             }
+
+            tm.stop();
+            int64 t = tm.getTimeTicks();
+            layersTimings[ld.id] = (t > 0) ? t : t + 1;  // zero for skipped layers only
         }
         else
-            tm.reset();
-
-        tm.stop();
-        layersTimings[ld.id] = tm.getTimeTicks();
+        {
+            layersTimings[ld.id] = 0;
+        }
 
         ld.flag = 1;
     }
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 206ce72fa0..473c07b755 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -376,7 +376,7 @@ public:
                 if (activ_power->scale != 1.0f)  // not supported well by implementation, #17964
                 {
                     // FIXIT no way to check number of blobs (like, eltwise input)
-                    CV_LOG_INFO(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
+                    CV_LOG_DEBUG(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
                     activ.release();
                     newActiv = false;
                     return false;
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 3872f562ef..327f3e9abd 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -2341,10 +2341,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17945
-    if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams);
@@ -2361,7 +2357,9 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/)
+            if (eltwiseOp == "sum" && !weightedEltwise &&
+                (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/)
+            )
             {
                 expectedFusedLayers.push_back(eltwiseId);
                 expectedFusedLayers.push_back(activId);

From 171fbf879fa5b2a991192cae72eb5fed61cec522 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 9 Oct 2020 22:00:02 +0000
Subject: [PATCH 013/152] cmake: fix typo in CUDA_GENERATION=Auto cache

---
 cmake/OpenCVDetectCUDA.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/OpenCVDetectCUDA.cmake b/cmake/OpenCVDetectCUDA.cmake
index c55f9f1903..c7cfebe50f 100644
--- a/cmake/OpenCVDetectCUDA.cmake
+++ b/cmake/OpenCVDetectCUDA.cmake
@@ -198,7 +198,7 @@ if(CUDA_FOUND)
 
       if(${status} EQUAL 0)
         # cache detected values
-        set(OPENCV_CACHE_CUDA_ACTIVE_CC ${${result_list}} CACHE INTERNAL "")
+        set(OPENCV_CACHE_CUDA_ACTIVE_CC ${${output}} CACHE INTERNAL "")
         set(OPENCV_CACHE_CUDA_ACTIVE_CC_check "${__cache_key_check}" CACHE INTERNAL "")
       endif()
     endif()

From 2dd2d6095584b957b243bb86948b13223ecb39b3 Mon Sep 17 00:00:00 2001
From: maxint <maxint@foxmail.com>
Date: Sat, 10 Oct 2020 07:13:23 +0000
Subject: [PATCH 014/152] Fix warnings: "-Wrange-loop-construct" in gapi

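Iterating a range of non-trivially-copyable handles with `const auto`
copies every element, which recent GCC/Clang flag with
-Wrange-loop-construct; binding with `const auto&` avoids the copy. A
minimal illustration (`NodeHandle` is a hypothetical stand-in for the ade
handle types used in G-API):

```
#include <cstddef>
#include <string>
#include <vector>

struct NodeHandle { std::string name; };  // non-trivial copy

std::size_t countNamed(const std::vector<NodeHandle>& nodes)
{
    std::size_t n = 0;
    // 'const auto node' would copy each NodeHandle per iteration;
    // 'const auto& node' binds by reference, silencing the warning.
    for (const auto& node : nodes)
        if (!node.name.empty())
            ++n;
    return n;
}
```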
---
 modules/gapi/src/backends/fluid/gfluidbackend.cpp | 4 ++--
 modules/gapi/src/compiler/passes/exec.cpp         | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/modules/gapi/src/backends/fluid/gfluidbackend.cpp b/modules/gapi/src/backends/fluid/gfluidbackend.cpp
index 9b95dff036..030bb10198 100644
--- a/modules/gapi/src/backends/fluid/gfluidbackend.cpp
+++ b/modules/gapi/src/backends/fluid/gfluidbackend.cpp
@@ -952,7 +952,7 @@ namespace
         using namespace cv::gimpl;
         GModel::Graph g(graph);
         GFluidModel fg(graph);
-        for (const auto node : g.nodes())
+        for (const auto& node : g.nodes())
         {
             if (g.metadata(node).get<NodeType>().t == NodeType::DATA)
             {
@@ -1440,7 +1440,7 @@ void GFluidBackendImpl::addMetaSensitiveBackendPasses(ade::ExecutionEngineSetupC
                 {
                     // Add FluidData to all data nodes inside island,
                     // set internal = true if node is not a slot in terms of higher-level GIslandModel
-                    for (const auto node : isl->contents())
+                    for (const auto& node : isl->contents())
                     {
                         if (g.metadata(node).get<NodeType>().t == NodeType::DATA &&
                             !fg.metadata(node).contains<FluidData>())
diff --git a/modules/gapi/src/compiler/passes/exec.cpp b/modules/gapi/src/compiler/passes/exec.cpp
index 755538bb46..0eb8352b76 100644
--- a/modules/gapi/src/compiler/passes/exec.cpp
+++ b/modules/gapi/src/compiler/passes/exec.cpp
@@ -71,12 +71,12 @@ namespace
 
         all.insert(src_g.nodes().begin(), src_g.nodes().end());
 
-        for (const auto nh : proto.in_nhs)
+        for (const auto& nh : proto.in_nhs)
         {
             all.erase(nh);
             in_ops.insert(nh->outNodes().begin(), nh->outNodes().end());
         }
-        for (const auto nh : proto.out_nhs)
+        for (const auto& nh : proto.out_nhs)
         {
             all.erase(nh);
             out_ops.insert(nh->inNodes().begin(), nh->inNodes().end());
@@ -90,12 +90,12 @@ namespace
 
         auto ih = GIslandModel::mkIslandNode(g, std::move(isl));
 
-        for (const auto nh : proto.in_nhs)
+        for (const auto& nh : proto.in_nhs)
         {
             auto slot = GIslandModel::mkSlotNode(g, nh);
             g.link(slot, ih);
         }
-        for (const auto nh : proto.out_nhs)
+        for (const auto& nh : proto.out_nhs)
         {
             auto slot = GIslandModel::mkSlotNode(g, nh);
             g.link(ih, slot);

From dc15187f1b6784ef2ece30dae223570811eaddff Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 10 Oct 2020 20:14:29 +0000
Subject: [PATCH 015/152] release: OpenCV 3.4.12

---
 modules/core/include/opencv2/core/version.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp
index 00172f809e..c5d015afe0 100644
--- a/modules/core/include/opencv2/core/version.hpp
+++ b/modules/core/include/opencv2/core/version.hpp
@@ -8,7 +8,7 @@
 #define CV_VERSION_MAJOR    3
 #define CV_VERSION_MINOR    4
 #define CV_VERSION_REVISION 12
-#define CV_VERSION_STATUS   "-pre"
+#define CV_VERSION_STATUS   ""
 
 #define CVAUX_STR_EXP(__A)  #__A
 #define CVAUX_STR(__A)      CVAUX_STR_EXP(__A)

From e58da86efc38484b1c660263405326bcb22594c9 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 10 Oct 2020 21:34:15 +0000
Subject: [PATCH 016/152] dnn(test): update tests for OpenVINO 2021.1 (OpenCV
 4.x)

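The check widens from "exactly the 2020.4 release" to "2020.4 and every
later release", so the relaxed MYRIAD tolerances also apply to OpenVINO
2021.1. An illustrative sketch of the EQ/GE distinction (the MY_VER_*
macros are hypothetical stand-ins for the INF_ENGINE_VER_MAJOR_* checks):

```
// Release numbers are packed integers such as 2020040000 (2020.4).
#define MY_VER_EQ(cur, ref) ((cur) == (ref))
#define MY_VER_GE(cur, ref) ((cur) >= (ref))

static_assert(MY_VER_GE(2021010000, 2020040000), "2021.1 keeps the relaxed tolerance");
static_assert(!MY_VER_EQ(2021010000, 2020040000), "EQ matched 2020.4 only");
```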
---
 modules/dnn/test/test_model.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/dnn/test/test_model.cpp b/modules/dnn/test/test_model.cpp
index 215cc1c743..ddec6e79e4 100644
--- a/modules/dnn/test/test_model.cpp
+++ b/modules/dnn/test/test_model.cpp
@@ -363,7 +363,7 @@ TEST_P(Test_Model, Detection_normalized)
         scoreDiff = 5e-3;
         iouDiff = 0.09;
     }
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
     {
         iouDiff = 0.095f;

From 16561ed71e49b11d9790d3c161c65bdd3c0a2992 Mon Sep 17 00:00:00 2001
From: mtfrctl <sky.franker@gmail.com>
Date: Sun, 11 Oct 2020 19:16:03 +0900
Subject: [PATCH 017/152] Add data pointer bridge method to Mat for
 Objective-C/Swift

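The new -dataPtr method is a thin bridge over the underlying cv::Mat's raw
data pointer. A minimal C++ sketch of what it exposes (not part of the
patch):

```
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat m(2, 3, CV_8UC1, cv::Scalar(7));
    unsigned char* p = m.data;       // the pointer -dataPtr hands to Swift/ObjC
    std::printf("%d\n", (int)p[0]);  // prints "7"
}
```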
---
 modules/core/misc/objc/common/Mat.h  | 1 +
 modules/core/misc/objc/common/Mat.mm | 4 ++++
 2 files changed, 5 insertions(+)

diff --git a/modules/core/misc/objc/common/Mat.h b/modules/core/misc/objc/common/Mat.h
index 72f81dd9b7..229337f524 100644
--- a/modules/core/misc/objc/common/Mat.h
+++ b/modules/core/misc/objc/common/Mat.h
@@ -97,6 +97,7 @@ CV_EXPORTS @interface Mat : NSObject
 - (void)createEx:(NSArray<NSNumber*>*)sizes type:(int)type  NS_SWIFT_NAME(create(sizes:type:));
 - (void)copySize:(Mat*)mat;
 - (Mat*)cross:(Mat*)mat;
+- (unsigned char*)dataPtr NS_SWIFT_NAME(dataPointer());
 - (int)depth;
 - (Mat*)diag:(int)diagonal;
 - (Mat*)diag;
diff --git a/modules/core/misc/objc/common/Mat.mm b/modules/core/misc/objc/common/Mat.mm
index c075e26046..5d41a3622e 100644
--- a/modules/core/misc/objc/common/Mat.mm
+++ b/modules/core/misc/objc/common/Mat.mm
@@ -286,6 +286,10 @@ static bool updateIdx(cv::Mat* mat, std::vector<int>& indices, int inc) {
     return [[Mat alloc] initWithNativeMat:new cv::Mat(_nativePtr->cross(*(cv::Mat*)mat.nativePtr))];
 }
 
+- (unsigned char*)dataPtr {
+    return _nativePtr->data;
+}
+
 - (int)depth {
     return _nativePtr->depth();
 }

From de93782fab107ffc2193fb23d98916aba1787f37 Mon Sep 17 00:00:00 2001
From: catree <catree.catreus@outlook.com>
Date: Sun, 11 Oct 2020 17:18:05 +0200
Subject: [PATCH 018/152] Move colorscale_deepgreen.jpg to the correct folder.

---
 .../pics/{ => colormaps}/colorscale_deepgreen.jpg   | Bin
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename modules/imgproc/doc/pics/{ => colormaps}/colorscale_deepgreen.jpg (100%)

diff --git a/modules/imgproc/doc/pics/colorscale_deepgreen.jpg b/modules/imgproc/doc/pics/colormaps/colorscale_deepgreen.jpg
similarity index 100%
rename from modules/imgproc/doc/pics/colorscale_deepgreen.jpg
rename to modules/imgproc/doc/pics/colormaps/colorscale_deepgreen.jpg

From d5fd2f0155ffad366f9ac912dfd6d189a7a6a98e Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sun, 11 Oct 2020 21:26:07 +0000
Subject: [PATCH 019/152] release: OpenCV 4.5.0

---
 modules/core/include/opencv2/core/version.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp
index 7026fad88f..9e8b1b06ed 100644
--- a/modules/core/include/opencv2/core/version.hpp
+++ b/modules/core/include/opencv2/core/version.hpp
@@ -8,7 +8,7 @@
 #define CV_VERSION_MAJOR    4
 #define CV_VERSION_MINOR    5
 #define CV_VERSION_REVISION 0
-#define CV_VERSION_STATUS   "-pre"
+#define CV_VERSION_STATUS   ""
 
 #define CVAUX_STR_EXP(__A)  #__A
 #define CVAUX_STR(__A)      CVAUX_STR_EXP(__A)

From 1048feac3b1252884b0ab91a526aa58f3621f348 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 13 Oct 2020 13:41:07 +0000
Subject: [PATCH 020/152] build: winpack_dldt with videoio plugins

---
 platforms/winpack_dldt/build_package.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/platforms/winpack_dldt/build_package.py b/platforms/winpack_dldt/build_package.py
index 8618e11cbf..05991da6b4 100644
--- a/platforms/winpack_dldt/build_package.py
+++ b/platforms/winpack_dldt/build_package.py
@@ -350,6 +350,8 @@ class Builder:
             INSTALL_PDB='ON',
             INSTALL_PDB_COMPONENT_EXCLUDE_FROM_ALL='OFF',
 
+            VIDEOIO_PLUGIN_LIST='all',
+
             OPENCV_SKIP_CMAKE_ROOT_CONFIG='ON',
             OPENCV_BIN_INSTALL_PATH='bin',
             OPENCV_INCLUDE_INSTALL_PATH='include',

From 25163eb008910f6579c5582300994ce0763069fe Mon Sep 17 00:00:00 2001
From: Welton Rodrigo Torres Nascimento <rodrigo@familianascimento.org>
Date: Tue, 13 Oct 2020 11:51:23 -0300
Subject: [PATCH 021/152] Doc: INTER_LINEAR_EXACT unsupported in remap

Update documentation to reflect INTER_LINEAR_EXACT being
unsupported in remap
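
For reference, a minimal sketch of a supported remap call (the
flipViaRemap helper is illustrative, not part of the patch); passing
#INTER_AREA or #INTER_LINEAR_EXACT here would hit the unsupported path
this documentation change records:

```
#include <opencv2/imgproc.hpp>

void flipViaRemap(const cv::Mat& src, cv::Mat& dst)
{
    cv::Mat mapX(src.size(), CV_32FC1), mapY(src.size(), CV_32FC1);
    for (int y = 0; y < src.rows; y++)
        for (int x = 0; x < src.cols; x++)
        {
            mapX.at<float>(y, x) = (float)(src.cols - 1 - x);  // mirror horizontally
            mapY.at<float>(y, x) = (float)y;                   // keep rows
        }
    cv::remap(src, dst, mapX, mapY, cv::INTER_LINEAR, cv::BORDER_CONSTANT);
}
```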
---
 modules/imgproc/include/opencv2/imgproc.hpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp
index c73a382ddf..d75f3bcffc 100644
--- a/modules/imgproc/include/opencv2/imgproc.hpp
+++ b/modules/imgproc/include/opencv2/imgproc.hpp
@@ -2320,8 +2320,8 @@ CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating poin
 representation to fixed-point for speed.
 @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
 if map1 is (x,y) points), respectively.
-@param interpolation Interpolation method (see #InterpolationFlags). The method #INTER_AREA is
-not supported by this function.
+@param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
+and #INTER_LINEAR_EXACT are not supported by this function.
 @param borderMode Pixel extrapolation method (see #BorderTypes). When
 borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
 corresponds to the "outliers" in the source image are not modified by the function.

From 09a62012b2e429d4e4ee8f27fdf5cdffd107c148 Mon Sep 17 00:00:00 2001
From: Welton Rodrigo Torres Nascimento <rodrigo@familianascimento.org>
Date: Tue, 13 Oct 2020 11:07:11 -0300
Subject: [PATCH 022/152] Doc update. INTER_LINEAR_EXACT unsupported in remap

---
 modules/gapi/include/opencv2/gapi/core.hpp  | 4 ++--
 modules/imgproc/include/opencv2/imgproc.hpp | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/core.hpp b/modules/gapi/include/opencv2/gapi/core.hpp
index c4ddaf6bd3..2c01328f09 100644
--- a/modules/gapi/include/opencv2/gapi/core.hpp
+++ b/modules/gapi/include/opencv2/gapi/core.hpp
@@ -1491,8 +1491,8 @@ Output image must be of the same size and depth as input one.
 CV_32FC1, or CV_32FC2.
 @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
 if map1 is (x,y) points), respectively.
-@param interpolation Interpolation method (see cv::InterpolationFlags). The method INTER_AREA is
-not supported by this function.
+@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA
+and #INTER_LINEAR_EXACT are not supported by this function.
 @param borderMode Pixel extrapolation method (see cv::BorderTypes). When
 borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that
 corresponds to the "outliers" in the source image are not modified by the function.
diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp
index 4a41ba6b50..8309315ef4 100644
--- a/modules/imgproc/include/opencv2/imgproc.hpp
+++ b/modules/imgproc/include/opencv2/imgproc.hpp
@@ -2367,8 +2367,8 @@ CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating poin
 representation to fixed-point for speed.
 @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
 if map1 is (x,y) points), respectively.
-@param interpolation Interpolation method (see #InterpolationFlags). The method #INTER_AREA is
-not supported by this function.
+@param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
+and #INTER_LINEAR_EXACT are not supported by this function.
 @param borderMode Pixel extrapolation method (see #BorderTypes). When
 borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
 corresponds to the "outliers" in the source image are not modified by the function.

From 4c048a487e932cd5bf8b560294009bebbb8b8fe5 Mon Sep 17 00:00:00 2001
From: Anna Khakimova <anna.khakimova@intel.com>
Date: Wed, 14 Oct 2020 19:51:35 +0300
Subject: [PATCH 023/152] Merge pull request #18516 from
 anna-khakimova:ak/bgr2rgb

GAPI: Add new color conversion kernels to the CPU backend.

* Add BGR2RGB kernel to CPU backend

* Add BGR2I420 and RGB2I420 kernels to CPU backend

* Add I4202BGR and I4202RGB kernels to CPU backend
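
For reference, a minimal usage sketch of one of the new kernels
(illustrative; assumes the CPU kernel package is passed explicitly):

```
#include <opencv2/core.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/cpu/imgproc.hpp>

int main()
{
    cv::Mat bgr(480, 640, CV_8UC3, cv::Scalar(255, 0, 0)), i420;

    cv::GMat in;
    cv::GComputation comp(in, cv::gapi::BGR2I420(in));
    comp.apply(bgr, i420, cv::compile_args(cv::gapi::imgproc::cpu::kernels()));

    // I420 output is single-channel with 3/2 the input height.
    CV_Assert(i420.type() == CV_8UC1 && i420.rows == 480 * 3 / 2);
}
```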
---
 modules/gapi/include/opencv2/gapi/imgproc.hpp | 120 ++++++++++++++++++
 modules/gapi/src/api/kernels_imgproc.cpp      |  25 ++++
 modules/gapi/src/backends/cpu/gcpuimgproc.cpp |  45 +++++++
 .../gapi/test/common/gapi_imgproc_tests.hpp   |   5 +
 .../test/common/gapi_imgproc_tests_inl.hpp    |  95 ++++++++++++++
 .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp  |  39 ++++++
 6 files changed, 329 insertions(+)

diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index b4905e932b..23ad41eb25 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -124,6 +124,12 @@ namespace imgproc {
         }
     };
 
+    G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
+        static GMatDesc outMeta(GMatDesc in) {
+            return in; // type still remains CV_8UC3;
+        }
+    };
+
     G_TYPED_KERNEL(GRGB2YUV, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2yuv") {
         static GMatDesc outMeta(GMatDesc in) {
             return in; // type still remains CV_8UC3;
@@ -136,6 +142,42 @@ namespace imgproc {
         }
     };
 
+    G_TYPED_KERNEL(GBGR2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2i420") {
+        static GMatDesc outMeta(GMatDesc in) {
+            GAPI_Assert(in.depth == CV_8U);
+            GAPI_Assert(in.chan == 3);
+            GAPI_Assert(in.size.height % 2 == 0);
+            return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
+        }
+    };
+
+    G_TYPED_KERNEL(GRGB2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2i420") {
+        static GMatDesc outMeta(GMatDesc in) {
+            GAPI_Assert(in.depth == CV_8U);
+            GAPI_Assert(in.chan == 3);
+            GAPI_Assert(in.size.height % 2 == 0);
+            return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
+        }
+    };
+
+    G_TYPED_KERNEL(GI4202BGR, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202bgr") {
+        static GMatDesc outMeta(GMatDesc in) {
+            GAPI_Assert(in.depth == CV_8U);
+            GAPI_Assert(in.chan == 1);
+            GAPI_Assert(in.size.height % 3 == 0);
+            return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
+        }
+    };
+
+    G_TYPED_KERNEL(GI4202RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202rgb") {
+        static GMatDesc outMeta(GMatDesc in) {
+            GAPI_Assert(in.depth == CV_8U);
+            GAPI_Assert(in.chan == 1);
+            GAPI_Assert(in.size.height % 3 == 0);
+            return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
+        }
+    };
+
     G_TYPED_KERNEL(GNV12toRGB, <GMat(GMat, GMat)>, "org.opencv.imgproc.colorconvert.nv12torgb") {
         static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) {
             GAPI_Assert(in_y.chan == 1);
@@ -812,6 +854,20 @@ The algorithm normalizes the brightness and increases the contrast of the image.
  */
 GAPI_EXPORTS GMat equalizeHist(const GMat& src);
 
+/** @brief Converts an image from BGR color space to RGB color space.
+
+The function converts an input image from BGR color space to RGB.
+The conventional ranges for B, G, and R channel values are 0 to 255.
+
+Output image is an 8-bit unsigned 3-channel image @ref CV_8UC3.
+
+@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2rgb"
+
+@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
+@sa RGB2BGR
+*/
+GAPI_EXPORTS GMat BGR2RGB(const GMat& src);
+
 //! @} gapi_filters
 
 //! @addtogroup gapi_colorconvert
@@ -871,6 +927,70 @@ Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
 */
 GAPI_EXPORTS GMat RGB2YUV(const GMat& src);
 
+/** @brief Converts an image from BGR color space to I420 color space.
+
+The function converts an input image from BGR color space to I420.
+The conventional ranges for R, G, and B channel values are 0 to 255.
+
+Output image must be an 8-bit unsigned 1-channel image @ref CV_8UC1.
+Width of the I420 output image must be the same as the width of the input image.
+Height of the I420 output image must be equal to 3/2 of the height of the input image.
+
+@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2i420"
+
+@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
+@sa I4202BGR
+*/
+GAPI_EXPORTS GMat BGR2I420(const GMat& src);
+
+/** @brief Converts an image from RGB color space to I420 color space.
+
+The function converts an input image from RGB color space to I420.
+The conventional ranges for R, G, and B channel values are 0 to 255.
+
+Output image must be an 8-bit unsigned 1-channel image @ref CV_8UC1.
+Width of the I420 output image must be the same as the width of the input image.
+Height of the I420 output image must be equal to 3/2 of the height of the input image.
+
+@note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2i420"
+
+@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
+@sa I4202RGB
+*/
+GAPI_EXPORTS GMat RGB2I420(const GMat& src);
+
+/** @brief Converts an image from I420 color space to BGR color space.
+
+The function converts an input image from I420 color space to BGR.
+The conventional ranges for B, G, and R channel values are 0 to 255.
+
+Output image must be an 8-bit unsigned 3-channel image @ref CV_8UC3.
+Width of the BGR output image must be the same as the width of the input image.
+Height of the BGR output image must be equal to 2/3 of the height of the input image.
+
+@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202bgr"
+
+@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
+@sa BGR2I420
+*/
+GAPI_EXPORTS GMat I4202BGR(const GMat& src);
+
+/** @brief Converts an image from I420 color space to RGB color space.
+
+The function converts an input image from I420 color space to RGB.
+The conventional ranges for R, G, and B channel values are 0 to 255.
+
+Output image must be an 8-bit unsigned 3-channel image @ref CV_8UC3.
+Width of the RGB output image must be the same as the width of the input image.
+Height of the RGB output image must be equal to 2/3 of the height of the input image.
+
+@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202rgb"
+
+@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
+@sa RGB2I420
+*/
+GAPI_EXPORTS GMat I4202RGB(const GMat& src);
+
 /** @brief Converts an image from BGR color space to LUV color space.
 
 The function converts an input image from BGR color space to LUV.
diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp
index 108eefcb81..652f83935f 100644
--- a/modules/gapi/src/api/kernels_imgproc.cpp
+++ b/modules/gapi/src/api/kernels_imgproc.cpp
@@ -115,6 +115,11 @@ cv::GArray<cv::Point2f> goodFeaturesToTrack(const GMat& image, int maxCorners, d
                                       useHarrisDetector, k);
 }
 
+GMat BGR2RGB(const GMat& src)
+{
+    return imgproc::GBGR2RGB::on(src);
+}
+
 GMat RGB2Gray(const GMat& src)
 {
     return imgproc::GRGB2Gray::on(src);
@@ -160,6 +165,26 @@ GMat YUV2RGB(const GMat& src)
     return imgproc::GYUV2RGB::on(src);
 }
 
+GMat BGR2I420(const GMat& src)
+{
+    return imgproc::GBGR2I420::on(src);
+}
+
+GMat RGB2I420(const GMat& src)
+{
+    return imgproc::GRGB2I420::on(src);
+}
+
+GMat I4202BGR(const GMat& src)
+{
+    return imgproc::GI4202BGR::on(src);
+}
+
+GMat I4202RGB(const GMat& src)
+{
+    return imgproc::GI4202RGB::on(src);
+}
+
 GMat NV12toRGB(const GMat& src_y, const GMat& src_uv)
 {
     return imgproc::GNV12toRGB::on(src_y, src_uv);
diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
index 8104565f03..c07ed6785c 100644
--- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
+++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
@@ -211,6 +211,46 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures)
     }
 };
 
+GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB)
+{
+    static void run(const cv::Mat& in, cv::Mat &out)
+    {
+        cv::cvtColor(in, out, cv::COLOR_BGR2RGB);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUBGR2I420, cv::gapi::imgproc::GBGR2I420)
+{
+    static void run(const cv::Mat& in, cv::Mat &out)
+    {
+        cv::cvtColor(in, out, cv::COLOR_BGR2YUV_I420);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPURGB2I420, cv::gapi::imgproc::GRGB2I420)
+{
+    static void run(const cv::Mat& in, cv::Mat &out)
+    {
+        cv::cvtColor(in, out, cv::COLOR_RGB2YUV_I420);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUI4202BGR, cv::gapi::imgproc::GI4202BGR)
+{
+    static void run(const cv::Mat& in, cv::Mat &out)
+    {
+        cv::cvtColor(in, out, cv::COLOR_YUV2BGR_I420);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUI4202RGB, cv::gapi::imgproc::GI4202RGB)
+{
+    static void run(const cv::Mat& in, cv::Mat &out)
+    {
+        cv::cvtColor(in, out, cv::COLOR_YUV2RGB_I420);
+    }
+};
+
 GAPI_OCV_KERNEL(GCPURGB2YUV, cv::gapi::imgproc::GRGB2YUV)
 {
     static void run(const cv::Mat& in, cv::Mat &out)
@@ -445,8 +485,13 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
         , GCPUCanny
         , GCPUGoodFeatures
         , GCPUEqualizeHist
+        , GCPUBGR2RGB
         , GCPURGB2YUV
         , GCPUYUV2RGB
+        , GCPUBGR2I420
+        , GCPURGB2I420
+        , GCPUI4202BGR
+        , GCPUI4202RGB
         , GCPUNV12toRGB
         , GCPUNV12toBGR
         , GCPURGB2Lab
diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp
index cd074efda0..38a02985e7 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp
@@ -64,9 +64,14 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest,
                                           double,int,bool),
                               8, cmpF, fileName, type, maxCorners, qualityLevel, minDistance,
                               blockSize, useHarrisDetector)
+GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(RGB2YUVTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
+GAPI_TEST_FIXTURE(BGR2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
+GAPI_TEST_FIXTURE(RGB2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
+GAPI_TEST_FIXTURE(I4202BGRTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
+GAPI_TEST_FIXTURE(I4202RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(YUV2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(YUV2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(NV12toRGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
index 4aadc17d5d..95728e87b7 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
@@ -447,6 +447,25 @@ TEST_P(GoodFeaturesTest, AccuracyTest)
     }
 }
 
+TEST_P(BGR2RGBTest, AccuracyTest)
+{
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::BGR2RGB(in);
+
+    cv::GComputation c(in, out);
+    c.apply(in_mat1, out_mat_gapi, getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2RGB);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
+        EXPECT_EQ(out_mat_gapi.size(), sz);
+    }
+}
+
 TEST_P(RGB2GrayTest, AccuracyTest)
 {
     // G-API code //////////////////////////////////////////////////////////////
@@ -523,6 +542,82 @@ TEST_P(YUV2RGBTest, AccuracyTest)
     }
 }
 
+TEST_P(BGR2I420Test, AccuracyTest)
+{
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::BGR2I420(in);
+
+    cv::GComputation c(in, out);
+    c.apply(in_mat1, out_mat_gapi, getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2YUV_I420);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
+        EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2));
+    }
+}
+
+TEST_P(RGB2I420Test, AccuracyTest)
+{
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::RGB2I420(in);
+
+    cv::GComputation c(in, out);
+    c.apply(in_mat1, out_mat_gapi, getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_RGB2YUV_I420);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
+        EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2));
+    }
+}
+
+TEST_P(I4202BGRTest, AccuracyTest)
+{
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::I4202BGR(in);
+
+    cv::GComputation c(in, out);
+    c.apply(in_mat1, out_mat_gapi, getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2BGR_I420);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
+        EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3));
+    }
+}
+
+TEST_P(I4202RGBTest, AccuracyTest)
+{
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::I4202RGB(in);
+
+    cv::GComputation c(in, out);
+    c.apply(in_mat1, out_mat_gapi, getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2RGB_I420);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
+        EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3));
+    }
+}
+
 TEST_P(NV12toRGBTest, AccuracyTest)
 {
     // G-API code //////////////////////////////////////////////////////////////
diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
index 8a94583fcc..e7f9667096 100644
--- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
+++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
@@ -241,6 +241,13 @@ INSTANTIATE_TEST_CASE_P(GoodFeaturesInternalTestCPU, GoodFeaturesTest,
                                 Values(3),
                                 Values(true)));
 
+INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest,
+                        Combine(Values(CV_8UC3),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_8UC3),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj())));
 
 INSTANTIATE_TEST_CASE_P(RGB2GrayTestCPU, RGB2GrayTest,
                         Combine(Values(CV_8UC3),
@@ -274,6 +281,38 @@ INSTANTIATE_TEST_CASE_P(YUV2RGBTestCPU, YUV2RGBTest,
                                 Values(IMGPROC_CPU),
                                 Values(AbsExact().to_compare_obj())));
 
+INSTANTIATE_TEST_CASE_P(BGR2I420TestCPU, BGR2I420Test,
+                        Combine(Values(CV_8UC3),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_8UC1),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(RGB2I420TestCPU, RGB2I420Test,
+                        Combine(Values(CV_8UC3),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_8UC1),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(I4202BGRTestCPU, I4202BGRTest,
+                        Combine(Values(CV_8UC1),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_8UC3),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(I4202RGBTestCPU, I4202RGBTest,
+                        Combine(Values(CV_8UC1),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_8UC3),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj())));
+
 INSTANTIATE_TEST_CASE_P(NV12toRGBTestCPU, NV12toRGBTest,
                         Combine(Values(CV_8UC1),
                                 Values(cv::Size(1280, 720),

From 7de189114b5ab9b73144833440f58183b759fbe5 Mon Sep 17 00:00:00 2001
From: mtfrctl <sky.franker@gmail.com>
Date: Thu, 15 Oct 2020 01:58:06 +0900
Subject: [PATCH 024/152] Merge pull request #18547 from
 mtfrctl:objc-conversions-macosx

Mat conversions for macOS/AppKit

* Extract CoreGraphics conversion logics from ios_conversions.mm to apple_conversions.h, apple_conversions. Add macosx_conversions.mm

* Add macosx.h

* Add Mat+Conversions.h and Mat+Conversions.mm

* Delete duplicated declaration from apple_conversion.mm

* Use short license header

* Add compile guard

* Delete unused imports

* Move precomp.hpp import from header to implementation

* Add macosx.h to skip headers

* Fix compile guard condition

* Use short license header

* Remove commented out unused code
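
For reference, a round-trip sketch over the factored-out helpers
(Apple-only; compile as Objective-C++ in a .mm file; illustrative, not
part of the patch):

```
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs/macosx.h>  // MatToCGImage / CGImageToMat

cv::Mat roundTrip(const cv::Mat& src)
{
    CGImageRef img = MatToCGImage(src);            // cv::Mat -> CGImage
    cv::Mat dst;
    CGImageToMat(img, dst, /*alphaExist=*/false);  // CGImage -> cv::Mat
    CGImageRelease(img);                           // caller owns the CGImage
    return dst;
}
```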
---
 cmake/templates/opencv_abi.xml.in             |  1 +
 modules/imgcodecs/CMakeLists.txt              |  8 ++
 .../imgcodecs/include/opencv2/imgcodecs.hpp   |  1 +
 .../include/opencv2/imgcodecs/macosx.h        | 20 ++++
 .../misc/objc/macosx/Mat+Converters.h         | 32 +++++++
 .../misc/objc/macosx/Mat+Converters.mm        | 44 +++++++++
 modules/imgcodecs/src/apple_conversions.h     | 11 +++
 modules/imgcodecs/src/apple_conversions.mm    | 94 ++++++++++++++++++
 modules/imgcodecs/src/ios_conversions.mm      | 96 ++-----------------
 modules/imgcodecs/src/macosx_conversions.mm   | 25 +++++
 modules/objc/generator/gen_objc.py            |  5 +
 11 files changed, 248 insertions(+), 89 deletions(-)
 create mode 100644 modules/imgcodecs/include/opencv2/imgcodecs/macosx.h
 create mode 100644 modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
 create mode 100644 modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm
 create mode 100644 modules/imgcodecs/src/apple_conversions.h
 create mode 100644 modules/imgcodecs/src/apple_conversions.mm
 create mode 100644 modules/imgcodecs/src/macosx_conversions.mm

diff --git a/cmake/templates/opencv_abi.xml.in b/cmake/templates/opencv_abi.xml.in
index 212b6d67d4..711c4e99ee 100644
--- a/cmake/templates/opencv_abi.xml.in
+++ b/cmake/templates/opencv_abi.xml.in
@@ -32,6 +32,7 @@
     opencv2/flann/hdf5.h
     opencv2/imgcodecs/imgcodecs_c.h
     opencv2/imgcodecs/ios.h
+    opencv2/imgcodecs/macosx.h
     opencv2/videoio/videoio_c.h
     opencv2/videoio/cap_ios.h
     opencv2/xobjdetect/private.hpp
diff --git a/modules/imgcodecs/CMakeLists.txt b/modules/imgcodecs/CMakeLists.txt
index f8bfd18e1f..80f7e1c248 100644
--- a/modules/imgcodecs/CMakeLists.txt
+++ b/modules/imgcodecs/CMakeLists.txt
@@ -113,10 +113,18 @@ file(GLOB imgcodecs_ext_hdrs
      "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/legacy/*.h"
      )
 
+if(APPLE)
+  list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/apple_conversions.h)
+  list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/apple_conversions.mm)
+endif()
 if(IOS)
   list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/ios_conversions.mm)
   list(APPEND IMGCODECS_LIBRARIES "-framework UIKit" "-framework AssetsLibrary")
 endif()
+if(APPLE AND (NOT IOS))
+  list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/macosx_conversions.mm)
+  list(APPEND IMGCODECS_LIBRARIES "-framework AppKit")
+endif()
 if(APPLE_FRAMEWORK)
   list(APPEND IMGCODECS_LIBRARIES "-framework Accelerate" "-framework CoreGraphics" "-framework QuartzCore")
 endif()
diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp
index 97ca866e1b..c07a905914 100644
--- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp
+++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp
@@ -50,6 +50,7 @@
   @{
     @defgroup imgcodecs_c C API
     @defgroup imgcodecs_ios iOS glue
+    @defgroup imgcodecs_macosx MacOS(OSX) glue
   @}
 */
 
diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h b/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h
new file mode 100644
index 0000000000..f5d9c082c4
--- /dev/null
+++ b/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h
@@ -0,0 +1,20 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#if !defined(__APPLE__) || !defined(__MACH__)
+#error This header should be used in macOS ObjC/Swift projects.
+#endif
+
+#import <AppKit/AppKit.h>
+#include "opencv2/core.hpp"
+
+//! @addtogroup imgcodecs_macosx
+//! @{
+
+CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image);
+CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist = false);
+CV_EXPORTS NSImage* MatToNSImage(const cv::Mat& image);
+CV_EXPORTS void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist = false);
+
+//! @}
diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
new file mode 100644
index 0000000000..4abf806d1e
--- /dev/null
+++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
@@ -0,0 +1,32 @@
+//
+//  Mat+Converters.h
+//
+//  Created by Masaya Tsuruta on 2020/10/08.
+//
+
+#pragma once
+
+#ifdef __cplusplus
+#import "opencv.hpp"
+#else
+#define CV_EXPORTS
+#endif
+
+#import <Foundation/Foundation.h>
+#import <AppKit/AppKit.h>
+#import "Mat.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+CV_EXPORTS @interface Mat (Converters)
+
+-(CGImageRef)toCGImage;
+-(instancetype)initWithCGImage:(CGImageRef)image;
+-(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist;
+-(NSImage*)toNSImage;
+-(instancetype)initWithNSImage:(NSImage*)image;
+-(instancetype)initWithNSImage:(NSImage*)image alphaExist:(BOOL)alphaExist;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm
new file mode 100644
index 0000000000..725569784a
--- /dev/null
+++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm
@@ -0,0 +1,44 @@
+//
+//  Mat+Converters.mm
+//
+//  Created by Masaya Tsuruta on 2020/10/08.
+//
+
+#import "Mat+Converters.h"
+#import <opencv2/imgcodecs/macosx.h>
+
+@implementation Mat (Converters)
+
+-(CGImageRef)toCGImage {
+    return MatToCGImage(self.nativeRef);
+}
+
+-(instancetype)initWithCGImage:(CGImageRef)image {
+    return [self initWithCGImage:image alphaExist:NO];
+}
+
+-(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist {
+    self = [self init];
+    if (self) {
+        CGImageToMat(image, self.nativeRef, (bool)alphaExist);
+    }
+    return self;
+}
+
+-(NSImage*)toNSImage {
+    return MatToNSImage(self.nativeRef);
+}
+
+-(instancetype)initWithNSImage:(NSImage*)image {
+    return [self initWithNSImage:image alphaExist:NO];
+}
+
+-(instancetype)initWithNSImage:(NSImage*)image alphaExist:(BOOL)alphaExist {
+    self = [self init];
+    if (self) {
+        NSImageToMat(image, self.nativeRef, (bool)alphaExist);
+    }
+    return self;
+}
+
+@end
diff --git a/modules/imgcodecs/src/apple_conversions.h b/modules/imgcodecs/src/apple_conversions.h
new file mode 100644
index 0000000000..2762424379
--- /dev/null
+++ b/modules/imgcodecs/src/apple_conversions.h
@@ -0,0 +1,11 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#import <Accelerate/Accelerate.h>
+#import <AVFoundation/AVFoundation.h>
+#import <ImageIO/ImageIO.h>
+#include "opencv2/core.hpp"
+
+CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image);
+CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist);
diff --git a/modules/imgcodecs/src/apple_conversions.mm b/modules/imgcodecs/src/apple_conversions.mm
new file mode 100644
index 0000000000..6126039ce0
--- /dev/null
+++ b/modules/imgcodecs/src/apple_conversions.mm
@@ -0,0 +1,94 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "apple_conversions.h"
+#include "precomp.hpp"
+
+CGImageRef MatToCGImage(const cv::Mat& image) {
+    NSData *data = [NSData dataWithBytes:image.data
+                                  length:image.step.p[0] * image.rows];
+
+    CGColorSpaceRef colorSpace;
+
+    if (image.elemSize() == 1) {
+        colorSpace = CGColorSpaceCreateDeviceGray();
+    } else {
+        colorSpace = CGColorSpaceCreateDeviceRGB();
+    }
+
+    CGDataProviderRef provider =
+            CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
+
+    // Preserve alpha transparency, if exists
+    bool alpha = image.channels() == 4;
+    CGBitmapInfo bitmapInfo = (alpha ? kCGImageAlphaLast : kCGImageAlphaNone) | kCGBitmapByteOrderDefault;
+
+    // Creating CGImage from cv::Mat
+    CGImageRef imageRef = CGImageCreate(image.cols,
+                                        image.rows,
+                                        8 * image.elemSize1(),
+                                        8 * image.elemSize(),
+                                        image.step.p[0],
+                                        colorSpace,
+                                        bitmapInfo,
+                                        provider,
+                                        NULL,
+                                        false,
+                                        kCGRenderingIntentDefault
+                                        );
+
+    CGDataProviderRelease(provider);
+    CGColorSpaceRelease(colorSpace);
+
+    return imageRef;
+}
+
+void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist) {
+    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image);
+    CGFloat cols = CGImageGetWidth(image), rows = CGImageGetHeight(image);
+    CGContextRef contextRef;
+    CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast;
+    if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelMonochrome)
+    {
+        m.create(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
+        bitmapInfo = kCGImageAlphaNone;
+        if (!alphaExist)
+            bitmapInfo = kCGImageAlphaNone;
+        else
+            m = cv::Scalar(0);
+        contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
+                                           m.step[0], colorSpace,
+                                           bitmapInfo);
+    }
+    else if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelIndexed)
+    {
+        // CGBitmapContextCreate() does not support indexed color spaces.
+        colorSpace = CGColorSpaceCreateDeviceRGB();
+        m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
+        if (!alphaExist)
+            bitmapInfo = kCGImageAlphaNoneSkipLast |
+                                kCGBitmapByteOrderDefault;
+        else
+            m = cv::Scalar(0);
+        contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
+                                           m.step[0], colorSpace,
+                                           bitmapInfo);
+        CGColorSpaceRelease(colorSpace);
+    }
+    else
+    {
+        m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
+        if (!alphaExist)
+            bitmapInfo = kCGImageAlphaNoneSkipLast |
+                                kCGBitmapByteOrderDefault;
+        else
+            m = cv::Scalar(0);
+        contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
+                                           m.step[0], colorSpace,
+                                           bitmapInfo);
+    }
+    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows),
+                       image);
+    CGContextRelease(contextRef);
+}
diff --git a/modules/imgcodecs/src/ios_conversions.mm b/modules/imgcodecs/src/ios_conversions.mm
index 53fb788d65..2aba323a2d 100644
--- a/modules/imgcodecs/src/ios_conversions.mm
+++ b/modules/imgcodecs/src/ios_conversions.mm
@@ -41,105 +41,23 @@
 //M*/
 
 #import <UIKit/UIKit.h>
-#import <Accelerate/Accelerate.h>
-#import <AVFoundation/AVFoundation.h>
-#import <ImageIO/ImageIO.h>
-#include "opencv2/core.hpp"
-#include "precomp.hpp"
+#include "apple_conversions.h"
 
 CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image);
 CV_EXPORTS void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist);
 
 UIImage* MatToUIImage(const cv::Mat& image) {
-
-    NSData *data = [NSData dataWithBytes:image.data
-                                  length:image.step.p[0] * image.rows];
-
-    CGColorSpaceRef colorSpace;
-
-    if (image.elemSize() == 1) {
-        colorSpace = CGColorSpaceCreateDeviceGray();
-    } else {
-        colorSpace = CGColorSpaceCreateDeviceRGB();
-    }
-
-    CGDataProviderRef provider =
-            CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
-
-    // Preserve alpha transparency, if exists
-    bool alpha = image.channels() == 4;
-    CGBitmapInfo bitmapInfo = (alpha ? kCGImageAlphaLast : kCGImageAlphaNone) | kCGBitmapByteOrderDefault;
-
     // Creating CGImage from cv::Mat
-    CGImageRef imageRef = CGImageCreate(image.cols,
-                                        image.rows,
-                                        8 * image.elemSize1(),
-                                        8 * image.elemSize(),
-                                        image.step.p[0],
-                                        colorSpace,
-                                        bitmapInfo,
-                                        provider,
-                                        NULL,
-                                        false,
-                                        kCGRenderingIntentDefault
-                                        );
-
+    CGImageRef imageRef = MatToCGImage(image);
 
     // Getting UIImage from CGImage
-    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
+    UIImage *uiImage = [UIImage imageWithCGImage:imageRef];
     CGImageRelease(imageRef);
-    CGDataProviderRelease(provider);
-    CGColorSpaceRelease(colorSpace);
 
-    return finalImage;
+    return uiImage;
 }
 
-void UIImageToMat(const UIImage* image,
-                         cv::Mat& m, bool alphaExist) {
-    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
-    CGFloat cols = CGImageGetWidth(image.CGImage), rows = CGImageGetHeight(image.CGImage);
-    CGContextRef contextRef;
-    CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast;
-    if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelMonochrome)
-    {
-        m.create(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
-        bitmapInfo = kCGImageAlphaNone;
-        if (!alphaExist)
-            bitmapInfo = kCGImageAlphaNone;
-        else
-            m = cv::Scalar(0);
-        contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
-                                           m.step[0], colorSpace,
-                                           bitmapInfo);
-    }
-    else if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelIndexed)
-    {
-        // CGBitmapContextCreate() does not support indexed color spaces.
-        colorSpace = CGColorSpaceCreateDeviceRGB();
-        m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
-        if (!alphaExist)
-            bitmapInfo = kCGImageAlphaNoneSkipLast |
-                                kCGBitmapByteOrderDefault;
-        else
-            m = cv::Scalar(0);
-        contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
-                                           m.step[0], colorSpace,
-                                           bitmapInfo);
-        CGColorSpaceRelease(colorSpace);
-    }
-    else
-    {
-        m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
-        if (!alphaExist)
-            bitmapInfo = kCGImageAlphaNoneSkipLast |
-                                kCGBitmapByteOrderDefault;
-        else
-            m = cv::Scalar(0);
-        contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8,
-                                           m.step[0], colorSpace,
-                                           bitmapInfo);
-    }
-    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows),
-                       image.CGImage);
-    CGContextRelease(contextRef);
+void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist) {
+    CGImageRef imageRef = image.CGImage;
+    CGImageToMat(imageRef, m, alphaExist);
 }
diff --git a/modules/imgcodecs/src/macosx_conversions.mm b/modules/imgcodecs/src/macosx_conversions.mm
new file mode 100644
index 0000000000..c1827e71f1
--- /dev/null
+++ b/modules/imgcodecs/src/macosx_conversions.mm
@@ -0,0 +1,25 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#import <AppKit/AppKit.h>
+#include "apple_conversions.h"
+
+CV_EXPORTS NSImage* MatToNSImage(const cv::Mat& image);
+CV_EXPORTS void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist);
+
+NSImage* MatToNSImage(const cv::Mat& image) {
+    // Creating CGImage from cv::Mat
+    CGImageRef imageRef = MatToCGImage(image);
+
+    // Getting NSImage from CGImage
+    NSImage *nsImage = [[NSImage alloc] initWithCGImage:imageRef size:CGSizeMake(CGImageGetWidth(imageRef), CGImageGetHeight(imageRef))];
+    CGImageRelease(imageRef);
+
+    return nsImage;
+}
+
+void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist) {
+    CGImageRef imageRef = [image CGImageForProposedRect:NULL context:NULL hints:NULL];
+    CGImageToMat(imageRef, m, alphaExist);
+}
diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index 1ae00ab5f1..8c4a5b2cac 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -1584,6 +1584,11 @@ if __name__ == "__main__":
             if os.path.exists(ios_files_dir):
                 copied_files += copy_objc_files(ios_files_dir, objc_base_path, module, True)
 
+        if args.target == 'osx':
+            osx_files_dir = os.path.join(misc_location, 'macosx')
+            if os.path.exists(osx_files_dir):
+                copied_files += copy_objc_files(osx_files_dir, objc_base_path, module, True)
+
         objc_test_files_dir = os.path.join(misc_location, 'test')
         if os.path.exists(objc_test_files_dir):
             copy_objc_files(objc_test_files_dir, objc_test_base_path, 'test', False)

From 0d3e05f9b3a3c8223d9e80d89d8176fdbe3158a4 Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Thu, 15 Oct 2020 01:21:09 +0300
Subject: [PATCH 025/152] Merge pull request #18493 from
 TolyaTalamanov:at/wrap-streaming

[G-API] Wrap streaming

* Wrap streaming

* Fix build

* Add comments

* Remove comment

* Address review comments

* Add test for python pull overload
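
For reference, the equivalent C++ streaming flow that these wrappers
expose to Python ("video.avi" is a placeholder path):

```
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/streaming/cap.hpp>

int main()
{
    cv::GMat in;
    cv::GComputation comp(in, cv::gapi::medianBlur(in, 3));

    cv::GStreamingCompiled pipeline = comp.compileStreaming();
    pipeline.setSource(cv::gapi::wip::make_capture_src("video.avi"));
    pipeline.start();

    cv::Mat frame;
    while (pipeline.pull(cv::gout(frame)))
    {
        // process 'frame'; pull() returns false when the stream ends
    }
}
```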
---
 .../include/opencv2/gapi/gcomputation.hpp     |   4 +-
 modules/gapi/include/opencv2/gapi/gproto.hpp  |   2 +-
 .../gapi/include/opencv2/gapi/gstreaming.hpp  |  17 ++-
 modules/gapi/include/opencv2/gapi/imgproc.hpp |   2 +-
 .../include/opencv2/gapi/streaming/cap.hpp    |   6 +
 modules/gapi/misc/python/pyopencv_gapi.hpp    |  32 +++++
 modules/gapi/misc/python/shadow_gapi.hpp      |  11 ++
 .../misc/python/test/test_gapi_streaming.py   | 129 ++++++++++++++++++
 modules/gapi/src/compiler/gcompiler.cpp       |   8 ++
 modules/gapi/src/compiler/gstreaming.cpp      |  33 +++++
 modules/gapi/src/compiler/gstreaming_priv.hpp |   6 +
 .../test/streaming/gapi_streaming_tests.cpp   |  35 +++++
 12 files changed, 274 insertions(+), 11 deletions(-)
 create mode 100644 modules/gapi/misc/python/test/test_gapi_streaming.py

diff --git a/modules/gapi/include/opencv2/gapi/gcomputation.hpp b/modules/gapi/include/opencv2/gapi/gcomputation.hpp
index 1172c0f5d6..8732ada0d6 100644
--- a/modules/gapi/include/opencv2/gapi/gcomputation.hpp
+++ b/modules/gapi/include/opencv2/gapi/gcomputation.hpp
@@ -436,7 +436,7 @@ public:
      *
      * @sa @ref gapi_compile_args
      */
-    GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
+    GAPI_WRAP GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
 
     /**
      * @brief Compile the computation for streaming mode.
@@ -457,7 +457,7 @@ public:
      *
      * @sa @ref gapi_compile_args
      */
-    GStreamingCompiled compileStreaming(GCompileArgs &&args = {});
+    GAPI_WRAP GStreamingCompiled compileStreaming(GCompileArgs &&args = {});
 
     // 2. Direct metadata version
     /**
diff --git a/modules/gapi/include/opencv2/gapi/gproto.hpp b/modules/gapi/include/opencv2/gapi/gproto.hpp
index fbcccb38ea..f91fcdb2c8 100644
--- a/modules/gapi/include/opencv2/gapi/gproto.hpp
+++ b/modules/gapi/include/opencv2/gapi/gproto.hpp
@@ -135,7 +135,7 @@ GRunArg value_of(const GOrigin &origin);
 // Transform run-time computation arguments into a collection of metadata
 // extracted from that arguments
 GMetaArg  GAPI_EXPORTS descr_of(const GRunArg  &arg );
-GMetaArgs GAPI_EXPORTS descr_of(const GRunArgs &args);
+GMetaArgs GAPI_EXPORTS_W descr_of(const GRunArgs &args);
 
 // Transform run-time operation result argument into metadata extracted from that argument
 // Used to compare the metadata, which generated at compile time with the metadata result operation in run time
diff --git a/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/modules/gapi/include/opencv2/gapi/gstreaming.hpp
index 7079042069..f45c30bdae 100644
--- a/modules/gapi/include/opencv2/gapi/gstreaming.hpp
+++ b/modules/gapi/include/opencv2/gapi/gstreaming.hpp
@@ -49,11 +49,11 @@ namespace cv {
  *
  * @sa GCompiled
  */
-class GAPI_EXPORTS GStreamingCompiled
+class GAPI_EXPORTS_W_SIMPLE GStreamingCompiled
 {
 public:
     class GAPI_EXPORTS Priv;
-    GStreamingCompiled();
+    GAPI_WRAP GStreamingCompiled();
 
     // FIXME: More overloads?
     /**
@@ -96,7 +96,7 @@ public:
      * @param ins vector of inputs to process.
      * @sa gin
      */
-    void setSource(GRunArgs &&ins);
+    GAPI_WRAP void setSource(GRunArgs &&ins);
 
     /**
      * @brief Specify an input video stream for a single-input
@@ -109,7 +109,7 @@ public:
      * @param s a shared pointer to IStreamSource representing the
      * input video stream.
      */
-    void setSource(const gapi::wip::IStreamSource::Ptr& s);
+    GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s);
 
     /**
      * @brief Start the pipeline execution.
@@ -126,7 +126,7 @@ public:
      * start()/stop()/setSource() may be called on the same object in
      * multiple threads in your application.
      */
-    void start();
+    GAPI_WRAP void start();
 
     /**
      * @brief Get the next processed frame from the pipeline.
@@ -150,6 +150,9 @@ public:
      */
     bool pull(cv::GRunArgsP &&outs);
 
+    // NB: Used from Python
+    GAPI_WRAP std::tuple<bool, cv::GRunArgs> pull();
+
     /**
      * @brief Try to get the next processed frame from the pipeline.
      *
@@ -172,7 +175,7 @@ public:
      *
      * Throws if the pipeline is not running.
      */
-    void stop();
+    GAPI_WRAP void stop();
 
     /**
      * @brief Test if the pipeline is running.
@@ -184,7 +187,7 @@ public:
      *
      * @return true if the current stream is not over yet.
      */
-    bool running() const;
+    GAPI_WRAP bool running() const;
 
     /// @private
     Priv& priv();
diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index 23ad41eb25..294b3b7842 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -497,7 +497,7 @@ The median filter uses cv::BORDER_REPLICATE internally to cope with border pixel
 @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
 @sa  boxFilter, gaussianBlur
  */
-GAPI_EXPORTS GMat medianBlur(const GMat& src, int ksize);
+GAPI_EXPORTS_W GMat medianBlur(const GMat& src, int ksize);
 
 /** @brief Erodes an image by using a specific structuring element.
 
diff --git a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp
index faa555063a..9781ef1ffb 100644
--- a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp
+++ b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp
@@ -103,6 +103,12 @@ protected:
     }
 };
 
+// NB: Overload for use from Python
+GAPI_EXPORTS_W cv::Ptr<IStreamSource> inline make_capture_src(const std::string& path)
+{
+    return make_src<GCaptureSource>(path);
+}
+
 } // namespace wip
 } // namespace gapi
 } // namespace cv
diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp
index 702e8c4032..0e862a4010 100644
--- a/modules/gapi/misc/python/pyopencv_gapi.hpp
+++ b/modules/gapi/misc/python/pyopencv_gapi.hpp
@@ -3,7 +3,14 @@
 
 #ifdef HAVE_OPENCV_GAPI
 
+// NB: Python wrapper replaces :: with _ for classes
 using gapi_GKernelPackage = cv::gapi::GKernelPackage;
+using gapi_wip_IStreamSource_Ptr = cv::Ptr<cv::gapi::wip::IStreamSource>;
+
+// FIXME: The Python wrapper generates code without the std namespace,
+// which causes the error: "string wasn't declared"
+// WA: add a using-declaration
+using std::string;
 
 template<>
 bool pyopencv_to(PyObject* obj, std::vector<GCompileArg>& value, const ArgInfo& info)
@@ -78,6 +85,18 @@ PyObject* pyopencv_from(const GRunArgs& value)
     return list;
 }
 
+template<>
+bool pyopencv_to(PyObject* obj, GMetaArgs& value, const ArgInfo& info)
+{
+    return pyopencv_to_generic_vec(obj, value, info);
+}
+
+template<>
+PyObject* pyopencv_from(const GMetaArgs& value)
+{
+    return pyopencv_from_generic_vec(value);
+}
+
 template <typename T>
 static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
 {
@@ -151,6 +170,19 @@ static PyObject* pyopencv_cv_gin(PyObject* , PyObject* py_args, PyObject* kw)
                 return NULL;
             }
         }
+        else if (PyObject_TypeCheck(item,
+                    reinterpret_cast<PyTypeObject*>(pyopencv_gapi_wip_IStreamSource_TypePtr)))
+        {
+            cv::gapi::wip::IStreamSource::Ptr source =
+                reinterpret_cast<pyopencv_gapi_wip_IStreamSource_t*>(item)->v;
+            args.emplace_back(source);
+        }
+        else
+        {
+            PyErr_SetString(PyExc_TypeError, "cv.gin works only with cv::Mat, "
+                                             "cv::Scalar and cv::gapi::wip::IStreamSource::Ptr");
+            return NULL;
+        }
     }
 
     return pyopencv_from_generic_vec(args);
diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp
index 4f988440e8..72d7686eeb 100644
--- a/modules/gapi/misc/python/shadow_gapi.hpp
+++ b/modules/gapi/misc/python/shadow_gapi.hpp
@@ -7,11 +7,22 @@ namespace cv
 
    GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg);
 
+   // NB: These classes don't exist in the *.so
+   // HACK: Mark them as classes to force the Python wrapper to generate code for these entities
    class GAPI_EXPORTS_W_SIMPLE GProtoArg { };
    class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { };
    class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { };
    class GAPI_EXPORTS_W_SIMPLE GRunArg {  };
+   class GAPI_EXPORTS_W_SIMPLE GMetaArg {  };
 
    using GProtoInputArgs  = GIOProtoArgs<In_Tag>;
    using GProtoOutputArgs = GIOProtoArgs<Out_Tag>;
+
+   namespace gapi
+   {
+       namespace wip
+       {
+           class GAPI_EXPORTS_W IStreamSource { };
+       }
+   }
 } // namespace cv
diff --git a/modules/gapi/misc/python/test/test_gapi_streaming.py b/modules/gapi/misc/python/test/test_gapi_streaming.py
new file mode 100644
index 0000000000..bf182d9c91
--- /dev/null
+++ b/modules/gapi/misc/python/test/test_gapi_streaming.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+
+import numpy as np
+import cv2 as cv
+import os
+
+from tests_common import NewOpenCVTests
+
+class test_gapi_streaming(NewOpenCVTests):
+
+    def test_image_input(self):
+        sz = (1280, 720)
+        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)
+
+        # OpenCV
+        expected = cv.medianBlur(in_mat, 3)
+
+        # G-API
+        g_in = cv.GMat()
+        g_out = cv.gapi.medianBlur(g_in, 3)
+        c = cv.GComputation(g_in, g_out)
+        ccomp = c.compileStreaming(cv.descr_of(cv.gin(in_mat)))
+        ccomp.setSource(cv.gin(in_mat))
+        ccomp.start()
+
+        _, actual = ccomp.pull()
+
+        # Assert
+        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
+
+
+    def test_video_input(self):
+        ksize = 3
+        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])
+
+        # OpenCV
+        cap = cv.VideoCapture(path)
+
+        # G-API
+        g_in = cv.GMat()
+        g_out = cv.gapi.medianBlur(g_in, ksize)
+        c = cv.GComputation(g_in, g_out)
+
+        ccomp = c.compileStreaming()
+        source = cv.gapi.wip.make_capture_src(path)
+        ccomp.setSource(source)
+        ccomp.start()
+
+        # Assert
+        while cap.isOpened():
+            has_expected, expected = cap.read()
+            has_actual,   actual   = ccomp.pull()
+
+            self.assertEqual(has_expected, has_actual)
+
+            if not has_actual:
+                break
+
+            self.assertEqual(0.0, cv.norm(cv.medianBlur(expected, ksize), actual, cv.NORM_INF))
+
+
+    def test_video_split3(self):
+        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])
+
+        # OpenCV
+        cap = cv.VideoCapture(path)
+
+        # G-API
+        g_in = cv.GMat()
+        b, g, r = cv.gapi.split3(g_in)
+        c = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))
+
+        ccomp = c.compileStreaming()
+        source = cv.gapi.wip.make_capture_src(path)
+        ccomp.setSource(source)
+        ccomp.start()
+
+        # Assert
+        while cap.isOpened():
+            has_expected, frame = cap.read()
+            has_actual,   actual   = ccomp.pull()
+
+            self.assertEqual(has_expected, has_actual)
+
+            if not has_actual:
+                break
+
+            expected = cv.split(frame)
+            for e, a in zip(expected, actual):
+                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF))
+
+
+    def test_video_add(self):
+        sz = (576, 768, 3)
+        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)
+
+        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])
+
+        # OpenCV
+        cap = cv.VideoCapture(path)
+
+        # G-API
+        g_in1 = cv.GMat()
+        g_in2 = cv.GMat()
+        out = cv.gapi.add(g_in1, g_in2)
+        c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(out))
+
+        ccomp = c.compileStreaming()
+        source = cv.gapi.wip.make_capture_src(path)
+        ccomp.setSource(cv.gin(source, in_mat))
+        ccomp.start()
+
+        # Assert
+        while cap.isOpened():
+            has_expected, frame  = cap.read()
+            has_actual,   actual = ccomp.pull()
+
+            self.assertEqual(has_expected, has_actual)
+
+            if not has_actual:
+                break
+
+            expected = cv.add(frame, in_mat)
+            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
+
+
+
+if __name__ == '__main__':
+    NewOpenCVTests.bootstrap()
diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp
index 2f46ea873b..76c40ddca0 100644
--- a/modules/gapi/src/compiler/gcompiler.cpp
+++ b/modules/gapi/src/compiler/gcompiler.cpp
@@ -448,6 +448,14 @@ cv::GStreamingCompiled cv::gimpl::GCompiler::produceStreamingCompiled(GPtr &&pg)
         outMetas = GModel::ConstGraph(*pg).metadata().get<OutputMeta>().outMeta;
     }
 
+    auto out_desc = GModel::ConstGraph(*pg).metadata().get<cv::gimpl::Protocol>().outputs;
+    GShapes out_shapes;
+    for (auto&& desc : out_desc)
+    {
+        out_shapes.push_back(desc.shape);
+    }
+    compiled.priv().setOutShapes(std::move(out_shapes));
+
     std::unique_ptr<GStreamingExecutor> pE(new GStreamingExecutor(std::move(pg),
                                                                   m_args));
     if (!m_metas.empty() && !outMetas.empty())
diff --git a/modules/gapi/src/compiler/gstreaming.cpp b/modules/gapi/src/compiler/gstreaming.cpp
index 2e9c016ceb..29c98ddfd4 100644
--- a/modules/gapi/src/compiler/gstreaming.cpp
+++ b/modules/gapi/src/compiler/gstreaming.cpp
@@ -111,6 +111,39 @@ bool cv::GStreamingCompiled::pull(cv::GRunArgsP &&outs)
     return m_priv->pull(std::move(outs));
 }
 
+std::tuple<bool, cv::GRunArgs> cv::GStreamingCompiled::pull()
+{
+    GRunArgs run_args;
+    GRunArgsP outs;
+    const auto& out_shapes = m_priv->outShapes();
+    run_args.reserve(out_shapes.size());
+    outs.reserve(out_shapes.size());
+
+    for (auto&& shape : out_shapes)
+    {
+        switch (shape)
+        {
+            case cv::GShape::GMAT:
+            {
+                run_args.emplace_back(cv::Mat{});
+                outs.emplace_back(&cv::util::get<cv::Mat>(run_args.back()));
+                break;
+            }
+            case cv::GShape::GSCALAR:
+            {
+                run_args.emplace_back(cv::Scalar{});
+                outs.emplace_back(&cv::util::get<cv::Scalar>(run_args.back()));
+                break;
+            }
+            default:
+                util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for Python output"));
+        }
+    }
+
+    bool is_over = m_priv->pull(std::move(outs));
+    return std::make_tuple(is_over, run_args);
+}
+
 bool cv::GStreamingCompiled::try_pull(cv::GRunArgsP &&outs)
 {
     return m_priv->try_pull(std::move(outs));
diff --git a/modules/gapi/src/compiler/gstreaming_priv.hpp b/modules/gapi/src/compiler/gstreaming_priv.hpp
index 447bcda76e..73ca002f85 100644
--- a/modules/gapi/src/compiler/gstreaming_priv.hpp
+++ b/modules/gapi/src/compiler/gstreaming_priv.hpp
@@ -27,6 +27,7 @@ class GAPI_EXPORTS GStreamingCompiled::Priv
     GMetaArgs  m_metas;    // passed by user
     GMetaArgs  m_outMetas; // inferred by compiler
     std::unique_ptr<cv::gimpl::GStreamingExecutor> m_exec;
+    GShapes m_out_shapes;
 
 public:
     void setup(const GMetaArgs &metaArgs,
@@ -45,6 +46,11 @@ public:
     void stop();
 
     bool running() const;
+
+    // NB: std::tuple<bool, cv::GRunArgs> pull() creates GRunArgs for outputs,
+    // so it needs to know the output shapes to create the corresponding GRunArgs
+    void setOutShapes(GShapes shapes) { m_out_shapes = std::move(shapes); }
+    const GShapes& outShapes() const { return m_out_shapes; }
 };
 
 } // namespace cv
diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
index 1150e6a862..dfd2331bfd 100644
--- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp
+++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
@@ -983,4 +983,39 @@ TEST_F(GAPI_Streaming_Unit, SetSource_After_Completion)
     EXPECT_EQ(0., cv::norm(out, out_ref, cv::NORM_INF));
 }
 
+// NB: Check the pull() overload used from Python
+TEST(Streaming, Python_Pull_Overload)
+{
+    cv::GMat in;
+    auto out = cv::gapi::copy(in);
+    cv::GComputation c(in, out);
+
+    cv::Size sz(3,3);
+    cv::Mat in_mat(sz, CV_8UC3);
+    cv::randu(in_mat, cv::Scalar::all(0), cv::Scalar(255));
+
+    auto ccomp = c.compileStreaming(cv::descr_of(in_mat));
+
+    EXPECT_TRUE(ccomp);
+    EXPECT_FALSE(ccomp.running());
+
+    ccomp.setSource(cv::gin(in_mat));
+
+    ccomp.start();
+    EXPECT_TRUE(ccomp.running());
+
+    bool has_output;
+    cv::GRunArgs outputs;
+    std::tie(has_output, outputs) = ccomp.pull();
+
+    EXPECT_TRUE(has_output);
+    EXPECT_EQ(1u, outputs.size());
+
+    auto out_mat = cv::util::get<cv::Mat>(outputs[0]);
+    EXPECT_EQ(0., cv::norm(in_mat, out_mat, cv::NORM_INF));
+
+    ccomp.stop();
+    EXPECT_FALSE(ccomp.running());
+}
+
 } // namespace opencv_test
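
For reference, the API wrapped above can be driven from Python along these
lines (a minimal sketch, assuming a bindings-enabled build; the file name
`video.avi` is a placeholder):

```
import cv2 as cv

# Build a trivial G-API graph: a single median blur.
g_in  = cv.GMat()
g_out = cv.gapi.medianBlur(g_in, 3)
comp  = cv.GComputation(g_in, g_out)

# Compile for streaming, attach a video source, and run.
pipeline = comp.compileStreaming()
pipeline.setSource(cv.gapi.wip.make_capture_src('video.avi'))
pipeline.start()

while True:
    # The new pull() overload returns (has_frame, outputs) instead of
    # filling output references in-place as the C++ version does.
    has_frame, out = pipeline.pull()
    if not has_frame:
        break
    # ... consume `out` (a single cv.Mat for this one-output graph) ...

pipeline.stop()
```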

From 8bf451a3e0c6af796691ee336abd157b762fd6af Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Thu, 15 Oct 2020 16:59:02 +0300
Subject: [PATCH 026/152] Merge pull request #18542 from
 TolyaTalamanov:at/import-network

[G-API] Support ImportNetwork for cv::gapi::infer

* wip

* Refactoring

* Address review comments

* Fix warning

Co-authored-by: Ruslan Garnov <ruslan.garnov@intel.com>
---
 .../gapi/include/opencv2/gapi/infer/ie.hpp    | 22 +++++++++-
 modules/gapi/src/backends/common/gbackend.hpp |  1 +
 modules/gapi/src/backends/ie/giebackend.cpp   | 43 ++++++++++++++++---
 .../src/backends/ie/giebackend/giewrapper.cpp | 18 ++++++++
 .../src/backends/ie/giebackend/giewrapper.hpp | 13 ++++++
 5 files changed, 88 insertions(+), 9 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
index 8421d9e2c9..dd2459da08 100644
--- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
@@ -60,6 +60,8 @@ namespace detail {
         std::size_t num_in;  // How many inputs are defined in the operation
         std::size_t num_out; // How many outputs are defined in the operation
 
+        enum class Kind { Load, Import };
+        Kind kind;
         bool is_generic;
     };
 } // namespace detail
@@ -83,6 +85,16 @@ public:
         : desc{ model, weights, device, {}, {}, {}
               , std::tuple_size<typename Net::InArgs>::value  // num_in
               , std::tuple_size<typename Net::OutArgs>::value // num_out
+              , detail::ParamDesc::Kind::Load
+              , false} {
+    };
+
+    Params(const std::string &model,
+           const std::string &device)
+        : desc{ model, {}, device, {}, {}, {}
+              , std::tuple_size<typename Net::InArgs>::value  // num_in
+              , std::tuple_size<typename Net::OutArgs>::value // num_out
+              , detail::ParamDesc::Kind::Import
               , false} {
     };
 
@@ -122,11 +134,17 @@ protected:
 template<>
 class Params<cv::gapi::Generic> {
 public:
-    Params(const std::string& tag,
+    Params(const std::string &tag,
            const std::string &model,
            const std::string &weights,
            const std::string &device)
-        : desc{ model, weights, device, {}, {}, {}, 0u, 0u, true}, m_tag(tag) {
+        : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true}, m_tag(tag) {
+    };
+
+    Params(const std::string &tag,
+           const std::string &model,
+           const std::string &device)
+        : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true}, m_tag(tag) {
     };
 
     // BEGIN(G-API's network parametrization API)
diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp
index f747a0dd1c..e96d2b0776 100644
--- a/modules/gapi/src/backends/common/gbackend.hpp
+++ b/modules/gapi/src/backends/common/gbackend.hpp
@@ -27,6 +27,7 @@ namespace gimpl {
                                 : cv::Mat(v.dims(), v.type(), v.ptr());
     }
     inline RMat::View asView(const Mat& m, RMat::View::DestroyCallback&& cb = nullptr) {
+        // FIXME: View doesn't support multidimensional cv::Mat objects
         return RMat::View(cv::descr_of(m), m.data, m.step, std::move(cb));
     }
 
diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp
index b7bda2fe9f..08836163a7 100644
--- a/modules/gapi/src/backends/ie/giebackend.cpp
+++ b/modules/gapi/src/backends/ie/giebackend.cpp
@@ -175,11 +175,26 @@ struct IEUnit {
     IE::InputsDataMap inputs;
     IE::OutputsDataMap outputs;
 
+    IE::ExecutableNetwork this_network;
+    cv::gimpl::ie::wrap::Plugin this_plugin;
+
     explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
         : params(pp) {
-        net = cv::gimpl::ie::wrap::readNetwork(params);
-        inputs  = net.getInputsInfo();
-        outputs = net.getOutputsInfo();
+        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
+            net = cv::gimpl::ie::wrap::readNetwork(params);
+            inputs  = net.getInputsInfo();
+            outputs = net.getOutputsInfo();
+        } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) {
+            this_plugin  = cv::gimpl::ie::wrap::getPlugin(params);
+            this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params);
+            // FIXME: ICNNetwork returns InputsDataMap/OutputsDataMap,
+            // but ExecutableNetwork returns ConstInputsDataMap/ConstOutputsDataMap
+            inputs  = cv::gimpl::ie::wrap::toInputsDataMap(this_network.GetInputsInfo());
+            outputs = cv::gimpl::ie::wrap::toOutputsDataMap(this_network.GetOutputsInfo());
+        } else {
+            cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind"));
+        }
+
         // The practice shows that not all inputs and not all outputs
         // are mandatory to specify in IE model.
         // So what we're concerned here about is:
@@ -205,10 +220,15 @@ struct IEUnit {
 
     // This method is [supposed to be] called at Island compilation stage
     cv::gimpl::ie::IECompiled compile() const {
-        auto plugin       = cv::gimpl::ie::wrap::getPlugin(params);
-        auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
-        auto this_request = this_network.CreateInferRequest();
+        IEUnit* non_const_this = const_cast<IEUnit*>(this);
+        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
+            // FIXME: In the importNetwork case, the ExecutableNetwork must be obtained
+            // to fill inputs/outputs, but for loadNetwork they can be obtained via readNetwork
+            non_const_this->this_plugin  = cv::gimpl::ie::wrap::getPlugin(params);
+            non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin, net, params);
+        }
 
+        auto this_request = non_const_this->this_network.CreateInferRequest();
         // Bind const data to infer request
         for (auto &&p : params.const_inputs) {
             // FIXME: SetBlob is known to be inefficient,
@@ -217,7 +237,16 @@ struct IEUnit {
             // Still, constant data is to set only once.
             this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second));
         }
-        return {plugin, this_network, this_request};
+
+        return {this_plugin, this_network, this_request};
     }
 };
 
diff --git a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp
index 444d9553e7..8f5a7eca11 100644
--- a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp
+++ b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp
@@ -22,6 +22,24 @@ namespace IE = InferenceEngine;
 namespace giewrap = cv::gimpl::ie::wrap;
 using GIEParam = cv::gapi::ie::detail::ParamDesc;
 
+IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs) {
+    IE::InputsDataMap transformed;
+    auto convert = [](const std::pair<std::string, IE::InputInfo::CPtr>& p) {
+        return std::make_pair(p.first, std::make_shared<IE::InputInfo>(*p.second));
+    };
+    std::transform(inputs.begin(), inputs.end(), std::inserter(transformed, transformed.end()), convert);
+    return transformed;
+}
+
+IE::OutputsDataMap giewrap::toOutputsDataMap (const IE::ConstOutputsDataMap& outputs) {
+    IE::OutputsDataMap transformed;
+    auto convert = [](const std::pair<std::string, IE::CDataPtr>& p) {
+        return std::make_pair(p.first, std::make_shared<IE::Data>(*p.second));
+    };
+    std::transform(outputs.begin(), outputs.end(), std::inserter(transformed, transformed.end()), convert);
+    return transformed;
+}
+
 #if INF_ENGINE_RELEASE < 2020000000  // < 2020.1
 // Load extensions (taken from DNN module)
 std::vector<std::string> giewrap::getExtensions(const GIEParam& params) {
diff --git a/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp b/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp
index 7871942d26..3927c802b7 100644
--- a/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp
+++ b/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp
@@ -28,7 +28,11 @@ namespace wrap {
 GAPI_EXPORTS std::vector<std::string> getExtensions(const GIEParam& params);
 GAPI_EXPORTS IE::CNNNetwork readNetwork(const GIEParam& params);
 
+IE::InputsDataMap  toInputsDataMap (const IE::ConstInputsDataMap& inputs);
+IE::OutputsDataMap toOutputsDataMap(const IE::ConstOutputsDataMap& outputs);
+
 #if INF_ENGINE_RELEASE < 2019020000  // < 2019.R2
+using Plugin = IE::InferencePlugin;
 GAPI_EXPORTS IE::InferencePlugin getPlugin(const GIEParam& params);
 GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork(      IE::InferencePlugin& plugin,
                                                       const IE::CNNNetwork&      net,
@@ -36,7 +40,12 @@ GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork(      IE::InferencePlugin&
     return plugin.LoadNetwork(net, {}); // FIXME: 2nd parameter to be
                                         // configurable via the API
 }
+GAPI_EXPORTS inline IE::ExecutableNetwork importNetwork(      IE::InferencePlugin& plugin,
+                                                        const GIEParam& param) {
+    return plugin.ImportNetwork(param.model_path, param.device_id, {});
+}
 #else // >= 2019.R2
+using Plugin = IE::Core;
 GAPI_EXPORTS IE::Core getCore();
 GAPI_EXPORTS IE::Core getPlugin(const GIEParam& params);
 GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork(      IE::Core&       core,
@@ -44,6 +53,10 @@ GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork(      IE::Core&       core
                                                       const GIEParam& params) {
     return core.LoadNetwork(net, params.device_id);
 }
+GAPI_EXPORTS inline IE::ExecutableNetwork importNetwork(      IE::Core& core,
+                                                        const GIEParam& param) {
+    return core.ImportNetwork(param.model_path, param.device_id, {});
+}
 #endif // INF_ENGINE_RELEASE < 2019020000
 }}}}
 

From 1fb6c6e6e570b06ddeb6116b01052f06193fdc06 Mon Sep 17 00:00:00 2001
From: Krushnal Patel <krushnalpatel11@gmail.com>
Date: Wed, 14 Oct 2020 23:22:46 +0530
Subject: [PATCH 027/152] Update demosaicing.cpp

---
 modules/imgproc/src/demosaicing.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/imgproc/src/demosaicing.cpp b/modules/imgproc/src/demosaicing.cpp
index e02104d222..03bc781046 100644
--- a/modules/imgproc/src/demosaicing.cpp
+++ b/modules/imgproc/src/demosaicing.cpp
@@ -1566,9 +1566,9 @@ public:
             int x = 1;
             if (start_with_green)
             {
-                D[blue<<1] = (S[-sstep] + S[sstep]) >> 1;
+                D[blue<<1] = (S[-sstep] + S[sstep] + 1) >> 1;
                 D[1] = S[0];
-                D[2-(blue<<1)] = (S[-1] + S[1]) >> 1;
+                D[2-(blue<<1)] = (S[-1] + S[1] + 1) >> 1;
                 D += dcn;
                 ++S;
                 ++x;
@@ -1584,7 +1584,7 @@ public:
                 {
                     D[0] = S[0];
                     D[1] = (std::abs(S[-1] - S[1]) > std::abs(S[sstep] - S[-sstep]) ? (S[sstep] + S[-sstep] + 1) : (S[-1] + S[1] + 1)) >> 1;
-                    D[2] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1]) >> 2;
+                    D[2] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1] + 2) >> 2;
 
                     D[3] = (S[0] + S[2] + 1) >> 1;
                     D[4] = S[1];

From bc6a70c689745e40a84ff70b67cf29106449799f Mon Sep 17 00:00:00 2001
From: Pavel Rojtberg <rojtberg@gmail.com>
Date: Wed, 7 Oct 2020 12:19:17 +0200
Subject: [PATCH 028/152] imwrite: multi-image overload for bindings

---
 modules/imgcodecs/include/opencv2/imgcodecs.hpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp
index e1f8208e0b..e2636e19f7 100644
--- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp
+++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp
@@ -220,6 +220,14 @@ It also demonstrates how to save multiple images in a TIFF file:
 CV_EXPORTS_W bool imwrite( const String& filename, InputArray img,
               const std::vector<int>& params = std::vector<int>());
 
+/// @overload Multi-image version for the bindings
+CV_WRAP static inline
+bool imwritemulti(const String& filename, InputArrayOfArrays img,
+                  const std::vector<int>& params = std::vector<int>())
+{
+    return imwrite(filename, img, params);
+}
+
 /** @brief Reads an image from a buffer in memory.
 
 The function imdecode reads an image from the specified buffer in the memory. If the buffer is too short or
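
In the Python bindings this overload surfaces as `cv.imwritemulti`; a minimal
usage sketch (assuming an imgcodecs build with TIFF support, since multi-page
output is format-dependent):

```
import numpy as np
import cv2 as cv

pages = [np.full((64, 64), v, dtype=np.uint8) for v in (0, 128, 255)]

# Write all pages into a single multi-page TIFF via the new overload.
assert cv.imwritemulti('pages.tiff', pages)

# Read the pages back to verify; imreadmulti returns (retval, [images]).
ok, loaded = cv.imreadmulti('pages.tiff', flags=cv.IMREAD_GRAYSCALE)
assert ok and len(loaded) == 3
```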

From aa51382dbc6526df2e3b0aa1c438d6c3cc8854cc Mon Sep 17 00:00:00 2001
From: arodrigu <alejandro.rodriguez@inait.ai>
Date: Fri, 16 Oct 2020 14:41:41 +0200
Subject: [PATCH 029/152] Fix: UsacParams Python bindings

---
 modules/calib3d/include/opencv2/calib3d.hpp | 23 +++++++++++----------
 modules/calib3d/src/solvepnp.cpp            | 15 ++++++++++++++
 2 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index cc5fdbfe1c..a1a4c57d8a 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -550,17 +550,18 @@ enum NeighborSearchMethod { NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS };
 
 struct CV_EXPORTS_W_SIMPLE UsacParams
 { // in alphabetical order
-    double confidence = 0.99;
-    bool isParallel = false;
-    int loIterations = 5;
-    LocalOptimMethod loMethod = LocalOptimMethod::LOCAL_OPTIM_INNER_LO;
-    int loSampleSize = 14;
-    int maxIterations = 5000;
-    NeighborSearchMethod neighborsSearch = NeighborSearchMethod::NEIGH_GRID;
-    int randomGeneratorState = 0;
-    SamplingMethod sampler = SamplingMethod::SAMPLING_UNIFORM;
-    ScoreMethod score = ScoreMethod::SCORE_METHOD_MSAC;
-    double threshold = 1.5;
+    CV_WRAP UsacParams();
+    CV_PROP_RW double confidence;
+    CV_PROP_RW bool isParallel;
+    CV_PROP_RW int loIterations;
+    CV_PROP_RW LocalOptimMethod loMethod;
+    CV_PROP_RW int loSampleSize;
+    CV_PROP_RW int maxIterations;
+    CV_PROP_RW NeighborSearchMethod neighborsSearch;
+    CV_PROP_RW int randomGeneratorState;
+    CV_PROP_RW SamplingMethod sampler;
+    CV_PROP_RW ScoreMethod score;
+    CV_PROP_RW double threshold;
 };
 
 /** @brief Converts a rotation matrix to a rotation vector or vice versa.
diff --git a/modules/calib3d/src/solvepnp.cpp b/modules/calib3d/src/solvepnp.cpp
index 5c04662489..bb595085fa 100644
--- a/modules/calib3d/src/solvepnp.cpp
+++ b/modules/calib3d/src/solvepnp.cpp
@@ -197,6 +197,21 @@ public:
     Mat tvec;
 };
 
+UsacParams::UsacParams()
+{
+    confidence = 0.99;
+    isParallel = false;
+    loIterations = 5;
+    loMethod = LocalOptimMethod::LOCAL_OPTIM_INNER_LO;
+    loSampleSize = 14;
+    maxIterations = 5000;
+    neighborsSearch = NeighborSearchMethod::NEIGH_GRID;
+    randomGeneratorState = 0;
+    sampler = SamplingMethod::SAMPLING_UNIFORM;
+    score = ScoreMethod::SCORE_METHOD_MSAC;
+    threshold = 1.5;
+}
+
 bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                     InputArray _cameraMatrix, InputArray _distCoeffs,
                     OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
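
With a wrapped constructor and CV_PROP_RW fields, the struct becomes
constructible and configurable from Python. A minimal sketch (the values are
arbitrary; the enum constants are assumed to be exposed at module level, as
OpenCV does for wrapped enums):

```
import cv2 as cv

params = cv.UsacParams()            # defaults come from the new C++ constructor
params.confidence = 0.999
params.maxIterations = 10000
params.sampler = cv.SAMPLING_UNIFORM
params.score = cv.SCORE_METHOD_MSAC
params.threshold = 2.0

# The configured object can then be passed to a USAC-enabled overload,
# e.g. cv.solvePnPRansac(obj_pts, img_pts, K, dist, params=params),
# depending on the overloads available in the build.
```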

From b5717f82a04019fa2fa8ac7e671d5403cee324ce Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 16 Oct 2020 15:35:51 +0000
Subject: [PATCH 030/152] core: fix __clang_major__ typo regression

---
 modules/core/include/opencv2/core/cvdef.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h
index 08db1c820f..5bd3af33a4 100644
--- a/modules/core/include/opencv2/core/cvdef.h
+++ b/modules/core/include/opencv2/core/cvdef.h
@@ -90,7 +90,7 @@ namespace cv { namespace debug_build_guard { } using namespace debug_build_guard
 // keep current value (through OpenCV port file)
 #elif defined __GNUC__ || (defined (__cpluscplus) && (__cpluscplus >= 201103))
 #define CV_Func __func__
-#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major >= 305)
+#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major__ >= 305)
 #define CV_Func __func__
 #elif defined(__STDC_VERSION__) && (__STDC_VERSION >= 199901)
 #define CV_Func __func__

From ef21fd3cf8496e5c03da4920dee767c2b7df1615 Mon Sep 17 00:00:00 2001
From: Giles Payne <gilespayne@gmail.com>
Date: Sat, 17 Oct 2020 14:31:24 +0900
Subject: [PATCH 031/152] Fix handling of properties with enum type

---
 modules/objc/generator/gen_objc.py | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index 1ae00ab5f1..9ea245eef0 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -274,8 +274,9 @@ class ClassInfo(GeneralInfo):
 
     def getForwardDeclarations(self, module):
         enum_decl = filter(lambda x:self.isEnum(x) and type_dict[x]["import_module"] != module, self.imports)
+        enum_imports = list(set(map(lambda m: type_dict[m]["import_module"], enum_decl)))
         class_decl = filter(lambda x: not self.isEnum(x), self.imports)
-        return ["#import \"%s.h\"" % type_dict[c]["import_module"] for c in enum_decl] + [""] + ["@class %s;" % c for c in sorted(class_decl)]
+        return ["#import \"%s.h\"" % c for c in enum_imports] + [""] + ["@class %s;" % c for c in sorted(class_decl)]
 
     def addImports(self, ctype, is_out_type):
         if ctype == self.cname:
@@ -721,10 +722,7 @@ class ObjectiveCWrapperGenerator(object):
 
         # class props
         for p in decl[3]:
-            if True: #"vector" not in p[0]:
-                classinfo.props.append( ClassPropInfo(p) )
-            else:
-                logging.warning("Skipped property: [%s]" % name, p)
+            classinfo.props.append( ClassPropInfo(p) )
 
         if name != self.Module:
             type_dict.setdefault("Ptr_"+name, {}).update(
@@ -786,7 +784,8 @@ class ObjectiveCWrapperGenerator(object):
             type_dict[objc_type] = { "cast_to" : get_cname(enumType),
                                      "objc_type": objc_type,
                                      "is_enum": True,
-                                     "import_module": import_module}
+                                     "import_module": import_module,
+                                     "from_cpp": "(" + objc_type + ")%(n)s"}
             self.classes[self.Module].member_enums.append(objc_type)
 
         const_decls = decl[3]
@@ -1301,7 +1300,7 @@ typedef NS_ENUM(int, {2}) {{
                     ci.method_implementations.write("\t" + ("\n\t".join(prologue)) + "\n")
                     ci.method_implementations.write("\t" + ptr_ref + pi.name + " = valVector;\n}\n\n")
                 else:
-                    to_cpp = type_data.get("to_cpp", "%(n)s")
+                    to_cpp = type_data.get("to_cpp", ("(" + type_data.get("cast_to") + ")%(n)s") if type_data.has_key("cast_to") else "%(n)s")
                     val = to_cpp % {"n": pi.name}
                     ci.method_implementations.write("-(void)set" + pi.name[0].upper() + pi.name[1:] + ":(" + objc_type + ")" + pi.name + " {\n\t" + ptr_ref + pi.name + " = " + val + ";\n}\n\n")
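
The core of the forward-declaration fix is deduplicating the import modules
before emitting `#import` lines, so several enums from the same module yield a
single import. A standalone sketch of that logic (the type names below are
hypothetical):

```
type_dict = {
    # hypothetical enum entries that both live in the same generated module
    "AKAZE_DescriptorType": {"import_module": "Features2d"},
    "ORB_ScoreType":        {"import_module": "Features2d"},
}
enum_decl = ["AKAZE_DescriptorType", "ORB_ScoreType"]

enum_imports = list(set(type_dict[m]["import_module"] for m in enum_decl))
print(['#import "%s.h"' % c for c in enum_imports])
# -> ['#import "Features2d.h"'] (the old code emitted one import per enum)
```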
 

From c82417697a35c1b265603328d9b292cc07d3d8b3 Mon Sep 17 00:00:00 2001
From: Kun Liang <kun.liang@intel.com>
Date: Mon, 19 Oct 2020 04:30:36 +0800
Subject: [PATCH 032/152] Merge pull request #18068 from
 lionkunonly:gsoc_2020_simd

[GSoC] OpenCV.js: WASM SIMD optimization 2.0

* gsoc_2020_simd Add perf test for filter2d

* add perf test for kernel scharr and kernel gaussianBlur

* add perf test for blur, medianBlur, erode, dilate

* fix the errors for the opencv PR robot

fix the trailing whitespace.

* add perf tests for kernel remap, warpAffine, warpPerspective, pyrDown

* fix a bug in  modules/js/perf/perf_imgproc/perf_remap.js

* add function smoothBorder in helpfun.js and remove the duplicated function from the perf tests of warpAffine and warpPerspective

* fix the trailing white space issues

* add OpenCV.js loader

* Implement the Loader with help of WebAssembly Feature Detection, remove trailing whitespaces

* modify the explanation of the loader in js_setup.markdown and fix a bug in loader.js
---
 .../js_setup/js_setup/js_setup.markdown       |  34 ++
 .../include/opencv2/core/hal/intrin_wasm.hpp  | 313 +++------------
 modules/js/CMakeLists.txt                     |  19 +
 modules/js/perf/base.js                       |  31 +-
 modules/js/perf/perf_64bits.html              |  67 ++++
 modules/js/perf/perf_64bits.js                | 180 +++++++++
 modules/js/perf/perf_helpfunc.js              | 244 ++++++++++++
 modules/js/perf/perf_imgproc/perf_blur.html   |  73 ++++
 modules/js/perf/perf_imgproc/perf_blur.js     | 130 ++++++
 modules/js/perf/perf_imgproc/perf_cvtcolor.js | 374 +++++-------------
 modules/js/perf/perf_imgproc/perf_dilate.html |  73 ++++
 modules/js/perf/perf_imgproc/perf_dilate.js   | 117 ++++++
 modules/js/perf/perf_imgproc/perf_erode.html  |  73 ++++
 modules/js/perf/perf_imgproc/perf_erode.js    | 117 ++++++
 .../js/perf/perf_imgproc/perf_filter2D.html   |  73 ++++
 modules/js/perf/perf_imgproc/perf_filter2D.js | 127 ++++++
 .../perf/perf_imgproc/perf_gaussianBlur.html  |  73 ++++
 .../js/perf/perf_imgproc/perf_gaussianBlur.js | 126 ++++++
 .../js/perf/perf_imgproc/perf_medianBlur.html |  73 ++++
 .../js/perf/perf_imgproc/perf_medianBlur.js   | 118 ++++++
 .../js/perf/perf_imgproc/perf_pyrDown.html    |  73 ++++
 modules/js/perf/perf_imgproc/perf_pyrDown.js  | 116 ++++++
 modules/js/perf/perf_imgproc/perf_remap.html  |  73 ++++
 modules/js/perf/perf_imgproc/perf_remap.js    | 182 +++++++++
 modules/js/perf/perf_imgproc/perf_resize.js   | 227 +++--------
 modules/js/perf/perf_imgproc/perf_scharr.html |  73 ++++
 modules/js/perf/perf_imgproc/perf_scharr.js   | 156 ++++++++
 modules/js/perf/perf_imgproc/perf_sobel.html  |  73 ++++
 modules/js/perf/perf_imgproc/perf_sobel.js    | 170 ++++++++
 .../js/perf/perf_imgproc/perf_threshold.js    | 231 ++++-------
 .../js/perf/perf_imgproc/perf_warpAffine.html |  73 ++++
 .../js/perf/perf_imgproc/perf_warpAffine.js   | 130 ++++++
 .../perf_imgproc/perf_warpPerspective.html    |  73 ++++
 .../perf/perf_imgproc/perf_warpPerspective.js | 143 +++++++
 modules/js/src/loader.js                      |  96 +++++
 platforms/js/build_js.py                      |  14 +
 36 files changed, 3502 insertions(+), 836 deletions(-)
 create mode 100644 modules/js/perf/perf_64bits.html
 create mode 100644 modules/js/perf/perf_64bits.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_blur.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_blur.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_dilate.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_dilate.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_erode.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_erode.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_filter2D.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_filter2D.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_gaussianBlur.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_gaussianBlur.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_medianBlur.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_medianBlur.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_pyrDown.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_pyrDown.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_remap.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_remap.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_scharr.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_scharr.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_sobel.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_sobel.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_warpAffine.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_warpAffine.js
 create mode 100644 modules/js/perf/perf_imgproc/perf_warpPerspective.html
 create mode 100644 modules/js/perf/perf_imgproc/perf_warpPerspective.js
 create mode 100644 modules/js/src/loader.js

diff --git a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
index 87167cd219..435f06fe02 100644
--- a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
+++ b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
@@ -32,6 +32,15 @@ source ./emsdk_env.sh
 echo ${EMSCRIPTEN}
 @endcode
 
+Emscripten version 1.39.16 has been verified to work with the latest WebAssembly features. Make sure your emscripten version is recent enough if you want to use the newest WebAssembly features.
+
+For example:
+@code{.bash}
+./emsdk update
+./emsdk install 1.39.16
+./emsdk activate 1.39.16
+@endcode
+
 Obtaining OpenCV Source Code
 --------------------------
 
@@ -76,6 +85,31 @@ Building OpenCV.js from Source
     python ./platforms/js/build_js.py build_wasm --build_wasm
     @endcode
 
+-#  [Optional] To build the OpenCV.js loader, append `--build_loader`.
+
+    For example:
+    @code{.bash}
+    python ./platforms/js/build_js.py build_js --build_loader
+    @endcode
+
+    @note
+    The loader is implemented as a JavaScript file at the path `<opencv_js_dir>/bin/loader.js`. It uses [WebAssembly Feature Detection](https://github.com/GoogleChromeLabs/wasm-feature-detect) to detect the features of the browser and automatically load the matching OpenCV.js build. To use it, include the UMD version of [WebAssembly Feature Detection](https://github.com/GoogleChromeLabs/wasm-feature-detect) together with `loader.js` in your Web application.
+
+    Example Code:
+    @code{.js}
+    // Set paths configuration
+    let pathsConfig = {
+        wasm: "../../build_wasm/opencv.js",
+        threads: "../../build_mt/opencv.js",
+        simd: "../../build_simd/opencv.js",
+        threadsSimd: "../../build_mtSIMD/opencv.js",
+    }
+
+    // Load OpenCV.js, passing pathsConfig and the main function as parameters.
+    loadOpenCV(pathsConfig, main);
+    @endcode
+
+
 -#  [optional] To build documents, append `--build_doc` option.
 
     For example:
diff --git a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp
index d1bfb6da6d..ef928f6a5c 100644
--- a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp
@@ -207,13 +207,7 @@ struct v_uint64x2
 
     uint64 get0() const
     {
-#ifdef __wasm_unimplemented_simd128__
         return (uint64)wasm_i64x2_extract_lane(val, 0);
-#else
-        uint64 des[2];
-        wasm_v128_store(des, val);
-        return des[0];
-#endif
     }
 
     v128_t val;
@@ -235,13 +229,7 @@ struct v_int64x2
 
     int64 get0() const
     {
-#ifdef __wasm_unimplemented_simd128__
         return wasm_i64x2_extract_lane(val, 0);
-#else
-        int64 des[2];
-        wasm_v128_store(des, val);
-        return des[0];
-#endif
     }
 
     v128_t val;
@@ -263,13 +251,7 @@ struct v_float64x2
 
     double get0() const
     {
-#ifdef __wasm_unimplemented_simd128__
         return wasm_f64x2_extract_lane(val, 0);
-#else
-        double des[2];
-        wasm_v128_store(des, val);
-        return des[0];
-#endif
     }
 
     v128_t val;
@@ -1797,22 +1779,9 @@ OPENCV_HAL_IMPL_WASM_INITVEC(v_int16x8, short, s16, i16x8, short)
 OPENCV_HAL_IMPL_WASM_INITVEC(v_uint32x4, unsigned, u32, i32x4, int)
 OPENCV_HAL_IMPL_WASM_INITVEC(v_int32x4, int, s32, i32x4, int)
 OPENCV_HAL_IMPL_WASM_INITVEC(v_float32x4, float, f32, f32x4, float)
-
-#ifdef __wasm_unimplemented_simd128__
 OPENCV_HAL_IMPL_WASM_INITVEC(v_uint64x2, uint64, u64, i64x2, int64)
 OPENCV_HAL_IMPL_WASM_INITVEC(v_int64x2, int64, s64, i64x2, int64)
 OPENCV_HAL_IMPL_WASM_INITVEC(v_float64x2, double, f64, f64x2, double)
-#else
-#define OPENCV_HAL_IMPL_FALLBACK_INITVEC(_Tpvec, _Tp, suffix, _Tps) \
-inline _Tpvec v_setzero_##suffix() { return _Tpvec((_Tps)0, (_Tps)0); } \
-inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec((_Tps)v, (_Tps)v); } \
-template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \
-{ return _Tpvec(a.val); }
-
-OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_uint64x2, uint64, u64, int64)
-OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_int64x2, int64, s64, int64)
-OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_float64x2, double, f64, double)
-#endif
 
 //////////////// PACK ///////////////
 inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b)
@@ -1931,28 +1900,18 @@ inline v_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b)
 template<int n>
 inline v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
     v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n);
     v128_t b1 = wasm_u64x2_shr(wasm_i64x2_add(b.val, delta), n);
     return v_uint32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
-#else
-    fallback::v_uint64x2 a_(a), b_(b);
-    return fallback::v_rshr_pack<n>(a_, b_);
-#endif
 }
 template<int n>
 inline v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
     v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n);
     v128_t b1 = wasm_i64x2_shr(wasm_i64x2_add(b.val, delta), n);
     return v_int32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
-#else
-    fallback::v_int64x2 a_(a), b_(b);
-    return fallback::v_rshr_pack<n>(a_, b_);
-#endif
 }
 template<int n>
 inline v_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b)
@@ -2139,7 +2098,6 @@ inline void v_rshr_pack_store(short* ptr, const v_int32x4& a)
 template<int n>
 inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
     v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n);
     v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
@@ -2148,15 +2106,10 @@ inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a)
     for (int i=0; i<2; ++i) {
         ptr[i] = t_ptr[i];
     }
-#else
-    fallback::v_uint64x2 _a(a);
-    fallback::v_rshr_pack_store<n>(ptr, _a);
-#endif
 }
 template<int n>
 inline void v_rshr_pack_store(int* ptr, const v_int64x2& a)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
     v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n);
     v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
@@ -2165,10 +2118,6 @@ inline void v_rshr_pack_store(int* ptr, const v_int64x2& a)
     for (int i=0; i<2; ++i) {
         ptr[i] = t_ptr[i];
     }
-#else
-    fallback::v_int64x2 _a(a);
-    fallback::v_rshr_pack_store<n>(ptr, _a);
-#endif
 }
 template<int n>
 inline void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a)
@@ -2228,7 +2177,6 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin
                            const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f,
                            const v_uint64x2& g, const v_uint64x2& h)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t maxval = wasm_i32x4_splat(255);
     v128_t a1 = wasm_v128_bitselect(maxval, a.val, ((__u64x2)(a.val) > (__u64x2)maxval));
     v128_t b1 = wasm_v128_bitselect(maxval, b.val, ((__u64x2)(b.val) > (__u64x2)maxval));
@@ -2245,10 +2193,6 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin
     v128_t abcd = wasm_v8x16_shuffle(ab, cd, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19);
     v128_t efgh = wasm_v8x16_shuffle(ef, gh, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19);
     return v_uint8x16(wasm_v8x16_shuffle(abcd, efgh, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23));
-#else
-    fallback::v_uint64x2 a_(a), b_(b), c_(c), d_(d), e_(e), f_(f), g_(g), h_(h);
-    return fallback::v_pack_b(a_, b_, c_, d_, e_, f_, g_, h_);
-#endif
 }
 
 inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
@@ -2310,8 +2254,6 @@ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float32x4, wasm_f32x4_add)
 OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float32x4, wasm_f32x4_sub)
 OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float32x4, wasm_f32x4_mul)
 OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float32x4, wasm_f32x4_div)
-
-#ifdef __wasm_unimplemented_simd128__
 OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint64x2, wasm_i64x2_add)
 OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint64x2, wasm_i64x2_sub)
 OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int64x2, wasm_i64x2_add)
@@ -2320,30 +2262,6 @@ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float64x2, wasm_f64x2_add)
 OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float64x2, wasm_f64x2_sub)
 OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float64x2, wasm_f64x2_mul)
 OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float64x2, wasm_f64x2_div)
-#else
-#define OPENCV_HAL_IMPL_FALLBACK_BIN_OP(bin_op, _Tpvec) \
-inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
-{ \
-    fallback::_Tpvec a_(a), b_(b); \
-    return _Tpvec((a_) bin_op (b_)); \
-} \
-inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
-{ \
-    fallback::_Tpvec a_(a), b_(b); \
-    a_ bin_op##= b_; \
-    a = _Tpvec(a_); \
-    return a; \
-}
-
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_uint64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_uint64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_int64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_int64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_float64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_float64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(*, v_float64x2)
-OPENCV_HAL_IMPL_FALLBACK_BIN_OP(/, v_float64x2)
-#endif
 
 // saturating multiply 8-bit, 16-bit
 #define OPENCV_HAL_IMPL_WASM_MUL_SAT(_Tpvec, _Tpwvec)        \
@@ -2405,19 +2323,11 @@ inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,
 inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,
                          v_uint64x2& c, v_uint64x2& d)
 {
-#ifdef __wasm_unimplemented_simd128__
     v_uint64x2 a0, a1, b0, b1;
     v_expand(a, a0, a1);
     v_expand(b, b0, b1);
     c.val = ((__u64x2)(a0.val) * (__u64x2)(b0.val));
     d.val = ((__u64x2)(a1.val) * (__u64x2)(b1.val));
-#else
-    fallback::v_uint32x4 a_(a), b_(b);
-    fallback::v_uint64x2 c_, d_;
-    fallback::v_mul_expand(a_, b_, c_, d_);
-    c = v_uint64x2(c_);
-    d = v_uint64x2(d_);
-#endif
 }
 
 inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b)
@@ -2457,7 +2367,6 @@ inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32
 
 inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t a0 = wasm_i64x2_shr(wasm_i64x2_shl(a.val, 32), 32);
     v128_t a1 = wasm_i64x2_shr(a.val, 32);
     v128_t b0 = wasm_i64x2_shr(wasm_i64x2_shl(b.val, 32), 32);
@@ -2465,22 +2374,10 @@ inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b)
     v128_t c = (v128_t)((__i64x2)a0 * (__i64x2)b0);
     v128_t d = (v128_t)((__i64x2)a1 * (__i64x2)b1);
     return v_int64x2(wasm_i64x2_add(c, d));
-#else
-    fallback::v_int32x4 a_(a);
-    fallback::v_int32x4 b_(b);
-    return fallback::v_dotprod(a_, b_);
-#endif
 }
 inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c)
 {
-#ifdef __wasm_unimplemented_simd128__
     return v_dotprod(a, b) + c;
-#else
-    fallback::v_int32x4 a_(a);
-    fallback::v_int32x4 b_(b);
-    fallback::v_int64x2 c_(c);
-    return fallback::v_dotprod(a_, b_, c_);
-#endif
 }
 
 // 8 >> 32
@@ -2515,32 +2412,32 @@ inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, const
 // 16 >> 64
 inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b)
 {
-    fallback::v_uint16x8 a_(a);
-    fallback::v_uint16x8 b_(b);
-    return fallback::v_dotprod_expand(a_, b_);
+    v128_t a0 = wasm_u32x4_shr(wasm_i32x4_shl(a.val, 16), 16);
+    v128_t a1 = wasm_u32x4_shr(a.val, 16);
+    v128_t b0 = wasm_u32x4_shr(wasm_i32x4_shl(b.val, 16), 16);
+    v128_t b1 = wasm_u32x4_shr(b.val, 16);
+    return v_uint64x2((
+        v_dotprod(v_int32x4(a0), v_int32x4(b0)) +
+        v_dotprod(v_int32x4(a1), v_int32x4(b1))).val
+    );
 }
 inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c)
-{
-    fallback::v_uint16x8 a_(a);
-    fallback::v_uint16x8 b_(b);
-    fallback::v_uint64x2 c_(c);
-    return fallback::v_dotprod_expand(a_, b_, c_);
-}
+{ return v_dotprod_expand(a, b) + c; }
 
 inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b)
 {
-    fallback::v_int16x8 a_(a);
-    fallback::v_int16x8 b_(b);
-    return fallback::v_dotprod_expand(a_, b_);
+    v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16);
+    v128_t a1 = wasm_i32x4_shr(a.val, 16);
+    v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16);
+    v128_t b1 = wasm_i32x4_shr(b.val, 16);
+    return v_int64x2((
+        v_dotprod(v_int32x4(a0), v_int32x4(b0)) +
+        v_dotprod(v_int32x4(a1), v_int32x4(b1)))
+    );
 }
 
 inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c)
-{
-    fallback::v_int16x8 a_(a);
-    fallback::v_int16x8 b_(b);
-    fallback::v_int64x2 c_(c);
-    return fallback::v_dotprod_expand(a_, b_, c_);
-}
+{ return v_dotprod_expand(a, b) + c; }
 
 // 32 >> 64f
 inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b)
@@ -2610,44 +2507,24 @@ OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float64x2)
 
 inline v_float32x4 v_sqrt(const v_float32x4& x)
 {
-#ifdef __wasm_unimplemented_simd128__
     return v_float32x4(wasm_f32x4_sqrt(x.val));
-#else
-    fallback::v_float32x4 x_(x);
-    return fallback::v_sqrt(x_);
-#endif
 }
 
 inline v_float32x4 v_invsqrt(const v_float32x4& x)
 {
-#ifdef __wasm_unimplemented_simd128__
     const v128_t _1_0 = wasm_f32x4_splat(1.0);
     return v_float32x4(wasm_f32x4_div(_1_0, wasm_f32x4_sqrt(x.val)));
-#else
-    fallback::v_float32x4 x_(x);
-    return fallback::v_invsqrt(x_);
-#endif
 }
 
 inline v_float64x2 v_sqrt(const v_float64x2& x)
 {
-#ifdef __wasm_unimplemented_simd128__
     return v_float64x2(wasm_f64x2_sqrt(x.val));
-#else
-    fallback::v_float64x2 x_(x);
-    return fallback::v_sqrt(x_);
-#endif
 }
 
 inline v_float64x2 v_invsqrt(const v_float64x2& x)
 {
-#ifdef __wasm_unimplemented_simd128__
     const v128_t _1_0 = wasm_f64x2_splat(1.0);
     return v_float64x2(wasm_f64x2_div(_1_0, wasm_f64x2_sqrt(x.val)));
-#else
-    fallback::v_float64x2 x_(x);
-    return fallback::v_invsqrt(x_);
-#endif
 }
 
 #define OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(_Tpuvec, _Tpsvec, suffix, zsuffix, shiftWidth) \
@@ -2666,12 +2543,7 @@ inline v_float32x4 v_abs(const v_float32x4& x)
 { return v_float32x4(wasm_f32x4_abs(x.val)); }
 inline v_float64x2 v_abs(const v_float64x2& x)
 {
-#ifdef __wasm_unimplemented_simd128__
     return v_float64x2(wasm_f64x2_abs(x.val));
-#else
-    fallback::v_float64x2 x_(x);
-    return fallback::v_abs(x_);
-#endif
 }
 
 // TODO: exp, log, sin, cos
@@ -2684,21 +2556,8 @@ inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
 
 OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_min, wasm_f32x4_min)
 OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_max, wasm_f32x4_max)
-
-#ifdef __wasm_unimplemented_simd128__
 OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_min, wasm_f64x2_min)
 OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_max, wasm_f64x2_max)
-#else
-#define OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(func) \
-inline v_float64x2 func(const v_float64x2& a, const v_float64x2& b) \
-{ \
-    fallback::v_float64x2 a_(a), b_(b); \
-    return fallback::func(a_, b_); \
-}
-
-OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_min)
-OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_max)
-#endif
 
 #define OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(_Tpvec, suffix) \
 inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \
@@ -2753,24 +2612,7 @@ OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int16x8, i16x8, i16x8)
 OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint32x4, u32x4, i32x4)
 OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int32x4, i32x4, i32x4)
 OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float32x4, f32x4, f32x4)
-
-#ifdef __wasm_unimplemented_simd128__
 OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float64x2, f64x2, f64x2)
-#else
-#define OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(_Tpvec, bin_op) \
-inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
-{ \
-    fallback::_Tpvec a_(a), b_(b); \
-    return _Tpvec((a_) bin_op (b_));\
-} \
-
-OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, ==)
-OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, !=)
-OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <)
-OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >)
-OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <=)
-OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >=)
-#endif
 
 #define OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(_Tpvec, cast) \
 inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
@@ -2789,14 +2631,9 @@ inline v_float32x4 v_not_nan(const v_float32x4& a)
 }
 inline v_float64x2 v_not_nan(const v_float64x2& a)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t z = wasm_i64x2_splat(0x7fffffffffffffff);
     v128_t t = wasm_i64x2_splat(0x7ff0000000000000);
     return v_float64x2((__u64x2)(wasm_v128_and(a.val, z)) < (__u64x2)t);
-#else
-    fallback::v_float64x2 a_(a);
-    return fallback::v_not_nan(a_);
-#endif
 }
 
 OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_add_wrap, wasm_i8x16_add)
@@ -2877,32 +2714,30 @@ inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b)
 }
 inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b)
 {
-#ifdef __wasm_unimplemented_simd128__
     v128_t absmask_vec = wasm_u64x2_shr(wasm_i32x4_splat(-1), 1);
     return v_float64x2(wasm_v128_and(wasm_f64x2_sub(a.val, b.val), absmask_vec));
-#else
-    fallback::v_float64x2 a_(a), b_(b);
-    return fallback::v_absdiff(a_, b_);
-#endif
 }
 
-#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec) \
+#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec, suffix) \
 inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \
 { \
-    fallback::_Tpvec a_(a), b_(b); \
-    return fallback::v_magnitude(a_, b_); \
+    v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \
+    v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \
+    return _Tpvec(wasm_##suffix##_sqrt(wasm_##suffix##_add(a_Square, b_Square))); \
 } \
 inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \
 { \
-    return v_fma(a, a, b*b); \
+    v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \
+    v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \
+    return _Tpvec(wasm_##suffix##_add(a_Square, b_Square)); \
 } \
 inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \
 { \
-    return v_fma(a, b, c); \
+    return _Tpvec(wasm_##suffix##_add(wasm_##suffix##_mul(a.val, b.val), c.val)); \
 }
 
-OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4)
-OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2)
+OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4, f32x4)
+OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2, f64x2)
 
 #define OPENCV_HAL_IMPL_WASM_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, ssuffix) \
 inline _Tpuvec operator << (const _Tpuvec& a, int imm) \
@@ -2945,37 +2780,7 @@ inline _Tpsvec v_shr(const _Tpsvec& a) \
 OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint8x16, v_int8x16, i8x16, u8x16)
 OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint16x8, v_int16x8, i16x8, u16x8)
 OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint32x4, v_int32x4, i32x4, u32x4)
-
-#ifdef __wasm_unimplemented_simd128__
 OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint64x2, v_int64x2, i64x2, u64x2)
-#else
-#define OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(_Tpvec) \
-inline _Tpvec operator << (const _Tpvec& a, int imm) \
-{ \
-    fallback::_Tpvec a_(a); \
-    return a_ << imm; \
-} \
-inline _Tpvec operator >> (const _Tpvec& a, int imm) \
-{ \
-    fallback::_Tpvec a_(a); \
-    return a_ >> imm; \
-} \
-template<int imm> \
-inline _Tpvec v_shl(const _Tpvec& a) \
-{ \
-    fallback::_Tpvec a_(a); \
-    return fallback::v_shl<imm>(a_); \
-} \
-template<int imm> \
-inline _Tpvec v_shr(const _Tpvec& a) \
-{ \
-    fallback::_Tpvec a_(a); \
-    return fallback::v_shr<imm>(a_); \
-} \
-
-OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_uint64x2)
-OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_int64x2)
-#endif
 
 namespace hal_wasm_internal
 {
@@ -3180,9 +2985,18 @@ OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint8x16, unsigned)
 OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int8x16, int)
 OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint16x8, unsigned)
 OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int16x8, int)
-OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint64x2, uint64)
-OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int64x2, int64)
-OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_float64x2, double)
+
+
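+// Horizontal sum of the two 64-bit lanes: the shuffle swaps the 64-bit halves, the add leaves the total in both lanes, and lane 0 is extracted.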
+#define OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(_Tpvec, scalartype, regtype, suffix, esuffix) \
+inline scalartype v_reduce_sum(const _Tpvec& a) \
+{ \
+    regtype val = a.val; \
+    val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7)); \
+    return (scalartype)wasm_##esuffix##_extract_lane(val, 0); \
+}
+OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_uint64x2, uint64, v128_t, i64x2, i64x2)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_int64x2, int64,  v128_t, i64x2, i64x2)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_float64x2, double,  v128_t, f64x2,f64x2)
 
 inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
                                  const v_float32x4& c, const v_float32x4& d)
@@ -3318,30 +3132,27 @@ OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int16x8, i16x8, short)
 OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint32x4, i32x4, int)
 OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int32x4, i32x4, int)
 OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float32x4, i32x4, float)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float64x2, f64x2, double)
+
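+// 64-bit check_all/check_any reuse the 32-bit comparison: the low 32-bit word of each
+// 64-bit lane (i32x4 lanes 0 and 2) is overwritten so only the high word's sign bit decides.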
+#define OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(_Tpvec, suffix, esuffix) \
+inline bool v_check_all(const _Tpvec& a) \
+{ \
+    v128_t masked = v_reinterpret_as_##esuffix(a).val; \
+    masked = wasm_i32x4_replace_lane(masked, 0, 0xffffffff); \
+    masked = wasm_i32x4_replace_lane(masked, 2, 0xffffffff); \
+    return wasm_i8x16_all_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \
+} \
+inline bool v_check_any(const _Tpvec& a) \
+{ \
+    v128_t masked = v_reinterpret_as_##esuffix(a).val; \
+    masked = wasm_i32x4_replace_lane(masked, 0, 0x0); \
+    masked = wasm_i32x4_replace_lane(masked, 2, 0x0); \
+    return wasm_i8x16_any_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \
+} \
+
+OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_int64x2, i32x4, s32)
+OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_uint64x2, i32x4, u32)
 
-inline int v_signmask(const v_float64x2& a)
-{
-    fallback::v_float64x2 a_(a);
-    return fallback::v_signmask(a_);
-}
-inline bool v_check_all(const v_float64x2& a)
-{
-#ifdef __wasm_unimplemented_simd128__
-    return wasm_i8x16_all_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));
-#else
-    fallback::v_float64x2 a_(a);
-    return fallback::v_check_all(a_);
-#endif
-}
-inline bool v_check_any(const v_float64x2& a)
-{
-#ifdef __wasm_unimplemented_simd128__
-    return wasm_i8x16_any_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));;
-#else
-    fallback::v_float64x2 a_(a);
-    return fallback::v_check_any(a_);
-#endif
-}
 
 inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
 inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
@@ -3366,8 +3177,8 @@ OPENCV_HAL_IMPL_WASM_SELECT(v_uint16x8)
 OPENCV_HAL_IMPL_WASM_SELECT(v_int16x8)
 OPENCV_HAL_IMPL_WASM_SELECT(v_uint32x4)
 OPENCV_HAL_IMPL_WASM_SELECT(v_int32x4)
-// OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2)
-// OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2)
+OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2)
+OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2)
 OPENCV_HAL_IMPL_WASM_SELECT(v_float32x4)
 OPENCV_HAL_IMPL_WASM_SELECT(v_float64x2)
 
diff --git a/modules/js/CMakeLists.txt b/modules/js/CMakeLists.txt
index 62fd1bac9f..f3a625b37e 100644
--- a/modules/js/CMakeLists.txt
+++ b/modules/js/CMakeLists.txt
@@ -175,3 +175,22 @@ endforeach()
 
 add_custom_target(${PROJECT_NAME}_perf ALL
                   DEPENDS ${OCV_JS_PATH} ${opencv_perf_js_file_deps})
+
+# loader
+set(opencv_loader_js_bin_dir "${EXECUTABLE_OUTPUT_PATH}")
+set(loader_dir ${CMAKE_CURRENT_SOURCE_DIR}/src)
+
+set(opencv_loader_js_file_deps "")
+
+# make sure the build directory exists
+file(MAKE_DIRECTORY "${opencv_loader_js_bin_dir}")
+
+add_custom_command(
+        TARGET ${PROJECT_NAME} POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy
+                ${loader_dir}/loader.js
+                ${opencv_loader_js_bin_dir}/loader.js)
+list(APPEND opencv_loader_js_file_deps "${loader_dir}/loader.js" "${opencv_loader_js_bin_dir}/loader.js")
+
+add_custom_target(${PROJECT_NAME}_loader ALL
+                  DEPENDS ${OCV_JS_PATH} ${opencv_loader_js_file_deps})
\ No newline at end of file
diff --git a/modules/js/perf/base.js b/modules/js/perf/base.js
index 6c2e772e30..3948f21254 100644
--- a/modules/js/perf/base.js
+++ b/modules/js/perf/base.js
@@ -2,17 +2,28 @@ if (typeof window === 'undefined') {
   var cv = require("../opencv");
 }
 
-const cvSize = {
-  szODD: new cv.Size(127, 61),
-  szQVGA: new cv.Size(320, 240),
-  szVGA: new cv.Size(640, 480),
-  szqHD: new cv.Size(960, 540),
-  sz720p: new cv.Size(1280, 720),
-  sz1080p: new cv.Size(1920, 1080),
-  sz130x60: new cv.Size(130, 60),
-  sz213x120: new cv.Size(120 * 1280 / 720, 120),
+let gCvSize;
+
+function getCvSize() {
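+  // Build the Size table lazily so cv is only dereferenced after the runtime has initialized.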
+  if (gCvSize === undefined) {
+    gCvSize = {
+      szODD: new cv.Size(127, 61),
+      szQVGA: new cv.Size(320, 240),
+      szVGA: new cv.Size(640, 480),
+      szSVGA: new cv.Size(800, 600),
+      szqHD: new cv.Size(960, 540),
+      szXGA: new cv.Size(1024, 768),
+      sz720p: new cv.Size(1280, 720),
+      szSXGA: new cv.Size(1280, 1024),
+      sz1080p: new cv.Size(1920, 1080),
+      sz130x60: new cv.Size(130, 60),
+      sz213x120: new cv.Size(120 * 1280 / 720, 120),
+    };
+  }
+
+  return gCvSize;
 }
 
 if (typeof window === 'undefined') {
-  exports.cvSize = cvSize;
+  exports.getCvSize = getCvSize;
 }
\ No newline at end of file
diff --git a/modules/js/perf/perf_64bits.html b/modules/js/perf/perf_64bits.html
new file mode 100644
index 0000000000..efbe808fbd
--- /dev/null
+++ b/modules/js/perf/perf_64bits.html
@@ -0,0 +1,67 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Functions for 64-bit Perf</h4>
+              <h7>countNonZero, Mat::dot, Split, Merge</h7>
+          </div>
+          <div>
+            <h4>Mat Shape</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1000x1000)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../opencv.js" type="text/javascript"></script>
+    <script src="./perf_64bits.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_64bits.js b/modules/js/perf/perf_64bits.js
new file mode 100644
index 0000000000..dc4e234d4c
--- /dev/null
+++ b/modules/js/perf/perf_64bits.js
@@ -0,0 +1,180 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+  console.log('opencv.js loaded');
+  if (isNodeJs) {
+    global.cv = cv;
+  } else {
+    runButton.removeAttribute('disabled');
+    runButton.setAttribute('class', 'btn btn-primary');
+    runButton.innerHTML = 'Run';
+  }
+  let totalCaseNum, currentCaseId;
+
+
+  function addCountNonZeroCase(suite) {
+    suite.add('countNonZero', function() {
+      cv.countNonZero(mat);
+    }, {
+      'setup': function() {
+        let size = this.params.size;
+        let mat = cv.Mat.eye(size[0], size[1], cv.CV_64F);
+      }, 'teardown': function() {
+        mat.delete();
+      }
+    });
+  }
+
+  function addMatDotCase(suite) {
+    suite.add('Mat::dot', function() {
+      mat.dot(matT);
+    }, {
+      'setup': function() {
+        let size = this.params.size;
+        let mat = cv.Mat.ones(size[0], size[1], cv.CV_64FC1);
+        let matT = mat.t();
+      }, 'teardown': function() {
+        mat.delete();
+        matT.delete();
+      }
+    });
+  }
+
+  function addSplitCase(suite) {
+    suite.add('Split', function() {
+      cv.split(mat, planes);
+    }, {
+      'setup': function() {
+        let size = this.params.size;
+        let mat = cv.Mat.ones(size[0], size[1], cv.CV_64FC3);
+        let planes = new cv.MatVector();
+      }, 'teardown': function() {
+        mat.delete();
+        planes.delete();
+      }
+    });
+  }
+
+  function addMergeCase(suite) {
+    suite.add('Merge', function() {
+      cv.merge(planes, mat);
+    }, {
+      'setup': function() {
+        let size = this.params.size;
+        let mat = new cv.Mat();
+        let mat1 = cv.Mat.ones(size[0], size[1], cv.CV_64FC3);
+        let planes = new cv.MatVector();
+        cv.split(mat1, planes);
+      }, 'teardown': function() {
+        mat.delete();
+        mat1.delete();
+        planes.delete();
+      }
+    });
+  }
+
+  function setInitParams(suite, sizeArray) {
+    for (let i = 0; i < suite.length; i++) {
+      suite[i].params = {
+        size: sizeArray
+      };
+    }
+  }
+
+  function log(message) {
+    console.log(message);
+    if (!isNodeJs) {
+      logElement.innerHTML += `\n${'\t' + message}`;
+    }
+  }
+
+  function setBenchmarkSuite(suite) {
+    suite
+    // add listeners
+    .on('cycle', function(event) {
+      ++currentCaseId;
+      let size = event.target.params.size;
+      log(`=== ${event.target.name} ${currentCaseId} ===`);
+      log(`params: (${parseInt(size[0])}x${parseInt(size[1])})`);
+      log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms');
+      log('mean time:' +String(event.target.stats.mean*1000)+' ms');
+      log('stddev time:' +String(event.target.stats.deviation*1000)+' ms');
+      log(String(event.target));
+    })
+    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
+    .on('complete', function(event) {
+      log(`\n ###################################`)
+      log(`Finished testing ${event.currentTarget.length} cases \n`);
+      if (!isNodeJs) {
+        runButton.removeAttribute('disabled');
+        runButton.setAttribute('class', 'btn btn-primary');
+        runButton.innerHTML = 'Run';
+      }
+    });
+  }
+
+  function genBenchmarkCase(paramsContent) {
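+    // Accept an optional "(NxM)" filter; otherwise run all four kernels on a 1000x1000 Mat.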
+    let suite = new Benchmark.Suite;
+    var sizeArray;
+    totalCaseNum = 4;
+    currentCaseId = 0;
+    if (/\([0-9]+x[0-9]+\)/g.test(paramsContent.toString())) {
+      let params = paramsContent.toString().match(/\([0-9]+x[0-9]+\)/g)[0];
+      let sizeStrs = (params.match(/[0-9]+/g) || []).slice(0, 2).toString().split(",");
+      sizeArray = sizeStrs.map(Number);
+    } else {
+      log("no getting invalid params, run all the cases with Mat of shape (1000 x 1000)");
+      sizeArray = [1000, 1000];
+    }
+    addCountNonZeroCase(suite);
+    addMatDotCase(suite);
+    addSplitCase(suite);
+    addMergeCase(suite);
+    setInitParams(suite, sizeArray);
+    setBenchmarkSuite(suite);
+    log(`Running ${totalCaseNum} tests from 64-bit intrinsics`);
+    suite.run({ 'async': true }); // run the benchmark
+  }
+
+
+  // set test filter params
+  if (isNodeJs) {
+    const args = process.argv.slice(2);
+    let paramsContent = '';
+    if (/--test_param_filter=\([0-9]+x[0-9]+\)/g.test(args.toString())) {
+      paramsContent = args.toString().match(/\([0-9]+x[0-9]+\)/g)[0];
+    }
+    genBenchmarkCase(paramsContent);
+  } else {
+    runButton.onclick = function() {
+      let paramsContent = paramsElement.value;
+      genBenchmarkCase(paramsContent);
+      if (totalCaseNum !== 0) {
+        runButton.setAttribute("disabled", "disabled");
+        runButton.setAttribute('class', 'btn btn-primary disabled');
+        runButton.innerHTML = "Running";
+      }
+    }
+  }
+};
+
+async function main() {
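+  // opencv.js may export either a Promise (modularized build) or an object that fires onRuntimeInitialized.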
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_helpfunc.js b/modules/js/perf/perf_helpfunc.js
index e07e3a297e..e42f4ad807 100644
--- a/modules/js/perf/perf_helpfunc.js
+++ b/modules/js/perf/perf_helpfunc.js
@@ -16,14 +16,57 @@ var fillGradient = function(cv, img, delta=5) {
   }
 }
 
+var smoothBorder = function(cv, img, color, delta=5) {
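+  // Blend the outer rows and columns of img toward color; the band is 100/delta pixels wide.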
+  let ch = img.channels();
+  console.assert(!img.empty() && img.depth() == cv.CV_8U && ch <= 4);
+
+  let n = 100/delta;
+  let nR = Math.min(n, (img.rows+1)/2);
+  let nC = Math.min(n, (img.cols+1)/2);
+  let s = new cv.Scalar();
+
+  for (let r = 0; r < nR; r++) {
+    let k1 = r*delta/100.0, k2 = 1-k1;
+    for(let c = 0; c < img.cols; c++) {
+      let view = img.ptr(r, c);
+      for(let i = 0; i < ch; i++) s[i] = view[i];
+      for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2;
+    }
+    for(let c=0; c < img.cols; c++) {
+      let view = img.ptr(img.rows-r-1, c);
+      for(let i = 0; i < ch; i++) s[i] = view[i];
+      for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2;
+    }
+  }
+  for (let r = 0; r < img.rows; r++) {
+    for(let c = 0; c < nC; c++) {
+      let k1 = c*delta/100.0, k2 = 1-k1;
+      let view = img.ptr(r, c);
+      for(let i = 0; i < ch; i++) s[i] = view[i];
+      for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2;
+    }
+    for(let c = 0; c < nC; c++) {
+      let k1 = c*delta/100.0, k2 = 1-k1;
+      let view = img.ptr(r, img.cols-c-1);
+      for(let i = 0; i < ch; i++) s[i] = view[i];
+      for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2;
+    }
+  }
+}
+
 var cvtStr2cvSize = function(strSize) {
   let size;
+
+  let cvSize = getCvSize();
   switch(strSize) {
     case "127,61": size = cvSize.szODD;break;
     case '320,240': size = cvSize.szQVGA;break;
     case '640,480': size = cvSize.szVGA;break;
+    case '800,600': size = cvSize.szSVGA;break;
     case '960,540': size = cvSize.szqHD;break;
+    case '1024,768': size = cvSize.szXGA;break;
     case '1280,720': size = cvSize.sz720p;break;
+    case '1280,1024': size = cvSize.szSXGA;break;
     case '1920,1080': size = cvSize.sz1080p;break;
     case "130,60": size = cvSize.sz130x60;break;
     case '213,120': size = cvSize.sz213x120;break;
@@ -52,8 +95,209 @@ function permute (source, target) {
   return result;
 }
 
+var constructMode = function (startStr, sChannel, dChannel) {
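+  // Build conversion-mode names such as "COLOR_BGR2GRAY" from a prefix, a source format, and a list of destination formats.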
+  let modeList = [];
+  for (let j in dChannel) {
+    modeList.push(startStr + sChannel + "2" + dChannel[j]);
+  }
+  return modeList;
+}
+
+var enableButton = function () {
+  runButton.removeAttribute('disabled');
+  runButton.setAttribute('class', 'btn btn-primary');
+  runButton.innerHTML = 'Run';
+}
+
+var disableButton = function () {
+  runButton.setAttribute("disabled", "disabled");
+  runButton.setAttribute('class', 'btn btn-primary disabled');
+  runButton.innerHTML = "Running";
+}
+
+var log = function (message) {
+  console.log(message);
+  if (!isNodeJs) {
+    logElement.innerHTML += `\n${'\t' + message}`;
+  }
+}
+
+var addKernelCase = function (suite, params, type, kernelFunc) {
+  kernelFunc(suite, type);
+  let index = suite.length - 1;
+  suite[index].params = params;
+}
+
+function constructParamLog(params, kernel) {
+  let paramLog = '';
+  if (kernel == "cvtcolor") {
+    let mode = params.mode;
+    let size = params.size;
+    paramLog = `params: (${parseInt(size[0])}x${parseInt(size[1])}, ${mode})`;
+  } else if (kernel == "resize") {
+    let matType = params.matType;
+    let size1 = params.from;
+    let size2 = params.to;
+    paramLog = `params: (${matType},${parseInt(size1.width)}x${parseInt(size1.height)},`+
+    `${parseInt(size2.width)}x${parseInt(size2.height)})`;
+  } else if (kernel == "threshold") {
+    let matSize = params.matSize;
+    let matType = params.matType;
+    let threshType = params.threshType;
+    paramLog = `params: (${parseInt(matSize.width)}x${parseInt(matSize.height)},`+
+    `${matType},${threshType})`;
+  } else if (kernel == "sobel") {
+    let size = params.size;
+    let ddepth = params.ddepth;
+    let dxdy = params.dxdy;
+    let ksize = params.ksize;
+    let borderType = params.borderType;
+    paramLog = `params: (${parseInt(size[0])}x${parseInt(size[1])},`+
+    `${ddepth},${dxdy},${borderType}, ksize:${ksize})`;
+  } else if (kernel == "filter2d") {
+    let size = params.size;
+    let ksize = params.ksize;
+    let borderMode = params.borderMode;
+    paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+
+    `${ksize},${borderMode})`;
+  } else if (kernel == "scharr") {
+    let size = params.size;
+    let ddepth = params.ddepth;
+    let dxdy = params.dxdy;
+    let borderType = params.borderType;
+    paramLog = `params: (${parseInt(size[0])}x${parseInt(size[1])},`+
+    `${ddepth},${dxdy},${borderType})`;
+  } else if (kernel == "gaussianBlur" || kernel == "blur") {
+    let size = params.size;
+    let matType = params.matType;
+    let borderType = params.borderType;
+    let ksize = params.ksize;
+    paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+
+    `${matType},${borderType}, ksize: (${ksize}x${ksize}))`;
+  } else if (kernel == "medianBlur") {
+    let size = params.size;
+    let matType = params.matType;
+    let ksize = params.ksize;
+    paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+
+    `${matType}, ksize: ${ksize})`;
+  } else if (kernel == "erode" || kernel == "dilate" || kernel == "pyrDown") {
+    let size = params.size;
+    let matType = params.matType;
+    paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+
+    `${matType})`;
+  } else if (kernel == "remap") {
+    let size = params.size;
+    let matType = params.matType;
+    let mapType = params.mapType;
+    let interType = params.interType;
+    paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+
+    `${matType}, ${mapType}, ${interType})`;
+  } else if (kernel == "warpAffine" || kernel == "warpPerspective") {
+    let size = params.size;
+    let interType = params.interType;
+    let borderMode = params.borderMode;
+    paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+
+    `${interType}, ${borderMode})`;
+  }
+  return paramLog;
+}
+
+var setBenchmarkSuite = function (suite, kernel, currentCaseId) {
+  suite
+  // add listeners
+  .on('cycle', function(event) {
+    ++currentCaseId;
+    let params = event.target.params;
+    let paramLog = constructParamLog(params, kernel);
+
+    log(`=== ${event.target.name} ${currentCaseId} ===`);
+    log(paramLog);
+    log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms');
+    log('mean time:' +String(event.target.stats.mean*1000)+' ms');
+    log('stddev time:' +String(event.target.stats.deviation*1000)+' ms');
+    log(String(event.target));
+  })
+  .on('error', function(event) { log(`test case ${event.target.name} failed`); })
+  .on('complete', function(event) {
+    log(`\n ###################################`)
+    log(`Finished testing ${event.currentTarget.length} cases \n`);
+    if (!isNodeJs) {
+      runButton.removeAttribute('disabled');
+      runButton.setAttribute('class', 'btn btn-primary');
+      runButton.innerHTML = 'Run';
+    }
+  });
+}
+
+var decodeParams2Case = function(paramContent, paramsList, combinations) {
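+  // Match a filter string such as "(640x480, CV_8UC1)" against the registered parameter
+  // combinations; returns the [i, j] location of every matching case in combinations.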
+  let sizeString = (paramContent.match(/[0-9]+x[0-9]+/g) || []).toString();
+  let sizes = (sizeString.match(/[0-9]+/g) || []);
+  let paramSize = paramsList.length;
+  let paramObjs = [];
+  let sizeCount = 0;
+  for (let i = 0; i < paramSize; i++) {
+      let param = paramsList[i];
+      let paramName = param.name;
+      let paramValue = param.value;
+      let paramReg = param.reg;
+      let paramIndex = param.index;
+
+      if(paramValue != "") {
+        paramObjs.push({name: paramName, value: paramValue, index: paramIndex});
+      } else if (paramName.startsWith('size')) {
+        let sizeStr = sizes.slice(sizeCount, sizeCount+2).toString();
+        paramValue = cvtStr2cvSize(sizeStr);
+        sizeCount += 2;
+        paramObjs.push({name: paramName, value: paramValue, index: paramIndex});
+      } else {
+        for (let index in paramReg) {
+          let reg = eval(paramReg[index]);
+          if ('loc' in param) {
+            paramValue = (paramContent.match(reg) || [])[param.loc].toString();
+          } else {
+            paramValue = (paramContent.match(reg) || []).toString();
+          }
+
+          if (paramValue != "") {
+            paramObjs.push({name: paramName, value: paramValue, index: paramIndex});
+            break;
+          }
+        }
+      }
+  }
+
+  let location = [];
+  for (let i = 0; i < combinations.length; ++i) {
+    let combination = combinations[i];
+    for (let j = 0; j < combination.length; ++j) {
+      if (judgeCombin(combination[j], paramObjs)) {
+        location.push([i,j]);
+      }
+    }
+  }
+  return location;
+}
+
+function judgeCombin(combination, paramObjs) {
+  for (let i = 0; i < paramObjs.length; i++) {
+    if (paramObjs[i].value != combination[paramObjs[i].index]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
 if (typeof window === 'undefined') {
+  exports.enableButton = enableButton;
+  exports.disableButton = disableButton;
   exports.fillGradient = fillGradient;
+  exports.smoothBorder = smoothBorder;
   exports.cvtStr2cvSize = cvtStr2cvSize;
   exports.combine = combine;
+  exports.constructMode = constructMode;
+  exports.log = log;
+  exports.decodeParams2Case = decodeParams2Case;
+  exports.setBenchmarkSuite = setBenchmarkSuite;
+  exports.addKernelCase = addKernelCase;
 }
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_blur.html b/modules/js/perf/perf_imgproc/perf_blur.html
new file mode 100644
index 0000000000..c6fae45db0
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_blur.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h7>Image Processing</h7>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h7>Blur</h7>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1280x720, CV_8UC1, BORDER_REPLICATE)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_blur.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_blur.js b/modules/js/perf/perf_imgproc/perf_blur.js
new file mode 100644
index 0000000000..59712fb478
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_blur.js
@@ -0,0 +1,130 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const BlurSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA, cvSize.sz720p];
+    const Blur5x16Size = [cvSize.szVGA, cvSize.sz720p];
+    const BlurType = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1"];
+    const BlurType5x5 = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1", "CV_32FC3"];
+    const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"];
+    const BorderTypeAll = ["BORDER_REPLICATE", "BORDER_CONSTANT", "BORDER_REFLECT", "BORDER_REFLECT101"];
+
+    const combiBlur3x3 = combine(BlurSize, BlurType, BorderType3x3);
+    const combiBlur16x16 = combine(Blur5x16Size, BlurType, BorderTypeAll);
+    const combiBlur5x5 = combine(Blur5x16Size, BlurType5x5, BorderTypeAll);
+
+    function addBlurCase(suite, type) {
+        suite.add('blur', function() {
+            cv.blur(src, dst, ksize, new cv.Point(-1,-1), borderType);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let borderType = cv[this.params.borderType];
+                let ksizeNum = this.params.ksize;
+                let ksize = new cv.Size(ksizeNum, ksizeNum);
+                let src = new cv.Mat(size, matType);
+                let dst = new cv.Mat(size, matType);
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+              }
+          });
+    }
+
+    function addBlurModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+        let borderType = combination[i][2];
+        let ksizeArray = [3, 16, 5];
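+        // type selects the kernel size: 0 -> 3x3, 1 -> 16x16, 2 -> 5x5, in blurCombinations order.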
+
+        let params = {size: size, matType:matType, ksize: ksizeArray[type], borderType:borderType};
+        addKernelCase(suite, params, type, addBlurCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2});
+          let locationList = decodeParams2Case(params, paramObjs, blurCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addBlurModeCase(suite, [blurCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addBlurModeCase(suite, combiBlur3x3, 0);
+        addBlurModeCase(suite, combiBlur16x16, 1);
+        addBlurModeCase(suite, combiBlur5x5, 2);
+      }
+      setBenchmarkSuite(suite, "blur", currentCaseId);
+      log(`Running ${totalCaseNum} tests from blur`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let blurCombinations = [combiBlur3x3, combiBlur16x16, combiBlur5x5];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_cvtcolor.js b/modules/js/perf/perf_imgproc/perf_cvtcolor.js
index 752691ef77..b5007985cc 100644
--- a/modules/js/perf/perf_imgproc/perf_cvtcolor.js
+++ b/modules/js/perf/perf_imgproc/perf_cvtcolor.js
@@ -11,17 +11,17 @@ if (isNodeJs) {
   var logElement = document.getElementById('log');
 }
 
-cv.onRuntimeInitialized = () => {
+function perf() {
+
   console.log('opencv.js loaded');
   if (isNodeJs) {
     global.cv = cv;
     global.combine = HelpFunc.combine;
     global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
-    global.cvSize = Base.cvSize;
+    global.cvSize = Base.getCvSize();
   } else {
-    runButton.removeAttribute('disabled');
-    runButton.setAttribute('class', 'btn btn-primary');
-    runButton.innerHTML = 'Run';
+    enableButton();
+    cvSize = getCvSize();
   }
   let totalCaseNum, currentCaseId;
 
@@ -73,126 +73,77 @@ cv.onRuntimeInitialized = () => {
     cv.CX_YUV2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_YUV2RGB
   };
 
-  const CvtMode = [
-    "COLOR_BGR2BGR555", "COLOR_BGR2BGR565", "COLOR_BGR2BGRA", "COLOR_BGR2GRAY",
-    "COLOR_BGR2HLS", "COLOR_BGR2HLS_FULL", "COLOR_BGR2HSV", "COLOR_BGR2HSV_FULL",
-    "COLOR_BGR2Lab", "COLOR_BGR2Luv", "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ",
-    "COLOR_BGR2YCrCb", "COLOR_BGR2YUV", "COLOR_BGR5552BGR", "COLOR_BGR5552BGRA",
-
-    "COLOR_BGR5552GRAY", "COLOR_BGR5552RGB", "COLOR_BGR5552RGBA", "COLOR_BGR5652BGR",
-    "COLOR_BGR5652BGRA", "COLOR_BGR5652GRAY", "COLOR_BGR5652RGB", "COLOR_BGR5652RGBA",
-
-    "COLOR_BGRA2BGR", "COLOR_BGRA2BGR555", "COLOR_BGRA2BGR565", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA",
-    "CX_BGRA2HLS", "CX_BGRA2HLS_FULL", "CX_BGRA2HSV", "CX_BGRA2HSV_FULL",
-    "CX_BGRA2Lab", "CX_BGRA2Luv", "CX_BGRA2XYZ",
-    "CX_BGRA2YCrCb", "CX_BGRA2YUV",
-
-    "COLOR_GRAY2BGR", "COLOR_GRAY2BGR555", "COLOR_GRAY2BGR565", "COLOR_GRAY2BGRA",
-
-    "COLOR_HLS2BGR", "COLOR_HLS2BGR_FULL", "COLOR_HLS2RGB", "COLOR_HLS2RGB_FULL",
-    "CX_HLS2BGRA", "CX_HLS2BGRA_FULL", "CX_HLS2RGBA", "CX_HLS2RGBA_FULL",
-
-    "COLOR_HSV2BGR", "COLOR_HSV2BGR_FULL", "COLOR_HSV2RGB", "COLOR_HSV2RGB_FULL",
-    "CX_HSV2BGRA", "CX_HSV2BGRA_FULL", "CX_HSV2RGBA", "CX_HSV2RGBA_FULL",
-
-    "COLOR_Lab2BGR", "COLOR_Lab2LBGR", "COLOR_Lab2LRGB", "COLOR_Lab2RGB",
-    "CX_Lab2BGRA", "CX_Lab2LBGRA", "CX_Lab2LRGBA", "CX_Lab2RGBA",
-
-    "COLOR_LBGR2Lab", "COLOR_LBGR2Luv", "COLOR_LRGB2Lab", "COLOR_LRGB2Luv",
-    "CX_LBGRA2Lab", "CX_LBGRA2Luv", "CX_LRGBA2Lab", "CX_LRGBA2Luv",
-
-    "COLOR_Luv2BGR", "COLOR_Luv2LBGR", "COLOR_Luv2LRGB", "COLOR_Luv2RGB",
-    "CX_Luv2BGRA", "CX_Luv2LBGRA", "CX_Luv2LRGBA", "CX_Luv2RGBA",
-
-    "COLOR_RGB2BGR555", "COLOR_RGB2BGR565", "COLOR_RGB2GRAY",
-    "COLOR_RGB2HLS", "COLOR_RGB2HLS_FULL", "COLOR_RGB2HSV", "COLOR_RGB2HSV_FULL",
-    "COLOR_RGB2Lab", "COLOR_RGB2Luv", "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV",
-
-    "COLOR_RGBA2BGR", "COLOR_RGBA2BGR555", "COLOR_RGBA2BGR565", "COLOR_RGBA2GRAY",
-    "CX_RGBA2HLS", "CX_RGBA2HLS_FULL", "CX_RGBA2HSV", "CX_RGBA2HSV_FULL",
-    "CX_RGBA2Lab", "CX_RGBA2Luv", "CX_RGBA2XYZ",
-    "CX_RGBA2YCrCb", "CX_RGBA2YUV",
-
-    "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA",
-
-    "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA",
-    "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA"
-  ];
-  const CvtModeSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
-  const combiCvtMode = combine(CvtModeSize, CvtMode);
-
   // didn't support 16u and 32f perf tests according to
   // https://github.com/opencv/opencv/commit/4e679e1cc5b075ec006b29a58b4fe117523fba1d
-  const CvtMode16U = [
-    "COLOR_BGR2BGRA", "COLOR_BGR2GRAY",
-    "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ",
-    "COLOR_BGR2YCrCb", "COLOR_BGR2YUV",
-
-    "COLOR_BGRA2BGR", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA",
-    "CX_BGRA2XYZ",
-    "CX_BGRA2YCrCb", "CX_BGRA2YUV",
-
-    "COLOR_GRAY2BGR", "COLOR_GRAY2BGRA",
-
-    "COLOR_RGB2GRAY",
-    "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV",
-
-    "COLOR_RGBA2BGR", "COLOR_RGBA2GRAY",
-    "CX_RGBA2XYZ",
-    "CX_RGBA2YCrCb", "CX_RGBA2YUV",
+  function constructCvtMode16U() {
+    let cvtMode16U = [];
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "BGR", ["BGRA", "GRAY", "RGB", "RGBA", "XYZ", "YCrCb", "YUV"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "BGRA", ["BGR", "GRAY", "RGBA"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("CX_", "BGRA", ["XYZ", "YCrCb", "YUV"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "GRAY", ["BGR", "BGRA"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "RGB", ["GRAY", "XYZ", "YCrCb", "YUV"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "RGBA", ["BGR", "GRAY"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("CX_", "RGBA", ["XYZ", "YCrCb", "YUV"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "XYZ", ["BGR", "RGB"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("CX_", "XYZ", ["BGRA", "RGBA"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "YCrCb", ["BGR", "RGB"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("CX_", "YCrCb", ["BGRA", "RGBA"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "YUV", ["BGR", "RGB"]));
+    cvtMode16U = cvtMode16U.concat(constructMode("CX_", "YUV", ["BGRA", "RGBA"]));
+
+    return cvtMode16U;
+  }
 
-    "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA",
+  const CvtMode16U = constructCvtMode16U();
 
-    "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA",
-    "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA"
-  ];
   const CvtMode16USize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
   const combiCvtMode16U = combine(CvtMode16USize, CvtMode16U);
 
-  const CvtMode32F = [
-    "COLOR_BGR2BGRA", "COLOR_BGR2GRAY",
-    "COLOR_BGR2HLS", "COLOR_BGR2HLS_FULL", "COLOR_BGR2HSV", "COLOR_BGR2HSV_FULL",
-    "COLOR_BGR2Lab", "COLOR_BGR2Luv", "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ",
-    "COLOR_BGR2YCrCb", "COLOR_BGR2YUV",
-
-    "COLOR_BGRA2BGR", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA",
-    "CX_BGRA2HLS", "CX_BGRA2HLS_FULL", "CX_BGRA2HSV", "CX_BGRA2HSV_FULL",
-    "CX_BGRA2Lab", "CX_BGRA2Luv", "CX_BGRA2XYZ",
-    "CX_BGRA2YCrCb", "CX_BGRA2YUV",
-
-    "COLOR_GRAY2BGR", "COLOR_GRAY2BGRA",
-
-    "COLOR_HLS2BGR", "COLOR_HLS2BGR_FULL", "COLOR_HLS2RGB", "COLOR_HLS2RGB_FULL",
-    "CX_HLS2BGRA", "CX_HLS2BGRA_FULL", "CX_HLS2RGBA", "CX_HLS2RGBA_FULL",
-
-    "COLOR_HSV2BGR", "COLOR_HSV2BGR_FULL", "COLOR_HSV2RGB", "COLOR_HSV2RGB_FULL",
-    "CX_HSV2BGRA", "CX_HSV2BGRA_FULL", "CX_HSV2RGBA", "CX_HSV2RGBA_FULL",
-
-    "COLOR_Lab2BGR", "COLOR_Lab2LBGR", "COLOR_Lab2LRGB", "COLOR_Lab2RGB",
-    "CX_Lab2BGRA", "CX_Lab2LBGRA", "CX_Lab2LRGBA", "CX_Lab2RGBA",
-
-    "COLOR_LBGR2Lab", "COLOR_LBGR2Luv", "COLOR_LRGB2Lab", "COLOR_LRGB2Luv",
-    "CX_LBGRA2Lab", "CX_LBGRA2Luv", "CX_LRGBA2Lab", "CX_LRGBA2Luv",
+  function constructCvtMode32F(source) {
+    let cvtMode32F = source;
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "BGR", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "BGRA", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "HLS", ["BGR", "BGR_FULL", "RGB", "RGB_FULL"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "HLS", ["BGRA", "BGRA_FULL", "RGBA", "RGBA_FULL"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "HSV", ["BGR", "BGR_FULL", "RGB", "RGB_FULL"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "HSV", ["BGRA", "BGRA_FULL", "RGBA", "RGBA_FULL"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "Lab", ["BGR", "LBGR", "RGB", "LRGB"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "Lab", ["BGRA", "LBGRA", "RGBA", "LRGBA"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "Luv", ["BGR", "LBGR", "RGB", "LRGB"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "Luv", ["BGRA", "LBGRA", "RGBA", "LRGBA"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "LBGR", ["Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "LBGRA", ["Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "LRGB", ["Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "LRGBA", ["Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "RGB", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"]));
+    cvtMode32F = cvtMode32F.concat(constructMode("CX_", "RGBA", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"]));
+
+    return cvtMode32F;
+  }
 
-    "COLOR_Luv2BGR", "COLOR_Luv2LBGR", "COLOR_Luv2LRGB", "COLOR_Luv2RGB",
-    "CX_Luv2BGRA", "CX_Luv2LBGRA", "CX_Luv2LRGBA", "CX_Luv2RGBA",
+  const CvtMode32F = constructCvtMode32F(CvtMode16U);
 
-    "COLOR_RGB2GRAY",
-    "COLOR_RGB2HLS", "COLOR_RGB2HLS_FULL", "COLOR_RGB2HSV", "COLOR_RGB2HSV_FULL",
-    "COLOR_RGB2Lab", "COLOR_RGB2Luv", "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV",
+  const CvtMode32FSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
+  const combiCvtMode32F = combine(CvtMode32FSize, CvtMode32F);
 
-    "COLOR_RGBA2BGR", "COLOR_RGBA2GRAY",
-    "CX_RGBA2HLS", "CX_RGBA2HLS_FULL", "CX_RGBA2HSV", "CX_RGBA2HSV_FULL",
-    "CX_RGBA2Lab", "CX_RGBA2Luv", "CX_RGBA2XYZ",
-    "CX_RGBA2YCrCb", "CX_RGBA2YUV",
+  function constructCvtMode(source) {
+    let cvtMode = source;
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "BGR", ["BGR555", "BGR565"]));
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "BGR555", ["BGR", "BGRA", "GRAY", "RGB", "RGBA"]));
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "BGR565", ["BGR", "BGRA", "GRAY", "RGB", "RGBA"]));
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "BGRA", ["BGR555", "BGR565"]));
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "GRAY", ["BGR555", "BGR565"]));
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "RGB", ["BGR555", "BGR565"]));
+    cvtMode = cvtMode.concat(constructMode("COLOR_", "RGBA", ["BGR555", "BGR565"]));
+
+    return cvtMode;
+  }
 
-    "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA",
+  const CvtMode = constructCvtMode(CvtMode32F);
 
-    "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA",
-    "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA"
-  ];
-  const CvtMode32FSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
-  const combiCvtMode32F = combine(CvtMode32FSize, CvtMode32F);
+  const CvtModeSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
+  // combiCvtMode permutes every size with every mode
+  const combiCvtMode = combine(CvtModeSize, CvtMode);
 
   const CvtModeBayer = [
     "COLOR_BayerBG2BGR", "COLOR_BayerBG2BGRA", "COLOR_BayerBG2BGR_VNG", "COLOR_BayerBG2GRAY",
@@ -357,7 +308,7 @@ cv.onRuntimeInitialized = () => {
     return [mat1Type, mat2Type];
   }
 
-  function addCvtColorCase(suite) {
+  function addCvtColorCase(suite, type) {
     suite.add('cvtColor', function() {
       cv.cvtColor(mat1, mat2, mode, 0);
       }, {
@@ -375,154 +326,22 @@ cv.onRuntimeInitialized = () => {
     });
   }
 
-  function addCvtModeCase(suite, combination) {
+  function addCvtModeCase(suite, combination, type) {
     totalCaseNum += combination.length;
     for(let i = 0; i < combination.length; ++i) {
       let size = combination[i][0];
       let mode = combination[i][1];
       let chPair = getConversionInfo(mode);
       let matType = getMatType(chPair);
-      let sizeArray = [size.width, size.height];
-
-      addCvtColorCase(suite);
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        size: sizeArray,
-        matType: matType,
-        mode: mode
-      };
-    };
-  }
-
-  function addCvtModeBayerCase(suite, combination) {
-    totalCaseNum += combination.length;
-    for(let i = 0; i < combination.length; ++i) {
-      let size = combination[i][0];
-      let mode = combination[i][1];
-      let chPair = getConversionInfo(mode);
-      let matType = getMatType(chPair);
-      let sizeArray = [size.width, size.height];
-
-      addCvtColorCase(suite);
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        size: sizeArray,
-        matType: matType,
-        mode: mode
-      };
-    };
-  }
-
-  function addCvtMode2Case(suite, combination) {
-    totalCaseNum += combination.length;
-    for(let i = 0; i < combination.length; ++i) {
-      let size = combination[i][0];
-      let mode = combination[i][1];
-      let chPair = getConversionInfo(mode);
-      let matType = getMatType(chPair);
-      let sizeArray = [size.width, size.height+size.height/2];
-
-      addCvtColorCase(suite);
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        size: sizeArray,
-        matType: matType,
-        mode: mode
-      };
-    };
-  }
-
-  function addCvtMode3Case(suite, combination) {
-    totalCaseNum += combination.length;
-    for(let i = 0; i < combination.length; ++i) {
-      let size = combination[i][0];
-      let mode = combination[i][1];
-      let chPair = getConversionInfo(mode);
-      let matType = getMatType(chPair);
-      let sizeArray = [size.width, size.height+size.height/2];
-
-      addCvtColorCase(suite);
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        size: sizeArray,
-        matType: matType,
-        mode: mode
-      };
-    };
-  }
-
-  function addEdgeAwareBayerModeCase(suite, combination) {
-    totalCaseNum += combination.length;
-    for(let i = 0; i < combination.length; ++i) {
-      let size = combination[i][0];
-      let mode = combination[i][1];
-      let chPair = getConversionInfo(mode);
-      let matType = getMatType(chPair);
-      let sizeArray = [size.width, size.height];
-
-      addCvtColorCase(suite);
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        size: sizeArray,
-        matType: matType,
-        mode: mode
-      };
-    };
-  }
-
-  function decodeParams2Case(suite, params) {
-    let sizeStr = (params.match(/[0-9]+/g) || []).slice(0, 2).toString();
-    let mode = (params.match(/CX\_[A-z]+2[A-z]+/) || params.match(/COLOR\_[A-z]+2[A-z]+/) || []).toString();
-    let size = cvtStr2cvSize(sizeStr);
-
-    // check if the params match and add case
-    for (let i = 0; i < combinations.length; ++i) {
-      let combination = combinations[i];
-      for (let j = 0; j < combination.length; ++j) {
-        if (size === combination[j][0] && mode === combination[j][1]) {
-          cvtFunc[i](suite, [combination[j]]);
-        }
-      }
-    }
-  }
-
-  function log(message) {
-    console.log(message);
-    if (!isNodeJs) {
-      logElement.innerHTML += `\n${'\t' + message}`;
-    }
-  }
-
-  function setBenchmarkSuite(suite) {
-    suite
-    // add listeners
-    .on('cycle', function(event) {
-      ++currentCaseId;
-      let params = event.target.params;
-      let mode = params.mode;
-      let size = params.size;
-      log(`=== ${event.target.name} ${currentCaseId} ===`);
-      log(`params: (${parseInt(size[0])}x${parseInt(size[1])}, ${mode})`);
-      log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms');
-      log('mean time:' +String(event.target.stats.mean*1000)+' ms');
-      log('stddev time:' +String(event.target.stats.deviation*1000)+' ms');
-      log(String(event.target));
-    })
-    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
-    .on('complete', function(event) {
-      log(`\n ###################################`)
-      log(`Finished testing ${event.currentTarget.length} cases \n`);
-      if (!isNodeJs) {
-        runButton.removeAttribute('disabled');
-        runButton.setAttribute('class', 'btn btn-primary');
-        runButton.innerHTML = 'Run';
+      let sizeArray;
+      if (type == 0) {
+        sizeArray = [size.width, size.height];
+      } else {
+        sizeArray = [size.width, size.height+size.height/2];
       }
-    });
+      let params = {size:sizeArray, matType: matType, mode: mode};
+      addKernelCase(suite, params, type, addCvtColorCase);
+    };
   }
 
   function genBenchmarkCase(paramsContent) {
@@ -531,23 +350,33 @@ cv.onRuntimeInitialized = () => {
     currentCaseId = 0;
     if (/\([0-9]+x[0-9]+,[\ ]*\w+\)/g.test(paramsContent.toString())) {
       let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+\)/g)[0];
-      decodeParams2Case(suite, params);
+      let paramObjs = [];
+      paramObjs.push({name:"mode", value:"", reg:["/CX\_[A-z]+2[A-z]+/", "/COLOR\_[A-z]+2[A-z]+/"], index:1});
+      paramObjs.push({name:"size", value:"", reg:[""], index:0});
+
+      let locationList = decodeParams2Case(params, paramObjs, combinations);
+      for (let i = 0; i < locationList.length; i++){
+        let first = locationList[i][0];
+        let second = locationList[i][1];
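+        // The first two tables use the plain size; combiCvtMode2/3 are YUV 4:2:0 modes, so height becomes height*3/2.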
+        if (first < 2) {
+          addCvtModeCase(suite, [combinations[first][second]], 0);
+        } else {
+          addCvtModeCase(suite, [combinations[first][second]], 1);
+        }
+      }
     } else {
       log("no filter or getting invalid params, run all the cases");
-      addCvtModeCase(suite, combiCvtMode);
-      addCvtModeBayerCase(suite, combiCvtModeBayer);
-      addCvtMode2Case(suite, combiCvtMode2);
-      addCvtMode3Case(suite, combiCvtMode3);
+      addCvtModeCase(suite, combiCvtMode, 0);
+      addCvtModeCase(suite, combiCvtModeBayer, 0);
+      addCvtModeCase(suite, combiCvtMode2, 1);
+      addCvtModeCase(suite, combiCvtMode3, 1);
     }
-    setBenchmarkSuite(suite);
+    setBenchmarkSuite(suite, "cvtcolor", currentCaseId);
     log(`Running ${totalCaseNum} tests from CvtColor`);
     suite.run({ 'async': true }); // run the benchmark
   }
 
-
-
   // init
-  let cvtFunc = [addCvtModeCase, addCvtModeBayerCase, addCvtMode2Case, addCvtMode3Case];//, addEdgeAwareBayerModeCase];
   let combinations = [combiCvtMode, combiCvtModeBayer, combiCvtMode2, combiCvtMode3];//, combiEdgeAwareBayer];
 
   // set test filter params
@@ -563,10 +392,19 @@ cv.onRuntimeInitialized = () => {
       let paramsContent = paramsElement.value;
       genBenchmarkCase(paramsContent);
       if (totalCaseNum !== 0) {
-        runButton.setAttribute("disabled", "disabled");
-        runButton.setAttribute('class', 'btn btn-primary disabled');
-        runButton.innerHTML = "Running";
+        disableButton();
       }
     }
   }
-};
\ No newline at end of file
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_dilate.html b/modules/js/perf/perf_imgproc/perf_dilate.html
new file mode 100644
index 0000000000..49c61f4be3
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_dilate.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h7>Image Processing</h7>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h7>Dilate</h7>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1024x768, CV_8UC1)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_dilate.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_dilate.js b/modules/js/perf/perf_imgproc/perf_dilate.js
new file mode 100644
index 0000000000..c4e14c7be2
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_dilate.js
@@ -0,0 +1,117 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const DilateSize = [cvSize.szQVGA, cvSize.szVGA, cvSize.szSVGA, cvSize.szXGA, cvSize.szSXGA];
+    const DilateType = ["CV_8UC1", "CV_8UC4"];
+    const combiDilate = combine(DilateSize, DilateType);
+
+    function addDilateCase(suite, type) {
+        suite.add('dilate', function() {
+            cv.dilate(src, dst, kernel);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let src = new cv.Mat(size, matType);
+                let dst = new cv.Mat(size, matType);
+                let kernel = new cv.Mat();
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+                kernel.delete();
+              }
+          });
+    }
+
+    function addDilateModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+
+        let params = {size: size, matType:matType};
+        addKernelCase(suite, params, type, addDilateCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          let locationList = decodeParams2Case(params, paramObjs, dilateCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addDilateModeCase(suite, [dilateCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addDilateModeCase(suite, combiDilate, 0);
+      }
+      setBenchmarkSuite(suite, "dilate", currentCaseId);
+      log(`Running ${totalCaseNum} tests from dilate`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let dilateCombinations = [combiDilate];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_erode.html b/modules/js/perf/perf_imgproc/perf_erode.html
new file mode 100644
index 0000000000..2db653bd7a
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_erode.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>Erode</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1024x768, CV_8UC1)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_erode.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_erode.js b/modules/js/perf/perf_imgproc/perf_erode.js
new file mode 100644
index 0000000000..95aba6fa21
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_erode.js
@@ -0,0 +1,117 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const ErodeSize = [cvSize.szQVGA, cvSize.szVGA, cvSize.szSVGA, cvSize.szXGA, cvSize.szSXGA];
+    const ErodeType = ["CV_8UC1", "CV_8UC4"];
+    const combiErode = combine(ErodeSize, ErodeType);
+
+    function addErodeCase(suite, type) {
+        suite.add('erode', function() {
+            cv.erode(src, dst, kernel);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let src = new cv.Mat(size, matType);
+                let dst = new cv.Mat(size, matType);
+                let kernel = new cv.Mat();
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+                kernel.delete();
+              }
+          });
+    }
+
+    function addErodeModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+
+        let params = {size: size, matType:matType};
+        addKernelCase(suite, params, type, addErodeCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          let locationList = decodeParams2Case(params, paramObjs, erodeCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addErodeModeCase(suite, [erodeCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addErodeModeCase(suite, combiErode, 0);
+      }
+      setBenchmarkSuite(suite, "erode", currentCaseId);
+      log(`Running ${totalCaseNum} tests from erode`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let erodeCombinations = [combiErode];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_filter2D.html b/modules/js/perf/perf_imgproc/perf_filter2D.html
new file mode 100644
index 0000000000..347fa8076d
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_filter2D.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>Filter2D</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (320x240, 3, BORDER_CONSTANT)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_filter2D.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_filter2D.js b/modules/js/perf/perf_imgproc/perf_filter2D.js
new file mode 100644
index 0000000000..d92dc2b55a
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_filter2D.js
@@ -0,0 +1,129 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const Filter2dSize = [cvSize.szQVGA, cvSize.sz1080p];
+    const Filter2dKsize = ["3", "5"];
+    const Filter2dBorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT_101"];
+    const DISABLED_Filter2dBorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE"];
+    const combiFilter2dCase = combine(Filter2dSize, Filter2dKsize, Filter2dBorderMode);
+    const combiDISABLEDFilter2dCase = combine(Filter2dSize, Filter2dKsize, DISABLED_Filter2dBorderMode);
+
+    function addFilter2dCase(suite, type) {
+        suite.add('filter2d', function() {
+            cv.filter2D(src, dst, cv.CV_8UC4, kernel, new cv.Point(1, 1), 0.0, borderMode);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let ksize = parseInt(this.params.ksize);
+                let borderMode = cv[this.params.borderMode];
+
+                let src = new cv.Mat(size, cv.CV_8UC4);
+                let dst = new cv.Mat(size, cv.CV_8UC4);
+                let kernelElement = [];
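+                // fill a ksize x ksize kernel with random values in [-3, 10)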
+                for (let i = 0; i < ksize*ksize; i++) {
+                    let randNum = Math.random();
+                    kernelElement.push(-3.0+randNum*13.0);
+                }
+                let kernel = cv.matFromArray(ksize, ksize, cv.CV_32FC1, kernelElement);
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+                kernel.delete();
+              }
+          });
+    }
+
+    function addFilter2dModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let ksize = combination[i][1];
+        let borderMode = combination[i][2];
+        let params = {size: size, ksize: ksize, borderMode:borderMode};
+        addKernelCase(suite, params, type, addFilter2dCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+        let suite = new Benchmark.Suite;
+        totalCaseNum = 0;
+        currentCaseId = 0;
+
+        if (/\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) {
+            let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g)[0];
+            let paramObjs = [];
+            paramObjs.push({name:"size", value:"", reg:[""], index:0});
+            paramObjs.push({name:"ksize", value:"", reg:["/\\b[0-9]\\b/"], index:1});
+            paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2});
+            let locationList = decodeParams2Case(params, paramObjs,filter2dCombinations);
+
+            for (let i = 0; i < locationList.length; i++){
+                let first = locationList[i][0];
+                let second = locationList[i][1];
+                addFilter2dModeCase(suite, [filter2dCombinations[first][second]], 0);
+              }
+        } else {
+          log("no filter or getting invalid params, run all the cases");
+          addFilter2dModeCase(suite, combiFilter2dCase, 0);
+        }
+        setBenchmarkSuite(suite, "filter2d", currentCaseId);
+        log(`Running ${totalCaseNum} tests from Filter2d`);
+        suite.run({ 'async': true }); // run the benchmark
+    }
+
+    let filter2dCombinations = [combiFilter2dCase];//,combiDISABLEDFilter2dCase];
+
+    if (isNodeJs) {
+        const args = process.argv.slice(2);
+        let paramsContent = '';
+        if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g.test(args.toString())) {
+          paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g)[0];
+        }
+        genBenchmarkCase(paramsContent);
+      } else {
+        runButton.onclick = function() {
+          let paramsContent = paramsElement.value;
+          genBenchmarkCase(paramsContent);
+          if (totalCaseNum !== 0) {
+            disableButton();
+          }
+        }
+      }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_gaussianBlur.html b/modules/js/perf/perf_imgproc/perf_gaussianBlur.html
new file mode 100644
index 0000000000..3f56c22f7d
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_gaussianBlur.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>gaussianBlur</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1280x720, CV_8UC1, BORDER_REPLICATE)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_gaussianBlur.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_gaussianBlur.js b/modules/js/perf/perf_imgproc/perf_gaussianBlur.js
new file mode 100644
index 0000000000..33c5401a7e
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_gaussianBlur.js
@@ -0,0 +1,126 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const GaussianBlurSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA, cvSize.sz720p];
+    const GaussianBlurType = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1"];
+    const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"];
+    const BorderType3x3ROI = ["BORDER_REPLICATE", "BORDER_CONSTANT", "BORDER_REFLECT", "BORDER_REFLECT101"];
+
+    const combiGaussianBlurBorder3x3 = combine(GaussianBlurSize, GaussianBlurType, BorderType3x3);
+    const combiGaussianBlurBorder3x3ROI = combine(GaussianBlurSize, GaussianBlurType, BorderType3x3ROI);
+
+    function addGaussianBlurCase(suite, type) {
+        suite.add('gaussianBlur', function() {
+            cv.GaussianBlur(src, dst, ksize, 1, 0, borderType);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let borderType = cv[this.params.borderType];
+                let src = new cv.Mat(size, matType);
+                let dst = new cv.Mat(size, matType);
+                let ksizeNum = this.params.ksize;
+                let ksize = new cv.Size(ksizeNum, ksizeNum);
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+              }
+          });
+    }
+
+    function addGaussianBlurModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+        let borderType = combination[i][2];
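+        // the combination-set index also selects the kernel size: 0 -> 3x3, 1 -> 5x5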
+        let ksizeArray = [3, 5];
+        let params = {size: size, matType:matType, ksize: ksizeArray[type], borderType:borderType};
+        addKernelCase(suite, params, type, addGaussianBlurCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2});
+          let locationList = decodeParams2Case(params, paramObjs,gaussianBlurCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addGaussianBlurModeCase(suite, [gaussianBlurCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addGaussianBlurModeCase(suite, combiGaussianBlurBorder3x3, 0);
+        addGaussianBlurModeCase(suite, combiGaussianBlurBorder3x3ROI, 1);
+      }
+      setBenchmarkSuite(suite, "gaussianBlur", currentCaseId);
+      log(`Running ${totalCaseNum} tests from gaussianBlur`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let gaussianBlurCombinations = [combiGaussianBlurBorder3x3, combiGaussianBlurBorder3x3ROI];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_medianBlur.html b/modules/js/perf/perf_imgproc/perf_medianBlur.html
new file mode 100644
index 0000000000..6e390beec2
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_medianBlur.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>MedianBlur</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1280x720, CV_8UC1, 3)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_medianBlur.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_medianBlur.js b/modules/js/perf/perf_imgproc/perf_medianBlur.js
new file mode 100644
index 0000000000..69b7ba3ead
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_medianBlur.js
@@ -0,0 +1,119 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const MedianBlurSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA, cvSize.sz720p];
+    const MedianBlurType = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1"];
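+    // aperture sizes 3 and 5: cv.medianBlur requires an odd kernel size greater than 1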
+    const combiMedianBlur = combine(MedianBlurSize, MedianBlurType, [3,5]);
+
+    function addMedianBlurCase(suite, type) {
+        suite.add('medianBlur', function() {
+            cv.medianBlur(src, dst, ksize);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let ksize = this.params.ksize;
+                let src = new cv.Mat(size, matType);
+                let dst = new cv.Mat(size, matType);
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+              }
+          });
+    }
+
+    function addMedianBlurModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+        let ksize = combination[i][2];
+
+        let params = {size: size, matType:matType, ksize: ksize};
+        addKernelCase(suite, params, type, addMedianBlurCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          paramObjs.push({name:"ksize", value: "", reg:["/\\b[0-9]\\b/"], index:2});
+          let locationList = decodeParams2Case(params, paramObjs, medianBlurCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addMedianBlurModeCase(suite, [medianBlurCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addMedianBlurModeCase(suite, combiMedianBlur, 0);
+      }
+      setBenchmarkSuite(suite, "medianBlur", currentCaseId);
+      log(`Running ${totalCaseNum} tests from medianBlur`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let medianBlurCombinations = [combiMedianBlur];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_pyrDown.html b/modules/js/perf/perf_imgproc/perf_pyrDown.html
new file mode 100644
index 0000000000..f90ac5f55e
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_pyrDown.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>pyrDown</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (1920x1080, CV_8UC3)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_pyrDown.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_pyrDown.js b/modules/js/perf/perf_imgproc/perf_pyrDown.js
new file mode 100644
index 0000000000..a98b109ade
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_pyrDown.js
@@ -0,0 +1,117 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const PyrDownSize = [cvSize.sz1080p, cvSize.sz720p, cvSize.szVGA, cvSize.szQVGA, cvSize.szODD];
+    const PyrDownType = ["CV_8UC1", "CV_8UC3", "CV_8UC4", "CV_16SC1", "CV_16SC3", "CV_16SC4", "CV_32FC1", "CV_32FC3", "CV_32FC4"];
+
+    const combiPyrDown = combine(PyrDownSize, PyrDownType);
+
+    function addPyrDownCase(suite, type) {
+        suite.add('pyrDown', function() {
+            cv.pyrDown(src, dst);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let src = new cv.Mat(size, matType);
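+                // preallocate dst at pyrDown's default output size ((rows + 1) / 2, (cols + 1) / 2)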
+                let dst = new cv.Mat(Math.floor((size.height + 1) / 2), Math.floor((size.width + 1) / 2), matType);
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+              }
+          });
+    }
+
+    function addPyrDownModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+
+        let params = {size: size, matType:matType};
+        addKernelCase(suite, params, type, addPyrDownCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          let locationList = decodeParams2Case(params, paramObjs, pyrDownCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addPyrDownModeCase(suite, [pyrDownCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addPyrDownModeCase(suite, combiPyrDown, 0);
+      }
+      setBenchmarkSuite(suite, "pyrDown", currentCaseId);
+      log(`Running ${totalCaseNum} tests from pyrDown`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let pyrDownCombinations = [combiPyrDown];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_remap.html b/modules/js/perf/perf_imgproc/perf_remap.html
new file mode 100644
index 0000000000..6812adb0a0
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_remap.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>Remap</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (640x480, CV_16UC1, CV_16SC2, INTER_NEAREST)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_remap.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_remap.js b/modules/js/perf/perf_imgproc/perf_remap.js
new file mode 100644
index 0000000000..fe2e5d7541
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_remap.js
@@ -0,0 +1,183 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const RemapSize = [cvSize.szVGA, cvSize.sz1080p];
+    const RemapSrcType = ["CV_16UC1", "CV_16SC1", "CV_32FC1"];
+    const RemapType = ["CV_16SC2", "CV_32FC1", "CV_32FC2"];
+    const InterType = ["INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LANCZOS4"];
+    const combiRemap = combine(RemapSize, RemapSrcType, RemapType, InterType);
+
+    function addRemapCase(suite, type) {
+        suite.add('remap', function() {
+            cv.remap(src, dst, map1, map2, interType);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let matType = cv[this.params.matType];
+                let mapType = cv[this.params.mapType];
+                let interType = cv[this.params.interType];
+
+                let src = new cv.Mat(size, matType);
+                let dst = new cv.Mat(size, matType);
+                let map1 = new cv.Mat(size, mapType);
+                let map2;
+                if (mapType == cv.CV_32FC1) {
+                  map2 = new cv.Mat(size, mapType);
+                } else if (interType != cv.INTER_NEAREST && mapType == cv.CV_16SC2) {
+                  map2 = cv.Mat.zeros(size, cv.CV_16UC1);
+                } else {
+                  map2 = new cv.Mat();
+                }
+
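+                // fill src with random values and build remap tables that flip
+                // the image horizontally: dst(j, i) = src(j, cols - i - 1)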
+                for (let j = 0; j < map1.rows; j++) {
+                  for (let i = 0; i <  map1.cols; i++) {
+                    let randNum = Math.random();
+                    let view, view1;
+                    switch(matType) {
+                      case cv.CV_16UC1:
+                        view = src.ushortPtr(j,i);
+                        view[0] = Math.floor(randNum*256);
+                        break;
+                      case cv.CV_16SC1:
+                        view = src.shortPtr(j,i);
+                        view[0] = Math.floor(randNum*256);
+                        break;
+                      case cv.CV_32FC1:
+                        view = src.floatPtr(j,i);
+                        view[0] = randNum*256;
+                        break;
+                      default:
+                        console.error("Unknown conversion type 1");
+                        break;
+                    }
+
+                    switch(mapType) {
+                      case cv.CV_32FC1:
+                        view1 = map1.floatPtr(j,i);
+                        let view2 = map2.floatPtr(j,i);
+                        view1[0] = src.cols - i - 1;
+                        view2[0] = j;
+                        break;
+                      case cv.CV_32FC2:
+                        view1 = map1.floatPtr(j,i);
+                        view1[0] = src.cols - i - 1;
+                        view1[1] = j;
+                        break;
+                      case cv.CV_16SC2:
+                        view1 = map1.shortPtr(j,i);
+                        view1[0] = src.cols - i - 1;
+                        view1[1] = j;
+                        break;
+                      default:
+                        console.error("Unknown conversion type 2");
+                        break;
+                    }
+                  }
+                }
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+                map1.delete();
+                map2.delete();
+              }
+          });
+    }
+
+    function addRemapModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let matType = combination[i][1];
+        let mapType = combination[i][2];
+        let interType = combination[i][3];
+
+        let params = {size: size, matType:matType, mapType:mapType, interType:interType};
+        addKernelCase(suite, params, type, addRemapCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1});
+          paramObjs.push({name:"mapType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/g"], index:2, loc:1});
+          paramObjs.push({name:"interType", value: "", reg:["/INTER\_\\w+/"], index:3});
+          let locationList = decodeParams2Case(params, paramObjs, remapCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addRemapModeCase(suite, [remapCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addRemapModeCase(suite, combiRemap, 0);
+      }
+      setBenchmarkSuite(suite, "remap", currentCaseId);
+      log(`Running ${totalCaseNum} tests from remap`);
+      suite.run({ 'async': true }); // run the benchmark
+  }
+
+    let remapCombinations = [combiRemap];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_resize.js b/modules/js/perf/perf_imgproc/perf_resize.js
index 4e71db3806..3eef30f0e3 100644
--- a/modules/js/perf/perf_imgproc/perf_resize.js
+++ b/modules/js/perf/perf_imgproc/perf_resize.js
@@ -11,18 +11,17 @@ if (isNodeJs) {
   var logElement = document.getElementById('log');
 }
 
-cv.onRuntimeInitialized = () => {
+function perf() {
+
   console.log('opencv.js loaded');
   if (isNodeJs) {
     global.cv = cv;
     global.combine = HelpFunc.combine;
-    global.fillGradient = HelpFunc.fillGradient;
     global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
-    global.cvSize = Base.cvSize;
+    global.cvSize = Base.getCvSize();
   } else {
-    runButton.removeAttribute('disabled');
-    runButton.setAttribute('class', 'btn btn-primary');
-    runButton.innerHTML = 'Run';
+    enableButton();
+    cvSize = getCvSize();
   }
   let totalCaseNum, currentCaseId;
 
@@ -59,185 +58,82 @@ cv.onRuntimeInitialized = () => {
   const scalesAreaFast = [2];
   const combiAreaFast = combine(matTypesAreaFast, sizesAreaFast, scalesAreaFast);
 
-  function addResizeUpLinearCase(suite, combination) {
-    totalCaseNum += combination.length;
-    for (let i = 0; i < combination.length; ++i) {
-      let matType = combination[i][0];
-      let from = combination[i][1];
-      let to = combination[i][2];
-
-      suite.add('resize', function() {
-        cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT);
-        }, {
-          'setup': function() {
-            let from = this.params.from;
-            let to = this.params.to;
-            let matType = cv[this.params.matType];
-            let src = new cv.Mat(from, matType);
-            let dst = new cv.Mat(to, matType);
-            fillGradient(cv, src);
-              },
-          'teardown': function() {
-            src.delete();
-            dst.delete();
-          }
-      });
-
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        from: from,
-        to: to,
-        matType: matType
-      };
-    }
-  }
-
-  function addResizeDownLinearCase(suite, combination) {
-    totalCaseNum += combination.length;
-    for (let i = 0; i < combination.length; ++i) {
-      let matType = combination[i][0];
-      let from = combination[i][1];
-      let to = combination[i][2];
-
-      suite.add('resize', function() {
+  function addResizeCase(suite, type) {
+    suite.add('resize', function() {
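+      // "area" cases use INTER_AREA with a dst preallocated at the integer scale;
+      // all other cases resize to an explicit target size with INTER_LINEAR_EXACT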
+      if (type == "area") {
+        cv.resize(src, dst, dst.size(), 0, 0, cv.INTER_AREA);
+      } else {
         cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT);
-        }, {
-          'setup': function() {
-            let from = this.params.from;
-            let to = this.params.to;
-            let matType = cv[this.params.matType];
-            let src = new cv.Mat(from, matType);
-            let dst = new cv.Mat(to, matType);
+      }
+    }, {
+        'setup': function() {
+          let from = this.params.from;
+          let to = this.params.to;
+          let matType = cv[this.params.matType];
+          let src = new cv.Mat(from, matType);
+          let type = this.params.modeType;
+          let dst;
+          if (type == "area") {
+            dst = new cv.Mat(from.height/scale, from.width/scale, matType);
+          } else {
+            dst = new cv.Mat(to, matType);
             fillGradient(cv, src);
-              },
-          'teardown': function() {
-            src.delete();
-            dst.delete();
           }
-      });
-
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        from: from,
-        to: to,
-        matType: matType
-      };
-    }
+          },
+        'teardown': function() {
+          src.delete();
+          dst.delete();
+        }
+    });
   }
 
-  function addResizeAreaFastCase(suite, combination) {
+  function addResizeModeCase(suite, combination, type) {
     totalCaseNum += combination.length;
     for (let i = 0; i < combination.length; ++i) {
       let matType = combination[i][0];
       let from = combination[i][1];
-      let scale = combination[i][2];
-      from.width = (Math.floor(from.width/scale))*scale;
-      from.height = (Math.floor(from.height/scale))*scale;
-      let to = {
-        width: from.width/scale,
-        height: from.height/scale};  // for params print
-
-      suite.add('resize', function() {
-        cv.resize(src, dst, dst.size(), 0, 0, cv.INTER_AREA);
-        }, {
-          'setup': function() {
-            let from = this.params.from;
-            let scale = this.params.scale;
-            let matType = cv[this.params.matType];
-            let src = new cv.Mat(from, matType);
-            let dst = new cv.Mat(from.height/scale, from.width/scale, matType);
-              },
-          'teardown': function() {
-            src.delete();
-            dst.delete();
-          }
-      });
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        from: from,
-        scale: scale,
-        matType: matType
-      };
-    }
-  }
-
-  function decodeParams2Case(suite, params) {
-    let sizeString = (params.match(/[0-9]+x[0-9]+/g) || []).slice(0, 2).toString();
-    let sizes = (sizeString.match(/[0-9]+/g) || []);
-    let size1Str = sizes.slice(0, 2).toString();
-    let size2Str = sizes.slice(2, 5).toString();
-    let matType = (params.match(/CV\_[0-9]+[A-z][A-z][0-9]/) || []).toString();
-    let size1 = cvtStr2cvSize(size1Str);
-    let size2 = cvtStr2cvSize(size2Str);
-    // check if the params match and add case
-    for (let i = 0; i < combinations.length; ++i) {
-      let combination = combinations[i];
-      for (let j = 0; j < combination.length; ++j) {
-        if (matType === combination[j][0] && size1 === combination[j][1] && size2 === combination[j][2]) {
-          resizeFunc[i](suite, [combination[j]]);
-        }
+      let params;
+      if (type == "area") {
+        let scale = combination[i][2];
+        params = { from: from, scale: scale, matType: matType, modeType: type };
+      } else {
+        let to = combination[i][2];
+        params = { from: from, to: to, matType: matType, modeType: type};
       }
+      addKernelCase(suite, params, type, addResizeCase);
     }
   }
 
-  function log(message) {
-    console.log(message);
-    if (!isNodeJs) {
-      logElement.innerHTML += `\n${'\t'.repeat(1) + message}`;
-    }
-  }
-
-  function setBenchmarkSuite(suite) {
-    suite
-    // add listeners
-    .on('cycle', function(event) {
-      ++currentCaseId;
-      let params = event.target.params;
-      let matType = params.matType;
-      let size1 = params.from;
-      let size2 = params.to;
-      log(`=== ${event.target.name} ${currentCaseId} ===`);
-      log(`params: (${matType},${parseInt(size1.width)}x${parseInt(size1.height)},`+
-          `${parseInt(size2.width)}x${parseInt(size2.height)})`);
-      log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms');
-      log('mean time:' +String(event.target.stats.mean*1000)+' ms');
-      log('stddev time:' +String(event.target.stats.deviation*1000)+' ms');
-      log(String(event.target));
-    })
-    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
-    .on('complete', function(event) {
-      log(`\n ###################################`)
-      log(`Finished testing ${event.currentTarget.length} cases \n`);
-      if (!isNodeJs) {
-        runButton.removeAttribute('disabled');
-        runButton.setAttribute('class', 'btn btn-primary');
-        runButton.innerHTML = 'Run';
-      }
-    });
-  }
-
   function genBenchmarkCase(paramsContent) {
     let suite = new Benchmark.Suite;
     totalCaseNum = 0;
     currentCaseId = 0;
     if (/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g.test(paramsContent.toString())) {
       let params = paramsContent.toString().match(/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g)[0];
-      decodeParams2Case(suite, params);
+      let paramObjs = [];
+      paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[A-z][A-z][0-9]/"], index:0});
+      paramObjs.push({name:"size1", value:"", reg:[""], index:1});
+      paramObjs.push({name:"size2", value:"", reg:[""], index:2});
+      let locationList = decodeParams2Case(params, paramObjs,combinations);
+
+      for (let i = 0; i < locationList.length; i++){
+        let first = locationList[i][0];
+        let second = locationList[i][1];
+        addResizeModeCase(suite, [combinations[first][second]], "linear");
+      }
     } else {
       log("no filter or getting invalid params, run all the cases");
-      addResizeUpLinearCase(suite, combiUpLinear);
-      addResizeDownLinearCase(suite, combiDownLinear);
+      addResizeModeCase(suite, combiUpLinear, "linear");
+      addResizeModeCase(suite, combiDownLinear, "linear");
     }
-    setBenchmarkSuite(suite);
+    setBenchmarkSuite(suite, "resize", currentCaseId);
     log(`Running ${totalCaseNum} tests from Resize`);
     suite.run({ 'async': true }); // run the benchmark
   }
 
   // init
-  let resizeFunc = [addResizeUpLinearCase, addResizeDownLinearCase];//, addResizeAreaFastCase];
   let combinations = [combiUpLinear, combiDownLinear];//, combiAreaFast];
 
   // set test filter params
@@ -253,10 +147,19 @@ cv.onRuntimeInitialized = () => {
       let paramsContent = paramsElement.value;
       genBenchmarkCase(paramsContent);
       if (totalCaseNum !== 0) {
-        runButton.setAttribute("disabled", "disabled");
-        runButton.setAttribute('class', 'btn btn-primary disabled');
-        runButton.innerHTML = "Running";
+        disableButton();
       }
     }
   }
-};
\ No newline at end of file
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
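
Note on the new helper used above: `decodeParams2Case` is defined in perf_helpfunc.js (outside this hunk) and replaces the per-kernel decoders deleted here. A minimal sketch of its observable contract, with hypothetical table contents; the real helper additionally converts "640x480"-style sizes via `cvtStr2cvSize`:

```
function decodeParams2CaseSketch(filter, paramObjs, tables) {
  // Split "(a, b, c)" into trimmed fields; resize-style filters have no nested parens.
  const fields = filter.replace(/[()]/g, '').split(',').map(s => s.trim());
  const locations = [];
  for (let t = 0; t < tables.length; t++) {
    for (let r = 0; r < tables[t].length; r++) {
      // A case matches when every described field equals the filter's field.
      if (paramObjs.every(p => String(tables[t][r][p.index]) === fields[p.index])) {
        locations.push([t, r]); // [combination-table index, row index]
      }
    }
  }
  return locations;
}

// Hypothetical resize-style table of [matType, from, to] rows:
const tables = [[['CV_8UC4', '640x480', '1280x720'], ['CV_8UC1', '640x480', '320x240']]];
const paramObjs = [
  {name: 'matType', value: '', reg: [''], index: 0},
  {name: 'size1', value: '', reg: [''], index: 1},
  {name: 'size2', value: '', reg: [''], index: 2},
];
console.log(decodeParams2CaseSketch('(CV_8UC4, 640x480, 1280x720)', paramObjs, tables)); // [[0, 0]]
```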
diff --git a/modules/js/perf/perf_imgproc/perf_scharr.html b/modules/js/perf/perf_imgproc/perf_scharr.html
new file mode 100644
index 0000000000..720ca741eb
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_scharr.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h7>Image Processing</h7>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h7>Scharr</h7>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (640x480, CV_16SC1, (0,1), BORDER_REPLICATE)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_scharr.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_scharr.js b/modules/js/perf/perf_imgproc/perf_scharr.js
new file mode 100644
index 0000000000..a76a93078c
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_scharr.js
@@ -0,0 +1,156 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const ScharrSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA];
+    const Scharrdxdy = ["(1,0)", "(0,1)"];
+    const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"];
+    const BorderType3x3ROI = ["BORDER_DEFAULT", "BORDER_REPLICATE|BORDER_ISOLATED", "BORDER_CONSTANT|BORDER_ISOLATED"];
+
+    const combiScharrBorder3x3 = combine(ScharrSize, ["CV_16SC1", "CV_32FC1"], Scharrdxdy, BorderType3x3);
+    const combiScharrBorder3x3ROI = combine(ScharrSize, ["CV_16SC1", "CV_32FC1"], Scharrdxdy, BorderType3x3ROI);
+
+    function addScharrCase(suite, type) {
+        suite.add('scharr', function() {
+            cv.Scharr(src, dst, ddepth, dx, dy, 1, 0, borderType);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let ddepth = cv[this.params.ddepth];
+                let dxdy = this.params.dxdy;
+                let type = this.params.type;
+                let src, dst;
+                if (type == 0) {
+                  src = new cv.Mat(size[1], size[0], cv.CV_8U);
+                  dst = new cv.Mat(size[1], size[0], ddepth);
+                } else {
+                  src = new cv.Mat(size[1]+10, size[0]+10, cv.CV_8U);
+                  dst = new cv.Mat(size[1]+10, size[0]+10, ddepth);
+                  src = src.colRange(5, size[0]+5);
+                  src = src.rowRange(5, size[1]+5);
+                  dst = dst.colRange(5, size[0]+5);
+                  dst = dst.rowRange(5, size[1]+5);
+                }
+
+                let dx = parseInt(dxdy[1]); // dxdy is a string like "(1,0)"
+                let dy = parseInt(dxdy[3]); // chars at 1 and 3 are dx and dy
+                let borderTypeArray = this.params.borderType;
+                let borderType;
+                if (borderTypeArray.length == 1) {
+                  borderType = cv[borderTypeArray[0]];
+                } else {
+                  borderType = cv[borderTypeArray[0]] | cv[borderTypeArray[1]];
+                }
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+              }
+          });
+    }
+
+    function addScharrModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let ddepth = combination[i][1];
+        let dxdy = combination[i][2];
+        let borderType = combination[i][3];
+        let sizeArray = [size.width, size.height];
+
+        let borderTypeArray = borderType.split("|");
+        let params = {size: sizeArray, ddepth: ddepth, dxdy: dxdy, borderType:borderTypeArray, type:type};
+        addKernelCase(suite, params, type, addScharrCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+        let suite = new Benchmark.Suite;
+        totalCaseNum = 0;
+        currentCaseId = 0;
+        let params = "";
+        let paramObjs = [];
+        paramObjs.push({name:"size", value:"", reg:[""], index:0});
+        paramObjs.push({name:"ddepth", value:"", reg:["/CV\_[0-9]+[FSUfsu]C1/g"], index:1});
+        paramObjs.push({name:"dxdy", value:"", reg:["/\\([0-2],[0-2]\\)/"], index:2});
+
+        if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(paramsContent.toString())) {
+            params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0];
+            paramObjs.push({name:"borderType", value:"", reg:["/BORDER\_\\w+/"], index:3});
+        } else if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(paramsContent.toString())) {
+            params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0];
+            paramObjs.push({name:"borderType", value:"", reg:["/BORDER\_\\w+\\|BORDER\_\\w+/"], index:3});
+        }
+
+        if (params != "") {
+            let locationList = decodeParams2Case(params, paramObjs, scharrCombinations);
+            for (let i = 0; i < locationList.length; i++){
+                let first = locationList[i][0];
+                let second = locationList[i][1];
+                addScharrModeCase(suite, [scharrCombinations[first][second]], first);
+              }
+        } else {
+          log("no filter or getting invalid params, run all the cases");
+          addScharrModeCase(suite, combiScharrBorder3x3, 0);
+          addScharrModeCase(suite, combiScharrBorder3x3ROI, 1);
+        }
+        setBenchmarkSuite(suite, "scharr", currentCaseId);
+        log(`Running ${totalCaseNum} tests from Scharr`);
+        suite.run({ 'async': true }); // run the benchmark
+    }
+
+    let scharrCombinations = [combiScharrBorder3x3, combiScharrBorder3x3ROI];
+
+    if (isNodeJs) {
+        const args = process.argv.slice(2);
+        let paramsContent = '';
+        if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(args.toString())) {
+          paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0];
+        } else if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(args.toString())) {
+          paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0];
+        }
+        genBenchmarkCase(paramsContent);
+      } else {
+        runButton.onclick = function() {
+          let paramsContent = paramsElement.value;
+          genBenchmarkCase(paramsContent);
+          if (totalCaseNum !== 0) {
+            disableButton();
+          }
+        }
+      }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
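
The `combine` helper used in these perf files also lives in perf_helpfunc.js and is not shown in this patch; judging by how its result is indexed (`combination[i][0]`, `combination[i][1]`, ...), it builds a cartesian product of its argument lists. A self-contained sketch under that assumption:

```
function combineSketch(...lists) {
  // Cartesian product: each resulting case holds one value per argument list,
  // in argument order, matching the combination[i][j] indexing used above.
  let cases = [[]];
  for (const list of lists) {
    const next = [];
    for (const partial of cases) {
      for (const value of list) {
        next.push(partial.concat([value]));
      }
    }
    cases = next;
  }
  return cases;
}

// e.g. 2 sizes x 2 types -> 4 cases:
console.log(combineSketch(['640x480', '1280x720'], ['CV_16SC1', 'CV_32FC1']));
```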
diff --git a/modules/js/perf/perf_imgproc/perf_sobel.html b/modules/js/perf/perf_imgproc/perf_sobel.html
new file mode 100644
index 0000000000..b41c940a23
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_sobel.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h7>Image Processing</h7>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h7>Sobel</h7>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (640x480, CV_16SC1, (0,1), BORDER_REPLICATE)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_sobel.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_sobel.js b/modules/js/perf/perf_imgproc/perf_sobel.js
new file mode 100644
index 0000000000..b7064e852a
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_sobel.js
@@ -0,0 +1,170 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const SobelSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA];
+    const Sobel3x3dxdy = ["(0,1)", "(1,0)", "(1,1)", "(0,2)", "(2,0)", "(2,2)"];
+    const Sobeldxdy = ["(0,1)", "(1,0)", "(1,1)", "(0,2)", "(2,0)"];
+    const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"];
+    const BorderType3x3ROI = ["BORDER_DEFAULT", "BORDER_REPLICATE|BORDER_ISOLATED", "BORDER_CONSTANT|BORDER_ISOLATED"];
+    const BorderType = ["BORDER_REPLICATE", "BORDER_CONSTANT", "BORDER_REFLECT", "BORDER_REFLECT101"];
+    const BorderTypeROI = ["BORDER_DEFAULT", "BORDER_REPLICATE|BORDER_ISOLATED", "BORDER_CONSTANT|BORDER_ISOLATED", "BORDER_REFLECT|BORDER_ISOLATED", "BORDER_REFLECT101|BORDER_ISOLATED"];
+
+    const combiSobelBorder3x3 = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobel3x3dxdy, BorderType3x3);
+    const combiSobelBorder3x3ROI = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobel3x3dxdy, BorderType3x3ROI);
+    const combiSobelBorder5x5 = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobeldxdy, BorderType);
+    const combiSobelBorder5x5ROI = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobeldxdy, BorderTypeROI);
+
+    function addSobelCase(suite, type) {
+        suite.add('sobel', function() {
+            cv.Sobel(src, dst, ddepth, dx, dy, ksize, 1, 0, borderType);
+          }, {
+              'setup': function() {
+                let size = this.params.size;
+                let ddepth = cv[this.params.ddepth];
+                let dxdy = this.params.dxdy;
+                let ksize = this.params.ksize;
+                let type = this.params.type;
+                let src, dst;
+                if (type % 2 == 0) {
+                  src = new cv.Mat(size[1], size[0], cv.CV_8U);
+                  dst = new cv.Mat(size[1], size[0], ddepth);
+                } else {
+                  src = new cv.Mat(size[1]+10, size[0]+10, cv.CV_8U);
+                  dst = new cv.Mat(size[1]+10, size[0]+10, ddepth);
+                  src = src.colRange(5, size[0]+5);
+                  src = src.rowRange(5, size[1]+5);
+                  dst = dst.colRange(5, size[0]+5);
+                  dst = dst.rowRange(5, size[1]+5);
+                }
+
+                let dx = parseInt(dxdy[1]); // dxdy is a string like "(1,0)"
+                let dy = parseInt(dxdy[3]); // chars at 1 and 3 are dx and dy
+                let borderTypeArray = this.params.borderType;
+                let borderType;
+                if (borderTypeArray.length == 1) {
+                  borderType = cv[borderTypeArray[0]];
+                } else {
+                  borderType = cv[borderTypeArray[0]] | cv[borderTypeArray[1]];
+                }
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+              }
+          });
+    }
+
+    function addSobelModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let ddepth = combination[i][1];
+        let dxdy = combination[i][2];
+        let borderType = combination[i][3];
+        let sizeArray = [size.width, size.height];
+        let ksize;
+        if (type < 2) {
+          ksize = 3;
+        } else {
+          ksize = 5;
+        }
+
+        let borderTypeArray = borderType.split("|");
+        let params = {size: sizeArray, ddepth: ddepth, dxdy: dxdy, ksize:ksize, borderType:borderTypeArray, type:type};
+        addKernelCase(suite, params, type, addSobelCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+        let suite = new Benchmark.Suite;
+        totalCaseNum = 0;
+        currentCaseId = 0;
+        let params = "";
+        let paramObjs = [];
+        paramObjs.push({name:"size", value:"", reg:[""], index:0});
+        paramObjs.push({name:"ddepth", value:"", reg:["/CV\_[0-9]+[FSUfsu]C1/g"], index:1});
+        paramObjs.push({name:"dxdy", value:"", reg:["/\\([0-2],[0-2]\\)/"], index:2});
+
+        if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(paramsContent.toString())) {
+            params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0];
+            paramObjs.push({name:"borderType", value:"", reg:["/BORDER\_\\w+/"], index:3});
+        } else if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(paramsContent.toString())) {
+            params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0];
+            paramObjs.push({name:"borderType", value:"", reg:["/BORDER\_\\w+\\|BORDER\_\\w+/"], index:3});
+        }
+
+        if (params != "") {
+            let locationList = decodeParams2Case(params, paramObjs, sobelCombinations);
+            for (let i = 0; i < locationList.length; i++){
+                let first = locationList[i][0];
+                let second = locationList[i][1];
+                addSobelModeCase(suite, [sobelCombinations[first][second]], first);
+              }
+        } else {
+          log("no filter or getting invalid params, run all the cases");
+          addSobelModeCase(suite, combiSobelBorder3x3, 0);
+          addSobelModeCase(suite, combiSobelBorder3x3ROI, 1);
+          addSobelModeCase(suite, combiSobelBorder5x5, 2);
+          addSobelModeCase(suite, combiSobelBorder5x5ROI, 3);
+        }
+        setBenchmarkSuite(suite, "sobel", currentCaseId);
+        log(`Running ${totalCaseNum} tests from Sobel`);
+        suite.run({ 'async': true }); // run the benchmark
+    }
+
+    let sobelCombinations = [combiSobelBorder3x3, combiSobelBorder3x3ROI, combiSobelBorder5x5, combiSobelBorder5x5ROI];
+
+    if (isNodeJs) {
+        const args = process.argv.slice(2);
+        let paramsContent = '';
+        if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(args.toString())) {
+          paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0];
+        } else if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(args.toString())) {
+          paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0];
+        }
+        genBenchmarkCase(paramsContent);
+      } else {
+        runButton.onclick = function() {
+          let paramsContent = paramsElement.value;
+          genBenchmarkCase(paramsContent);
+          if (totalCaseNum !== 0) {
+            disableButton();
+          }
+        }
+      }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
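
A note on the Benchmark.js pattern these perf files rely on: when `setup`/`teardown` are supplied, Benchmark.js decompiles them and splices them into the same compiled scope as the timed function, which is why variables such as `src`, `dst`, and `borderType` declared in `setup` are visible inside the test body. A minimal self-contained sketch of that behavior (assumes only the `benchmark` npm package):

```
const Benchmark = require('benchmark');

const suite = new Benchmark.Suite;
suite.add('scope-demo', function() {
  total += values.reduce((a, b) => a + b, 0); // declared in setup, visible here
}, {
  'setup': function() {
    let values = [1, 2, 3];
    let total = 0;
  },
  'teardown': function() {
    // the real perf cases free cv.Mat objects here (src.delete(), dst.delete())
  }
});
suite
  .on('cycle', (event) => console.log(String(event.target)))
  .run();
```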
diff --git a/modules/js/perf/perf_imgproc/perf_threshold.js b/modules/js/perf/perf_imgproc/perf_threshold.js
index 2616a2feaa..381ddaeade 100644
--- a/modules/js/perf/perf_imgproc/perf_threshold.js
+++ b/modules/js/perf/perf_imgproc/perf_threshold.js
@@ -11,17 +11,17 @@ if (isNodeJs) {
   var logElement = document.getElementById('log');
 }
 
-cv.onRuntimeInitialized = () => {
+function perf() {
+
   console.log('opencv.js loaded');
   if (isNodeJs) {
     global.cv = cv;
     global.combine = HelpFunc.combine;
     global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
-    global.cvSize = Base.cvSize;
+    global.cvSize = Base.getCvSize();
   } else {
-    runButton.removeAttribute('disabled');
-    runButton.setAttribute('class', 'btn btn-primary');
-    runButton.innerHTML = 'Run';
+    enableButton();
+    cvSize = getCvSize();
   }
   let totalCaseNum, currentCaseId;
 
@@ -32,173 +32,105 @@ cv.onRuntimeInitialized = () => {
   const combiSizeMatTypeThreshType = combine(typicalMatSizes, matTypes, threshTypes);
   const combiSizeOnly = combine(typicalMatSizes, ['CV_8UC1'], ['THRESH_BINARY|THRESH_OTSU']);
 
-  function addSizeMatTypeThreshTypeCase(suite, combination) {
-    totalCaseNum += combination.length;
-    for (let i = 0; i < combination.length; ++i) {
-      let matSize = combination[i][0];
-      let matType = combination[i][1];
-      let threshType = combination[i][2];
 
-      suite.add('threshold', function() {
+  function addThresholdCase(suite, type) {
+    suite.add('threshold', function() {
+      if (type == "sizeonly") {
+        cv.threshold(src, dst, threshold, thresholdMax, cv.THRESH_BINARY|cv.THRESH_OTSU);
+      } else {
         cv.threshold(src, dst, threshold, thresholdMax, threshType);
-        }, {
-          'setup': function() {
-            let matSize = this.params.matSize;
-            let matType = cv[this.params.matType];
-            let threshType = cv[this.params.threshType];
-            let threshold = 127.0;
-            let thresholdMax = 210.0;
-            let src = new cv.Mat(matSize, matType);
-            let dst = new cv.Mat(matSize, matType);
-            let srcView = src.data;
-            srcView[0] = 0;
-            srcView[1] = 100;
-            srcView[2] = 200;
-              },
-          'teardown': function() {
-            src.delete();
-            dst.delete();
+      }
+      }, {
+        'setup': function() {
+          let matSize = this.params.matSize;
+          let type =  this.params.modeType;
+          let src, dst, matType, threshType;
+          if (type == "sizeonly") {
+            src = new cv.Mat(matSize, cv.CV_8UC1);
+            dst = new cv.Mat(matSize, cv.CV_8UC1);
+          } else {
+            matType = cv[this.params.matType];
+            threshType = cv[this.params.threshType];
+            src = new cv.Mat(matSize, matType);
+            dst = new cv.Mat(matSize, matType);
           }
-      });
-
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        matSize: matSize,
-        matType: matType,
-        threshType: threshType
-      };
-    }
+          let threshold = 127.0;
+          let thresholdMax = 210.0;
+          let srcView = src.data;
+          srcView[0] = 0;
+          srcView[1] = 100;
+          srcView[2] = 200;
+            },
+        'teardown': function() {
+          src.delete();
+          dst.delete();
+        }
+    });
   }
 
-  function addSizeOnlyCase(suite, combination) {
+  function addThresholdModeCase(suite, combination, type) {
     totalCaseNum += combination.length;
     for (let i = 0; i < combination.length; ++i) {
       let matSize = combination[i][0];
-
-      suite.add('threshold', function() {
-        cv.threshold(src, dst, threshold, thresholdMax, cv.THRESH_BINARY|cv.THRESH_OTSU);
-        }, {
-          'setup': function() {
-            let matSize = this.params.matSize;
-            let threshold = 127.0;
-            let thresholdMax = 210.0;
-            let src = new cv.Mat(matSize, cv.CV_8UC1);
-            let dst = new cv.Mat(matSize, cv.CV_8UC1);
-            let srcView = src.data;
-            srcView[0] = 0;
-            srcView[1] = 100;
-            srcView[2] = 200;
-              },
-          'teardown': function() {
-            src.delete();
-            dst.delete();
-          }
-      });
-
-      // set init params
-      let index = suite.length - 1;
-      suite[index].params = {
-        matSize: matSize,
-        matType: 'CV_8UC1',
-        threshType: 'THRESH_BINARY|THRESH_OTSU'
-      };
-    }
-  }
-
-  function decodeParams2Case(suite, params, isSizeOnly) {
-    let sizeString = params.match(/[0-9]+x[0-9]+/g).toString();
-    let sizes = sizeString.match(/[0-9]+/g);
-    let size1Str = sizes.slice(0, 2).toString();
-    let matSize = cvtStr2cvSize(size1Str);
-    let matType, threshType;
-    if (isSizeOnly) {
-      matType = 'CV_8UC1';
-      threshType = 'THRESH_BINARY|THRESH_OTSU';
-    } else {
-      matType = (params.match(/CV\_[0-9]+[A-z][A-z][0-9]/) || []).toString();
-      threshType = (params.match(/THRESH\_[A-z]+\_?[A-z]*/) || []).toString();
-    }
-    // check if the params match and add case
-    for (let i = 0; i < combinations.length; ++i) {
-      let combination = combinations[i];
-      for (let j = 0; j < combination.length; ++j) {
-        if (matSize === combination[j][0] && matType === combination[j][1] && threshType === combination[j][2]) {
-          thresholdFunc[i](suite, [combination[j]]);
-        }
+      let matType = 'CV_8UC1';
+      let threshType = 'THRESH_BINARY|THRESH_OTSU';
+      if (type != "sizeonly") {
+        matType = combination[i][1];
+        threshType = combination[i][2];
       }
+      let params = {matSize: matSize, matType: matType, threshType: threshType, modeType: type};
+      addKernelCase(suite, params, type, addThresholdCase);
     }
   }
 
-  function log(message) {
-    console.log(message);1
-    if (!isNodeJs) {
-      logElement.innerHTML += `\n${'\t'.repeat(1) + message}`;
-    }
-  }
-
-  function setBenchmarkSuite(suite) {
-    suite
-    // add listeners
-    .on('cycle', function(event) {
-      ++currentCaseId;
-      let params = event.target.params;
-      let matSize = params.matSize;
-      let matType = params.matType;
-      let threshType = params.threshType;
-      log(`=== ${event.target.name} ${currentCaseId} ===`);
-      log(`params: (${parseInt(matSize.width)}x${parseInt(matSize.height)},`+
-          `${matType},${threshType})`);
-      log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms');
-      log('mean time:' +String(event.target.stats.mean*1000)+' ms');
-      log('stddev time:' +String(event.target.stats.deviation*1000)+' ms');
-      log(String(event.target));
-    })
-    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
-    .on('complete', function(event) {
-      log(`\n ###################################`)
-      log(`Finished testing ${event.currentTarget.length} cases \n`);
-      if (!isNodeJs) {
-        runButton.removeAttribute('disabled');
-        runButton.setAttribute('class', 'btn btn-primary');
-        runButton.innerHTML = 'Run';
-      }
-    });
-  }
-
   function genBenchmarkCase(paramsContent) {
     let suite = new Benchmark.Suite;
     totalCaseNum = 0;
     currentCaseId = 0;
-    if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g.test(paramsContent.toString())) {
-      let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g)[0];
-      let isSizeOnly = 0;
-      decodeParams2Case(suite, params, isSizeOnly);
+    let params = "";
+    let paramObjs = [];
+    paramObjs.push({name:"size", value:"", reg:[""], index:0});
+
+    if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g.test(paramsContent.toString())) {
+      params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g)[0];
+      paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[A-z][A-z][0-9]/"], index:1});
+      paramObjs.push({name:"threshType", value:"", reg:["/THRESH\_[A-z]+\_?[A-z]*/"], index:2});
     } else if (/[\ ]*[0-9]+x[0-9]+[\ ]*/g.test(paramsContent.toString())) {
-      let params = paramsContent.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0];
-      let isSizeOnly = 1;
-      decodeParams2Case(suite, params, isSizeOnly);
+      params = paramsContent.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0];
+      paramObjs.push({name:"matType", value:"CV_8UC1", reg:[""], index:1});
+      paramObjs.push({name:"threshType", value:"THRESH_BINARY|THRESH_OTSU", reg:[""], index:2});
     }
-    else {
+
+    if (params != "") {
+      let locationList = decodeParams2Case(params, paramObjs, combinations);
+      for (let i = 0; i < locationList.length; i++){
+        let first = locationList[i][0];
+        let second = locationList[i][1];
+        if (first == 0) {
+          addThresholdModeCase(suite, [combinations[first][second]], "normal");
+        } else {
+          addThresholdModeCase(suite, [combinations[first][second]], "sizeonly");
+        }
+      }
+    } else {
       log("no filter or getting invalid params, run all the cases");
-      addSizeMatTypeThreshTypeCase(suite, combiSizeMatTypeThreshType);
-      addSizeOnlyCase(suite, combiSizeOnly);
+      addThresholdModeCase(suite, combiSizeMatTypeThreshType, "normal");
+      addThresholdModeCase(suite, combiSizeOnly, "sizeonly");
     }
-    setBenchmarkSuite(suite);
+    setBenchmarkSuite(suite, "threshold", currentCaseId);
     log(`Running ${totalCaseNum} tests from Threshold`);
     suite.run({ 'async': true }); // run the benchmark
   }
 
   // init
-  let thresholdFunc = [addSizeMatTypeThreshTypeCase, addSizeOnlyCase];
   let combinations = [combiSizeMatTypeThreshType, combiSizeOnly];
 
   // set test filter params
   if (isNodeJs) {
     const args = process.argv.slice(2);
     let paramsContent = '';
-    if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g.test(args.toString())) {
-      paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g)[0];
+    if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g.test(args.toString())) {
+      paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g)[0];
     } else if (/--test_param_filter=[\ ]*[0-9]+x[0-9]+[\ ]*/g.test(args.toString())) {
       paramsContent = args.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0];
     }
@@ -208,10 +140,19 @@ cv.onRuntimeInitialized = () => {
       let paramsContent = paramsElement.value;
       genBenchmarkCase(paramsContent);
       if (totalCaseNum !== 0) {
-        runButton.setAttribute("disabled", "disabled");
-        runButton.setAttribute('class', 'btn btn-primary disabled');
-        runButton.innerHTML = "Running";
+        disableButton();
       }
     }
   }
-};
\ No newline at end of file
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_warpAffine.html b/modules/js/perf/perf_imgproc/perf_warpAffine.html
new file mode 100644
index 0000000000..53a0fd9d67
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_warpAffine.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h7>Image Processing</h7>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h7>warpAffine</h7>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (640x480, INTER_NEAREST, BORDER_CONSTANT)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_warpAffine.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_warpAffine.js b/modules/js/perf/perf_imgproc/perf_warpAffine.js
new file mode 100644
index 0000000000..c63cd60e61
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_warpAffine.js
@@ -0,0 +1,130 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const WarpAffineSize = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p];
+    const InterType = ["INTER_NEAREST", "INTER_LINEAR"];
+    const BorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE"];
+    const combiWarpAffine = combine(WarpAffineSize, InterType, BorderMode);
+
+    function addWarpAffineCase(suite, type) {
+        suite.add('warpAffine', function() {
+            cv.warpAffine(src, dst, warpMat, sz, interType, borderMode, borderColor);
+          }, {
+              'setup': function() {
+                let sz = this.params.size;
+                let interType = cv[this.params.interType];
+                let borderMode = cv[this.params.borderMode];
+                let srcSize = new cv.Size(512, 512);
+
+                let borderColor = cv.Scalar.all(150);
+                let src = new cv.Mat(srcSize, cv.CV_8UC4);
+                let dst = new cv.Mat(sz, cv.CV_8UC4);
+                fillGradient(cv, src);
+                if (borderMode == cv.BORDER_CONSTANT) {
+                  smoothBorder(cv, src, borderMode, 1);
+                }
+
+                let point = new cv.Point(src.cols/2.0, src.rows/2.0);
+                let warpMat = cv.getRotationMatrix2D(point, 30.0, 2.2);
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+                warpMat.delete();
+              }
+          });
+    }
+
+    function addWarpAffineModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let interType = combination[i][1];
+        let borderMode = combination[i][2];
+
+        let params = {size: size, interType:interType, borderMode:borderMode};
+        addKernelCase(suite, params, type, addWarpAffineCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"interType", value: "", reg:["/INTER\_\\w+/"], index:1});
+          paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2});
+          let locationList = decodeParams2Case(params, paramObjs, warpAffineCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addWarpAffineModeCase(suite, [warpAffineCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addWarpAffineModeCase(suite, combiWarpAffine, 0);
+      }
+      setBenchmarkSuite(suite, "warpAffine", currentCaseId);
+      log(`Running ${totalCaseNum} tests from warpAffine`);
+      suite.run({ 'async': true }); // run the benchmark
+    }
+
+    let warpAffineCombinations = [combiWarpAffine];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_warpPerspective.html b/modules/js/perf/perf_imgproc/perf_warpPerspective.html
new file mode 100644
index 0000000000..7fc4c89ad2
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_warpPerspective.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h7>Image Processing</h7>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h7>warpPerspective</h7>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the cases"/>  for example: (640x480, INTER_NEAREST, BORDER_CONSTANT)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_warpPerspective.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_warpPerspective.js b/modules/js/perf/perf_imgproc/perf_warpPerspective.js
new file mode 100644
index 0000000000..dcde2fb22c
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_warpPerspective.js
@@ -0,0 +1,143 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+function perf() {
+
+    console.log('opencv.js loaded');
+    if (isNodeJs) {
+      global.cv = cv;
+      global.combine = HelpFunc.combine;
+      global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+      global.cvSize = Base.getCvSize();
+    } else {
+      enableButton();
+      cvSize = getCvSize();
+    }
+    let totalCaseNum, currentCaseId;
+
+    const WarpPersSize = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p];
+    const InterType = ["INTER_NEAREST", "INTER_LINEAR"];
+    const BorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE"];
+    const combiWarpPers = combine(WarpPersSize, InterType, BorderMode);
+
+    function addWarpPerspectiveCase(suite, type) {
+        suite.add('warpPerspective', function() {
+            cv.warpPerspective(src, dst, warpMat, sz, interType, borderMode, borderColor);
+          }, {
+              'setup': function() {
+                let sz = this.params.size;
+                let interType = cv[this.params.interType];
+                let borderMode = cv[this.params.borderMode];
+                let srcSize = new cv.Size(512, 512);
+
+                let borderColor = cv.Scalar.all(150);
+                let src = new cv.Mat(srcSize, cv.CV_8UC4);
+                let dst = new cv.Mat(sz, cv.CV_8UC4);
+                fillGradient(cv, src);
+                if (borderMode == cv.BORDER_CONSTANT) {
+                  smoothBorder(cv, src, borderMode, 1);
+                }
+
+                let rotMat = cv.getRotationMatrix2D(new cv.Point(src.cols/2.0, src.rows/2.0), 30.0, 2.2);
+                let warpMat = new cv.Mat(3, 3, cv.CV_64FC1);
+
+                for (let r = 0; r < 2; r++) {       // copy rotMat's 2x3 affine part
+                  for (let c = 0; c < 3; c++) {
+                    let view = warpMat.doublePtr(r, c);
+                    view[0] = rotMat.doubleAt(r, c);
+                  }
+                }
+                let view = warpMat.doublePtr(2, 0); // then fill the perspective row
+                view[0] = 0.3 / sz.width;
+                view = warpMat.doublePtr(2, 1);
+                view[0] = 0.3 / sz.height;
+                view = warpMat.doublePtr(2, 2);
+                view[0] = 1; rotMat.delete();       // free the temporary rotMat
+                },
+              'teardown': function() {
+                src.delete();
+                dst.delete();
+                warpMat.delete();
+              }
+          });
+    }
+
+    function addWarpPerspectiveModeCase(suite, combination, type) {
+      totalCaseNum += combination.length;
+      for (let i = 0; i < combination.length; ++i) {
+        let size =  combination[i][0];
+        let interType = combination[i][1];
+        let borderMode = combination[i][2];
+
+        let params = {size: size, interType:interType, borderMode:borderMode};
+        addKernelCase(suite, params, type, addWarpPerspectiveCase);
+      }
+    }
+
+    function genBenchmarkCase(paramsContent) {
+      let suite = new Benchmark.Suite;
+      totalCaseNum = 0;
+      currentCaseId = 0;
+
+      if (/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) {
+          let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+          let paramObjs = [];
+          paramObjs.push({name:"size", value:"", reg:[""], index:0});
+          paramObjs.push({name:"interType", value: "", reg:["/INTER\_\\w+/"], index:1});
+          paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2});
+          let locationList = decodeParams2Case(params, paramObjs, warpPersCombinations);
+
+          for (let i = 0; i < locationList.length; i++){
+              let first = locationList[i][0];
+              let second = locationList[i][1];
+              addWarpPerspectiveModeCase(suite, [warpPersCombinations[first][second]], first);
+            }
+      } else {
+        log("no filter or getting invalid params, run all the cases");
+        addWarpPerspectiveModeCase(suite, combiWarpPers, 0);
+      }
+      setBenchmarkSuite(suite, "warpPerspective", currentCaseId);
+      log(`Running ${totalCaseNum} tests from warpPerspective`);
+      suite.run({ 'async': true }); // run the benchmark
+    }
+
+    let warpPersCombinations = [combiWarpPers];
+
+    if (isNodeJs) {
+      const args = process.argv.slice(2);
+      let paramsContent = '';
+      if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) {
+        paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0];
+      }
+      genBenchmarkCase(paramsContent);
+    } else {
+      runButton.onclick = function() {
+        let paramsContent = paramsElement.value;
+        genBenchmarkCase(paramsContent);
+        if (totalCaseNum !== 0) {
+          disableButton();
+        }
+      }
+    }
+};
+
+async function main() {
+  if (cv instanceof Promise) {
+    cv = await cv;
+    perf();
+  } else {
+    cv.onRuntimeInitialized = perf;
+  }
+}
+
+main();
\ No newline at end of file
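
The warpPerspective setup above promotes the 2x3 rotation matrix to a 3x3 homography by copying the affine part and appending a mild perspective row. A plain-JS sketch of the same construction, using the standard getRotationMatrix2D formula (the formula and helper names here are this note's own, not part of the patch):

```
function rotationMatrix2D(cx, cy, angleDeg, scale) {
  // Standard 2x3 rotation-with-scale about (cx, cy):
  // [[a, b, (1-a)*cx - b*cy], [-b, a, b*cx + (1-a)*cy]]
  const a = scale * Math.cos(angleDeg * Math.PI / 180);
  const b = scale * Math.sin(angleDeg * Math.PI / 180);
  return [[a, b, (1 - a) * cx - b * cy],
          [-b, a, b * cx + (1 - a) * cy]];
}

function toHomography(rot, width, height) {
  // Copy the 2x3 affine part and append the same perspective row as the setup above.
  return [rot[0].slice(), rot[1].slice(), [0.3 / width, 0.3 / height, 1]];
}

console.log(toHomography(rotationMatrix2D(256, 256, 30.0, 2.2), 640, 480));
```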
diff --git a/modules/js/src/loader.js b/modules/js/src/loader.js
new file mode 100644
index 0000000000..ea100e8601
--- /dev/null
+++ b/modules/js/src/loader.js
@@ -0,0 +1,96 @@
+async function loadOpenCV(paths, onloadCallback) {
+    let OPENCV_URL = "";
+    let asmPath = "";
+    let wasmPath = "";
+    let simdPath = "";
+    let threadsPath = "";
+    let threadsSimdPath = "";
+
+    if (!(paths instanceof Object)) {
+        throw new Error("The first input should be an object that specifies the paths to OpenCV.js");
+    }
+
+    if ("asm" in paths) {
+        asmPath = paths["asm"];
+    }
+
+    if ("wasm" in paths) {
+        wasmPath = paths["wasm"];
+    }
+
+    if ("threads" in paths) {
+        threadsPath = paths["threads"];
+    }
+
+    if ("simd" in paths) {
+        simdPath = paths["simd"];
+    }
+
+    if ("threadsSimd" in paths) {
+        threadsSimdPath = paths["threadsSimd"];
+    }
+
+    let wasmSupported = typeof WebAssembly !== 'undefined';
+    if (!wasmSupported && OPENCV_URL === "" && asmPath != "") {
+        OPENCV_URL = asmPath;
+        console.log("The OpenCV.js for Asm.js is loaded now");
+    } else if (!wasmSupported && asmPath == "") {
+        throw new Error("The browser only supports Asm.js, but the path of OpenCV.js for Asm.js is empty");
+    }
+
+    let simdSupported = wasmSupported ? await wasmFeatureDetect.simd() : false;
+    let threadsSupported = wasmSupported ? await wasmFeatureDetect.threads() : false;
+
+    if (simdSupported && threadsSupported && threadsSimdPath != "") {
+        OPENCV_URL = threadsSimdPath;
+        console.log("The OpenCV.js with simd and threads optimization is loaded now");
+    } else if (simdSupported && simdPath != "") {
+        if (threadsSupported && threadsSimdPath === "") {
+            console.log("The browser supports simd and threads, but the path of OpenCV.js with simd and threads optimization is empty");
+        }
+        OPENCV_URL = simdPath;
+        console.log("The OpenCV.js with simd optimization is loaded now.");
+    } else if (threadsSupported && threadsPath != "") {
+        if (simdSupported && threadsSimdPath === "") {
+            console.log("The browser supports simd and threads, but the path of OpenCV.js with simd and threads optimization is empty");
+        }
+        OPENCV_URL = threadsPath;
+        console.log("The OpenCV.js with threads optimization is loaded now");
+    } else if (wasmSupported && wasmPath != "") {
+        if(simdSupported && threadsSupported) {
+            console.log("The browser supports simd and threads, but the path of OpenCV.js with simd and threads optimization is empty");
+        }
+
+        if (simdSupported) {
+            console.log("The browser supports simd optimization, but the path of OpenCV.js with simd optimization is empty");
+        }
+
+        if (threadsSupported) {
+            console.log("The browser supports threads optimization, but the path of OpenCV.js with threads optimization is empty");
+        }
+
+        OPENCV_URL = wasmPath;
+        console.log("The OpenCV.js for wasm is loaded now");
+    } else if (wasmSupported) {
+        console.log("The browser supports wasm, but the path of OpenCV.js for wasm is empty");
+    }
+
+    if (OPENCV_URL === "") {
+        throw new Error("No available OpenCV.js, please check your paths");
+    }
+
+    let script = document.createElement('script');
+    script.setAttribute('async', '');
+    script.setAttribute('type', 'text/javascript');
+    script.addEventListener('load', () => {
+        onloadCallback();
+    });
+    script.addEventListener('error', () => {
+        console.log('Failed to load opencv.js');
+    });
+    script.src = OPENCV_URL;
+    let node = document.getElementsByTagName('script')[0];
+    if (node.src != OPENCV_URL) { // avoid inserting the loader script twice
+        node.parentNode.insertBefore(script, node);
+    }
+}
\ No newline at end of file
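
For context, a hedged usage sketch of the `loadOpenCV` helper added above. It assumes loader.js and the wasm-feature-detect library are already included via `<script>` tags, and the artifact paths below are hypothetical build outputs:

```
loadOpenCV({
  wasm: 'opencv_wasm.js',
  simd: 'opencv_simd.js',
  threads: 'opencv_threads.js',
  threadsSimd: 'opencv_threads_simd.js',
}, async () => {
  // Same readiness pattern as the perf files' main() above.
  if (cv instanceof Promise) {
    cv = await cv;
  } else {
    await new Promise((resolve) => { cv.onRuntimeInitialized = resolve; });
  }
  console.log('OpenCV.js is ready');
});
```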
diff --git a/platforms/js/build_js.py b/platforms/js/build_js.py
index fbeb1e4fb3..38e988a3bd 100644
--- a/platforms/js/build_js.py
+++ b/platforms/js/build_js.py
@@ -201,6 +201,9 @@ class Builder:
     def build_doc(self):
         execute(["make", "-j", str(multiprocessing.cpu_count()), "doxygen"])
 
+    def build_loader(self):
+        execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_loader"])
+
 
 #===================================================================================================
 
@@ -221,6 +224,7 @@ if __name__ == "__main__":
     parser.add_argument('--build_test', action="store_true", help="Build tests")
     parser.add_argument('--build_perf', action="store_true", help="Build performance tests")
     parser.add_argument('--build_doc', action="store_true", help="Build tutorials")
+    parser.add_argument('--build_loader', action="store_true", help="Build OpenCV.js loader")
     parser.add_argument('--clean_build_dir', action="store_true", help="Clean build dir")
     parser.add_argument('--skip_config', action="store_true", help="Skip cmake config")
     parser.add_argument('--config_only', action="store_true", help="Only do cmake config")
@@ -292,6 +296,11 @@ if __name__ == "__main__":
         log.info("=====")
         builder.build_doc()
 
+    if args.build_loader:
+        log.info("=====")
+        log.info("===== Building OpenCV.js loader")
+        log.info("=====")
+        builder.build_loader()
 
     log.info("=====")
     log.info("===== Build finished")
@@ -316,3 +325,8 @@ if __name__ == "__main__":
         opencvjs_tutorial_path = find_file("tutorial_js_root.html", os.path.join(builder.build_dir, "doc", "doxygen", "html"))
         if check_file(opencvjs_tutorial_path):
             log.info("OpenCV.js tutorials location: %s", opencvjs_tutorial_path)
+
+    if args.build_loader:
+        opencvjs_loader_path = os.path.join(builder.build_dir, "bin", "loader.js")
+        if check_file(opencvjs_loader_path):
+            log.info("OpenCV.js loader location: %s", opencvjs_loader_path)

From 5ac0712cf1f25af2224afd1776ca9476e39f85d8 Mon Sep 17 00:00:00 2001
From: masa-iwm <37230118+masa-iwm@users.noreply.github.com>
Date: Mon, 19 Oct 2020 06:22:06 +0900
Subject: [PATCH 033/152] Merge pull request #18593 from masa-iwm:master

Add support for thread-local DirectX (OpenCL interop) initialization

* support thread-local DirectX (OpenCL interop) initialization

* reflect reviews

* Remove verbose function prototype declarations

* Countermeasures for VC warnings. (declaration of 'platform' hides class member)

* core(directx): remove internal stuff from public headers
---
 modules/core/src/directx.cpp | 423 ++++++++++++++++++++---------------
 modules/core/src/directx.hpp |  23 ++
 modules/core/src/ocl.cpp     |  34 +++
 3 files changed, 295 insertions(+), 185 deletions(-)
 create mode 100644 modules/core/src/directx.hpp

diff --git a/modules/core/src/directx.cpp b/modules/core/src/directx.cpp
index c9bd1a4fa1..f028702d7f 100644
--- a/modules/core/src/directx.cpp
+++ b/modules/core/src/directx.cpp
@@ -49,6 +49,7 @@
 #ifdef HAVE_DIRECTX
 #include <vector>
 #include "directx.inc.hpp"
+#include "directx.hpp"
 #else // HAVE_DIRECTX
 #define NO_DIRECTX_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without DirectX support")
 #endif
@@ -234,12 +235,192 @@ int getTypeFromD3DFORMAT(const int iD3DFORMAT)
 #endif
 }
 
-namespace ocl {
-
 #if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
-static bool g_isDirect3DDevice9Ex = false; // Direct3DDevice9Ex or Direct3DDevice9 was used
+namespace internal {
+struct OpenCLDirectXImpl
+{
+    cl_platform_id platform_;
+
+    cl_platform_id initializedPlatform9 = NULL;
+    cl_platform_id initializedPlatform10 = NULL;
+    cl_platform_id initializedPlatform11 = NULL;
+public:
+    OpenCLDirectXImpl()
+        : platform_(0)
+    {
+    }
+
+    bool isDirect3DDevice9Ex = false; // Direct3DDevice9Ex or Direct3DDevice9 was used
+
+#ifdef HAVE_OPENCL_D3D11_NV
+    clCreateFromD3D11Texture2DNV_fn clCreateFromD3D11Texture2DNV = NULL;
+    clEnqueueAcquireD3D11ObjectsNV_fn clEnqueueAcquireD3D11ObjectsNV = NULL;
+    clEnqueueReleaseD3D11ObjectsNV_fn clEnqueueReleaseD3D11ObjectsNV = NULL;
+#endif
+    clCreateFromD3D11Texture2DKHR_fn clCreateFromD3D11Texture2DKHR = NULL;
+    clEnqueueAcquireD3D11ObjectsKHR_fn clEnqueueAcquireD3D11ObjectsKHR = NULL;
+    clEnqueueReleaseD3D11ObjectsKHR_fn clEnqueueReleaseD3D11ObjectsKHR = NULL;
+
+    clCreateFromD3D10Texture2DKHR_fn clCreateFromD3D10Texture2DKHR = NULL;
+    clEnqueueAcquireD3D10ObjectsKHR_fn clEnqueueAcquireD3D10ObjectsKHR = NULL;
+    clEnqueueReleaseD3D10ObjectsKHR_fn clEnqueueReleaseD3D10ObjectsKHR = NULL;
+
+    clCreateFromDX9MediaSurfaceKHR_fn clCreateFromDX9MediaSurfaceKHR = NULL;
+    clEnqueueAcquireDX9MediaSurfacesKHR_fn clEnqueueAcquireDX9MediaSurfacesKHR = NULL;
+    clEnqueueReleaseDX9MediaSurfacesKHR_fn clEnqueueReleaseDX9MediaSurfacesKHR = NULL;
+
+    cl_platform_id getPlatform()
+    {
+        if (!platform_)
+        {
+            CV_Assert(cv::ocl::haveOpenCL());
+
+            cl_device_id device = (cl_device_id)ocl::Device::getDefault().ptr();
+            CV_Assert(device);
+            cl_int status = clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform_), &platform_, NULL);
+            if (status != CL_SUCCESS)
+                CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platform corresponding to device");
+        }
+
+        return platform_;
+    }
+
+
+    bool initializeD3D11()
+    {
+        using namespace cv::ocl;
+        cl_platform_id platform = getPlatform();
+
+        bool useCLNVEXT = false;
+        size_t exts_len;
+        cl_int status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, 0, NULL, &exts_len);
+        if (status != CL_SUCCESS)
+            CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get length of CL_PLATFORM_EXTENSIONS");
+        cv::AutoBuffer<char> extensions(exts_len);
+        status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, exts_len, static_cast<void*>(extensions.data()), NULL);
+        if (status != CL_SUCCESS)
+            CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available CL_PLATFORM_EXTENSIONS");
+        bool is_support_cl_khr_d3d11_sharing = false;
+        if (strstr(extensions.data(), "cl_khr_d3d11_sharing"))
+            is_support_cl_khr_d3d11_sharing = true;
+#ifdef HAVE_OPENCL_D3D11_NV
+        bool is_support_cl_nv_d3d11_sharing = false;
+        if (strstr(extensions.data(), "cl_nv_d3d11_sharing"))
+            is_support_cl_nv_d3d11_sharing = true;
+        if (!is_support_cl_nv_d3d11_sharing && !is_support_cl_khr_d3d11_sharing)
+            CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions");
+#else
+        if (!is_support_cl_khr_d3d11_sharing)
+            CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions");
+#endif
+
+#ifdef HAVE_OPENCL_D3D11_NV
+        if (is_support_cl_nv_d3d11_sharing)
+        {
+            if (initializedPlatform11 != platform)
+            {
+                clCreateFromD3D11Texture2DNV = (clCreateFromD3D11Texture2DNV_fn)
+                    clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DNV");
+                clEnqueueAcquireD3D11ObjectsNV = (clEnqueueAcquireD3D11ObjectsNV_fn)
+                    clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsNV");
+                clEnqueueReleaseD3D11ObjectsNV = (clEnqueueReleaseD3D11ObjectsNV_fn)
+                    clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsNV");
+                initializedPlatform11 = platform;
+            }
+            if (clCreateFromD3D11Texture2DNV && clEnqueueAcquireD3D11ObjectsNV && clEnqueueReleaseD3D11ObjectsNV)
+            {
+                useCLNVEXT = true;
+            }
+        }
+        else
+#endif
+        {
+            if (is_support_cl_khr_d3d11_sharing)
+            {
+                if (initializedPlatform11 != platform)
+                {
+                    clCreateFromD3D11Texture2DKHR = (clCreateFromD3D11Texture2DKHR_fn)
+                        clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DKHR");
+                    clEnqueueAcquireD3D11ObjectsKHR = (clEnqueueAcquireD3D11ObjectsKHR_fn)
+                        clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsKHR");
+                    clEnqueueReleaseD3D11ObjectsKHR = (clEnqueueReleaseD3D11ObjectsKHR_fn)
+                        clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsKHR");
+                    initializedPlatform11 = platform;
+                }
+                if (!clCreateFromD3D11Texture2DKHR || !clEnqueueAcquireD3D11ObjectsKHR || !clEnqueueReleaseD3D11ObjectsKHR)
+                {
+                    CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D11");
+                }
+            }
+        }
+        return useCLNVEXT;
+    }
+
+    void initializeD3D9()
+    {
+        using namespace cv::ocl;
+        cl_platform_id platform = getPlatform();
+        if (initializedPlatform9 != platform)
+        {
+            clCreateFromDX9MediaSurfaceKHR = (clCreateFromDX9MediaSurfaceKHR_fn)
+                clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromDX9MediaSurfaceKHR");
+            clEnqueueAcquireDX9MediaSurfacesKHR = (clEnqueueAcquireDX9MediaSurfacesKHR_fn)
+                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireDX9MediaSurfacesKHR");
+            clEnqueueReleaseDX9MediaSurfacesKHR = (clEnqueueReleaseDX9MediaSurfacesKHR_fn)
+                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseDX9MediaSurfacesKHR");
+            initializedPlatform9 = platform;
+        }
+        if (!clCreateFromDX9MediaSurfaceKHR || !clEnqueueAcquireDX9MediaSurfacesKHR || !clEnqueueReleaseDX9MediaSurfacesKHR)
+        {
+            CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D9");
+        }
+    }
+
+    void initializeD3D10()
+    {
+        using namespace cv::ocl;
+        cl_platform_id platform = getPlatform();
+        if (initializedPlatform10 != platform)
+        {
+            clCreateFromD3D10Texture2DKHR = (clCreateFromD3D10Texture2DKHR_fn)
+                clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D10Texture2DKHR");
+            clEnqueueAcquireD3D10ObjectsKHR = (clEnqueueAcquireD3D10ObjectsKHR_fn)
+                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D10ObjectsKHR");
+            clEnqueueReleaseD3D10ObjectsKHR = (clEnqueueReleaseD3D10ObjectsKHR_fn)
+                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D10ObjectsKHR");
+            initializedPlatform10 = platform;
+        }
+        if (!clCreateFromD3D10Texture2DKHR || !clEnqueueAcquireD3D10ObjectsKHR || !clEnqueueReleaseD3D10ObjectsKHR)
+        {
+            CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D10");
+        }
+    }
+};
+
+OpenCLDirectXImpl* createDirectXImpl()
+{
+    return new OpenCLDirectXImpl();
+}
+void deleteDirectXImpl(OpenCLDirectXImpl** p)
+{
+    if (*p)
+    {
+        delete (*p);
+        *p = NULL;
+    }
+}
+OpenCLDirectXImpl& getImpl()
+{
+    OpenCLDirectXImpl* i = getDirectXImpl(ocl::Context::getDefault());
+    CV_Assert(i);
+    return *i;
+}
+}
+using namespace internal;
 #endif
 
+namespace ocl {
+
 Context& initializeContextFromD3D11Device(ID3D11Device* pD3D11Device)
 {
     CV_UNUSED(pD3D11Device);
@@ -715,7 +896,7 @@ Context& initializeContextFromDirect3DDevice9Ex(IDirect3DDevice9Ex* pDirect3DDev
         throw;
     }
     clExecCtx.bind();
-    g_isDirect3DDevice9Ex = true;
+    getImpl().isDirect3DDevice9Ex = true;
     return const_cast<Context&>(clExecCtx.getContext());
 #endif
 }
@@ -838,96 +1019,13 @@ Context& initializeContextFromDirect3DDevice9(IDirect3DDevice9* pDirect3DDevice9
         throw;
     }
     clExecCtx.bind();
-    g_isDirect3DDevice9Ex = false;
+    getImpl().isDirect3DDevice9Ex = false;
     return const_cast<Context&>(clExecCtx.getContext());
 #endif
 }
 
 } // namespace cv::ocl
 
-#if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
-
-#ifdef HAVE_OPENCL_D3D11_NV
-clCreateFromD3D11Texture2DNV_fn clCreateFromD3D11Texture2DNV = NULL;
-clEnqueueAcquireD3D11ObjectsNV_fn clEnqueueAcquireD3D11ObjectsNV = NULL;
-clEnqueueReleaseD3D11ObjectsNV_fn clEnqueueReleaseD3D11ObjectsNV = NULL;
-#endif
-clCreateFromD3D11Texture2DKHR_fn clCreateFromD3D11Texture2DKHR = NULL;
-clEnqueueAcquireD3D11ObjectsKHR_fn clEnqueueAcquireD3D11ObjectsKHR = NULL;
-clEnqueueReleaseD3D11ObjectsKHR_fn clEnqueueReleaseD3D11ObjectsKHR = NULL;
-
-static bool __OpenCLinitializeD3D11()
-{
-    using namespace cv::ocl;
-    static cl_platform_id initializedPlatform = NULL;
-    cl_platform_id platform = (cl_platform_id)Platform::getDefault().ptr();
-
-    bool useCLNVEXT = false;
-    size_t exts_len;
-    cl_int status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, 0, NULL, &exts_len);
-    if (status != CL_SUCCESS)
-        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get length of CL_PLATFORM_EXTENSIONS");
-    cv::AutoBuffer<char> extensions(exts_len);
-    status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, exts_len, static_cast<void*>(extensions.data()), NULL);
-    if (status != CL_SUCCESS)
-        CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available CL_PLATFORM_EXTENSIONS");
-    bool is_support_cl_khr_d3d11_sharing = false;
-    if (strstr(extensions.data(), "cl_khr_d3d11_sharing"))
-        is_support_cl_khr_d3d11_sharing = true;
-#ifdef HAVE_OPENCL_D3D11_NV
-    bool is_support_cl_nv_d3d11_sharing = false;
-    if (strstr(extensions.data(), "cl_nv_d3d11_sharing"))
-        is_support_cl_nv_d3d11_sharing = true;
-    if (!is_support_cl_nv_d3d11_sharing && !is_support_cl_khr_d3d11_sharing)
-        CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions");
-#else
-    if (!is_support_cl_khr_d3d11_sharing)
-        CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions");
-#endif
-
-#ifdef HAVE_OPENCL_D3D11_NV
-    if (is_support_cl_nv_d3d11_sharing)
-    {
-        if (initializedPlatform != platform)
-        {
-            clCreateFromD3D11Texture2DNV = (clCreateFromD3D11Texture2DNV_fn)
-                    clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DNV");
-            clEnqueueAcquireD3D11ObjectsNV = (clEnqueueAcquireD3D11ObjectsNV_fn)
-                    clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsNV");
-            clEnqueueReleaseD3D11ObjectsNV = (clEnqueueReleaseD3D11ObjectsNV_fn)
-                    clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsNV");
-            initializedPlatform = platform;
-        }
-        if (clCreateFromD3D11Texture2DNV && clEnqueueAcquireD3D11ObjectsNV && clEnqueueReleaseD3D11ObjectsNV)
-        {
-            useCLNVEXT = true;
-        }
-    }
-    else
-#endif
-    {
-        if (is_support_cl_khr_d3d11_sharing)
-        {
-            if (initializedPlatform != platform)
-            {
-                clCreateFromD3D11Texture2DKHR = (clCreateFromD3D11Texture2DKHR_fn)
-                        clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DKHR");
-                clEnqueueAcquireD3D11ObjectsKHR = (clEnqueueAcquireD3D11ObjectsKHR_fn)
-                        clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsKHR");
-                clEnqueueReleaseD3D11ObjectsKHR = (clEnqueueReleaseD3D11ObjectsKHR_fn)
-                        clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsKHR");
-                initializedPlatform = platform;
-            }
-            if (!clCreateFromD3D11Texture2DKHR || !clEnqueueAcquireD3D11ObjectsKHR || !clEnqueueReleaseD3D11ObjectsKHR)
-            {
-                CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D11");
-            }
-        }
-    }
-    return useCLNVEXT;
-}
-#endif // defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
-
 } // namespace directx
 
 
@@ -1009,20 +1107,21 @@ static void __convertToD3D11Texture2DKHR(InputArray src, ID3D11Texture2D* pD3D11
     using namespace cv::ocl;
     Context& ctx = Context::getDefault();
     cl_context context = (cl_context)ctx.ptr();
+    OpenCLDirectXImpl& impl = getImpl();
 
     cl_int status = 0;
     cl_mem clImage = 0;
 #ifdef HAVE_DIRECTX_NV12
     cl_mem clImageUV = 0;
 #endif
-    clImage = clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status);
+    clImage = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed");
 
 #ifdef HAVE_DIRECTX_NV12
     if(DXGI_FORMAT_NV12 == desc.Format)
     {
-        clImageUV = clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status);
+        clImageUV = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed");
     }
@@ -1030,21 +1129,21 @@ static void __convertToD3D11Texture2DKHR(InputArray src, ID3D11Texture2D* pD3D11
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
 
-    status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed");
 
 #ifdef HAVE_DIRECTX_NV12
     if(DXGI_FORMAT_NV12 == desc.Format)
     {
-        status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed");
 
         if(!ocl::ocl_convert_bgr_to_nv12(clBuffer, (int)u.step[0], u.cols, u.rows, clImage, clImageUV))
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_bgr_to_nv12 failed");
 
-        status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed");
     }
@@ -1060,7 +1159,7 @@ static void __convertToD3D11Texture2DKHR(InputArray src, ID3D11Texture2D* pD3D11
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
     }
 
-    status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed");
 
@@ -1107,40 +1206,41 @@ static void __convertToD3D11Texture2DNV(InputArray src, ID3D11Texture2D* pD3D11T
     using namespace cv::ocl;
     Context& ctx = Context::getDefault();
     cl_context context = (cl_context)ctx.ptr();
+    OpenCLDirectXImpl& impl = getImpl();
 
     cl_int status = 0;
     cl_mem clImage = 0;
 #ifdef HAVE_DIRECTX_NV12
     cl_mem clImageUV = 0;
 #endif
-    clImage = clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status);
+    clImage = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed");
 
 #ifdef HAVE_DIRECTX_NV12
     if (DXGI_FORMAT_NV12 == desc.Format)
     {
-        clImageUV = clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status);
+        clImageUV = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed");
     }
 #endif
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
-    status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed");
 
 #ifdef HAVE_DIRECTX_NV12
     if(DXGI_FORMAT_NV12 == desc.Format)
     {
-        status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed");
 
         if(!ocl::ocl_convert_bgr_to_nv12(clBuffer, (int)u.step[0], u.cols, u.rows, clImage, clImageUV))
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_bgr_to_nv12 failed");
 
-        status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed");
     }
@@ -1156,7 +1256,7 @@ static void __convertToD3D11Texture2DNV(InputArray src, ID3D11Texture2D* pD3D11T
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
     }
 
-    status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed");
 
@@ -1201,11 +1301,12 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out
     using namespace cv::ocl;
     Context& ctx = Context::getDefault();
     cl_context context = (cl_context)ctx.ptr();
+    OpenCLDirectXImpl& impl = getImpl();
 
     cl_int status = 0;
     cl_mem clImage = 0;
 
-    clImage = clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status);
+    clImage = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed");
 
@@ -1213,7 +1314,7 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out
     cl_mem clImageUV = 0;
     if(DXGI_FORMAT_NV12 == desc.Format)
     {
-        clImageUV = clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status);
+        clImageUV = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed");
     }
@@ -1221,21 +1322,21 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
 
-    status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed");
 
 #ifdef HAVE_DIRECTX_NV12
     if(DXGI_FORMAT_NV12 == desc.Format)
     {
-        status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed");
 
         if(!ocl::ocl_convert_nv12_to_bgr(clImage, clImageUV, clBuffer, (int)u.step[0], u.cols, u.rows))
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_nv12_to_bgr failed");
 
-        status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed");
     }
@@ -1251,7 +1352,7 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
     }
 
-    status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed");
 
@@ -1296,11 +1397,12 @@ static void __convertFromD3D11Texture2DNV(ID3D11Texture2D* pD3D11Texture2D, Outp
     using namespace cv::ocl;
     Context& ctx = Context::getDefault();
     cl_context context = (cl_context)ctx.ptr();
+    OpenCLDirectXImpl& impl = getImpl();
 
     cl_int status = 0;
     cl_mem clImage = 0;
 
-    clImage = clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status);
+    clImage = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed");
 
@@ -1308,28 +1410,28 @@ static void __convertFromD3D11Texture2DNV(ID3D11Texture2D* pD3D11Texture2D, Outp
     cl_mem clImageUV = 0;
     if(DXGI_FORMAT_NV12 == desc.Format)
     {
-        clImageUV = clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status);
+        clImageUV = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed");
     }
 #endif
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
-    status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed");
 
 #ifdef HAVE_DIRECTX_NV12
     if (DXGI_FORMAT::DXGI_FORMAT_NV12 == desc.Format)
     {
-        status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed");
 
         if (!ocl::ocl_convert_nv12_to_bgr(clImage, clImageUV, clBuffer, (int)u.step[0], u.cols, u.rows))
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_nv12_to_bgr failed");
 
-        status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
+        status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL);
         if (status != CL_SUCCESS)
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed");
     }
@@ -1345,7 +1447,7 @@ static void __convertFromD3D11Texture2DNV(ID3D11Texture2D* pD3D11Texture2D, Outp
             CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
     }
 
-    status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed");
 
@@ -1377,7 +1479,7 @@ void convertToD3D11Texture2D(InputArray src, ID3D11Texture2D* pD3D11Texture2D)
     NO_OPENCL_SUPPORT_ERROR;
 #else
 
-    bool useCLNVEXT = __OpenCLinitializeD3D11();
+    bool useCLNVEXT = getImpl().initializeD3D11();
     if(!useCLNVEXT){
         __convertToD3D11Texture2DKHR(src,pD3D11Texture2D);
     }
@@ -1399,7 +1501,7 @@ void convertFromD3D11Texture2D(ID3D11Texture2D* pD3D11Texture2D, OutputArray dst
     NO_OPENCL_SUPPORT_ERROR;
 #else
 
-    bool useCLNVEXT = __OpenCLinitializeD3D11();
+    bool useCLNVEXT = getImpl().initializeD3D11();
     if(!useCLNVEXT){
         __convertFromD3D11Texture2DKHR(pD3D11Texture2D,dst);
     }
@@ -1412,40 +1514,14 @@ void convertFromD3D11Texture2D(ID3D11Texture2D* pD3D11Texture2D, OutputArray dst
 #endif
 }
 
-#if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
-clCreateFromD3D10Texture2DKHR_fn clCreateFromD3D10Texture2DKHR = NULL;
-clEnqueueAcquireD3D10ObjectsKHR_fn clEnqueueAcquireD3D10ObjectsKHR = NULL;
-clEnqueueReleaseD3D10ObjectsKHR_fn clEnqueueReleaseD3D10ObjectsKHR = NULL;
-
-static void __OpenCLinitializeD3D10()
-{
-    using namespace cv::ocl;
-    static cl_platform_id initializedPlatform = NULL;
-    cl_platform_id platform = (cl_platform_id)Platform::getDefault().ptr();
-    if (initializedPlatform != platform)
-    {
-        clCreateFromD3D10Texture2DKHR = (clCreateFromD3D10Texture2DKHR_fn)
-                clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D10Texture2DKHR");
-        clEnqueueAcquireD3D10ObjectsKHR = (clEnqueueAcquireD3D10ObjectsKHR_fn)
-                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D10ObjectsKHR");
-        clEnqueueReleaseD3D10ObjectsKHR = (clEnqueueReleaseD3D10ObjectsKHR_fn)
-                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D10ObjectsKHR");
-        initializedPlatform = platform;
-    }
-    if (!clCreateFromD3D10Texture2DKHR || !clEnqueueAcquireD3D10ObjectsKHR || !clEnqueueReleaseD3D10ObjectsKHR)
-    {
-        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D10");
-    }
-}
-#endif // defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
-
 void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D)
 {
     CV_UNUSED(src); CV_UNUSED(pD3D10Texture2D);
 #if !defined(HAVE_DIRECTX)
     NO_DIRECTX_SUPPORT_ERROR;
 #elif defined(HAVE_OPENCL)
-    __OpenCLinitializeD3D10();
+    OpenCLDirectXImpl& impl = getImpl();
+    impl.initializeD3D10();
 
     D3D10_TEXTURE2D_DESC desc = { 0 };
     pD3D10Texture2D->GetDesc(&desc);
@@ -1468,14 +1544,14 @@ void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D)
     CV_Assert(u.isContinuous());
 
     cl_int status = 0;
-    cl_mem clImage = clCreateFromD3D10Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D10Texture2D, 0, &status);
+    cl_mem clImage = impl.clCreateFromD3D10Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D10Texture2D, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D10Texture2DKHR failed");
 
     cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
-    status = clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D10ObjectsKHR failed");
     size_t offset = 0; // TODO
@@ -1484,7 +1560,7 @@ void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D)
     status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
-    status = clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D10ObjectsKHR failed");
 
@@ -1506,7 +1582,8 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst
 #if !defined(HAVE_DIRECTX)
     NO_DIRECTX_SUPPORT_ERROR;
 #elif defined(HAVE_OPENCL)
-    __OpenCLinitializeD3D10();
+    OpenCLDirectXImpl& impl = getImpl();
+    impl.initializeD3D10();
 
     D3D10_TEXTURE2D_DESC desc = { 0 };
     pD3D10Texture2D->GetDesc(&desc);
@@ -1527,14 +1604,14 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst
     CV_Assert(u.isContinuous());
 
     cl_int status = 0;
-    cl_mem clImage = clCreateFromD3D10Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D10Texture2D, 0, &status);
+    cl_mem clImage = impl.clCreateFromD3D10Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D10Texture2D, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D10Texture2DKHR failed");
 
     cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
-    status = clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D10ObjectsKHR failed");
     size_t offset = 0; // TODO
@@ -1543,7 +1620,7 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst
     status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
-    status = clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D10ObjectsKHR failed");
 
@@ -1560,32 +1637,6 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst
 #endif
 }
 
-#if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
-clCreateFromDX9MediaSurfaceKHR_fn clCreateFromDX9MediaSurfaceKHR = NULL;
-clEnqueueAcquireDX9MediaSurfacesKHR_fn clEnqueueAcquireDX9MediaSurfacesKHR = NULL;
-clEnqueueReleaseDX9MediaSurfacesKHR_fn clEnqueueReleaseDX9MediaSurfacesKHR = NULL;
-
-static void __OpenCLinitializeD3D9()
-{
-    using namespace cv::ocl;
-    static cl_platform_id initializedPlatform = NULL;
-    cl_platform_id platform = (cl_platform_id)Platform::getDefault().ptr();
-    if (initializedPlatform != platform)
-    {
-        clCreateFromDX9MediaSurfaceKHR = (clCreateFromDX9MediaSurfaceKHR_fn)
-                clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromDX9MediaSurfaceKHR");
-        clEnqueueAcquireDX9MediaSurfacesKHR = (clEnqueueAcquireDX9MediaSurfacesKHR_fn)
-                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireDX9MediaSurfacesKHR");
-        clEnqueueReleaseDX9MediaSurfacesKHR = (clEnqueueReleaseDX9MediaSurfacesKHR_fn)
-                clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseDX9MediaSurfacesKHR");
-        initializedPlatform = platform;
-    }
-    if (!clCreateFromDX9MediaSurfaceKHR || !clEnqueueAcquireDX9MediaSurfacesKHR || !clEnqueueReleaseDX9MediaSurfacesKHR)
-    {
-        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D9");
-    }
-}
-#endif // defined(HAVE_DIRECTX) && defined(HAVE_OPENCL)
 
 void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurface9, void* surfaceSharedHandle)
 {
@@ -1593,7 +1644,8 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa
 #if !defined(HAVE_DIRECTX)
     NO_DIRECTX_SUPPORT_ERROR;
 #elif defined(HAVE_OPENCL)
-    __OpenCLinitializeD3D9();
+    OpenCLDirectXImpl& impl = getImpl();
+    impl.initializeD3D9();
 
     D3DSURFACE_DESC desc;
     if (FAILED(pDirect3DSurface9->GetDesc(&desc)))
@@ -1620,8 +1672,8 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa
 
     cl_int status = 0;
     cl_dx9_surface_info_khr surfaceInfo = {pDirect3DSurface9, (HANDLE)surfaceSharedHandle};
-    cl_mem clImage = clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_WRITE_ONLY,
-            ocl::g_isDirect3DDevice9Ex ? CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR,
+    cl_mem clImage = impl.clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_WRITE_ONLY,
+        impl.isDirect3DDevice9Ex ? CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR,
             &surfaceInfo, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromDX9MediaSurfaceKHR failed");
@@ -1629,7 +1681,7 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa
     cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
-    status = clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireDX9MediaSurfacesKHR failed");
     size_t offset = 0; // TODO
@@ -1638,7 +1690,7 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa
     status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
-    status = clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseDX9MediaSurfacesKHR failed");
 
@@ -1661,7 +1713,8 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr
 #if !defined(HAVE_DIRECTX)
     NO_DIRECTX_SUPPORT_ERROR;
 #elif defined(HAVE_OPENCL)
-    __OpenCLinitializeD3D9();
+    OpenCLDirectXImpl& impl = getImpl();
+    impl.initializeD3D9();
 
     D3DSURFACE_DESC desc;
     if (FAILED(pDirect3DSurface9->GetDesc(&desc)))
@@ -1686,8 +1739,8 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr
 
     cl_int status = 0;
     cl_dx9_surface_info_khr surfaceInfo = {pDirect3DSurface9, (HANDLE)surfaceSharedHandle};
-    cl_mem clImage = clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_READ_ONLY,
-            ocl::g_isDirect3DDevice9Ex ? CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR,
+    cl_mem clImage = impl.clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_READ_ONLY,
+            impl.isDirect3DDevice9Ex ? CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR,
             &surfaceInfo, 0, &status);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromDX9MediaSurfaceKHR failed");
@@ -1695,7 +1748,7 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr
     cl_mem clBuffer = (cl_mem)u.handle(ACCESS_WRITE);
 
     cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
-    status = clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireDX9MediaSurfacesKHR failed");
     size_t offset = 0; // TODO
@@ -1704,7 +1757,7 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr
     status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
-    status = clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
+    status = impl.clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL);
     if (status != CL_SUCCESS)
         CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseDX9MediaSurfacesKHR failed");
 
diff --git a/modules/core/src/directx.hpp b/modules/core/src/directx.hpp
new file mode 100644
index 0000000000..9f23352d4d
--- /dev/null
+++ b/modules/core/src/directx.hpp
@@ -0,0 +1,23 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_CORE_SRC_DIRECTX_HPP
+#define OPENCV_CORE_SRC_DIRECTX_HPP
+
+#ifndef HAVE_DIRECTX
+#error Invalid build configuration
+#endif
+
+namespace cv {
+namespace directx {
+namespace internal {
+
+struct OpenCLDirectXImpl;
+OpenCLDirectXImpl* createDirectXImpl();
+void deleteDirectXImpl(OpenCLDirectXImpl**);
+OpenCLDirectXImpl* getDirectXImpl(ocl::Context& ctx);
+
+}}} // namespace cv::directx::internal
+
+#endif  // OPENCV_CORE_SRC_DIRECTX_HPP
diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp
index 0a82424ba1..a9bd974b9a 100644
--- a/modules/core/src/ocl.cpp
+++ b/modules/core/src/ocl.cpp
@@ -113,6 +113,10 @@
 
 #include "opencv2/core/opencl/runtime/opencl_core.hpp"
 
+#ifdef HAVE_DIRECTX
+#include "directx.hpp"
+#endif
+
 #ifdef HAVE_OPENCL_SVM
 #include "opencv2/core/opencl/runtime/opencl_svm_20.hpp"
 #include "opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp"
@@ -2327,6 +2331,9 @@ protected:
         , contextId(CV_XADD(&g_contextId, 1))
         , configuration(configuration_)
         , handle(0)
+#ifdef HAVE_DIRECTX
+        , p_directx_impl(0)
+#endif
 #ifdef HAVE_OPENCL_SVM
         , svmInitialized(false)
 #endif
@@ -2352,6 +2359,9 @@ protected:
                 handle = NULL;
             }
             devices.clear();
+#ifdef HAVE_DIRECTX
+            directx::internal::deleteDirectXImpl(&p_directx_impl);
+#endif
         }
 
         {
@@ -2658,6 +2668,19 @@ public:
         return *bufferPoolHostPtr_.get();
     }
 
+#ifdef HAVE_DIRECTX
+    directx::internal::OpenCLDirectXImpl* p_directx_impl;
+
+    directx::internal::OpenCLDirectXImpl* getDirectXImpl()
+    {
+        if (!p_directx_impl)
+        {
+            p_directx_impl = directx::internal::createDirectXImpl();
+        }
+        return p_directx_impl;
+    }
+#endif
+
 #ifdef HAVE_OPENCL_SVM
     bool svmInitialized;
     bool svmAvailable;
@@ -7286,4 +7309,15 @@ uint64 Timer::durationNS() const
 
 }} // namespace
 
+#ifdef HAVE_DIRECTX
+namespace cv { namespace directx { namespace internal {
+OpenCLDirectXImpl* getDirectXImpl(ocl::Context& ctx)
+{
+    ocl::Context::Impl* i = ctx.getImpl();
+    CV_Assert(i);
+    return i->getDirectXImpl();
+}
+}}} // namespace cv::directx::internal
+#endif
+
 #endif // HAVE_OPENCL

From dcfa23d5d2c3f158191b13c9004f33f87d3c397c Mon Sep 17 00:00:00 2001
From: Dmitry Budnikov <Dmitry.Budnikov@intel.com>
Date: Mon, 19 Oct 2020 13:46:23 +0300
Subject: [PATCH 034/152] KW fixes

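KW here presumably refers to Klocwork static-analysis findings: the MediaFrame
View gets its copy assignment deleted to match the already-deleted copy
constructor, and the test counters are initialized to distinct sentinels. A
tiny illustration of the sentinel rationale (illustration only, not test code):

```
int main()
{
    int out_cnz_gapi = -1;  // sentinel A
    int out_cnz_ocv  = -2;  // sentinel B
    // If neither code path writes its output, the values still differ, so an
    // equality assertion fails loudly instead of passing on identical garbage.
    return out_cnz_gapi == out_cnz_ocv ? 1 : 0;
}
```
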
---
 modules/gapi/include/opencv2/gapi/media.hpp      | 1 +
 modules/gapi/test/common/gapi_core_tests_inl.hpp | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/gapi/include/opencv2/gapi/media.hpp b/modules/gapi/include/opencv2/gapi/media.hpp
index a7fe258757..f27cb80913 100644
--- a/modules/gapi/include/opencv2/gapi/media.hpp
+++ b/modules/gapi/include/opencv2/gapi/media.hpp
@@ -51,6 +51,7 @@ public:
     View(Ptrs&& ptrs, Strides&& strs, Callback &&cb = [](){});
     View(const View&) = delete;
     View(View&&) = default;
+    View& operator = (const View&) = delete;
     ~View();
 
     Ptrs    ptr;
diff --git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp
index e350a14e65..e11324f070 100644
--- a/modules/gapi/test/common/gapi_core_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp
@@ -618,7 +618,8 @@ TEST_P(SumTest, AccuracyTest)
 #undef countNonZero
 TEST_P(CountNonZeroTest, AccuracyTest)
 {
-    int out_cnz_gapi, out_cnz_ocv;
+    int out_cnz_gapi = -1;
+    int out_cnz_ocv = -2;
 
     // G-API code //////////////////////////////////////////////////////////////
     cv::GMat in;

From 49d5960a3275f58a7f98afecb2065c6368e914c2 Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Mon, 19 Oct 2020 14:19:17 +0300
Subject: [PATCH 035/152] Fix namespace for OCVCallHelper

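The unqualified `detail::` was resolved relative to the nearest enclosing
namespace at the point of use, so it can pick up the wrong `detail` when these
templates are reached from a scope that has its own. A self-contained
illustration of the lookup pitfall (not OpenCV code):

```
namespace cv { namespace detail {
struct Helper { static int id() { return 1; } };
}} // namespace cv::detail

namespace cv { namespace gapi { namespace cpu {
namespace detail {
struct Helper { static int id() { return 2; } };
} // namespace detail

int which()
{
    int a = detail::Helper::id();      // nearest enclosing detail: returns 2
    int b = cv::detail::Helper::id();  // fully qualified, as this fix does: 1
    return a * 10 + b;                 // 21
}
}}} // namespace cv::gapi::cpu

int main() { return cv::gapi::cpu::which() == 21 ? 0 : 1; }
```
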
---
 modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
index ef67930909..741fbe18f0 100644
--- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
+++ b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
@@ -443,7 +443,7 @@ struct OCVStCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...>> :
 template<class Impl, class K>
 class GCPUKernelImpl: public cv::detail::KernelTag
 {
-    using CallHelper = detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
+    using CallHelper = cv::detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
 
 public:
     using API = K;
@@ -497,7 +497,7 @@ private:
 template<typename K, typename Callable>
 gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c)
 {
-    using P = detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
+    using P = cv::detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
     return GOCVFunctor{ K::id()
                       , &K::getOutMeta
                       , std::bind(&P::callFunctor, std::placeholders::_1, std::ref(c))
@@ -507,7 +507,7 @@ gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c)
 template<typename K, typename Callable>
 gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(const Callable& c)
 {
-    using P = detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
+    using P = cv::detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
     return GOCVFunctor{ K::id()
                       , &K::getOutMeta
                       , std::bind(&P::callFunctor, std::placeholders::_1, c)

From bf49149c97fff299384e48831767d7d2ef7badd8 Mon Sep 17 00:00:00 2001
From: Daniel Motilla <daniel.motilla@sony.com>
Date: Mon, 19 Oct 2020 14:35:03 +0200
Subject: [PATCH 036/152] Enable imshow to take GpuMat inputs in Python

---
 modules/python/src2/hdr_parser.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py
index a486e0b71a..3a977e8605 100755
--- a/modules/python/src2/hdr_parser.py
+++ b/modules/python/src2/hdr_parser.py
@@ -958,7 +958,9 @@ class CppHeaderParser(object):
                         else:
                             decls.append(decl)
 
-                            if self._generate_gpumat_decls and "cv.cuda" in decl[0]:
+                            if self._generate_gpumat_decls and ("cv.cuda" in decl[0] or decl[0] in [
+                                "cv.imshow", # https://github.com/opencv/opencv/issues/18553
+                            ]):
                                 # If function takes as one of arguments Mat or vector<Mat> - we want to create the
                                 # same declaration working with GpuMat
                                 args = decl[3]

From b87f7a625eb0cd5239a27f1d15c4ce791d0951f4 Mon Sep 17 00:00:00 2001
From: Nikolai <samox2@yandex.ru>
Date: Mon, 19 Oct 2020 20:39:19 +0300
Subject: [PATCH 037/152] Merge pull request #18426 from
 Varvrar:add-HEVC-codec-iOS-Mac

* Add HEVC (H.265) codec to iOS/Mac VideoWriter

* Update cap_avfoundation_mac.mm

add CV_FOURCC('h','v','c','1') for HEVC codec

* Update cap_avfoundation.mm

add CV_FOURCC('h','v','c','1') for HEVC codec

* feat: add availability check for HEVC codec on iOS and OS X

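A minimal writer-side sketch (not part of this patch) of requesting the new
codec: the 'hvc1' FourCC selects AVVideoCodecTypeHEVC on iOS 11+ / macOS
10.13+ and falls back as shown in the diffs below.

```
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::Mat frame(480, 640, CV_8UC3, cv::Scalar::all(0));
    cv::VideoWriter writer("out.mov",
                           cv::VideoWriter::fourcc('h', 'v', 'c', '1'),  // HEVC
                           30.0, frame.size());
    if (!writer.isOpened())
        return 1;  // e.g. HEVC not available on this OS version
    for (int i = 0; i < 30; ++i)
        writer.write(frame);
    return 0;
}
```
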
Co-authored-by: Vadim Levin <vadim.levin@xperience.ai>
---
 modules/videoio/src/cap_avfoundation.mm     | 16 +++++++++++++---
 modules/videoio/src/cap_avfoundation_mac.mm | 14 ++++++++++++--
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/modules/videoio/src/cap_avfoundation.mm b/modules/videoio/src/cap_avfoundation.mm
index 8ac8d85d8d..19f54be8c3 100644
--- a/modules/videoio/src/cap_avfoundation.mm
+++ b/modules/videoio/src/cap_avfoundation.mm
@@ -36,6 +36,7 @@
 #include "opencv2/imgproc.hpp"
 #include "cap_interface.hpp"
 #include <iostream>
+#include <Availability.h>
 #import <AVFoundation/AVFoundation.h>
 #import <Foundation/NSException.h>
 
@@ -1255,16 +1256,25 @@ CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const char* filename, int
         //exception;
     }
 
-    // Two codec supported AVVideoCodecH264 AVVideoCodecJPEG
+    // Three codecs supported: AVVideoCodecH264, AVVideoCodecJPEG, AVVideoCodecTypeHEVC
     // On iPhone 3G H264 is not supported.
     if (fourcc == CV_FOURCC('J','P','E','G') || fourcc == CV_FOURCC('j','p','e','g') ||
-            fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g') ){
+            fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g')){
         codec = [AVVideoCodecJPEG copy]; // Use JPEG codec if specified, otherwise H264
     }else if(fourcc == CV_FOURCC('H','2','6','4') || fourcc == CV_FOURCC('a','v','c','1')){
             codec = [AVVideoCodecH264 copy];
+// Available since iOS 11
+#if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 110000
+    }else if(fourcc == CV_FOURCC('H','2','6','5') || fourcc == CV_FOURCC('h','v','c','1') ||
+            fourcc == CV_FOURCC('H','E','V','C') || fourcc == CV_FOURCC('h','e','v','c')){
+        if (@available(iOS 11, *)) {
+            codec = [AVVideoCodecTypeHEVC copy];
+        } else {
+            codec = [AVVideoCodecH264 copy];
+        }
+#endif
     }else{
         codec = [AVVideoCodecH264 copy]; // default canonical H264.
-
     }
 
     //NSLog(@"Path: %@", path);
diff --git a/modules/videoio/src/cap_avfoundation_mac.mm b/modules/videoio/src/cap_avfoundation_mac.mm
index 011bc08466..ed966ceffa 100644
--- a/modules/videoio/src/cap_avfoundation_mac.mm
+++ b/modules/videoio/src/cap_avfoundation_mac.mm
@@ -1199,13 +1199,23 @@ CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const std::string &filena
         is_good = false;
     }
 
-    // Two codec supported AVVideoCodecH264 AVVideoCodecJPEG
+    // Three codecs supported: AVVideoCodecH264, AVVideoCodecJPEG, AVVideoCodecTypeHEVC
     // On iPhone 3G H264 is not supported.
     if (fourcc == CV_FOURCC('J','P','E','G') || fourcc == CV_FOURCC('j','p','e','g') ||
-            fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g') ){
+            fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g')){
         codec = [AVVideoCodecJPEG copy]; // Use JPEG codec if specified, otherwise H264
     }else if(fourcc == CV_FOURCC('H','2','6','4') || fourcc == CV_FOURCC('a','v','c','1')){
             codec = [AVVideoCodecH264 copy];
+    // Available since macOS 10.13
+#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101300
+    }else if(fourcc == CV_FOURCC('H','2','6','5') || fourcc == CV_FOURCC('h','v','c','1') ||
+            fourcc == CV_FOURCC('H','E','V','C') || fourcc == CV_FOURCC('h','e','v','c')){
+        if (@available(macOS 10.13, *)) {
+            codec = [AVVideoCodecTypeHEVC copy];
+        } else {
+            is_good = false;
+        }
+#endif
     }else{
         is_good = false;
     }

From 456af21d8bbe29341141b149360593811f608a59 Mon Sep 17 00:00:00 2001
From: Zhiyuan Chen <this@zyc.ai>
Date: Sun, 18 Oct 2020 22:33:45 +0800
Subject: [PATCH 038/152] fixes #18613

---
 samples/dnn/siamrpnpp.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/samples/dnn/siamrpnpp.py b/samples/dnn/siamrpnpp.py
index bb126b71e5..c7c49b1b85 100644
--- a/samples/dnn/siamrpnpp.py
+++ b/samples/dnn/siamrpnpp.py
@@ -234,10 +234,10 @@ class SiamRPNTracker:
         """
         Args:
             img(np.ndarray):    bgr based input image frame
-            bbox: (x,y,w,h):    bounding box
+            bbox: (x, y, w, h): bounding box
         """
-        x,y,h,w = bbox
-        self.center_pos = np.array([x + (h - 1) / 2, y + (w - 1) / 2])
+        x, y, w, h = bbox
+        self.center_pos = np.array([x + (w - 1) / 2, y + (h - 1) / 2])
         self.h = h
         self.w = w
         w_z = self.w + self.track_context_amount * np.add(h, w)

From 331b73c8e4cbbade998ab24ac45f3fd8f7e306db Mon Sep 17 00:00:00 2001
From: lizz <innerlee@users.noreply.github.com>
Date: Tue, 20 Oct 2020 12:09:03 +0800
Subject: [PATCH 039/152] Typo in docstring of distanceTransform

---
 modules/imgproc/include/opencv2/imgproc.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp
index d75f3bcffc..2739d28ff9 100644
--- a/modules/imgproc/include/opencv2/imgproc.hpp
+++ b/modules/imgproc/include/opencv2/imgproc.hpp
@@ -3500,7 +3500,7 @@ but also identifies the nearest connected component consisting of zero pixels
 (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
 component/pixel is stored in `labels(x, y)`. When labelType==#DIST_LABEL_CCOMP, the function
 automatically finds connected components of zero pixels in the input image and marks them with
-distinct labels. When labelType==#DIST_LABEL_CCOMP, the function scans through the input image and
+distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
 marks all the zero pixels with distinct labels.
 
 In this mode, the complexity is still linear. That is, the function provides a very fast way to

From 2669d8ce73e1ab82c5edd107a5179a54daed7997 Mon Sep 17 00:00:00 2001
From: Alexey Smirnov <alexey.smirnov@intel.com>
Date: Tue, 20 Oct 2020 23:58:54 +0300
Subject: [PATCH 040/152] Merge pull request #18584 from
 smirnov-alexey:as/rmat_s11n

[G-API]: Introduce RMat serialization API

* Introduce RMat serialization API

* Fix RunArgs deserialization

* Address review comments

* Export operators for GRunArg serialization

* Fix warning and add handling for RMat in bind()

* Update CMakeLists.txt

* G-API: RMat S11N -- probably fix the Windows warning
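
A hedged sketch of the new API from the adapter author's side (MyAdapter is
hypothetical, and the GRunArgs serialize() overload is assumed): the adapter
overrides the serialize()/deserialize() hooks added below, and the adapter
type is passed to deserialize<>() so RMat run-arguments can be reconstructed.

```
#include <opencv2/gapi/rmat.hpp>
#include <opencv2/gapi/s11n.hpp>

class MyAdapter final : public cv::RMat::Adapter {
    cv::Mat m_mat;
public:
    MyAdapter() = default;
    explicit MyAdapter(cv::Mat m) : m_mat(std::move(m)) {}
    cv::GMatDesc desc() const override { return cv::descr_of(m_mat); }
    cv::RMat::View access(cv::RMat::Access) override {
        return cv::RMat::View(desc(), m_mat.data, m_mat.step);
    }
    // The two hooks introduced by this patch:
    void serialize(cv::gapi::s11n::IOStream& os) override { os << m_mat; }
    void deserialize(cv::gapi::s11n::IIStream& is) override { is >> m_mat; }
};

// Round trip: RMat run-args survive serialization once the adapter is known.
cv::GRunArgs roundtrip(const cv::GRunArgs& args)
{
    const std::vector<char> bytes = cv::gapi::serialize(args);
    return cv::gapi::deserialize<cv::GRunArgs, MyAdapter>(bytes);
}
```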
---
 modules/gapi/include/opencv2/gapi/rmat.hpp    |  20 ++++
 modules/gapi/include/opencv2/gapi/s11n.hpp    | 112 +++++++++++++++++-
 modules/gapi/src/api/s11n.cpp                 |   6 +
 .../src/backends/common/serialization.cpp     |   6 +-
 .../src/backends/common/serialization.hpp     |  60 ----------
 modules/gapi/test/s11n/gapi_s11n_tests.cpp    |  63 ++++++++++
 6 files changed, 203 insertions(+), 64 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/rmat.hpp b/modules/gapi/include/opencv2/gapi/rmat.hpp
index 626e67e9ee..ff834b46b1 100644
--- a/modules/gapi/include/opencv2/gapi/rmat.hpp
+++ b/modules/gapi/include/opencv2/gapi/rmat.hpp
@@ -10,6 +10,16 @@
 #include <opencv2/gapi/gmat.hpp>
 #include <opencv2/gapi/own/exports.hpp>
 
+// Forward declaration
+namespace cv {
+namespace gapi {
+namespace s11n {
+    struct IOStream;
+    struct IIStream;
+} // namespace s11n
+} // namespace gapi
+} // namespace cv
+
 namespace cv {
 
 // "Remote Mat", a general class which provides an abstraction layer over the data
@@ -90,6 +100,12 @@ public:
         // the view when accessed for writing, to ensure that the data from the view
         // is transferred to the device when the view is destroyed
         virtual View access(Access) = 0;
+        virtual void serialize(cv::gapi::s11n::IOStream&) {
+            GAPI_Assert(false && "Generic serialize method should never be called for RMat adapter");
+        }
+        virtual void deserialize(cv::gapi::s11n::IIStream&) {
+            GAPI_Assert(false && "Generic deserialize method should never be called for RMat adapter");
+        }
     };
     using AdapterP = std::shared_ptr<Adapter>;
 
@@ -113,6 +129,10 @@ public:
         return dynamic_cast<T*>(m_adapter.get());
     }
 
+    void serialize(cv::gapi::s11n::IOStream& os) const {
+        m_adapter->serialize(os);
+    }
+
 private:
     AdapterP m_adapter = nullptr;
 };
diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp
index e8a8dbcab4..2fa4e51176 100644
--- a/modules/gapi/include/opencv2/gapi/s11n.hpp
+++ b/modules/gapi/include/opencv2/gapi/s11n.hpp
@@ -12,6 +12,7 @@
 #include <unordered_map>
 #include <opencv2/gapi/s11n/base.hpp>
 #include <opencv2/gapi/gcomputation.hpp>
+#include <opencv2/gapi/rmat.hpp>
 
 namespace cv {
 namespace gapi {
@@ -25,6 +26,9 @@ namespace detail {
 
     template<typename... Types>
     cv::GCompileArgs getCompileArgs(const std::vector<char> &p);
+
+    template<typename RMatAdapterType>
+    cv::GRunArgs getRunArgsWithRMats(const std::vector<char> &p);
 } // namespace detail
 
 GAPI_EXPORTS std::vector<char> serialize(const cv::GComputation &c);
@@ -59,6 +63,12 @@ typename std::enable_if<std::is_same<T, GCompileArgs>::value, GCompileArgs>::
 type deserialize(const std::vector<char> &p) {
     return detail::getCompileArgs<Types...>(p);
 }
+
+template<typename T, typename RMatAdapterType> inline
+typename std::enable_if<std::is_same<T, GRunArgs>::value, GRunArgs>::
+type deserialize(const std::vector<char> &p) {
+    return detail::getRunArgsWithRMats<RMatAdapterType>(p);
+}
 } // namespace gapi
 } // namespace cv
 
@@ -123,6 +133,27 @@ GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::Scalar &s);
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Mat &m);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::Mat &m);
 
+// FIXME: for GRunArgs serialization
+#if !defined(GAPI_STANDALONE)
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat &);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::UMat &);
+#endif // !defined(GAPI_STANDALONE)
+
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::RMat &r);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::RMat &r);
+
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::gapi::wip::IStreamSource::Ptr &);
+
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::VectorRef &);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::detail::VectorRef &);
+
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef &);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::detail::OpaqueRef &);
+
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::MediaFrame &);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::MediaFrame &);
+
 // Generic STL types ////////////////////////////////////////////////////////////////
 template<typename K, typename V>
 IOStream& operator<< (IOStream& os, const std::map<K, V> &m) {
@@ -184,6 +215,52 @@ IIStream& operator>> (IIStream& is, std::vector<T> &ts) {
     }
     return is;
 }
+
+// Generic: variant serialization
+namespace detail {
+template<typename V>
+IOStream& put_v(IOStream&, const V&, std::size_t) {
+    GAPI_Assert(false && "variant>>: requested index is invalid");
+};
+template<typename V, typename X, typename... Xs>
+IOStream& put_v(IOStream& os, const V& v, std::size_t x) {
+    return (x == 0u)
+        ? os << cv::util::get<X>(v)
+        : put_v<V, Xs...>(os, v, x-1);
+}
+template<typename V>
+IIStream& get_v(IIStream&, V&, std::size_t, std::size_t) {
+    GAPI_Assert(false && "variant<<: requested index is invalid");
+}
+template<typename V, typename X, typename... Xs>
+IIStream& get_v(IIStream& is, V& v, std::size_t i, std::size_t gi) {
+    if (i == gi) {
+        X x{};
+        is >> x;
+        v = V{std::move(x)};
+        return is;
+    } else return get_v<V, Xs...>(is, v, i+1, gi);
+}
+} // namespace detail
+
+template<typename... Ts>
+IOStream& operator<< (IOStream& os, const cv::util::variant<Ts...> &v) {
+    os << static_cast<uint32_t>(v.index());
+    return detail::put_v<cv::util::variant<Ts...>, Ts...>(os, v, v.index());
+}
+template<typename... Ts>
+IIStream& operator>> (IIStream& is, cv::util::variant<Ts...> &v) {
+    int idx = -1;
+    is >> idx;
+    GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts));
+    return detail::get_v<cv::util::variant<Ts...>, Ts...>(is, v, 0u, idx);
+}
+
+// FIXME: consider a better solution
+template<typename... Ts>
+void getRunArgByIdx (IIStream& is, cv::util::variant<Ts...> &v, uint32_t idx) {
+    is = detail::get_v<cv::util::variant<Ts...>, Ts...>(is, v, 0u, idx);
+}
 } // namespace s11n
 
 namespace detail
@@ -204,11 +281,27 @@ static GCompileArg exec(cv::gapi::s11n::IIStream& is, const std::string& tag) {
             cv::gapi::s11n::detail::S11N<T>::deserialize(is)
         };
     }
-
     return deserialize_arg<std::tuple<Types...>>::exec(is, tag);
 }
 };
 
+template<typename T> struct deserialize_runarg;
+
+template<typename RMatAdapterType>
+struct deserialize_runarg {
+static GRunArg exec(cv::gapi::s11n::IIStream& is, uint32_t idx) {
+    if (idx == GRunArg::index_of<RMat>()) {
+        auto ptr = std::make_shared<RMatAdapterType>();
+        ptr->deserialize(is);
+        return GRunArg { RMat(std::move(ptr)) };
+    } else { // non-RMat arg - use default deserialization
+        GRunArg arg;
+        getRunArgByIdx(is, arg, idx);
+        return arg;
+    }
+}
+};
+
 template<typename... Types>
 cv::GCompileArgs getCompileArgs(const std::vector<char> &p) {
     std::unique_ptr<cv::gapi::s11n::IIStream> pIs = cv::gapi::s11n::detail::getInStream(p);
@@ -225,6 +318,23 @@ cv::GCompileArgs getCompileArgs(const std::vector<char> &p) {
 
     return args;
 }
+
+template<typename RMatAdapterType>
+cv::GRunArgs getRunArgsWithRMats(const std::vector<char> &p) {
+    std::unique_ptr<cv::gapi::s11n::IIStream> pIs = cv::gapi::s11n::detail::getInStream(p);
+    cv::gapi::s11n::IIStream& is = *pIs;
+    cv::GRunArgs args;
+
+    uint32_t sz = 0;
+    is >> sz;
+    for (uint32_t i = 0; i < sz; ++i) {
+        uint32_t idx = 0;
+        is >> idx;
+        args.push_back(cv::gapi::detail::deserialize_runarg<RMatAdapterType>::exec(is, idx));
+    }
+
+    return args;
+}
 } // namespace detail
 } // namespace gapi
 } // namespace cv
diff --git a/modules/gapi/src/api/s11n.cpp b/modules/gapi/src/api/s11n.cpp
index 52c276fd5d..b6acf28ea4 100644
--- a/modules/gapi/src/api/s11n.cpp
+++ b/modules/gapi/src/api/s11n.cpp
@@ -79,6 +79,9 @@ cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results)
         case T::index_of<cv::detail::OpaqueRef>() :
             outputs.emplace_back(cv::util::get<cv::detail::OpaqueRef>(res_obj));
             break;
+        case cv::GRunArg::index_of<cv::RMat>() :
+            outputs.emplace_back(&cv::util::get<cv::RMat>(res_obj));
+            break;
         default:
             GAPI_Assert(false && "This value type is not supported!"); // ...maybe because of STANDALONE mode.
             break;
@@ -112,6 +115,9 @@ cv::GRunArg cv::gapi::bind(cv::GRunArgP &out)
     case T::index_of<cv::Scalar*>() :
         return cv::GRunArg(*cv::util::get<cv::Scalar*>(out));
 
+    case T::index_of<cv::RMat*>() :
+        return cv::GRunArg(*cv::util::get<cv::RMat*>(out));
+
     default:
         // ...maybe our types were extended
         GAPI_Assert(false && "This value type is UNKNOWN!");
diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp
index ca73d29ffb..2b23b33cc8 100644
--- a/modules/gapi/src/backends/common/serialization.cpp
+++ b/modules/gapi/src/backends/common/serialization.cpp
@@ -165,12 +165,12 @@ IOStream& operator<< (IOStream& os, const cv::Scalar &s) {
 IIStream& operator>> (IIStream& is, cv::Scalar& s) {
     return is >> s.val[0] >> s.val[1] >> s.val[2] >> s.val[3];
 }
-IOStream& operator<< (IOStream& os, const cv::RMat&) {
-    util::throw_error(std::logic_error("Serialization of RMat is not supported"));
+IOStream& operator<< (IOStream& os, const cv::RMat& mat) {
+    mat.serialize(os);
     return os;
 }
 IIStream& operator>> (IIStream& is, cv::RMat&) {
-    util::throw_error(std::logic_error("Serialization of RMat is not supported"));
+    util::throw_error(std::logic_error("operator>> for RMat should never be called"));
     return is;
 }
 
diff --git a/modules/gapi/src/backends/common/serialization.hpp b/modules/gapi/src/backends/common/serialization.hpp
index e2aa56c45b..a3134d84d2 100644
--- a/modules/gapi/src/backends/common/serialization.hpp
+++ b/modules/gapi/src/backends/common/serialization.hpp
@@ -88,26 +88,6 @@ GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::GArrayDesc &);
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GFrameDesc &);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::GFrameDesc &);
 
-#if !defined(GAPI_STANDALONE)
-GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat &);
-GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::UMat &);
-#endif // !defined(GAPI_STANDALONE)
-
-GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::RMat &r);
-GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::RMat &r);
-
-GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &);
-GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::gapi::wip::IStreamSource::Ptr &);
-
-GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::VectorRef &);
-GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::detail::VectorRef &);
-
-GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef &);
-GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::detail::OpaqueRef &);
-
-GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::MediaFrame &);
-GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::MediaFrame &);
-
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::RcDesc &rc);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::gimpl::RcDesc &rc);
 
@@ -178,46 +158,6 @@ GAPI_EXPORTS void serialize( IOStream& os
 GAPI_EXPORTS GSerialized deserialize(IIStream& is);
 GAPI_EXPORTS void reconstruct(const GSerialized &s, ade::Graph &g);
 
-// Generic: variant serialization //////////////////////////////////////////////
-namespace detail { // FIXME: breaks old code
-template<typename V>
-IOStream& put_v(IOStream&, const V&, std::size_t) {
-    GAPI_Assert(false && "variant>>: requested index is invalid");
-};
-template<typename V, typename X, typename... Xs>
-IOStream& put_v(IOStream& os, const V& v, std::size_t x) {
-    return (x == 0u)
-        ? os << cv::util::get<X>(v)
-        : put_v<V, Xs...>(os, v, x-1);
-}
-template<typename V>
-IIStream& get_v(IIStream&, V&, std::size_t, std::size_t) {
-    GAPI_Assert(false && "variant<<: requested index is invalid");
-}
-template<typename V, typename X, typename... Xs>
-IIStream& get_v(IIStream& is, V& v, std::size_t i, std::size_t gi) {
-    if (i == gi) {
-        X x{};
-        is >> x;
-        v = std::move(x);
-        return is;
-    } else return get_v<V, Xs...>(is, v, i+1, gi);
-}
-} // namespace detail FIXME: breaks old code
-
-template<typename... Ts>
-IOStream& operator<< (IOStream& os, const cv::util::variant<Ts...> &v) {
-    os << (uint32_t)v.index();
-    return detail::put_v<cv::util::variant<Ts...>, Ts...>(os, v, v.index());
-}
-template<typename... Ts>
-IIStream& operator>> (IIStream& is, cv::util::variant<Ts...> &v) {
-    int idx = -1;
-    is >> idx;
-    GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts));
-    return detail::get_v<cv::util::variant<Ts...>, Ts...>(is, v, 0u, idx);
-}
-
 // FIXME: Basic Stream implementaions //////////////////////////////////////////
 
 // Basic in-memory stream implementations.
diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
index 1a4faec12c..3fe632e449 100644
--- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp
+++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
@@ -1,6 +1,7 @@
 #include "../test_precomp.hpp"
 
 #include "backends/common/serialization.hpp"
+#include <opencv2/gapi/rmat.hpp>
 
 namespace {
     struct MyCustomType {
@@ -45,6 +46,35 @@ template<> struct CompileArgTag<MyCustomType> {
 } // namespace detail
 } // namespace cv
 
+namespace {
+class MyRMatAdapter : public cv::RMat::Adapter {
+    cv::Mat m_mat;
+    int m_value;
+    std::string m_str;
+public:
+    MyRMatAdapter() = default;
+    MyRMatAdapter(cv::Mat m, int value, const std::string& str)
+        : m_mat(m), m_value(value), m_str(str)
+    {}
+    virtual cv::RMat::View access(cv::RMat::Access) override {
+        // This test adapter exposes the same view for both R and W access
+        return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step);
+    }
+    virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); }
+    virtual void serialize(cv::gapi::s11n::IOStream& os) override {
+        os << m_value << m_str;
+    }
+    virtual void deserialize(cv::gapi::s11n::IIStream& is) override {
+        is >> m_value >> m_str;
+    }
+    int getVal() { return m_value; }
+    std::string getStr() { return m_str; }
+};
+} // anonymous namespace
+
 namespace opencv_test {
 
 struct S11N_Basic: public ::testing::Test {
@@ -460,6 +490,39 @@ TEST_F(S11N_Basic, Test_Bind_RunArgs_MatScalar) {
     }
 }
 
+TEST_F(S11N_Basic, Test_RunArg_RMat) {
+    cv::Mat mat = cv::Mat::eye(cv::Size(128, 64), CV_8UC3);
+    cv::RMat rmat = cv::make_rmat<MyRMatAdapter>(mat, 42, "It actually works");
+    auto v = cv::GRunArgs{ cv::GRunArg{ rmat } };
+
+    const std::vector<char> sargsin = cv::gapi::serialize(v);
+    cv::GRunArgs out = cv::gapi::deserialize<cv::GRunArgs, MyRMatAdapter>(sargsin);
+    cv::RMat out_mat = cv::util::get<cv::RMat>(out[0]);
+    auto adapter = out_mat.get<MyRMatAdapter>();
+    EXPECT_EQ(42, adapter->getVal());
+    EXPECT_EQ("It actually works", adapter->getStr());
+}
+
+TEST_F(S11N_Basic, Test_RunArg_RMat_Scalar_Mat) {
+    cv::Mat mat = cv::Mat::eye(cv::Size(128, 64), CV_8UC3);
+    cv::RMat rmat = cv::make_rmat<MyRMatAdapter>(mat, 42, "It actually works");
+    cv::Scalar sc(111);
+    auto v = cv::GRunArgs{ cv::GRunArg{ rmat }, cv::GRunArg{ sc }, cv::GRunArg{ mat } };
+
+    const std::vector<char> sargsin = cv::gapi::serialize(v);
+    cv::GRunArgs out = cv::gapi::deserialize<cv::GRunArgs, MyRMatAdapter>(sargsin);
+    cv::RMat out_rmat = cv::util::get<cv::RMat>(out[0]);
+    auto adapter = out_rmat.get<MyRMatAdapter>();
+    EXPECT_EQ(42, adapter->getVal());
+    EXPECT_EQ("It actually works", adapter->getStr());
+
+    cv::Scalar out_sc = cv::util::get<cv::Scalar>(out[1]);
+    EXPECT_EQ(sc, out_sc);
+
+    cv::Mat out_mat = cv::util::get<cv::Mat>(out[2]);
+    EXPECT_EQ(0, cv::norm(mat, out_mat));
+}
+
 namespace {
     template <cv::detail::OpaqueKind K, typename T>
     bool verifyOpaqueKind(T&& in) {

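The put_v/get_v helpers moved into the public header by this patch dispatch on a variant's runtime index by recursing over the alternative type pack: each step peels one alternative off and decrements the index until it reaches zero. A minimal standalone sketch of the same technique, written against std::variant and std::stringstream so it compiles in isolation (all names here are illustrative, not part of the patch):

```
#include <cassert>
#include <cstddef>
#include <sstream>
#include <string>
#include <variant>

// Base case: the requested index exceeded the pack -- must not happen.
template<typename V>
std::ostream& put_v(std::ostream& os, const V&, std::size_t) {
    assert(false && "variant<<: requested index is invalid");
    return os;
}
// Peel one alternative off the pack until the index reaches zero.
template<typename V, typename X, typename... Xs>
std::ostream& put_v(std::ostream& os, const V& v, std::size_t x) {
    return x == 0u ? (os << std::get<X>(v) << ' ')
                   : put_v<V, Xs...>(os, v, x - 1);
}

template<typename V>
std::istream& get_v(std::istream& is, V&, std::size_t, std::size_t) {
    assert(false && "variant>>: requested index is invalid");
    return is;
}
template<typename V, typename X, typename... Xs>
std::istream& get_v(std::istream& is, V& v, std::size_t i, std::size_t gi) {
    if (i == gi) { X x{}; is >> x; v = V{std::move(x)}; return is; }
    return get_v<V, Xs...>(is, v, i + 1, gi);
}

int main() {
    using Var = std::variant<int, double, std::string>;
    Var v = 2.5;                                             // index() == 1
    std::stringstream ss;
    ss << v.index() << ' ';                                  // write the index first...
    put_v<Var, int, double, std::string>(ss, v, v.index());  // ...then the value

    std::size_t idx = 0;
    ss >> idx;                                               // read the index back...
    Var w;
    get_v<Var, int, double, std::string>(ss, w, 0u, idx);    // ...then the value
    assert(w.index() == 1 && std::get<double>(w) == 2.5);
    return 0;
}
```

Writing the index ahead of the payload is what lets the reader pick the right alternative before touching the value; deserialize_runarg above relies on exactly this to special-case the RMat index before falling back to the generic path.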
From 510dc17c2e3f408de1349e342be4cda5cd6873f8 Mon Sep 17 00:00:00 2001
From: Anna Khakimova <anna.khakimova@intel.com>
Date: Wed, 21 Oct 2020 13:52:03 +0300
Subject: [PATCH 041/152] Merge pull request #18338 from
 anna-khakimova:ak/opt_arithm_kernel

Universal intrinsics implementation of the Add, Sub and Absdiff kernels

* Optimized the add, sub and absdiff kernels

* Avoided unused conditions

* Added conditions for tail processing (see the sketch below)
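The tail condition re-runs the last full vector, shifted left so that it ends exactly at `length`. Some lanes are then computed twice, which is only safe when the output does not alias the inputs (hence the `in1 != out && in2 != out` checks in the kernels). A minimal sketch of the pattern, assuming OpenCV's universal intrinsics (`opencv2/core/hal/intrin.hpp`, built with CV_SIMD); the `add_u8` name is illustrative:

```
#include <opencv2/core/hal/intrin.hpp>  // v_uint8, vx_load, vx_store

// Returns how many elements were processed; the caller finishes the
// remaining [result, length) range with a scalar loop.
int add_u8(const uchar in1[], const uchar in2[], uchar out[], int length)
{
    constexpr int nlanes = static_cast<int>(cv::v_uint8::nlanes);
    if (length < nlanes)
        return 0;  // not even one full vector: leave it all to the scalar loop

    int x = 0;
    for (; x <= length - nlanes; x += nlanes)
        cv::vx_store(&out[x], cv::vx_load(&in1[x]) + cv::vx_load(&in2[x]));

    // Unaligned tail: redo the last full vector so it ends at `length`,
    // overlapping lanes that were already stored. Out-of-place only.
    if (x < length && in1 != out && in2 != out)
    {
        x = length - nlanes;
        cv::vx_store(&out[x], cv::vx_load(&in1[x]) + cv::vx_load(&in2[x]));
        x = length;
    }
    return x;
}
```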
---
 .../gapi/src/backends/fluid/gfluidcore.cpp    | 423 ++++++++++++++++--
 1 file changed, 398 insertions(+), 25 deletions(-)

diff --git a/modules/gapi/src/backends/fluid/gfluidcore.cpp b/modules/gapi/src/backends/fluid/gfluidcore.cpp
index a6f8d56e4c..edc91f0179 100644
--- a/modules/gapi/src/backends/fluid/gfluidcore.cpp
+++ b/modules/gapi/src/backends/fluid/gfluidcore.cpp
@@ -151,6 +151,348 @@ GAPI_FLUID_KERNEL(GFluidAddW, cv::gapi::core::GAddW, false)
 
 enum Arithm { ARITHM_ABSDIFF, ARITHM_ADD, ARITHM_SUBTRACT, ARITHM_MULTIPLY, ARITHM_DIVIDE };
 
+#if CV_SIMD
+CV_ALWAYS_INLINE void absdiff_store(short out[], const v_int16& a, const v_int16& b, int x)
+{
+    vx_store(&out[x], v_absdiffs(a, b));
+}
+
+CV_ALWAYS_INLINE void absdiff_store(ushort out[], const v_uint16& a, const v_uint16& b, int x)
+{
+    vx_store(&out[x], v_absdiff(a, b));
+}
+
+CV_ALWAYS_INLINE void absdiff_store(uchar out[], const v_uint8& a, const v_uint8& b, int x)
+{
+    vx_store(&out[x], v_absdiff(a, b));
+}
+
+CV_ALWAYS_INLINE void absdiff_store(float out[], const v_float32& a, const v_float32& b, int x)
+{
+    vx_store(&out[x], v_absdiff(a, b));
+}
+
+template<typename T, typename VT>
+CV_ALWAYS_INLINE int absdiff_impl(const T in1[], const T in2[], T out[], int length)
+{
+    constexpr int nlanes = static_cast<int>(VT::nlanes);
+
+    if (length < nlanes)
+        return 0;
+
+    int x = 0;
+    for (;;)
+    {
+        for (; x <= length - nlanes; x += nlanes)
+        {
+            VT a = vx_load(&in1[x]);
+            VT b = vx_load(&in2[x]);
+            absdiff_store(out, a, b, x);
+        }
+
+        if (x < length && (in1 != out) && (in2 != out))
+        {
+            x = length - nlanes;
+            continue;  // process one more time (unaligned tail)
+        }
+        break;
+    }
+
+    return x;
+}
+
+template<typename T>
+CV_ALWAYS_INLINE int absdiff_simd(const T in1[], const T in2[], T out[], int length)
+{
+    if (std::is_same<T, uchar>::value)
+    {
+        return absdiff_impl<uchar, v_uint8>(reinterpret_cast<const uchar*>(in1),
+                                            reinterpret_cast<const uchar*>(in2),
+                                            reinterpret_cast<uchar*>(out), length);
+    }
+    else if (std::is_same<T, ushort>::value)
+    {
+        return absdiff_impl<ushort, v_uint16>(reinterpret_cast<const ushort*>(in1),
+                                              reinterpret_cast<const ushort*>(in2),
+                                              reinterpret_cast<ushort*>(out), length);
+    }
+    else if (std::is_same<T, short>::value)
+    {
+        return absdiff_impl<short, v_int16>(reinterpret_cast<const short*>(in1),
+                                            reinterpret_cast<const short*>(in2),
+                                            reinterpret_cast<short*>(out), length);
+    }
+    else if (std::is_same<T, float>::value)
+    {
+        return absdiff_impl<float, v_float32>(reinterpret_cast<const float*>(in1),
+                                              reinterpret_cast<const float*>(in2),
+                                              reinterpret_cast<float*>(out), length);
+    }
+
+    return 0;
+}
+
+template<typename T, typename VT>
+CV_ALWAYS_INLINE int add_simd_sametype(const T in1[], const T in2[], T out[], int length)
+{
+    constexpr int nlanes = static_cast<int>(VT::nlanes);
+
+    if (length < nlanes)
+        return 0;
+
+    int x = 0;
+    for (;;)
+    {
+        for (; x <= length - nlanes; x += nlanes)
+        {
+            VT a = vx_load(&in1[x]);
+            VT b = vx_load(&in2[x]);
+            vx_store(&out[x], a + b);
+        }
+
+        if (x < length && (in1 != out) && (in2 != out))
+        {
+            x = length - nlanes;
+            continue;  // process one more time (unaligned tail)
+        }
+        break;
+    }
+
+    return x;
+}
+
+template<typename SRC, typename DST>
+CV_ALWAYS_INLINE int add_simd(const SRC in1[], const SRC in2[], DST out[], int length)
+{
+    if (std::is_same<DST, float>::value && !std::is_same<SRC, float>::value)
+        return 0;
+
+    if (std::is_same<DST, SRC>::value)
+    {
+        if (std::is_same<DST, uchar>::value)
+        {
+            return add_simd_sametype<uchar, v_uint8>(reinterpret_cast<const uchar*>(in1),
+                                                     reinterpret_cast<const uchar*>(in2),
+                                                     reinterpret_cast<uchar*>(out), length);
+        }
+        else if (std::is_same<DST, short>::value)
+        {
+            return add_simd_sametype<short, v_int16>(reinterpret_cast<const short*>(in1),
+                                                     reinterpret_cast<const short*>(in2),
+                                                     reinterpret_cast<short*>(out), length);
+        }
+        else if (std::is_same<DST, float>::value)
+        {
+            return add_simd_sametype<float, v_float32>(reinterpret_cast<const float*>(in1),
+                                                       reinterpret_cast<const float*>(in2),
+                                                       reinterpret_cast<float*>(out), length);
+        }
+    }
+    else if (std::is_same<SRC, short>::value && std::is_same<DST, uchar>::value)
+    {
+        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);
+
+        if (length < nlanes)
+            return 0;
+
+        int x = 0;
+        for (;;)
+        {
+            for (; x <= length - nlanes; x += nlanes)
+            {
+                v_int16 a1 = vx_load(reinterpret_cast<const short*>(&in1[x]));
+                v_int16 a2 = vx_load(reinterpret_cast<const short*>(&in1[x + nlanes / 2]));
+                v_int16 b1 = vx_load(reinterpret_cast<const short*>(&in2[x]));
+                v_int16 b2 = vx_load(reinterpret_cast<const short*>(&in2[x + nlanes / 2]));
+
+                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(a1 + b1, a2 + b2));
+            }
+
+            if (x < length)
+            {
+                CV_DbgAssert((reinterpret_cast<const short*>(in1) != reinterpret_cast<const short*>(out)) &&
+                             (reinterpret_cast<const short*>(in2) != reinterpret_cast<const short*>(out)));
+                x = length - nlanes;
+                continue;  // process one more time (unaligned tail)
+            }
+            break;
+        }
+
+        return x;
+    }
+    else if (std::is_same<SRC, float>::value && std::is_same<DST, uchar>::value)
+    {
+        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);
+
+        if (length < nlanes)
+            return 0;
+
+        int x = 0;
+        for (;;)
+        {
+            for (; x <= length - nlanes; x += nlanes)
+            {
+                v_float32 a1 = vx_load(reinterpret_cast<const float*>(&in1[x]));
+                v_float32 a2 = vx_load(reinterpret_cast<const float*>(&in1[x + nlanes / 4]));
+                v_float32 a3 = vx_load(reinterpret_cast<const float*>(&in1[x + 2 * nlanes / 4]));
+                v_float32 a4 = vx_load(reinterpret_cast<const float*>(&in1[x + 3 * nlanes / 4]));
+
+                v_float32 b1 = vx_load(reinterpret_cast<const float*>(&in2[x]));
+                v_float32 b2 = vx_load(reinterpret_cast<const float*>(&in2[x + nlanes / 4]));
+                v_float32 b3 = vx_load(reinterpret_cast<const float*>(&in2[x + 2 * nlanes / 4]));
+                v_float32 b4 = vx_load(reinterpret_cast<const float*>(&in2[x + 3 * nlanes / 4]));
+
+                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(v_pack(v_round(a1 + b1), v_round(a2 + b2)),
+                                                                     v_pack(v_round(a3 + b3), v_round(a4 + b4))));
+            }
+
+            if (x < length)
+            {
+                CV_DbgAssert((reinterpret_cast<const float*>(in1) != reinterpret_cast<const float*>(out)) &&
+                             (reinterpret_cast<const float*>(in2) != reinterpret_cast<const float*>(out)));
+                x = length - nlanes;
+                continue;  // process one more time (unaligned tail)
+            }
+            break;
+        }
+
+        return x;
+    }
+
+    return 0;
+}
+
+template<typename T, typename VT>
+CV_ALWAYS_INLINE int sub_simd_sametype(const T in1[], const T in2[], T out[], int length)
+{
+    constexpr int nlanes = static_cast<int>(VT::nlanes);
+
+    if (length < nlanes)
+        return 0;
+
+    int x = 0;
+    for (;;)
+    {
+        for (; x <= length - nlanes; x += nlanes)
+        {
+            VT a = vx_load(&in1[x]);
+            VT b = vx_load(&in2[x]);
+            vx_store(&out[x], a - b);
+        }
+
+        if (x < length && (in1 != out) && (in2 != out))
+        {
+            x = length - nlanes;
+            continue;  // process one more time (unaligned tail)
+        }
+        break;
+    }
+
+    return x;
+}
+
+template<typename SRC, typename DST>
+CV_ALWAYS_INLINE int sub_simd(const SRC in1[], const SRC in2[], DST out[], int length)
+{
+    if (std::is_same<DST, float>::value && !std::is_same<SRC, float>::value)
+        return 0;
+
+    if (std::is_same<DST, SRC>::value)
+    {
+        if (std::is_same<DST, uchar>::value)
+        {
+            return sub_simd_sametype<uchar, v_uint8>(reinterpret_cast<const uchar*>(in1),
+                                                     reinterpret_cast<const uchar*>(in2),
+                                                     reinterpret_cast<uchar*>(out), length);
+        }
+        else if (std::is_same<DST, short>::value)
+        {
+            return sub_simd_sametype<short, v_int16>(reinterpret_cast<const short*>(in1),
+                                                     reinterpret_cast<const short*>(in2),
+                                                     reinterpret_cast<short*>(out), length);
+        }
+        else if (std::is_same<DST, float>::value)
+        {
+            return sub_simd_sametype<float, v_float32>(reinterpret_cast<const float*>(in1),
+                                                       reinterpret_cast<const float*>(in2),
+                                                       reinterpret_cast<float*>(out), length);
+        }
+    }
+    else if (std::is_same<SRC, short>::value && std::is_same<DST, uchar>::value)
+    {
+        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);
+
+        if (length < nlanes)
+            return 0;
+
+        int x = 0;
+        for (;;)
+        {
+            for (; x <= length - nlanes; x += nlanes)
+            {
+                v_int16 a1 = vx_load(reinterpret_cast<const short*>(&in1[x]));
+                v_int16 a2 = vx_load(reinterpret_cast<const short*>(&in1[x + nlanes / 2]));
+                v_int16 b1 = vx_load(reinterpret_cast<const short*>(&in2[x]));
+                v_int16 b2 = vx_load(reinterpret_cast<const short*>(&in2[x + nlanes / 2]));
+
+                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(a1 - b1, a2 - b2));
+            }
+
+            if (x < length)
+            {
+                CV_DbgAssert((reinterpret_cast<const short*>(in1) != reinterpret_cast<const short*>(out)) &&
+                             (reinterpret_cast<const short*>(in2) != reinterpret_cast<const short*>(out)));
+                x = length - nlanes;
+                continue;  // process one more time (unaligned tail)
+            }
+            break;
+        }
+
+        return x;
+    }
+    else if (std::is_same<SRC, float>::value && std::is_same<DST, uchar>::value)
+    {
+        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);
+
+        if (length < nlanes)
+            return 0;
+
+        int x = 0;
+        for (;;)
+        {
+            for (; x <= length - nlanes; x += nlanes)
+            {
+                v_float32 a1 = vx_load(reinterpret_cast<const float*>(&in1[x]));
+                v_float32 a2 = vx_load(reinterpret_cast<const float*>(&in1[x + nlanes / 4]));
+                v_float32 a3 = vx_load(reinterpret_cast<const float*>(&in1[x + 2 * nlanes / 4]));
+                v_float32 a4 = vx_load(reinterpret_cast<const float*>(&in1[x + 3 * nlanes / 4]));
+
+                v_float32 b1 = vx_load(reinterpret_cast<const float*>(&in2[x]));
+                v_float32 b2 = vx_load(reinterpret_cast<const float*>(&in2[x + nlanes / 4]));
+                v_float32 b3 = vx_load(reinterpret_cast<const float*>(&in2[x + 2 * nlanes / 4]));
+                v_float32 b4 = vx_load(reinterpret_cast<const float*>(&in2[x + 3 * nlanes / 4]));
+
+                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(v_pack(v_round(a1 - b1), v_round(a2 - b2)),
+                                                                     v_pack(v_round(a3 - b3), v_round(a4 - b4))));
+            }
+
+            if (x < length)
+            {
+                CV_DbgAssert((reinterpret_cast<const float*>(in1) != reinterpret_cast<const float*>(out)) &&
+                             (reinterpret_cast<const float*>(in2) != reinterpret_cast<const float*>(out)));
+                x = length - nlanes;
+                continue;  // process one more time (unaligned tail)
+            }
+            break;
+        }
+
+        return x;
+    }
+
+    return 0;
+}
+#endif
+
 template<typename DST, typename SRC1, typename SRC2>
 static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm arithm,
                        double scale=1)
@@ -168,29 +510,37 @@ static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm a
     // NB: assume in/out types are not 64-bits
     float _scale = static_cast<float>( scale );
 
+    int x = 0;
+
     switch (arithm)
     {
-    case ARITHM_ABSDIFF:
-        for (int l=0; l < length; l++)
-            out[l] = absdiff<DST>(in1[l], in2[l]);
-        break;
-    case ARITHM_ADD:
-        for (int l=0; l < length; l++)
-            out[l] = add<DST>(in1[l], in2[l]);
-        break;
-    case ARITHM_SUBTRACT:
-        for (int l=0; l < length; l++)
-            out[l] = sub<DST>(in1[l], in2[l]);
-        break;
-    case ARITHM_MULTIPLY:
-        for (int l=0; l < length; l++)
-            out[l] = mul<DST>(in1[l], in2[l], _scale);
-        break;
-    case ARITHM_DIVIDE:
-        for (int l=0; l < length; l++)
-            out[l] = div<DST>(in1[l], in2[l], _scale);
-        break;
-    default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation");
+        case ARITHM_ADD:
+        {
+#if CV_SIMD
+            x = add_simd(in1, in2, out, length);
+#endif
+            for (; x < length; ++x)
+                out[x] = add<DST>(in1[x], in2[x]);
+            break;
+        }
+        case ARITHM_SUBTRACT:
+        {
+#if CV_SIMD
+            x = sub_simd(in1, in2, out, length);
+#endif
+            for (; x < length; ++x)
+                out[x] = sub<DST>(in1[x], in2[x]);
+            break;
+        }
+        case ARITHM_MULTIPLY:
+            for (; x < length; ++x)
+                out[x] = mul<DST>(in1[x], in2[x], _scale);
+            break;
+        case ARITHM_DIVIDE:
+            for (; x < length; ++x)
+                out[x] = div<DST>(in1[x], in2[x], _scale);
+            break;
+        default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation");
     }
 }
 
@@ -270,6 +620,29 @@ GAPI_FLUID_KERNEL(GFluidDiv, cv::gapi::core::GDiv, false)
     }
 };
 
+template<typename DST, typename SRC1, typename SRC2>
+static void run_absdiff(Buffer &dst, const View &src1, const View &src2)
+{
+    static_assert(std::is_same<SRC1, SRC2>::value, "wrong types");
+    static_assert(std::is_same<SRC1, DST>::value, "wrong types");
+
+    const auto *in1 = src1.InLine<SRC1>(0);
+    const auto *in2 = src2.InLine<SRC2>(0);
+    auto *out = dst.OutLine<DST>();
+
+    int width = dst.length();
+    int chan = dst.meta().chan;
+    int length = width * chan;
+
+    int x = 0;
+
+#if CV_SIMD
+    x = absdiff_simd(in1, in2, out, length);
+#endif
+    for (; x < length; ++x)
+        out[x] = absdiff<DST>(in1[x], in2[x]);
+}
+
 GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false)
 {
     static const int Window = 1;
@@ -277,10 +650,10 @@ GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false)
     static void run(const View &src1, const View &src2, Buffer &dst)
     {
         //      DST     SRC1    SRC2    OP          __VA_ARGS__
-        BINARY_(uchar , uchar , uchar , run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
-        BINARY_(ushort, ushort, ushort, run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
-        BINARY_( short,  short,  short, run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
-        BINARY_( float,  float,  float, run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
+        BINARY_(uchar , uchar , uchar , run_absdiff, dst, src1, src2);
+        BINARY_(ushort, ushort, ushort, run_absdiff, dst, src1, src2);
+        BINARY_( short,  short,  short, run_absdiff, dst, src1, src2);
+        BINARY_( float,  float,  float, run_absdiff, dst, src1, src2);
 
         CV_Error(cv::Error::StsBadArg, "unsupported combination of types");
     }

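The dispatch in add_simd/sub_simd above must compile as C++11, so it branches on `std::is_same<...>::value` at runtime and routes every call through `reinterpret_cast` so that the not-taken branches still type-check; the optimizer then removes them. Under C++17 the dead branches could be discarded outright with `if constexpr`. A hypothetical rendering of the same-type cases (assuming the patch's add_simd_sametype helper and the same intrinsics types):

```
#include <type_traits>

template<typename SRC, typename DST>
int add_simd_cxx17(const SRC in1[], const SRC in2[], DST out[], int length)
{
    // Discarded branches are never instantiated, so no reinterpret_cast is needed.
    if constexpr (std::is_same_v<SRC, DST> && std::is_same_v<SRC, uchar>)
        return add_simd_sametype<uchar, cv::v_uint8>(in1, in2, out, length);
    else if constexpr (std::is_same_v<SRC, DST> && std::is_same_v<SRC, short>)
        return add_simd_sametype<short, cv::v_int16>(in1, in2, out, length);
    else if constexpr (std::is_same_v<SRC, DST> && std::is_same_v<SRC, float>)
        return add_simd_sametype<float, cv::v_float32>(in1, in2, out, length);
    else
        return 0;  // mixed-type pairs omitted in this sketch
}
```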
From 95ff9282286a55bcbb5a241dc30629406b50dd88 Mon Sep 17 00:00:00 2001
From: Dmitry Matveev <dmitry.matveev@intel.com>
Date: Thu, 1 Oct 2020 00:18:04 +0300
Subject: [PATCH 042/152] G-API: Introduced a Text Detection sample

This sample models the Text Detection demo from OMZ:
https://github.com/openvinotoolkit/open_model_zoo/tree/2020.4/demos/text_detection_demo

Also: renamed cv::gapi::size() to cv::gapi::streaming::size()
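For user code the rename is mechanical:

```
cv::GMat in;
// Before this patch (textual ID "org.opencv.core.size"):
//   auto sz = cv::gapi::size(in);
// After this patch (textual ID "org.opencv.streaming.size"):
auto sz = cv::gapi::streaming::size(in);
```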
---
 modules/gapi/include/opencv2/gapi/core.hpp    |  32 +-
 .../gapi/include/opencv2/gapi/gstreaming.hpp  |  16 +
 .../include/opencv2/gapi/infer/parsers.hpp    |  12 +
 .../perf/common/gapi_core_perf_tests_inl.hpp  |   4 +-
 modules/gapi/samples/text_detection.cpp       | 698 ++++++++++++++++++
 modules/gapi/src/api/kernels_core.cpp         |   8 +-
 modules/gapi/src/backends/cpu/gcpucore.cpp    |   4 +-
 .../gapi/test/common/gapi_core_tests_inl.hpp  |   4 +-
 8 files changed, 755 insertions(+), 23 deletions(-)
 create mode 100644 modules/gapi/samples/text_detection.cpp

diff --git a/modules/gapi/include/opencv2/gapi/core.hpp b/modules/gapi/include/opencv2/gapi/core.hpp
index 2c01328f09..8825585696 100644
--- a/modules/gapi/include/opencv2/gapi/core.hpp
+++ b/modules/gapi/include/opencv2/gapi/core.hpp
@@ -508,19 +508,23 @@ namespace core {
             return in.withType(in.depth, in.chan).withSize(dsize);
         }
     };
+} // namespace core
 
-    G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.core.size") {
-        static GOpaqueDesc outMeta(const GMatDesc&) {
-            return empty_gopaque_desc();
-        }
-    };
+namespace streaming {
 
-    G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.core.sizeR") {
-        static GOpaqueDesc outMeta(const GOpaqueDesc&) {
-            return empty_gopaque_desc();
-        }
-    };
-}
+// Operations for Streaming (declared in this header for convenience)
+G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.streaming.size") {
+    static GOpaqueDesc outMeta(const GMatDesc&) {
+        return empty_gopaque_desc();
+    }
+};
+
+G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.streaming.sizeR") {
+    static GOpaqueDesc outMeta(const GOpaqueDesc&) {
+        return empty_gopaque_desc();
+    }
+};
+} // namespace streaming
 
 //! @addtogroup gapi_math
 //! @{
@@ -1753,9 +1757,10 @@ GAPI_EXPORTS GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, i
                              int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());
 //! @} gapi_transform
 
+namespace streaming {
 /** @brief Gets dimensions from Mat.
 
-@note Function textual ID is "org.opencv.core.size"
+@note Function textual ID is "org.opencv.streaming.size"
 
 @param src Input tensor
 @return Size (tensor dimensions).
@@ -1765,12 +1770,13 @@ GAPI_EXPORTS GOpaque<Size> size(const GMat& src);
 /** @overload
 Gets dimensions from rectangle.
 
-@note Function textual ID is "org.opencv.core.sizeR"
+@note Function textual ID is "org.opencv.streaming.sizeR"
 
 @param r Input rectangle.
 @return Size (rectangle dimensions).
 */
 GAPI_EXPORTS GOpaque<Size> size(const GOpaque<Rect>& r);
+} //namespace streaming
 } //namespace gapi
 } //namespace cv
 
diff --git a/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/modules/gapi/include/opencv2/gapi/gstreaming.hpp
index f45c30bdae..037fa94452 100644
--- a/modules/gapi/include/opencv2/gapi/gstreaming.hpp
+++ b/modules/gapi/include/opencv2/gapi/gstreaming.hpp
@@ -111,6 +111,22 @@ public:
      */
     GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s);
 
+    /**
+     * @brief Constructs and specifies an input video stream for a
+     * single-input computation pipeline with the given parameters.
+     *
+     * Throws if pipeline is already running. Use stop() and then
+     * setSource() to run the graph on a new video stream.
+     *
+     * @overload
+     * @param args arguments used to construct and initialize a stream
+     * source.
+     */
+    template<typename T, typename... Args>
+    void setSource(Args&&... args) {
+        setSource(cv::gapi::wip::make_src<T>(std::forward<Args>(args)...));
+    }
+
     /**
      * @brief Start the pipeline execution.
      *
diff --git a/modules/gapi/include/opencv2/gapi/infer/parsers.hpp b/modules/gapi/include/opencv2/gapi/infer/parsers.hpp
index c3488f5799..15742c6e55 100644
--- a/modules/gapi/include/opencv2/gapi/infer/parsers.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer/parsers.hpp
@@ -122,4 +122,16 @@ GAPI_EXPORTS std::tuple<GArray<Rect>, GArray<int>> parseYolo(const GMat& in,
 } // namespace gapi
 } // namespace cv
 
+// Reimport parseSSD & parseYolo under their initial namespace
+namespace cv {
+namespace gapi {
+namespace streaming {
+
+using cv::gapi::parseSSD;
+using cv::gapi::parseYolo;
+
+} // namespace streaming
+} // namespace gapi
+} // namespace cv
+
 #endif // OPENCV_GAPI_PARSERS_HPP
diff --git a/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp b/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp
index 91d08bba06..ac90181184 100644
--- a/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp
+++ b/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp
@@ -2124,7 +2124,7 @@ PERF_TEST_P_(SizePerfTest, TestPerformance)
 
     // G-API code //////////////////////////////////////////////////////////////
     cv::GMat in;
-    auto out = cv::gapi::size(in);
+    auto out = cv::gapi::streaming::size(in);
     cv::GComputation c(cv::GIn(in), cv::GOut(out));
     cv::Size out_sz;
 
@@ -2156,7 +2156,7 @@ PERF_TEST_P_(SizeRPerfTest, TestPerformance)
 
     // G-API code //////////////////////////////////////////////////////////////
     cv::GOpaque<cv::Rect> op_rect;
-    auto out = cv::gapi::size(op_rect);
+    auto out = cv::gapi::streaming::size(op_rect);
     cv::GComputation c(cv::GIn(op_rect), cv::GOut(out));
     cv::Size out_sz;
 
diff --git a/modules/gapi/samples/text_detection.cpp b/modules/gapi/samples/text_detection.cpp
new file mode 100644
index 0000000000..da1bab6ca9
--- /dev/null
+++ b/modules/gapi/samples/text_detection.cpp
@@ -0,0 +1,698 @@
+#include <algorithm>
+#include <cctype>
+#include <cmath>
+#include <iostream>
+#include <limits>
+#include <numeric>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include <opencv2/gapi.hpp>
+#include <opencv2/gapi/core.hpp>
+#include <opencv2/gapi/cpu/gcpukernel.hpp>
+#include <opencv2/gapi/infer.hpp>
+#include <opencv2/gapi/infer/ie.hpp>
+#include <opencv2/gapi/streaming/cap.hpp>
+
+#include <opencv2/highgui.hpp>
+#include <opencv2/core/utility.hpp>
+
+const std::string about =
+    "This is an OpenCV-based version of OMZ Text Detection example";
+const std::string keys =
+    "{ h help |                           | Print this help message }"
+    "{ input  |                           | Path to the input video file }"
+    "{ tdm    | text-detection-0004.xml   | Path to OpenVINO text detection model (.xml), versions 0003 and 0004 work }"
+    "{ tdd    | CPU                       | Target device for the text detector (e.g. CPU, GPU, VPU, ...) }"
+    "{ trm    | text-recognition-0012.xml | Path to OpenVINO text recognition model (.xml) }"
+    "{ trd    | CPU                       | Target device for the text recognition (e.g. CPU, GPU, VPU, ...) }"
+    "{ bw     | 0                         | CTC beam search decoder bandwidth, if 0, a CTC greedy decoder is used}"
+    "{ sset   | 0123456789abcdefghijklmnopqrstuvwxyz | Symbol set to use with text recognition decoder. Shouldn't contain symbol #. }"
+    "{ thr    | 0.2                       | Text recognition confidence threshold}"
+    ;
+
+namespace {
+std::string weights_path(const std::string &model_path) {
+    const auto EXT_LEN = 4u;
+    const auto sz = model_path.size();
+    CV_Assert(sz > EXT_LEN);
+
+    const auto ext = model_path.substr(sz - EXT_LEN);
+    CV_Assert(cv::toLowerCase(ext) == ".xml");
+    return model_path.substr(0u, sz - EXT_LEN) + ".bin";
+}
+
+//////////////////////////////////////////////////////////////////////
+// Taken from OMZ samples as-is
+template<typename Iter>
+void softmax_and_choose(Iter begin, Iter end, int *argmax, float *prob) {
+    auto max_element = std::max_element(begin, end);
+    *argmax = static_cast<int>(std::distance(begin, max_element));
+    float max_val = *max_element;
+    double sum = 0;
+    for (auto i = begin; i != end; i++) {
+       sum += std::exp((*i) - max_val);
+    }
+    if (std::fabs(sum) < std::numeric_limits<double>::epsilon()) {
+        throw std::logic_error("sum can't be equal to zero");
+    }
+    *prob = 1.0f / static_cast<float>(sum);
+}
+
+template<typename Iter>
+std::vector<float> softmax(Iter begin, Iter end) {
+    std::vector<float> prob(end - begin, 0.f);
+    std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); });
+    float sum = std::accumulate(prob.begin(), prob.end(), 0.0f);
+    for (int i = 0; i < static_cast<int>(prob.size()); i++)
+        prob[i] /= sum;
+    return prob;
+}
+
+struct BeamElement {
+    std::vector<int> sentence;   //!< The sequence of chars that will be a result of the beam element
+
+    float prob_blank;            //!< The probability that the last char in CTC sequence
+                                 //!< for the beam element is the special blank char
+
+    float prob_not_blank;        //!< The probability that the last char in CTC sequence
+                                 //!< for the beam element is NOT the special blank char
+
+    float prob() const {         //!< The probability of the beam element.
+        return prob_blank + prob_not_blank;
+    }
+};
+
+std::string CTCGreedyDecoder(const float *data,
+                             const std::size_t sz,
+                             const std::string &alphabet,
+                             const char pad_symbol,
+                             double *conf) {
+    std::string res = "";
+    bool prev_pad = false;
+    *conf = 1;
+
+    const auto num_classes = alphabet.length();
+    for (auto it = data; it != (data+sz); it += num_classes) {
+        int argmax = 0;
+        float prob = 0.f;
+
+        softmax_and_choose(it, it + num_classes, &argmax, &prob);
+        (*conf) *= prob;
+
+        auto symbol = alphabet[argmax];
+        if (symbol != pad_symbol) {
+            if (res.empty() || prev_pad || (!res.empty() && symbol != res.back())) {
+                prev_pad = false;
+                res += symbol;
+            }
+        } else {
+            prev_pad = true;
+        }
+    }
+    return res;
+}
+
+std::string CTCBeamSearchDecoder(const float *data,
+                                 const std::size_t sz,
+                                 const std::string &alphabet,
+                                 double *conf,
+                                 int bandwidth) {
+    const auto num_classes = alphabet.length();
+
+    std::vector<BeamElement> curr;
+    std::vector<BeamElement> last;
+
+    last.push_back(BeamElement{std::vector<int>(), 1.f, 0.f});
+
+    for (auto it = data; it != (data+sz); it += num_classes) {
+        curr.clear();
+
+        std::vector<float> prob = softmax(it, it + num_classes);
+
+        for(const auto& candidate: last) {
+            float prob_not_blank = 0.f;
+            const std::vector<int>& candidate_sentence = candidate.sentence;
+            if (!candidate_sentence.empty()) {
+                int n = candidate_sentence.back();
+                prob_not_blank = candidate.prob_not_blank * prob[n];
+            }
+            float prob_blank = candidate.prob() * prob[num_classes - 1];
+
+            auto check_res = std::find_if(curr.begin(),
+                                          curr.end(),
+                                          [&candidate_sentence](const BeamElement& n) {
+                                              return n.sentence == candidate_sentence;
+                                          });
+            if (check_res == std::end(curr)) {
+                curr.push_back(BeamElement{candidate.sentence, prob_blank, prob_not_blank});
+            } else {
+                check_res->prob_not_blank  += prob_not_blank;
+                if (check_res->prob_blank != 0.f) {
+                    throw std::logic_error("Probability that the last char in CTC-sequence "
+                                           "is the special blank char must be zero here");
+                }
+                check_res->prob_blank = prob_blank;
+            }
+
+            for (int i = 0; i < static_cast<int>(num_classes) - 1; i++) {
+                auto extend = candidate_sentence;
+                extend.push_back(i);
+
+                if (candidate_sentence.size() > 0 && candidate.sentence.back() == i) {
+                    prob_not_blank = prob[i] * candidate.prob_blank;
+                } else {
+                    prob_not_blank = prob[i] * candidate.prob();
+                }
+
+                auto check_res2 = std::find_if(curr.begin(),
+                                              curr.end(),
+                                              [&extend](const BeamElement &n) {
+                                                  return n.sentence == extend;
+                                              });
+                if (check_res2 == std::end(curr)) {
+                    curr.push_back(BeamElement{extend, 0.f, prob_not_blank});
+                } else {
+                    check_res2->prob_not_blank += prob_not_blank;
+                }
+            }
+        }
+
+        sort(curr.begin(), curr.end(), [](const BeamElement &a, const BeamElement &b) -> bool {
+            return a.prob() > b.prob();
+        });
+
+        last.clear();
+        int num_to_copy = std::min(bandwidth, static_cast<int>(curr.size()));
+        for (int b = 0; b < num_to_copy; b++) {
+            last.push_back(curr[b]);
+        }
+    }
+
+    *conf = last[0].prob();
+    std::string res="";
+    for (const auto& idx: last[0].sentence) {
+        res += alphabet[idx];
+    }
+
+    return res;
+}
+
+//////////////////////////////////////////////////////////////////////
+} // anonymous namespace
+
+namespace custom {
+namespace {
+
+//////////////////////////////////////////////////////////////////////
+// Define networks for this sample
+using GMat2 = std::tuple<cv::GMat, cv::GMat>;
+G_API_NET(TextDetection,
+          <GMat2(cv::GMat)>,
+          "sample.custom.text_detect");
+
+G_API_NET(TextRecognition,
+          <cv::GMat(cv::GMat)>,
+          "sample.custom.text_recogn");
+
+// Define custom operations
+using GSize = cv::GOpaque<cv::Size>;
+using GRRects = cv::GArray<cv::RotatedRect>;
+G_API_OP(PostProcess,
+        <GRRects(cv::GMat,cv::GMat,GSize,float,float)>,
+        "sample.custom.text.post_proc") {
+    static cv::GArrayDesc outMeta(const cv::GMatDesc &,
+                                  const cv::GMatDesc &,
+                                  const cv::GOpaqueDesc &,
+                                  float,
+                                  float) {
+        return cv::empty_array_desc();
+    }
+};
+
+using GMats = cv::GArray<cv::GMat>;
+G_API_OP(CropLabels,
+         <GMats(cv::GMat,GRRects,GSize)>,
+         "sample.custom.text.crop") {
+    static cv::GArrayDesc outMeta(const cv::GMatDesc &,
+                                  const cv::GArrayDesc &,
+                                  const cv::GOpaqueDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+//////////////////////////////////////////////////////////////////////
+// Implement custom operations
+GAPI_OCV_KERNEL(OCVPostProcess, PostProcess) {
+    static void run(const cv::Mat &link,
+                    const cv::Mat &segm,
+                    const cv::Size &img_size,
+                    const float link_threshold,
+                    const float segm_threshold,
+                    std::vector<cv::RotatedRect> &out) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        const int kMinArea = 300;
+        const int kMinHeight = 10;
+
+        const float *link_data_pointer = link.ptr<float>();
+        std::vector<float> link_data(link_data_pointer, link_data_pointer + link.total());
+        link_data = transpose4d(link_data, dimsToShape(link.size), {0, 2, 3, 1});
+        softmax(link_data);
+        link_data = sliceAndGetSecondChannel(link_data);
+        std::vector<int> new_link_data_shape = {
+            link.size[0],
+            link.size[2],
+            link.size[3],
+            link.size[1]/2,
+        };
+
+        const float *cls_data_pointer = segm.ptr<float>();
+        std::vector<float> cls_data(cls_data_pointer, cls_data_pointer + segm.total());
+        cls_data = transpose4d(cls_data, dimsToShape(segm.size), {0, 2, 3, 1});
+        softmax(cls_data);
+        cls_data = sliceAndGetSecondChannel(cls_data);
+        std::vector<int> new_cls_data_shape = {
+            segm.size[0],
+            segm.size[2],
+            segm.size[3],
+            segm.size[1]/2,
+        };
+
+        out = maskToBoxes(decodeImageByJoin(cls_data, new_cls_data_shape,
+                                            link_data, new_link_data_shape,
+                                            segm_threshold, link_threshold),
+                          static_cast<float>(kMinArea),
+                          static_cast<float>(kMinHeight),
+                          img_size);
+    }
+
+    static std::vector<std::size_t> dimsToShape(const cv::MatSize &sz) {
+        const int n_dims = sz.dims();
+        std::vector<std::size_t> result;
+        result.reserve(n_dims);
+
+        // cv::MatSize is not iterable...
+        for (int i = 0; i < n_dims; i++) {
+            result.emplace_back(static_cast<std::size_t>(sz[i]));
+        }
+        return result;
+    }
+
+    static void softmax(std::vector<float> &rdata) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        const size_t last_dim = 2;
+        for (size_t i = 0 ; i < rdata.size(); i+=last_dim) {
+            float m = std::max(rdata[i], rdata[i+1]);
+            rdata[i] = std::exp(rdata[i] - m);
+            rdata[i + 1] = std::exp(rdata[i + 1] - m);
+            float s = rdata[i] + rdata[i + 1];
+            rdata[i] /= s;
+            rdata[i + 1] /= s;
+        }
+    }
+
+    static std::vector<float> transpose4d(const std::vector<float> &data,
+                                          const std::vector<size_t> &shape,
+                                          const std::vector<size_t> &axes) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        if (shape.size() != axes.size())
+            throw std::runtime_error("Shape and axes must have the same dimension.");
+
+        for (size_t a : axes) {
+            if (a >= shape.size())
+                throw std::runtime_error("Axis must be less than dimension of shape.");
+        }
+        size_t total_size = shape[0]*shape[1]*shape[2]*shape[3];
+        std::vector<size_t> steps {
+            shape[axes[1]]*shape[axes[2]]*shape[axes[3]],
+            shape[axes[2]]*shape[axes[3]],
+            shape[axes[3]],
+            1
+         };
+
+        size_t source_data_idx = 0;
+        std::vector<float> new_data(total_size, 0);
+        std::vector<size_t> ids(shape.size());
+        for (ids[0] = 0; ids[0] < shape[0]; ids[0]++) {
+            for (ids[1] = 0; ids[1] < shape[1]; ids[1]++) {
+                for (ids[2] = 0; ids[2] < shape[2]; ids[2]++) {
+                    for (ids[3]= 0; ids[3] < shape[3]; ids[3]++) {
+                        size_t new_data_idx = ids[axes[0]]*steps[0] + ids[axes[1]]*steps[1] +
+                            ids[axes[2]]*steps[2] + ids[axes[3]]*steps[3];
+                        new_data[new_data_idx] = data[source_data_idx++];
+                    }
+                }
+            }
+        }
+        return new_data;
+    }
+
+    static std::vector<float> sliceAndGetSecondChannel(const std::vector<float> &data) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        std::vector<float> new_data(data.size() / 2, 0);
+        for (size_t i = 0; i < data.size() / 2; i++) {
+            new_data[i] = data[2 * i + 1];
+        }
+        return new_data;
+    }
+
+    static void join(const int p1,
+                     const int p2,
+                     std::unordered_map<int, int> &group_mask) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        const int root1 = findRoot(p1, group_mask);
+        const int root2 = findRoot(p2, group_mask);
+        if (root1 != root2) {
+            group_mask[root1] = root2;
+        }
+    }
+
+    static cv::Mat decodeImageByJoin(const std::vector<float> &cls_data,
+                                     const std::vector<int>   &cls_data_shape,
+                                     const std::vector<float> &link_data,
+                                     const std::vector<int>   &link_data_shape,
+                                     float cls_conf_threshold,
+                                     float link_conf_threshold) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        const int h = cls_data_shape[1];
+        const int w = cls_data_shape[2];
+
+        std::vector<uchar> pixel_mask(h * w, 0);
+        std::unordered_map<int, int> group_mask;
+        std::vector<cv::Point> points;
+        for (int i = 0; i < static_cast<int>(pixel_mask.size()); i++) {
+            pixel_mask[i] = cls_data[i] >= cls_conf_threshold;
+            if (pixel_mask[i]) {
+                points.emplace_back(i % w, i / w);
+                group_mask[i] = -1;
+            }
+        }
+        std::vector<uchar> link_mask(link_data.size(), 0);
+        for (size_t i = 0; i < link_mask.size(); i++) {
+            link_mask[i] = link_data[i] >= link_conf_threshold;
+        }
+        size_t neighbours = size_t(link_data_shape[3]);
+        for (const auto &point : points) {
+            size_t neighbour = 0;
+            for (int ny = point.y - 1; ny <= point.y + 1; ny++) {
+                for (int nx = point.x - 1; nx <= point.x + 1; nx++) {
+                    if (nx == point.x && ny == point.y)
+                        continue;
+                    if (nx >= 0 && nx < w && ny >= 0 && ny < h) {
+                        uchar pixel_value = pixel_mask[size_t(ny) * size_t(w) + size_t(nx)];
+                        uchar link_value = link_mask[(size_t(point.y) * size_t(w) + size_t(point.x))
+                                                     *neighbours + neighbour];
+                        if (pixel_value && link_value) {
+                            join(point.x + point.y * w, nx + ny * w, group_mask);
+                        }
+                    }
+                    neighbour++;
+                }
+            }
+        }
+        return get_all(points, w, h, group_mask);
+    }
+
+    static cv::Mat get_all(const std::vector<cv::Point> &points,
+                           const int w,
+                           const int h,
+                           std::unordered_map<int, int> &group_mask) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        std::unordered_map<int, int> root_map;
+        cv::Mat mask(h, w, CV_32S, cv::Scalar(0));
+        for (const auto &point : points) {
+            int point_root = findRoot(point.x + point.y * w, group_mask);
+            if (root_map.find(point_root) == root_map.end()) {
+                root_map.emplace(point_root, static_cast<int>(root_map.size() + 1));
+            }
+            mask.at<int>(point.x + point.y * w) = root_map[point_root];
+        }
+        return mask;
+    }
+
+    static int findRoot(const int point,
+                        std::unordered_map<int, int> &group_mask) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        int root = point;
+        bool update_parent = false;
+        while (group_mask.at(root) != -1) {
+            root = group_mask.at(root);
+            update_parent = true;
+        }
+        if (update_parent) {
+            group_mask[point] = root;
+        }
+        return root;
+    }
+
+    static std::vector<cv::RotatedRect> maskToBoxes(const cv::Mat &mask,
+                                                    const float min_area,
+                                                    const float min_height,
+                                                    const cv::Size &image_size) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        std::vector<cv::RotatedRect> bboxes;
+        double min_val = 0.;
+        double max_val = 0.;
+        cv::minMaxLoc(mask, &min_val, &max_val);
+        int max_bbox_idx = static_cast<int>(max_val);
+        cv::Mat resized_mask;
+        cv::resize(mask, resized_mask, image_size, 0, 0, cv::INTER_NEAREST);
+
+        for (int i = 1; i <= max_bbox_idx; i++) {
+            cv::Mat bbox_mask = resized_mask == i;
+            std::vector<std::vector<cv::Point>> contours;
+
+            cv::findContours(bbox_mask, contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
+            if (contours.empty())
+                continue;
+            cv::RotatedRect r = cv::minAreaRect(contours[0]);
+            if (std::min(r.size.width, r.size.height) < min_height)
+                continue;
+            if (r.size.area() < min_area)
+                continue;
+            bboxes.emplace_back(r);
+        }
+        return bboxes;
+    }
+}; // GAPI_OCV_KERNEL(PostProcess)
+
+GAPI_OCV_KERNEL(OCVCropLabels, CropLabels) {
+    static void run(const cv::Mat &image,
+                    const std::vector<cv::RotatedRect> &detections,
+                    const cv::Size &outSize,
+                    std::vector<cv::Mat> &out) {
+        out.clear();
+        out.reserve(detections.size());
+        cv::Mat crop(outSize, CV_8UC3, cv::Scalar(0));
+        cv::Mat gray(outSize, CV_8UC1, cv::Scalar(0));
+        std::vector<int> blob_shape = {1,1,outSize.height,outSize.width};
+
+        for (auto &&rr : detections) {
+            std::vector<cv::Point2f> points(4);
+            rr.points(points.data());
+
+            const auto top_left_point_idx = topLeftPointIdx(points);
+            cv::Point2f point0 = points[static_cast<size_t>(top_left_point_idx)];
+            cv::Point2f point1 = points[(top_left_point_idx + 1) % 4];
+            cv::Point2f point2 = points[(top_left_point_idx + 2) % 4];
+
+            std::vector<cv::Point2f> from{point0, point1, point2};
+            std::vector<cv::Point2f> to{
+                cv::Point2f(0.0f, 0.0f),
+                cv::Point2f(static_cast<float>(outSize.width-1), 0.0f),
+                cv::Point2f(static_cast<float>(outSize.width-1),
+                            static_cast<float>(outSize.height-1))
+            };
+            cv::Mat M = cv::getAffineTransform(from, to);
+            cv::warpAffine(image, crop, M, outSize);
+            cv::cvtColor(crop, gray, cv::COLOR_BGR2GRAY);
+
+            cv::Mat blob;
+            gray.convertTo(blob, CV_32F);
+            out.push_back(blob.reshape(1, blob_shape)); // pass as 1,1,H,W instead of H,W
+        }
+    }
+
+    static int topLeftPointIdx(const std::vector<cv::Point2f> &points) {
+        // NOTE: Taken from the OMZ text detection sample almost as-is
+        cv::Point2f most_left(std::numeric_limits<float>::max(),
+                              std::numeric_limits<float>::max());
+        cv::Point2f almost_most_left(std::numeric_limits<float>::max(),
+                                     std::numeric_limits<float>::max());
+        int most_left_idx = -1;
+        int almost_most_left_idx = -1;
+
+        for (size_t i = 0; i < points.size() ; i++) {
+            if (most_left.x > points[i].x) {
+                if (most_left.x < std::numeric_limits<float>::max()) {
+                    almost_most_left = most_left;
+                    almost_most_left_idx = most_left_idx;
+                }
+                most_left = points[i];
+                most_left_idx = static_cast<int>(i);
+            }
+            if (almost_most_left.x > points[i].x && points[i] != most_left) {
+                almost_most_left = points[i];
+                almost_most_left_idx = static_cast<int>(i);
+            }
+        }
+
+        if (almost_most_left.y < most_left.y) {
+            most_left = almost_most_left;
+            most_left_idx = almost_most_left_idx;
+        }
+        return most_left_idx;
+    }
+
+}; // GAPI_OCV_KERNEL(CropLabels)
+
+} // anonymous namespace
+} // namespace custom
+
+namespace vis {
+namespace {
+
+void drawRotatedRect(cv::Mat &m, const cv::RotatedRect &rc) {
+    std::vector<cv::Point2f> tmp_points(5);
+    rc.points(tmp_points.data());
+    tmp_points[4] = tmp_points[0];
+    auto prev = tmp_points.begin(), it = prev+1;
+    for (; it != tmp_points.end(); ++it) {
+        cv::line(m, *prev, *it, cv::Scalar(50, 205, 50), 2);
+        prev = it;
+    }
+}
+
+void drawText(cv::Mat &m, const cv::RotatedRect &rc, const std::string &str) {
+    const int    fface   = cv::FONT_HERSHEY_SIMPLEX;
+    const double scale   = 0.7;
+    const int    thick   = 1;
+          int    base    = 0;
+    const auto text_size = cv::getTextSize(str, fface, scale, thick, &base);
+
+    std::vector<cv::Point2f> tmp_points(4);
+    rc.points(tmp_points.data());
+    const auto tl_point_idx = custom::OCVCropLabels::topLeftPointIdx(tmp_points);
+    cv::Point text_pos = tmp_points[tl_point_idx];
+    text_pos.x = std::max(0, text_pos.x);
+    text_pos.y = std::max(text_size.height, text_pos.y);
+
+    cv::rectangle(m,
+                  text_pos + cv::Point{0, base},
+                  text_pos + cv::Point{text_size.width, -text_size.height},
+                  CV_RGB(50, 205, 50),
+                  cv::FILLED);
+    const auto white = CV_RGB(255, 255, 255);
+    cv::putText(m, str, text_pos, fface, scale, white, thick, 8);
+}
+
+} // anonymous namespace
+} // namespace vis
+
+int main(int argc, char *argv[])
+{
+    cv::CommandLineParser cmd(argc, argv, keys);
+    cmd.about(about);
+    if (cmd.has("help")) {
+        cmd.printMessage();
+        return 0;
+    }
+    const auto input_file_name = cmd.get<std::string>("input");
+    const auto tdet_model_path = cmd.get<std::string>("tdm");
+    const auto trec_model_path = cmd.get<std::string>("trm");
+    const auto tdet_target_dev = cmd.get<std::string>("tdd");
+    const auto trec_target_dev = cmd.get<std::string>("trd");
+    const auto ctc_beam_dec_bw = cmd.get<int>("bw");
+    const auto dec_conf_thresh = cmd.get<double>("thr");
+
+    const auto pad_symbol      = '#';
+    const auto symbol_set      = cmd.get<std::string>("sset") + pad_symbol;
+
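+    // Graph: detect text -> post-process into rotated rects -> crop each label
+    // -> run recognition on every crop with the second network.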
+    cv::GMat in;
+    cv::GOpaque<cv::Size> in_rec_sz;
+    cv::GMat link, segm;
+    std::tie(link, segm) = cv::gapi::infer<custom::TextDetection>(in);
+    cv::GOpaque<cv::Size> size = cv::gapi::streaming::size(in);
+    cv::GArray<cv::RotatedRect> rrs = custom::PostProcess::on(link, segm, size, 0.8f, 0.8f);
+    cv::GArray<cv::GMat> labels = custom::CropLabels::on(in, rrs, in_rec_sz);
+    cv::GArray<cv::GMat> text = cv::gapi::infer2<custom::TextRecognition>(in, labels);
+
+    cv::GComputation graph(cv::GIn(in, in_rec_sz),
+                           cv::GOut(cv::gapi::copy(in), rrs, text));
+
+    // Text detection network
+    auto tdet_net = cv::gapi::ie::Params<custom::TextDetection> {
+        tdet_model_path,                // path to topology IR
+        weights_path(tdet_model_path),  // path to weights
+        tdet_target_dev,                // device specifier
+    }.cfgOutputLayers({"model/link_logits_/add", "model/segm_logits/add"});
+
+    auto trec_net = cv::gapi::ie::Params<custom::TextRecognition> {
+        trec_model_path,                // path to topology IR
+        weights_path(trec_model_path),  // path to weights
+        trec_target_dev,                // device specifier
+    };
+    auto networks = cv::gapi::networks(tdet_net, trec_net);
+
+    auto kernels = cv::gapi::kernels< custom::OCVPostProcess
+                                    , custom::OCVCropLabels
+                                    >();
+    auto pipeline = graph.compileStreaming(cv::compile_args(kernels, networks));
+
+    std::cout << "Reading " << input_file_name << std::endl;
+
+    // Input stream
+    auto in_src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input_file_name);
+
+    // Text recognition input size (also an input parameter to the graph)
+    auto in_rsz = cv::Size{ 120, 32 };
+
+    // Set the pipeline source & start the pipeline
+    pipeline.setSource(cv::gin(in_src, in_rsz));
+    pipeline.start();
+
+    // Declare the output data & run the processing loop
+    cv::TickMeter tm;
+    cv::Mat image;
+    std::vector<cv::RotatedRect> out_rcs;
+    std::vector<cv::Mat> out_text;
+
+    tm.start();
+    int frames = 0;
+    while (pipeline.pull(cv::gout(image, out_rcs, out_text))) {
+        frames++;
+
+        CV_Assert(out_rcs.size() == out_text.size());
+        const auto num_labels = out_rcs.size();
+
+        std::vector<cv::Point2f> tmp_points(4);
+        for (std::size_t l = 0; l < num_labels; l++) {
+            // Decode the recognized text in the rectangle
+            const auto &blob = out_text[l];
+            const float *data = blob.ptr<float>();
+            const auto sz = blob.total();
+            double conf = 1.0;
+            const std::string res = ctc_beam_dec_bw == 0
+                ? CTCGreedyDecoder(data, sz, symbol_set, pad_symbol, &conf)
+                : CTCBeamSearchDecoder(data, sz, symbol_set, &conf, ctc_beam_dec_bw);
+
+            // Draw a bounding box for this rotated rectangle
+            const auto &rc = out_rcs[l];
+            vis::drawRotatedRect(image, rc);
+
+            // Draw text, if decoded
+            if (conf >= dec_conf_thresh) {
+                vis::drawText(image, rc, res);
+            }
+        }
+        tm.stop();
+        cv::imshow("Out", image);
+        cv::waitKey(1);
+        tm.start();
+    }
+    tm.stop();
+    std::cout << "Processed " << frames << " frames"
+              << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
+    return 0;
+}
diff --git a/modules/gapi/src/api/kernels_core.cpp b/modules/gapi/src/api/kernels_core.cpp
index 55c43594af..82aceb1f26 100644
--- a/modules/gapi/src/api/kernels_core.cpp
+++ b/modules/gapi/src/api/kernels_core.cpp
@@ -388,14 +388,14 @@ GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, int flags,
     return core::GWarpAffine::on(src, M, dsize, flags, borderMode, borderValue);
 }
 
-GOpaque<Size> size(const GMat& src)
+GOpaque<Size> streaming::size(const GMat& src)
 {
-    return core::GSize::on(src);
+    return streaming::GSize::on(src);
 }
 
-GOpaque<Size> size(const GOpaque<Rect>& r)
+GOpaque<Size> streaming::size(const GOpaque<Rect>& r)
 {
-    return core::GSizeR::on(r);
+    return streaming::GSizeR::on(r);
 }
 
 } //namespace gapi
diff --git a/modules/gapi/src/backends/cpu/gcpucore.cpp b/modules/gapi/src/backends/cpu/gcpucore.cpp
index f2b8f7077d..fc460149c6 100644
--- a/modules/gapi/src/backends/cpu/gcpucore.cpp
+++ b/modules/gapi/src/backends/cpu/gcpucore.cpp
@@ -625,7 +625,7 @@ GAPI_OCV_KERNEL(GCPUParseYolo, cv::gapi::nn::parsers::GParseYolo)
     }
 };
 
-GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize)
+GAPI_OCV_KERNEL(GCPUSize, cv::gapi::streaming::GSize)
 {
     static void run(const cv::Mat& in, cv::Size& out)
     {
@@ -634,7 +634,7 @@ GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize)
     }
 };
 
-GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::core::GSizeR)
+GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::streaming::GSizeR)
 {
     static void run(const cv::Rect& in, cv::Size& out)
     {
diff --git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp
index e11324f070..1a167ad5ea 100644
--- a/modules/gapi/test/common/gapi_core_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp
@@ -1691,7 +1691,7 @@ TEST_P(SizeTest, ParseTest)
     cv::GMat in;
     cv::Size out_sz;
 
-    auto out = cv::gapi::size(in);
+    auto out = cv::gapi::streaming::size(in);
     cv::GComputation c(cv::GIn(in), cv::GOut(out));
     c.apply(cv::gin(in_mat1), cv::gout(out_sz), getCompileArgs());
 
@@ -1704,7 +1704,7 @@ TEST_P(SizeRTest, ParseTest)
     cv::Size out_sz;
 
     cv::GOpaque<cv::Rect> op_rect;
-    auto out = cv::gapi::size(op_rect);
+    auto out = cv::gapi::streaming::size(op_rect);
     cv::GComputation c(cv::GIn(op_rect), cv::GOut(out));
     c.apply(cv::gin(rect), cv::gout(out_sz), getCompileArgs());
 

From d31b6c3480c601305fa1e153a819aad0bd247fe2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martin=20=C5=A0tefa=C5=88=C3=A1k?= <zteffi@gmail.com>
Date: Fri, 16 Oct 2020 00:28:15 +0200
Subject: [PATCH 043/152] stitching: add warpPointBackward to warpers

Tested by projecting random points and reprojecting them back.
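
A minimal round-trip sketch of what the new test checks; K and R below are
illustrative placeholders (identity intrinsics, a 30-degree rotation), and any
warper covered by this patch can stand in for SphericalWarper:

```
#include <opencv2/stitching/warpers.hpp>
#include <cmath>
#include <iostream>

int main()
{
    cv::Mat K = cv::Mat::eye(3, 3, CV_32FC1);      // placeholder intrinsics
    const float a = 30.f * (float)CV_PI / 180.f;   // placeholder rotation angle
    float Rdata[9] = {  std::cos(a), std::sin(a), 0.f,
                       -std::sin(a), std::cos(a), 0.f,
                                0.f,         0.f, 1.f };
    cv::Mat R(3, 3, CV_32FC1, Rdata);

    cv::Ptr<cv::WarperCreator> creator = cv::makePtr<cv::SphericalWarper>();
    cv::Ptr<cv::detail::RotationWarper> warper = creator->create(1.f);

    const cv::Point2f pt(0.25f, -0.5f);
    const cv::Point2f uv   = warper->warpPoint(pt, K, R);          // forward
    const cv::Point2f back = warper->warpPointBackward(uv, K, R);  // backward
    std::cout << "round-trip error: "
              << std::hypot(back.x - pt.x, back.y - pt.y) << std::endl;
    return 0;
}
```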
---
 .../opencv2/stitching/detail/warpers.hpp      |  31 +++++
 .../opencv2/stitching/detail/warpers_inl.hpp  |   8 ++
 .../include/opencv2/stitching/warpers.hpp     |  16 +++
 modules/stitching/src/warpers.cpp             |  28 ++++
 modules/stitching/test/test_reprojection.cpp  | 131 ++++++++++++++++++
 5 files changed, 214 insertions(+)
 create mode 100644 modules/stitching/test/test_reprojection.cpp

diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp
index bc2c6e3546..ff005e8da2 100644
--- a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp
+++ b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp
@@ -70,6 +70,23 @@ public:
      */
     virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0;
 
+    /** @brief Projects the image point backward.
+
+    @param pt Projected point
+    @param K Camera intrinsic parameters
+    @param R Camera rotation matrix
+    @return Backward-projected point
+    */
+#if CV_VERSION_MAJOR == 4
+    virtual Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
+    {
+        CV_UNUSED(pt); CV_UNUSED(K); CV_UNUSED(R);
+        CV_Error(Error::StsNotImplemented, "");
+    }
+#else
+    virtual Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) = 0;
+#endif
+
     /** @brief Builds the projection maps according to the given camera data.
 
     @param src_size Source image size
@@ -143,6 +160,8 @@ class CV_EXPORTS_TEMPLATE RotationWarperBase : public RotationWarper
 public:
     Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
 
+    Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
+
     Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
 
     Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
@@ -189,6 +208,9 @@ public:
     Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
     Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);
 
+    Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) CV_OVERRIDE;
+    Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T);
+
     virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap);
     Rect buildMaps(Size src_size, InputArray K, InputArray R, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap) CV_OVERRIDE;
 
@@ -228,6 +250,15 @@ public:
      */
     Point2f warpPoint(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE;
 
+    /** @brief Projects the image point backward.
+
+    @param pt Projected point
+    @param K Camera intrinsic parameters
+    @param H Camera extrinsic parameters
+    @return Backward-projected point
+    */
+    Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE;
+
     /** @brief Builds the projection maps according to the given camera data.
 
     @param src_size Source image size
diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
index f4a19d9c24..5e2375621e 100644
--- a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
+++ b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
@@ -61,6 +61,14 @@ Point2f RotationWarperBase<P>::warpPoint(const Point2f &pt, InputArray K, InputA
     return uv;
 }
 
+template <class P>
+Point2f RotationWarperBase<P>::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
+{
+    projector_.setCameraParams(K, R);
+    Point2f xy;
+    projector_.mapBackward(pt.x, pt.y, xy.x, xy.y);
+    return xy;
+}
 
 template <class P>
 Rect RotationWarperBase<P>::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray _xmap, OutputArray _ymap)
diff --git a/modules/stitching/include/opencv2/stitching/warpers.hpp b/modules/stitching/include/opencv2/stitching/warpers.hpp
index ff43386107..aa1ce5a6a7 100644
--- a/modules/stitching/include/opencv2/stitching/warpers.hpp
+++ b/modules/stitching/include/opencv2/stitching/warpers.hpp
@@ -65,6 +65,22 @@ namespace cv {
         */
         CV_WRAP Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R);
 
+        /** @brief Projects the image point backward.
+
+        @param pt Projected point
+        @param K Camera intrinsic parameters
+        @param R Camera rotation matrix
+        @return Backward-projected point
+        */
+#if CV_VERSION_MAJOR == 4
+        CV_WRAP Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
+        {
+            CV_UNUSED(pt); CV_UNUSED(K); CV_UNUSED(R);
+            CV_Error(Error::StsNotImplemented, "");
+        }
+#else
+        CV_WRAP Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray R);
+#endif
         /** @brief Builds the projection maps according to the given camera data.
 
         @param src_size Source image size
diff --git a/modules/stitching/src/warpers.cpp b/modules/stitching/src/warpers.cpp
index 4360590c94..85ac939074 100644
--- a/modules/stitching/src/warpers.cpp
+++ b/modules/stitching/src/warpers.cpp
@@ -92,6 +92,14 @@ Point2f PyRotationWarper::warpPoint(const Point2f &pt, InputArray K, InputArray
 {
     return rw.get()->warpPoint(pt, K, R);
 }
+
+#if CV_VERSION_MAJOR != 4
+Point2f PyRotationWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
+{
+    return rw.get()->warpPointBackward(pt, K, R);
+}
+#endif
+
 Rect PyRotationWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
 {
     return rw.get()->buildMaps(src_size, K, R, xmap, ymap);
@@ -164,6 +172,20 @@ Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R)
     Mat_<float> T(3, 1, tz);
     return warpPoint(pt, K, R, T);
 }
+Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T)
+{
+    projector_.setCameraParams(K, R, T);
+    Point2f xy;
+    projector_.mapBackward(pt.x, pt.y, xy.x, xy.y);
+    return xy;
+}
+
+Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
+{
+    float tz[] = { 0.f, 0.f, 0.f };
+    Mat_<float> T(3, 1, tz);
+    return warpPointBackward(pt, K, R, T);
+}
 
 Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
 {
@@ -299,6 +321,12 @@ Point2f AffineWarper::warpPoint(const Point2f &pt, InputArray K, InputArray H)
     return PlaneWarper::warpPoint(pt, K, R, T);
 }
 
+Point2f AffineWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray H)
+{
+    Mat R, T;
+    getRTfromHomogeneous(H, R, T);
+    return PlaneWarper::warpPointBackward(pt, K, R, T);
+}
 
 Rect AffineWarper::buildMaps(Size src_size, InputArray K, InputArray H, OutputArray xmap, OutputArray ymap)
 {
diff --git a/modules/stitching/test/test_reprojection.cpp b/modules/stitching/test/test_reprojection.cpp
new file mode 100644
index 0000000000..076bbb769d
--- /dev/null
+++ b/modules/stitching/test/test_reprojection.cpp
@@ -0,0 +1,131 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "test_precomp.hpp"
+#include "opencv2/stitching/warpers.hpp"
+
+namespace opencv_test { namespace {
+class ReprojectionTest : public ::testing::Test {
+
+protected:
+    const size_t TEST_COUNT = 15;
+    Mat K, R;
+    RNG rng = RNG(0);
+    ReprojectionTest()
+    {
+        K = Mat::eye(3, 3, CV_32FC1);
+        float angle = (float)(30.0 * CV_PI / 180.0);
+        float rotationMatrix[9] = {
+                (float)cos(angle), (float)sin(angle), 0,
+                (float)-sin(angle), (float)cos(angle), 0,
+                0, 0, 1
+        };
+        Mat(3, 3, CV_32FC1, rotationMatrix).copyTo(R);
+    }
+    void TestReprojection(Ptr<detail::RotationWarper> warper, Point2f pt) {
+        Point2f projected_pt = warper->warpPoint(pt, K, R);
+        Point2f reprojected_pt = warper->warpPointBackward(projected_pt, K, R);
+        EXPECT_NEAR(pt.x, reprojected_pt.x, 1e-5f);
+        EXPECT_NEAR(pt.y, reprojected_pt.y, 1e-5f);
+    }
+};
+
+
+TEST_F(ReprojectionTest, PlaneWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<PlaneWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, AffineWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<AffineWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, CylindricalWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<CylindricalWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, SphericalWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<SphericalWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, FisheyeWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<FisheyeWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, StereographicWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<StereographicWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, CompressedRectilinearWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<CompressedRectilinearWarper>(1.5f, 1.0f);
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, CompressedRectilinearPortraitWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, PaniniWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<PaniniWarper>(1.5f, 1.0f);
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, PaniniPortraitWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<PaniniPortraitWarper>(1.5f, 1.0f);
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, MercatorWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<MercatorWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+TEST_F(ReprojectionTest, TransverseMercatorWarper)
+{
+    Ptr<WarperCreator> creator = makePtr<TransverseMercatorWarper>();
+    for (size_t i = 0; i < TEST_COUNT; ++i) {
+        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
+    }
+}
+
+}} // namespace

From 22ee5c0c4db9111780c6d34eb2a3c85e0f4046ff Mon Sep 17 00:00:00 2001
From: Rob Timpe <rob@xwing.com>
Date: Wed, 21 Oct 2020 15:51:46 -0700
Subject: [PATCH 044/152] Fix errors when building with cuda stubs

Fixes two errors when building with the options WITH_CUDA=ON and BUILD_CUDA_STUBS=ON on a machine without CUDA.

In the cudaarithm module, make sure cuda_runtime.h only gets included when CUDA is installed.

In the stitching module, don't assume that CUDA is present just because the cudaarithm and cudawarping modules are present (as is the case when building with the above options).
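
The guard pattern this amounts to (sketch): the presence of the cudaarithm
and cudawarping modules is not enough; HAVE_CUDA must also be defined before
including CUDA-dependent headers or calling into the CUDA runtime:

```
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    // GPU path: CUDA-dependent headers and cuda:: calls are safe here
#else
    // CPU path / stubs: taken when OpenCV is built without a CUDA toolkit
#endif
```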
---
 modules/cudaarithm/src/lut.cpp     |  5 +++--
 modules/stitching/src/blenders.cpp | 16 ++++++++--------
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/modules/cudaarithm/src/lut.cpp b/modules/cudaarithm/src/lut.cpp
index a4b4e02650..5ef2836017 100644
--- a/modules/cudaarithm/src/lut.cpp
+++ b/modules/cudaarithm/src/lut.cpp
@@ -4,8 +4,6 @@
 
 #include "precomp.hpp"
 
-#include "lut.hpp"
-
 using namespace cv;
 using namespace cv::cuda;
 
@@ -15,6 +13,9 @@ Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray) { throw_no_cuda(); retu
 
 #else /* !defined (HAVE_CUDA) || defined (CUDA_DISABLER) */
 
+// lut.hpp includes cuda_runtime.h and can only be included when we have CUDA
+#include "lut.hpp"
+
 Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray lut)
 {
     return makePtr<LookUpTableImpl>(lut);
diff --git a/modules/stitching/src/blenders.cpp b/modules/stitching/src/blenders.cpp
index aeddc142dc..05e7ca85e4 100644
--- a/modules/stitching/src/blenders.cpp
+++ b/modules/stitching/src/blenders.cpp
@@ -219,7 +219,7 @@ MultiBandBlender::MultiBandBlender(int try_gpu, int num_bands, int weight_type)
     num_bands_ = 0;
     setNumBands(num_bands);
 
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     can_use_gpu_ = try_gpu && cuda::getCudaEnabledDeviceCount();
     gpu_feed_idx_ = 0;
 #else
@@ -246,7 +246,7 @@ void MultiBandBlender::prepare(Rect dst_roi)
 
     Blender::prepare(dst_roi);
 
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     if (can_use_gpu_)
     {
         gpu_initialized_ = false;
@@ -332,7 +332,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
 
     UMat img;
 
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     // If using gpu save the top left coordinate when running first time after prepare
     if (can_use_gpu_)
     {
@@ -353,7 +353,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
     {
         img = _img.getUMat();
     }
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     else
     {
         gpu_img_ = _img.getGpuMat();
@@ -394,7 +394,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
     int bottom = br_new.y - tl.y - img.rows;
     int right = br_new.x - tl.x - img.cols;
 
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     if (can_use_gpu_)
     {
         if (!gpu_initialized_)
@@ -603,7 +603,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
 void MultiBandBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
 {
     Rect dst_rc(0, 0, dst_roi_final_.width, dst_roi_final_.height);
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     if (can_use_gpu_)
     {
         if (!gpu_initialized_)
@@ -850,7 +850,7 @@ void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
 
 void createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat> &pyr)
 {
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     pyr.resize(num_levels + 1);
 
     std::vector<cuda::GpuMat> gpu_pyr(num_levels + 1);
@@ -891,7 +891,7 @@ void restoreImageFromLaplacePyr(std::vector<UMat> &pyr)
 
 void restoreImageFromLaplacePyrGpu(std::vector<UMat> &pyr)
 {
-#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
     if (pyr.empty())
         return;
 

From aac7c5465ba6ccfe0dc665ab0bae87f765e616ba Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Wed, 21 Oct 2020 22:47:56 +0000
Subject: [PATCH 045/152] core: move inline code from mat.inl.hpp

---
 modules/core/include/opencv2/core/mat.inl.hpp | 562 ------------------
 modules/core/src/matrix.cpp                   | 280 ++++++++-
 modules/core/src/matrix_sparse.cpp            |  88 +++
 modules/core/src/umatrix.cpp                  | 146 +++++
 4 files changed, 513 insertions(+), 563 deletions(-)

diff --git a/modules/core/include/opencv2/core/mat.inl.hpp b/modules/core/include/opencv2/core/mat.inl.hpp
index 9b7df87d8b..b6ffd81795 100644
--- a/modules/core/include/opencv2/core/mat.inl.hpp
+++ b/modules/core/include/opencv2/core/mat.inl.hpp
@@ -489,158 +489,6 @@ CV__DEBUG_NS_END
 
 //////////////////////////////////////////// Mat //////////////////////////////////////////
 
-inline
-Mat::Mat()
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{}
-
-inline
-Mat::Mat(int _rows, int _cols, int _type)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_rows, _cols, _type);
-}
-
-inline
-Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_rows, _cols, _type);
-    *this = _s;
-}
-
-inline
-Mat::Mat(Size _sz, int _type)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create( _sz.height, _sz.width, _type );
-}
-
-inline
-Mat::Mat(Size _sz, int _type, const Scalar& _s)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_sz.height, _sz.width, _type);
-    *this = _s;
-}
-
-inline
-Mat::Mat(int _dims, const int* _sz, int _type)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_dims, _sz, _type);
-}
-
-inline
-Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_dims, _sz, _type);
-    *this = _s;
-}
-
-inline
-Mat::Mat(const std::vector<int>& _sz, int _type)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_sz, _type);
-}
-
-inline
-Mat::Mat(const std::vector<int>& _sz, int _type, const Scalar& _s)
-    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
-      datalimit(0), allocator(0), u(0), size(&rows), step(0)
-{
-    create(_sz, _type);
-    *this = _s;
-}
-
-inline
-Mat::Mat(const Mat& m)
-    : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
-      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
-      u(m.u), size(&rows), step(0)
-{
-    if( u )
-        CV_XADD(&u->refcount, 1);
-    if( m.dims <= 2 )
-    {
-        step[0] = m.step[0]; step[1] = m.step[1];
-    }
-    else
-    {
-        dims = 0;
-        copySize(m);
-    }
-}
-
-inline
-Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step)
-    : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols),
-      data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),
-      allocator(0), u(0), size(&rows)
-{
-    CV_Assert(total() == 0 || data != NULL);
-
-    size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
-    size_t minstep = cols * esz;
-    if( _step == AUTO_STEP )
-    {
-        _step = minstep;
-    }
-    else
-    {
-        CV_DbgAssert( _step >= minstep );
-        if (_step % esz1 != 0)
-        {
-            CV_Error(Error::BadStep, "Step must be a multiple of esz1");
-        }
-    }
-    step[0] = _step;
-    step[1] = esz;
-    datalimit = datastart + _step * rows;
-    dataend = datalimit - _step + minstep;
-    updateContinuityFlag();
-}
-
-inline
-Mat::Mat(Size _sz, int _type, void* _data, size_t _step)
-    : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width),
-      data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),
-      allocator(0), u(0), size(&rows)
-{
-    CV_Assert(total() == 0 || data != NULL);
-
-    size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
-    size_t minstep = cols*esz;
-    if( _step == AUTO_STEP )
-    {
-        _step = minstep;
-    }
-    else
-    {
-        CV_DbgAssert( _step >= minstep );
-
-        if (_step % esz1 != 0)
-        {
-            CV_Error(Error::BadStep, "Step must be a multiple of esz1");
-        }
-    }
-    step[0] = _step;
-    step[1] = esz;
-    datalimit = datastart + _step*rows;
-    dataend = datalimit - _step + minstep;
-    updateContinuityFlag();
-}
-
 template<typename _Tp> inline
 Mat::Mat(const std::vector<_Tp>& vec, bool copyData)
     : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),
@@ -778,43 +626,6 @@ Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer)
     *this = commaInitializer.operator Mat_<_Tp>();
 }
 
-inline
-Mat::~Mat()
-{
-    release();
-    if( step.p != step.buf )
-        fastFree(step.p);
-}
-
-inline
-Mat& Mat::operator = (const Mat& m)
-{
-    if( this != &m )
-    {
-        if( m.u )
-            CV_XADD(&m.u->refcount, 1);
-        release();
-        flags = m.flags;
-        if( dims <= 2 && m.dims <= 2 )
-        {
-            dims = m.dims;
-            rows = m.rows;
-            cols = m.cols;
-            step[0] = m.step[0];
-            step[1] = m.step[1];
-        }
-        else
-            copySize(m);
-        data = m.data;
-        datastart = m.datastart;
-        dataend = m.dataend;
-        datalimit = m.datalimit;
-        allocator = m.allocator;
-        u = m.u;
-    }
-    return *this;
-}
-
 inline
 Mat Mat::row(int y) const
 {
@@ -851,67 +662,6 @@ Mat Mat::colRange(const Range& r) const
     return Mat(*this, Range::all(), r);
 }
 
-inline
-Mat Mat::clone() const
-{
-    Mat m;
-    copyTo(m);
-    return m;
-}
-
-inline
-void Mat::assignTo( Mat& m, int _type ) const
-{
-    if( _type < 0 )
-        m = *this;
-    else
-        convertTo(m, _type);
-}
-
-inline
-void Mat::create(int _rows, int _cols, int _type)
-{
-    _type &= TYPE_MASK;
-    if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data )
-        return;
-    int sz[] = {_rows, _cols};
-    create(2, sz, _type);
-}
-
-inline
-void Mat::create(Size _sz, int _type)
-{
-    create(_sz.height, _sz.width, _type);
-}
-
-inline
-void Mat::addref()
-{
-    if( u )
-        CV_XADD(&u->refcount, 1);
-}
-
-inline
-void Mat::release()
-{
-    if( u && CV_XADD(&u->refcount, -1) == 1 )
-        deallocate();
-    u = NULL;
-    datastart = dataend = datalimit = data = 0;
-    for(int i = 0; i < dims; i++)
-        size.p[i] = 0;
-#ifdef _DEBUG
-    flags = MAGIC_VAL;
-    dims = rows = cols = 0;
-    if(step.p != step.buf)
-    {
-        fastFree(step.p);
-        step.p = step.buf;
-        size.p = &rows;
-    }
-#endif
-}
-
 inline
 Mat Mat::operator()( Range _rowRange, Range _colRange ) const
 {
@@ -980,40 +730,6 @@ int Mat::channels() const
     return CV_MAT_CN(flags);
 }
 
-inline
-size_t Mat::step1(int i) const
-{
-    return step.p[i] / elemSize1();
-}
-
-inline
-bool Mat::empty() const
-{
-    return data == 0 || total() == 0 || dims == 0;
-}
-
-inline
-size_t Mat::total() const
-{
-    if( dims <= 2 )
-        return (size_t)rows * cols;
-    size_t p = 1;
-    for( int i = 0; i < dims; i++ )
-        p *= size[i];
-    return p;
-}
-
-inline
-size_t Mat::total(int startDim, int endDim) const
-{
-    CV_Assert( 0 <= startDim && startDim <= endDim);
-    size_t p = 1;
-    int endDim_ = endDim <= dims ? endDim : dims;
-    for( int i = startDim; i < endDim_; i++ )
-        p *= size[i];
-    return p;
-}
-
 inline
 uchar* Mat::ptr(int y)
 {
@@ -1544,22 +1260,6 @@ MatSize::operator const int*() const
     return p;
 }
 
-inline
-bool MatSize::operator == (const MatSize& sz) const
-{
-    int d = dims();
-    int dsz = sz.dims();
-    if( d != dsz )
-        return false;
-    if( d == 2 )
-        return p[0] == sz.p[0] && p[1] == sz.p[1];
-
-    for( int i = 0; i < d; i++ )
-        if( p[i] != sz.p[i] )
-            return false;
-    return true;
-}
-
 inline
 bool MatSize::operator != (const MatSize& sz) const
 {
@@ -1820,9 +1520,7 @@ template<typename _Tp> inline
 void Mat_<_Tp>::release()
 {
     Mat::release();
-#ifdef _DEBUG
     flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value;
-#endif
 }
 
 template<typename _Tp> inline
@@ -2182,51 +1880,6 @@ Mat_<_Tp>::Mat_(MatExpr&& e)
 
 ///////////////////////////// SparseMat /////////////////////////////
 
-inline
-SparseMat::SparseMat()
-    : flags(MAGIC_VAL), hdr(0)
-{}
-
-inline
-SparseMat::SparseMat(int _dims, const int* _sizes, int _type)
-    : flags(MAGIC_VAL), hdr(0)
-{
-    create(_dims, _sizes, _type);
-}
-
-inline
-SparseMat::SparseMat(const SparseMat& m)
-    : flags(m.flags), hdr(m.hdr)
-{
-    addref();
-}
-
-inline
-SparseMat::~SparseMat()
-{
-    release();
-}
-
-inline
-SparseMat& SparseMat::operator = (const SparseMat& m)
-{
-    if( this != &m )
-    {
-        if( m.hdr )
-            CV_XADD(&m.hdr->refcount, 1);
-        release();
-        flags = m.flags;
-        hdr = m.hdr;
-    }
-    return *this;
-}
-
-inline
-SparseMat& SparseMat::operator = (const Mat& m)
-{
-    return (*this = SparseMat(m));
-}
-
 inline
 SparseMat SparseMat::clone() const
 {
@@ -2235,30 +1888,6 @@ SparseMat SparseMat::clone() const
     return temp;
 }
 
-inline
-void SparseMat::assignTo( SparseMat& m, int _type ) const
-{
-    if( _type < 0 )
-        m = *this;
-    else
-        convertTo(m, _type);
-}
-
-inline
-void SparseMat::addref()
-{
-    if( hdr )
-        CV_XADD(&hdr->refcount, 1);
-}
-
-inline
-void SparseMat::release()
-{
-    if( hdr && CV_XADD(&hdr->refcount, -1) == 1 )
-        delete hdr;
-    hdr = 0;
-}
-
 inline
 size_t SparseMat::elemSize() const
 {
@@ -2318,36 +1947,6 @@ size_t SparseMat::nzcount() const
     return hdr ? hdr->nodeCount : 0;
 }
 
-inline
-size_t SparseMat::hash(int i0) const
-{
-    return (size_t)i0;
-}
-
-inline
-size_t SparseMat::hash(int i0, int i1) const
-{
-    return (size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1;
-}
-
-inline
-size_t SparseMat::hash(int i0, int i1, int i2) const
-{
-    return ((size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1) * HASH_SCALE + (unsigned)i2;
-}
-
-inline
-size_t SparseMat::hash(const int* idx) const
-{
-    size_t h = (unsigned)idx[0];
-    if( !hdr )
-        return 0;
-    int d = hdr->dims;
-    for(int i = 1; i < d; i++ )
-        h = h * HASH_SCALE + (unsigned)idx[i];
-    return h;
-}
-
 template<typename _Tp> inline
 _Tp& SparseMat::ref(int i0, size_t* hashval)
 {
@@ -3667,74 +3266,6 @@ const Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b)
 
 //////////////////////////////// UMat ////////////////////////////////
 
-inline
-UMat::UMat(UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{}
-
-inline
-UMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{
-    create(_rows, _cols, _type);
-}
-
-inline
-UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{
-    create(_rows, _cols, _type);
-    *this = _s;
-}
-
-inline
-UMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{
-    create( _sz.height, _sz.width, _type );
-}
-
-inline
-UMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{
-    create(_sz.height, _sz.width, _type);
-    *this = _s;
-}
-
-inline
-UMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{
-    create(_dims, _sz, _type);
-}
-
-inline
-UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
-: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
-{
-    create(_dims, _sz, _type);
-    *this = _s;
-}
-
-inline
-UMat::UMat(const UMat& m)
-: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
-  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
-{
-    addref();
-    if( m.dims <= 2 )
-    {
-        step[0] = m.step[0]; step[1] = m.step[1];
-    }
-    else
-    {
-        dims = 0;
-        copySize(m);
-    }
-}
-
-
 template<typename _Tp> inline
 UMat::UMat(const std::vector<_Tp>& vec, bool copyData)
 : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),
@@ -3751,33 +3282,6 @@ cols(1), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows)
         Mat((int)vec.size(), 1, traits::Type<_Tp>::value, (uchar*)&vec[0]).copyTo(*this);
 }
 
-inline
-UMat& UMat::operator = (const UMat& m)
-{
-    if( this != &m )
-    {
-        const_cast<UMat&>(m).addref();
-        release();
-        flags = m.flags;
-        if( dims <= 2 && m.dims <= 2 )
-        {
-            dims = m.dims;
-            rows = m.rows;
-            cols = m.cols;
-            step[0] = m.step[0];
-            step[1] = m.step[1];
-        }
-        else
-            copySize(m);
-        allocator = m.allocator;
-        if (usageFlags == USAGE_DEFAULT)
-            usageFlags = m.usageFlags;
-        u = m.u;
-        offset = m.offset;
-    }
-    return *this;
-}
-
 inline
 UMat UMat::row(int y) const
 {
@@ -3814,55 +3318,6 @@ UMat UMat::colRange(const Range& r) const
     return UMat(*this, Range::all(), r);
 }
 
-inline
-UMat UMat::clone() const
-{
-    UMat m;
-    copyTo(m);
-    return m;
-}
-
-inline
-void UMat::assignTo( UMat& m, int _type ) const
-{
-    if( _type < 0 )
-        m = *this;
-    else
-        convertTo(m, _type);
-}
-
-inline
-void UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
-{
-    _type &= TYPE_MASK;
-    if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u )
-        return;
-    int sz[] = {_rows, _cols};
-    create(2, sz, _type, _usageFlags);
-}
-
-inline
-void UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags)
-{
-    create(_sz.height, _sz.width, _type, _usageFlags);
-}
-
-inline
-void UMat::addref()
-{
-    if( u )
-        CV_XADD(&(u->urefcount), 1);
-}
-
-inline void UMat::release()
-{
-    if( u && CV_XADD(&(u->urefcount), -1) == 1 )
-        deallocate();
-    for(int i = 0; i < dims; i++)
-        size.p[i] = 0;
-    u = 0;
-}
-
 inline
 UMat UMat::operator()( Range _rowRange, Range _colRange ) const
 {
@@ -3937,23 +3392,6 @@ size_t UMat::step1(int i) const
     return step.p[i] / elemSize1();
 }
 
-inline
-bool UMat::empty() const
-{
-    return u == 0 || total() == 0 || dims == 0;
-}
-
-inline
-size_t UMat::total() const
-{
-    if( dims <= 2 )
-        return (size_t)rows * cols;
-    size_t p = 1;
-    for( int i = 0; i < dims; i++ )
-        p *= size[i];
-    return p;
-}
-
 #ifdef CV_CXX_MOVE_SEMANTICS
 
 inline
diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp
index fc9e4c69b2..178e291d3f 100644
--- a/modules/core/src/matrix.cpp
+++ b/modules/core/src/matrix.cpp
@@ -204,6 +204,21 @@ MatAllocator* Mat::getStdAllocator()
 
 //==================================================================================================
 
+bool MatSize::operator==(const MatSize& sz) const
+{
+    int d = dims();
+    int dsz = sz.dims();
+    if( d != dsz )
+        return false;
+    if( d == 2 )
+        return p[0] == sz.p[0] && p[1] == sz.p[1];
+
+    for( int i = 0; i < d; i++ )
+        if( p[i] != sz.p[i] )
+            return false;
+    return true;
+}
+
 void setSize( Mat& m, int _dims, const int* _sz, const size_t* _steps, bool autoSteps)
 {
     CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
@@ -320,7 +335,270 @@ void finalizeHdr(Mat& m)
         m.dataend = m.datalimit = 0;
 }
 
-//==================================================================================================
+//======================================= Mat ======================================================
+
+Mat::Mat()
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{}
+
+Mat::Mat(int _rows, int _cols, int _type)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_rows, _cols, _type);
+}
+
+Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_rows, _cols, _type);
+    *this = _s;
+}
+
+Mat::Mat(Size _sz, int _type)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create( _sz.height, _sz.width, _type );
+}
+
+Mat::Mat(Size _sz, int _type, const Scalar& _s)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_sz.height, _sz.width, _type);
+    *this = _s;
+}
+
+Mat::Mat(int _dims, const int* _sz, int _type)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_dims, _sz, _type);
+}
+
+Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_dims, _sz, _type);
+    *this = _s;
+}
+
+Mat::Mat(const std::vector<int>& _sz, int _type)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_sz, _type);
+}
+
+Mat::Mat(const std::vector<int>& _sz, int _type, const Scalar& _s)
+    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
+      datalimit(0), allocator(0), u(0), size(&rows), step(0)
+{
+    create(_sz, _type);
+    *this = _s;
+}
+
+Mat::Mat(const Mat& m)
+    : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
+      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
+      u(m.u), size(&rows), step(0)
+{
+    if( u )
+        CV_XADD(&u->refcount, 1);
+    if( m.dims <= 2 )
+    {
+        step[0] = m.step[0]; step[1] = m.step[1];
+    }
+    else
+    {
+        dims = 0;
+        copySize(m);
+    }
+}
+
+Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step)
+    : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols),
+      data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),
+      allocator(0), u(0), size(&rows)
+{
+    CV_Assert(total() == 0 || data != NULL);
+
+    size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
+    size_t minstep = cols * esz;
+    if( _step == AUTO_STEP )
+    {
+        _step = minstep;
+    }
+    else
+    {
+        CV_Assert( _step >= minstep );
+        if (_step % esz1 != 0)
+        {
+            CV_Error(Error::BadStep, "Step must be a multiple of esz1");
+        }
+    }
+    step[0] = _step;
+    step[1] = esz;
+    datalimit = datastart + _step * rows;
+    dataend = datalimit - _step + minstep;
+    updateContinuityFlag();
+}
+
+Mat::Mat(Size _sz, int _type, void* _data, size_t _step)
+    : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width),
+      data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),
+      allocator(0), u(0), size(&rows)
+{
+    CV_Assert(total() == 0 || data != NULL);
+
+    size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
+    size_t minstep = cols*esz;
+    if( _step == AUTO_STEP )
+    {
+        _step = minstep;
+    }
+    else
+    {
+        CV_Assert(_step >= minstep);
+
+        if (_step % esz1 != 0)
+        {
+            CV_Error(Error::BadStep, "Step must be a multiple of esz1");
+        }
+    }
+    step[0] = _step;
+    step[1] = esz;
+    datalimit = datastart + _step*rows;
+    dataend = datalimit - _step + minstep;
+    updateContinuityFlag();
+}
+
+
+Mat::~Mat()
+{
+    release();
+    if( step.p != step.buf )
+        fastFree(step.p);
+}
+
+Mat& Mat::operator=(const Mat& m)
+{
+    if( this != &m )
+    {
+        if( m.u )
+            CV_XADD(&m.u->refcount, 1);
+        release();
+        flags = m.flags;
+        if( dims <= 2 && m.dims <= 2 )
+        {
+            dims = m.dims;
+            rows = m.rows;
+            cols = m.cols;
+            step[0] = m.step[0];
+            step[1] = m.step[1];
+        }
+        else
+            copySize(m);
+        data = m.data;
+        datastart = m.datastart;
+        dataend = m.dataend;
+        datalimit = m.datalimit;
+        allocator = m.allocator;
+        u = m.u;
+    }
+    return *this;
+}
+
+Mat Mat::clone() const
+{
+    Mat m;
+    copyTo(m);
+    return m;
+}
+
+void Mat::assignTo( Mat& m, int _type ) const
+{
+    if( _type < 0 )
+        m = *this;
+    else
+        convertTo(m, _type);
+}
+
+void Mat::create(int _rows, int _cols, int _type)
+{
+    _type &= TYPE_MASK;
+    if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data )
+        return;
+    int sz[] = {_rows, _cols};
+    create(2, sz, _type);
+}
+
+void Mat::create(Size _sz, int _type)
+{
+    create(_sz.height, _sz.width, _type);
+}
+
+void Mat::addref()
+{
+    if( u )
+        CV_XADD(&u->refcount, 1);
+}
+
+void Mat::release()
+{
+    if( u && CV_XADD(&u->refcount, -1) == 1 )
+        deallocate();
+    u = NULL;
+    datastart = dataend = datalimit = data = 0;
+    for(int i = 0; i < dims; i++)
+        size.p[i] = 0;
+#ifdef _DEBUG
+    flags = MAGIC_VAL;
+    dims = rows = cols = 0;
+    if(step.p != step.buf)
+    {
+        fastFree(step.p);
+        step.p = step.buf;
+        size.p = &rows;
+    }
+#endif
+}
+
+size_t Mat::step1(int i) const
+{
+    return step.p[i] / elemSize1();
+}
+
+bool Mat::empty() const
+{
+    return data == 0 || total() == 0 || dims == 0;
+}
+
+size_t Mat::total() const
+{
+    if( dims <= 2 )
+        return (size_t)rows * cols;
+    size_t p = 1;
+    for( int i = 0; i < dims; i++ )
+        p *= size[i];
+    return p;
+}
+
+size_t Mat::total(int startDim, int endDim) const
+{
+    CV_Assert( 0 <= startDim && startDim <= endDim);
+    size_t p = 1;
+    int endDim_ = endDim <= dims ? endDim : dims;
+    for( int i = startDim; i < endDim_; i++ )
+        p *= size[i];
+    return p;
+}
+
+
 
 void Mat::create(int d, const int* _sizes, int _type)
 {
diff --git a/modules/core/src/matrix_sparse.cpp b/modules/core/src/matrix_sparse.cpp
index 61e7e90a56..05d16d706e 100644
--- a/modules/core/src/matrix_sparse.cpp
+++ b/modules/core/src/matrix_sparse.cpp
@@ -176,6 +176,94 @@ void SparseMat::Hdr::clear()
     nodeCount = freeList = 0;
 }
 
+///////////////////////////// SparseMat /////////////////////////////
+
+SparseMat::SparseMat()
+    : flags(MAGIC_VAL), hdr(0)
+{}
+
+SparseMat::SparseMat(int _dims, const int* _sizes, int _type)
+    : flags(MAGIC_VAL), hdr(0)
+{
+    create(_dims, _sizes, _type);
+}
+
+SparseMat::SparseMat(const SparseMat& m)
+    : flags(m.flags), hdr(m.hdr)
+{
+    addref();
+}
+
+SparseMat::~SparseMat()
+{
+    release();
+}
+
+SparseMat& SparseMat::operator = (const SparseMat& m)
+{
+    if( this != &m )
+    {
+        if( m.hdr )
+            CV_XADD(&m.hdr->refcount, 1);
+        release();
+        flags = m.flags;
+        hdr = m.hdr;
+    }
+    return *this;
+}
+
+SparseMat& SparseMat::operator=(const Mat& m)
+{
+    return (*this = SparseMat(m));
+}
+
+void SparseMat::assignTo(SparseMat& m, int _type) const
+{
+    if( _type < 0 )
+        m = *this;
+    else
+        convertTo(m, _type);
+}
+
+void SparseMat::addref()
+{
+    if( hdr )
+        CV_XADD(&hdr->refcount, 1);
+}
+
+void SparseMat::release()
+{
+    if( hdr && CV_XADD(&hdr->refcount, -1) == 1 )
+        delete hdr;
+    hdr = 0;
+}
+
+size_t SparseMat::hash(int i0) const
+{
+    return (size_t)i0;
+}
+
+size_t SparseMat::hash(int i0, int i1) const
+{
+    return (size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1;
+}
+
+size_t SparseMat::hash(int i0, int i1, int i2) const
+{
+    return ((size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1) * HASH_SCALE + (unsigned)i2;
+}
+
+size_t SparseMat::hash(const int* idx) const
+{
+    size_t h = (unsigned)idx[0];
+    if( !hdr )
+        return 0;
+    int d = hdr->dims;
+    for(int i = 1; i < d; i++ )
+        h = h * HASH_SCALE + (unsigned)idx[i];
+    return h;
+}
+
 
 SparseMat::SparseMat(const Mat& m)
 : flags(MAGIC_VAL), hdr(0)
diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp
index 9fe8122d22..f21cf7b7e2 100644
--- a/modules/core/src/umatrix.cpp
+++ b/modules/core/src/umatrix.cpp
@@ -228,6 +228,152 @@ UMatDataAutoLock::~UMatDataAutoLock()
     getUMatDataAutoLocker().release(u1, u2);
 }
 
+//////////////////////////////// UMat ////////////////////////////////
+
+UMat::UMat(UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{}
+
+UMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{
+    create(_rows, _cols, _type);
+}
+
+UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{
+    create(_rows, _cols, _type);
+    *this = _s;
+}
+
+UMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{
+    create( _sz.height, _sz.width, _type );
+}
+
+UMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{
+    create(_sz.height, _sz.width, _type);
+    *this = _s;
+}
+
+UMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{
+    create(_dims, _sz, _type);
+}
+
+UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
+: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
+{
+    create(_dims, _sz, _type);
+    *this = _s;
+}
+
+UMat::UMat(const UMat& m)
+: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
+  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
+{
+    addref();
+    if( m.dims <= 2 )
+    {
+        step[0] = m.step[0]; step[1] = m.step[1];
+    }
+    else
+    {
+        dims = 0;
+        copySize(m);
+    }
+}
+
+UMat& UMat::operator=(const UMat& m)
+{
+    if( this != &m )
+    {
+        const_cast<UMat&>(m).addref();
+        release();
+        flags = m.flags;
+        if( dims <= 2 && m.dims <= 2 )
+        {
+            dims = m.dims;
+            rows = m.rows;
+            cols = m.cols;
+            step[0] = m.step[0];
+            step[1] = m.step[1];
+        }
+        else
+            copySize(m);
+        allocator = m.allocator;
+        if (usageFlags == USAGE_DEFAULT)
+            usageFlags = m.usageFlags;
+        u = m.u;
+        offset = m.offset;
+    }
+    return *this;
+}
+
+UMat UMat::clone() const
+{
+    UMat m;
+    copyTo(m);
+    return m;
+}
+
+void UMat::assignTo(UMat& m, int _type) const
+{
+    if( _type < 0 )
+        m = *this;
+    else
+        convertTo(m, _type);
+}
+
+void UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
+{
+    _type &= TYPE_MASK;
+    if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u )
+        return;
+    int sz[] = {_rows, _cols};
+    create(2, sz, _type, _usageFlags);
+}
+
+void UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags)
+{
+    create(_sz.height, _sz.width, _type, _usageFlags);
+}
+
+void UMat::addref()
+{
+    if( u )
+        CV_XADD(&(u->urefcount), 1);
+}
+
+void UMat::release()
+{
+    if( u && CV_XADD(&(u->urefcount), -1) == 1 )
+        deallocate();
+    for(int i = 0; i < dims; i++)
+        size.p[i] = 0;
+    u = 0;
+}
+
+bool UMat::empty() const
+{
+    return u == 0 || total() == 0 || dims == 0;
+}
+
+size_t UMat::total() const
+{
+    if( dims <= 2 )
+        return (size_t)rows * cols;
+    size_t p = 1;
+    for( int i = 0; i < dims; i++ )
+        p *= size[i];
+    return p;
+}
+
 
 MatAllocator* UMat::getStdAllocator()
 {

From ea1e3fb90d060939ee4618824529cc0ae0bf018a Mon Sep 17 00:00:00 2001
From: Quentin Chateau <quentin.chateau@gmail.com>
Date: Thu, 22 Oct 2020 14:24:58 +0200
Subject: [PATCH 046/152] Merge pull request #18624 from
 qchateau:similarity-mask

* support similarity masks

* add test for similarity threshold

* short license in test

* use UMat in buildSimilarityMask

* fix win32 warnings

* fix test indentation

* fix umat/mat sync

* no in-place argument for erode/dilate
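
A hedged usage sketch of the new setter (class, method, and default value are
taken from this patch; the 0.1 threshold is illustrative):

```
#include <opencv2/stitching/detail/exposure_compensate.hpp>

cv::Ptr<cv::detail::GainCompensator> comp =
    cv::makePtr<cv::detail::GainCompensator>(/*nr_feeds=*/1);
comp->setSimilarityThreshold(0.1);  // default is 1 (no masking); lower values
                                    // restrict gain estimation to pixel pairs
                                    // the similarity mask considers alike
// comp->feed(corners, images, masks);  // unchanged ExposureCompensator flow
```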
---
 .../stitching/detail/exposure_compensate.hpp  | 21 +++-
 modules/stitching/src/exposure_compensate.cpp | 99 ++++++++++++++++++-
 .../test/test_exposure_compensate.cpp         | 70 +++++++++++++
 modules/stitching/test/test_precomp.hpp       |  1 +
 4 files changed, 187 insertions(+), 4 deletions(-)
 create mode 100644 modules/stitching/test/test_exposure_compensate.cpp

diff --git a/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp b/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp
index 2b76d0923d..074c9b6dfb 100644
--- a/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp
+++ b/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp
@@ -115,7 +115,7 @@ public:
     CV_WRAP GainCompensator()
             : GainCompensator(1) {}
     CV_WRAP GainCompensator(int nr_feeds)
-            : nr_feeds_(nr_feeds) {}
+            : nr_feeds_(nr_feeds), similarity_threshold_(1) {}
     void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
               const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
     void singleFeed(const std::vector<Point> &corners, const std::vector<UMat> &images,
@@ -125,11 +125,18 @@ public:
     CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE ;
     CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; }
     CV_WRAP int getNrFeeds() { return nr_feeds_; }
+    CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; }
+    CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; }
+    void prepareSimilarityMask(const std::vector<Point> &corners, const std::vector<UMat> &images);
     std::vector<double> gains() const;
 
 private:
+    UMat buildSimilarityMask(InputArray src_array1, InputArray src_array2);
+
     Mat_<double> gains_;
     int nr_feeds_;
+    double similarity_threshold_;
+    std::vector<UMat> similarities_;
 };
 
 /** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
@@ -138,7 +145,8 @@ intensities on each channel independently.
 class CV_EXPORTS_W ChannelsCompensator : public ExposureCompensator
 {
 public:
-    CV_WRAP ChannelsCompensator(int nr_feeds=1) : nr_feeds_(nr_feeds) {}
+    CV_WRAP ChannelsCompensator(int nr_feeds=1)
+        : nr_feeds_(nr_feeds), similarity_threshold_(1) {}
     void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
               const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
     CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
@@ -146,11 +154,14 @@ public:
     CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE;
     CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; }
     CV_WRAP int getNrFeeds() { return nr_feeds_; }
+    CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; }
+    CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; }
     std::vector<Scalar> gains() const { return gains_; }
 
 private:
     std::vector<Scalar> gains_;
     int nr_feeds_;
+    double similarity_threshold_;
 };
 
 /** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image blocks.
@@ -159,12 +170,15 @@ class CV_EXPORTS_W BlocksCompensator : public ExposureCompensator
 {
 public:
     BlocksCompensator(int bl_width=32, int bl_height=32, int nr_feeds=1)
-            : bl_width_(bl_width), bl_height_(bl_height), nr_feeds_(nr_feeds), nr_gain_filtering_iterations_(2) {}
+            : bl_width_(bl_width), bl_height_(bl_height), nr_feeds_(nr_feeds), nr_gain_filtering_iterations_(2),
+              similarity_threshold_(1) {}
     CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
     CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE;
     CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE;
     CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; }
     CV_WRAP int getNrFeeds() { return nr_feeds_; }
+    CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; }
+    CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; }
     CV_WRAP void setBlockSize(int width, int height) { bl_width_ = width; bl_height_ = height; }
     CV_WRAP void setBlockSize(Size size) { setBlockSize(size.width, size.height); }
     CV_WRAP Size getBlockSize() const { return Size(bl_width_, bl_height_); }
@@ -184,6 +198,7 @@ private:
     std::vector<UMat> gain_maps_;
     int nr_feeds_;
     int nr_gain_filtering_iterations_;
+    double similarity_threshold_;
 };
 
 /** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block
diff --git a/modules/stitching/src/exposure_compensate.cpp b/modules/stitching/src/exposure_compensate.cpp
index 7213349ccc..df2b8779bb 100644
--- a/modules/stitching/src/exposure_compensate.cpp
+++ b/modules/stitching/src/exposure_compensate.cpp
@@ -90,6 +90,7 @@ void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<
 
     const int num_images = static_cast<int>(images.size());
     Mat accumulated_gains;
+    prepareSimilarityMask(corners, images);
 
     for (int n = 0; n < nr_feeds_; ++n)
     {
@@ -133,6 +134,8 @@ void GainCompensator::singleFeed(const std::vector<Point> &corners, const std::v
     Mat subimg1, subimg2;
     Mat_<uchar> submask1, submask2, intersect;
 
+    std::vector<UMat>::iterator similarity_it = similarities_.begin();
+
     for (int i = 0; i < num_images; ++i)
     {
         for (int j = i; j < num_images; ++j)
@@ -147,6 +150,13 @@ void GainCompensator::singleFeed(const std::vector<Point> &corners, const std::v
                 submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
                 intersect = (submask1 == masks[i].second) & (submask2 == masks[j].second);
 
+                if (!similarities_.empty())
+                {
+                    CV_Assert(similarity_it != similarities_.end());
+                    UMat similarity = *similarity_it++;
+                    bitwise_and(intersect, similarity, intersect);
+                }
+
                 int intersect_count = countNonZero(intersect);
                 N(i, j) = N(j, i) = std::max(1, intersect_count);
 
@@ -298,6 +308,88 @@ void GainCompensator::setMatGains(std::vector<Mat>& umv)
     }
 }
 
+void GainCompensator::prepareSimilarityMask(
+    const std::vector<Point> &corners, const std::vector<UMat> &images)
+{
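+    // Precompute one similarity mask per overlapping image pair; skip when the
+    // threshold is >= 1 (disabled) or when the masks were already computed.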
+    if (similarity_threshold_ >= 1)
+    {
+        LOGLN("  skipping similarity mask: disabled");
+        return;
+    }
+    if (!similarities_.empty())
+    {
+        LOGLN("  skipping similarity mask: already set");
+        return;
+    }
+
+    LOGLN("  calculating similarity mask");
+    const int num_images = static_cast<int>(images.size());
+    for (int i = 0; i < num_images; ++i)
+    {
+        for (int j = i; j < num_images; ++j)
+        {
+            Rect roi;
+            if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
+            {
+                UMat subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i]));
+                UMat subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j]));
+                UMat similarity = buildSimilarityMask(subimg1, subimg2);
+                similarities_.push_back(similarity);
+            }
+        }
+    }
+}
+
+UMat GainCompensator::buildSimilarityMask(InputArray src_array1, InputArray src_array2)
+{
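+    // Mark pixels whose normalized difference between the two overlapping
+    // sub-images is within similarity_threshold_, then apply a 3x3
+    // morphological opening to drop isolated matches.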
+    CV_Assert(src_array1.rows() == src_array2.rows() && src_array1.cols() == src_array2.cols());
+    CV_Assert(src_array1.type() == src_array2.type());
+    CV_Assert(src_array1.type() == CV_8UC3 || src_array1.type() == CV_8UC1);
+
+    Mat src1 = src_array1.getMat();
+    Mat src2 = src_array2.getMat();
+
+    UMat umat_similarity(src1.rows, src1.cols, CV_8UC1);
+    Mat similarity = umat_similarity.getMat(ACCESS_WRITE);
+
+    if (src1.channels() == 3)
+    {
+        for (int y = 0; y < similarity.rows; ++y)
+        {
+            for (int x = 0; x < similarity.cols; ++x)
+            {
+                Vec<float, 3> vec_diff =
+                    Vec<float, 3>(*src1.ptr<Vec<uchar, 3>>(y, x))
+                    - Vec<float, 3>(*src2.ptr<Vec<uchar, 3>>(y, x));
+                double diff = norm(vec_diff * (1.f / 255.f));
+
+                *similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
+            }
+        }
+    }
+    else // if (src1.channels() == 1)
+    {
+        for (int y = 0; y < similarity.rows; ++y)
+        {
+            for (int x = 0; x < similarity.cols; ++x)
+            {
+                float diff = std::abs(static_cast<int>(*src1.ptr<uchar>(y, x))
+                    - static_cast<int>(*src2.ptr<uchar>(y, x))) / 255.f;
+
+                *similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
+            }
+        }
+    }
+    similarity.release();
+
+    Mat kernel = getStructuringElement(MORPH_RECT, Size(3,3));
+    UMat umat_erode;
+    erode(umat_similarity, umat_erode, kernel);
+    dilate(umat_erode, umat_similarity, kernel);
+
+    return umat_similarity;
+}
+
 void ChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
                                const std::vector<std::pair<UMat,uchar> > &masks)
 {
@@ -317,11 +409,15 @@ void ChannelsCompensator::feed(const std::vector<Point> &corners, const std::vec
     // For each channel, feed the channel of each image in a GainCompensator
     gains_.clear();
     gains_.resize(images.size());
+
+    GainCompensator compensator(getNrFeeds());
+    compensator.setSimilarityThreshold(getSimilarityThreshold());
+    compensator.prepareSimilarityMask(corners, images);
+
     for (int c = 0; c < 3; ++c)
     {
         const std::vector<UMat>& channels = images_channels[c];
 
-        GainCompensator compensator(getNrFeeds());
         compensator.feed(corners, channels, masks);
 
         std::vector<double> gains = compensator.gains();
@@ -400,6 +496,7 @@ void BlocksCompensator::feed(const std::vector<Point> &corners, const std::vecto
     {
         Compensator compensator;
         compensator.setNrFeeds(getNrFeeds());
+        compensator.setSimilarityThreshold(getSimilarityThreshold());
         compensator.feed(block_corners, block_images, block_masks);
 
         gain_maps_.clear();
diff --git a/modules/stitching/test/test_exposure_compensate.cpp b/modules/stitching/test/test_exposure_compensate.cpp
new file mode 100644
index 0000000000..3f34742095
--- /dev/null
+++ b/modules/stitching/test/test_exposure_compensate.cpp
@@ -0,0 +1,70 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "test_precomp.hpp"
+
+namespace opencv_test {
+namespace {
+
+double minPSNR(UMat src1, UMat src2)
+{
+    std::vector<UMat> src1_channels, src2_channels;
+    split(src1, src1_channels);
+    split(src2, src2_channels);
+
+    double psnr = cvtest::PSNR(src1_channels[0], src2_channels[0]);
+    psnr = std::min(psnr, cvtest::PSNR(src1_channels[1], src2_channels[1]));
+    return std::min(psnr, cvtest::PSNR(src1_channels[2], src2_channels[2]));
+}
+
+TEST(ExposureCompensate, SimilarityThreshold)
+{
+    UMat source;
+    imread(cvtest::TS::ptr()->get_data_path() + "stitching/s1.jpg").copyTo(source);
+
+    UMat image1 = source.clone();
+    UMat image2 = source.clone();
+
+    // Add a big artifact
+    image2(Rect(150, 150, 100, 100)).setTo(Scalar(0, 0, 255));
+
+    UMat mask(image1.size(), CV_8U);
+    mask.setTo(255);
+
+    detail::BlocksChannelsCompensator compensator;
+    compensator.setNrGainsFilteringIterations(0); // makes the artifact's effect easier to observe
+
+    // Feed the compensator: images 1 and 2 are perfectly
+    // identical, except for the red artifact in image 2.
+    // Apart from that artifact, there is no exposure to compensate.
+    compensator.setSimilarityThreshold(1);
+    uchar xff = 255;
+    compensator.feed(
+        {{}, {}},
+        {image1, image2},
+        {{mask, xff}, {mask, xff}}
+    );
+    // Verify that the artifact in image 2 did create
+    // an artifact in image 1 during the exposure compensation.
+    UMat image1_result = image1.clone();
+    compensator.apply(0, {}, image1_result, mask);
+    double psnr_no_similarity_mask = minPSNR(image1, image1_result);
+    EXPECT_LT(psnr_no_similarity_mask, 45);
+
+    // Add a similarity threshold and verify that
+    // the artifact in image 1 is gone.
+    compensator.setSimilarityThreshold(0.1);
+    compensator.feed(
+        {{}, {}},
+        {image1, image2},
+        {{mask, xff}, {mask, xff}}
+    );
+    image1_result = image1.clone();
+    compensator.apply(0, {}, image1_result, mask);
+    double psnr_similarity_mask = minPSNR(image1, image1_result);
+    EXPECT_GT(psnr_similarity_mask, 300);
+}
+
+} // namespace
+} // namespace opencv_test
diff --git a/modules/stitching/test/test_precomp.hpp b/modules/stitching/test/test_precomp.hpp
index f3ebc682c0..8e7709a7ec 100644
--- a/modules/stitching/test/test_precomp.hpp
+++ b/modules/stitching/test/test_precomp.hpp
@@ -8,6 +8,7 @@
 #include "opencv2/stitching.hpp"
 #include "opencv2/stitching/detail/matchers.hpp"
 #include "opencv2/stitching/detail/blenders.hpp"
+#include "opencv2/stitching/detail/exposure_compensate.hpp"
 
 #ifdef HAVE_OPENCV_XFEATURES2D
 #include "opencv2/xfeatures2d/nonfree.hpp"

From 61a8cf8ba7ba540904db432e35690fb72cc683b2 Mon Sep 17 00:00:00 2001
From: Justin Frank <justinpfrank@protonmail.com>
Date: Tue, 20 Oct 2020 17:31:34 -0700
Subject: [PATCH 047/152] Fix TypeError when building for WebAssembly with
 Python 3
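
In Python 3 the generated UMD wrapper is a str, while the output file is
written in binary mode; the write therefore raises TypeError until the
string is encoded to bytes.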

---
 modules/js/src/make_umd.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/js/src/make_umd.py b/modules/js/src/make_umd.py
index 8e50da585d..08d9e39e13 100644
--- a/modules/js/src/make_umd.py
+++ b/modules/js/src/make_umd.py
@@ -103,7 +103,7 @@ def make_umd(opencvjs, cvjs):
     Module = {};
   return cv(Module);
 }));
-    """ % (content)).lstrip())
+    """ % (content)).lstrip().encode())
 
 if __name__ == "__main__":
     if len(sys.argv) > 2:

From 72dfd4846e184c480f27f137f3b078b538d1b017 Mon Sep 17 00:00:00 2001
From: Giles Payne <gilespayne@telepathix.net>
Date: Fri, 23 Oct 2020 20:19:36 +0900
Subject: [PATCH 048/152] Merge pull request #18637 from
 komakai:build-for-distribution

Add support for Swift version independence

* Build for distribution (Swift version independence) when new Xcode build system is available

* Add module map and set "Defines Modules" flag
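
Building with BUILD_LIBRARY_FOR_DISTRIBUTION emits a stable module
interface, so a framework produced by one Swift compiler can be imported by
later ones; the module map and the "Defines Modules" flag make the
Objective-C umbrella header importable as a module.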
---
 modules/objc/generator/gen_objc.py            | 12 +++-
 .../generator/templates/cmakelists.template   | 56 +++++++++++++------
 2 files changed, 49 insertions(+), 19 deletions(-)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index c7eabdfb0d..87e42e821d 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -1347,7 +1347,17 @@ typedef NS_ENUM(int, {2}) {{
 
     def finalize(self, output_objc_path):
         opencv_header_file = os.path.join(output_objc_path, framework_name + ".h")
-        self.save(opencv_header_file, '\n'.join(['#import "%s"' % os.path.basename(f) for f in self.header_files]))
+        opencv_header = "#import <Foundation/Foundation.h>\n\n"
+        opencv_header += "// ! Project version number\nFOUNDATION_EXPORT double " + framework_name + "VersionNumber;\n\n"
+        opencv_header += "// ! Project version string\nFOUNDATION_EXPORT const unsigned char " + framework_name + "VersionString[];\n\n"
+        opencv_header += "\n".join(["#import <" + framework_name + "/%s>" % os.path.basename(f) for f in self.header_files])
+        self.save(opencv_header_file, opencv_header)
+        opencv_modulemap_file = os.path.join(output_objc_path, framework_name + ".modulemap")
+        opencv_modulemap = "framework module " + framework_name + " {\n"
+        opencv_modulemap += "  umbrella header \"" + framework_name + ".h\"\n"
+        opencv_modulemap += "\n".join(["  header \"%s\"" % os.path.basename(f) for f in self.header_files])
+        opencv_modulemap += "\n  export *\n  module * {export *}\n}\n"
+        self.save(opencv_modulemap_file, opencv_modulemap)
         cmakelist_template = read_contents(os.path.join(SCRIPT_DIR, 'templates/cmakelists.template'))
         cmakelist = Template(cmakelist_template).substitute(modules = ";".join(modules), framework = framework_name)
         self.save(os.path.join(dstdir, "CMakeLists.txt"), cmakelist)
diff --git a/modules/objc/generator/templates/cmakelists.template b/modules/objc/generator/templates/cmakelists.template
index e928a6d21a..2cfc2474cd 100644
--- a/modules/objc/generator/templates/cmakelists.template
+++ b/modules/objc/generator/templates/cmakelists.template
@@ -13,32 +13,52 @@ set (SUPPRESS_WARNINGS_FLAGS "-Wno-incomplete-umbrella")
 set (CMAKE_CXX_FLAGS  "$${CMAKE_CXX_FLAGS} $${OBJC_COMPILE_FLAGS} $${SUPPRESS_WARNINGS_FLAGS}")
 
 # grab the files
-file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.swift")
+file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.swift" "objc/*\.modulemap")
 file(GLOB_RECURSE objc_headers "*\.h")
 
-add_library(opencv_objc_framework STATIC $${objc_sources})
+add_library($framework STATIC $${objc_sources})
 
-set_target_properties(opencv_objc_framework PROPERTIES LINKER_LANGUAGE CXX)
+set_target_properties($framework PROPERTIES LINKER_LANGUAGE CXX)
 
-target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}")
-target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}/install/include")
-target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}/install/include/opencv2")
+target_include_directories($framework PRIVATE "$${BUILD_ROOT}")
+target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include")
+target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include/opencv2")
 foreach(m $${MODULES})
-  target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}/modules/objc/gen/objc/$${m}")
+  target_include_directories($framework PRIVATE "$${BUILD_ROOT}/modules/objc/gen/objc/$${m}")
 endforeach()
 
-install(TARGETS opencv_objc_framework LIBRARY DESTINATION lib)
+install(TARGETS $framework LIBRARY DESTINATION lib)
 
 enable_language(Swift)
 
 # Additional target properties
-set_target_properties(opencv_objc_framework PROPERTIES
-    OUTPUT_NAME "$framework"
-    ARCHIVE_OUTPUT_DIRECTORY "$${BUILD_ROOT}/lib"
-    XCODE_ATTRIBUTE_SWIFT_VERSION 5.0
-    XCODE_ATTRIBUTE_OTHER_SWIFT_FLAGS "-Xcc $${SUPPRESS_WARNINGS_FLAGS}"
-    FRAMEWORK TRUE
-    MACOSX_FRAMEWORK_IDENTIFIER org.opencv.$framework
-    PUBLIC_HEADER "$${objc_headers}"
-    DEFINE_SYMBOL CVAPI_EXPORTS
-    )
+if (CMAKE_XCODE_BUILD_SYSTEM GREATER_EQUAL 12)
+  set_target_properties($framework PROPERTIES
+      OUTPUT_NAME "$framework"
+      ARCHIVE_OUTPUT_DIRECTORY "$${BUILD_ROOT}/lib"
+      XCODE_ATTRIBUTE_SWIFT_VERSION 5.0
+      XCODE_ATTRIBUTE_DEFINES_MODULE YES
+      XCODE_ATTRIBUTE_BUILD_LIBRARY_FOR_DISTRIBUTION YES
+      XCODE_ATTRIBUTE_OTHER_SWIFT_FLAGS "-Xcc $${SUPPRESS_WARNINGS_FLAGS}"
+      XCODE_ATTRIBUTE_MODULEMAP_FILE objc/$framework.modulemap
+      XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER org.opencv.$framework
+      FRAMEWORK TRUE
+      MACOSX_FRAMEWORK_IDENTIFIER org.opencv.$framework
+      PUBLIC_HEADER "$${objc_headers}"
+      DEFINE_SYMBOL CVAPI_EXPORTS
+      )
+else()
+  set_target_properties($framework PROPERTIES
+      OUTPUT_NAME "$framework"
+      ARCHIVE_OUTPUT_DIRECTORY "$${BUILD_ROOT}/lib"
+      XCODE_ATTRIBUTE_SWIFT_VERSION 5.0
+      XCODE_ATTRIBUTE_DEFINES_MODULE YES
+      XCODE_ATTRIBUTE_OTHER_SWIFT_FLAGS "-Xcc $${SUPPRESS_WARNINGS_FLAGS}"
+      XCODE_ATTRIBUTE_MODULEMAP_FILE objc/$framework.modulemap
+      XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER org.opencv.$framework
+      FRAMEWORK TRUE
+      MACOSX_FRAMEWORK_IDENTIFIER org.opencv.$framework
+      PUBLIC_HEADER "$${objc_headers}"
+      DEFINE_SYMBOL CVAPI_EXPORTS
+      )
+endif()

From c71f2714c6c986fac00a46f25c0a49dc7774c4a6 Mon Sep 17 00:00:00 2001
From: ann <44146733+APrigarina@users.noreply.github.com>
Date: Fri, 23 Oct 2020 21:42:45 +0300
Subject: [PATCH 049/152] Merge pull request #18003 from
 APrigarina:curved_qrcodes_decoding

Detection and decoding of curved QR codes

* temp changes for curved qrcodes

* added api for curved qr code decoding

* fixed prototypes

* refactored curved qr code decoding

* refactored curved qr code decoding 2nd part

* refactored curved qr code decoding 3rd part

* refactored curved qr code decoding 4th part

* added tests for curved qr code decoding

* refactored curved qr code decoding 5th part
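
A minimal usage sketch of the new API (the input file name is a placeholder):

```cpp
#include <opencv2/imgcodecs.hpp>
#include <opencv2/objdetect.hpp>

#include <vector>

int main()
{
    cv::Mat img = cv::imread("qr_curved.jpg", cv::IMREAD_GRAYSCALE);
    cv::QRCodeDetector detector;

    std::vector<cv::Point2f> points;
    // Detect and decode in one call, assuming the code lies on a curved surface:
    cv::String decoded = detector.detectAndDecodeCurved(img, points);

    // Or decode a quadrangle that detect() has already located:
    if (decoded.empty() && detector.detect(img, points))
        decoded = detector.decodeCurved(img, points);

    return decoded.empty() ? 1 : 0;
}
```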
---
 .../objdetect/include/opencv2/objdetect.hpp   |   29 +-
 modules/objdetect/src/qrcode.cpp              | 1691 +++++++++++++++--
 modules/objdetect/test/test_qrcode.cpp        |   95 +
 3 files changed, 1617 insertions(+), 198 deletions(-)

diff --git a/modules/objdetect/include/opencv2/objdetect.hpp b/modules/objdetect/include/opencv2/objdetect.hpp
index ea7b1ac801..0387b10239 100644
--- a/modules/objdetect/include/opencv2/objdetect.hpp
+++ b/modules/objdetect/include/opencv2/objdetect.hpp
@@ -702,6 +702,15 @@ public:
      */
     CV_WRAP cv::String decode(InputArray img, InputArray points, OutputArray straight_qrcode = noArray());
 
+    /** @brief Decodes QR code on a curved surface in image once it's found by the detect() method.
+
+     Returns UTF8-encoded output string or empty string if the code cannot be decoded.
+     @param img grayscale or color (BGR) image containing QR code.
+     @param points Quadrangle vertices found by detect() method (or some other algorithm).
+     @param straight_qrcode The optional output image containing rectified and binarized QR code
+     */
+    CV_WRAP cv::String decodeCurved(InputArray img, InputArray points, OutputArray straight_qrcode = noArray());
+
     /** @brief Both detects and decodes QR code
 
      @param img grayscale or color (BGR) image containing QR code.
@@ -709,7 +718,17 @@ public:
      @param straight_qrcode The optional output image containing rectified and binarized QR code
      */
     CV_WRAP cv::String detectAndDecode(InputArray img, OutputArray points=noArray(),
-                                        OutputArray straight_qrcode = noArray());
+                                       OutputArray straight_qrcode = noArray());
+
+    /** @brief Both detects and decodes QR code on a curved surface
+
+     @param img grayscale or color (BGR) image containing QR code.
+     @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found.
+     @param straight_qrcode The optional output image containing rectified and binarized QR code
+     */
+    CV_WRAP cv::String detectAndDecodeCurved(InputArray img, OutputArray points=noArray(),
+                                             OutputArray straight_qrcode = noArray());
+
     /** @brief Detects QR codes in image and returns the vector of the quadrangles containing the codes.
      @param img grayscale or color (BGR) image containing (or not) QR codes.
      @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes.
@@ -801,6 +820,14 @@ CV_EXPORTS bool detectQRCode(InputArray in, std::vector<Point> &points, double e
     */
 CV_EXPORTS bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray());
 
+/** @brief Decode a QR code on a curved surface in an image and return the text encoded in it.
+    @param in  Matrix of the type CV_8UC1 containing the image where the QR code is detected.
+    @param points Input vector of vertices of a quadrangle of minimal area that describes the QR code.
+    @param decoded_info String information that is encoded in the QR code.
+    @param straight_qrcode Matrix of the type CV_8UC1 containing a binary straight QR code.
+    */
+CV_EXPORTS bool decodeCurvedQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray());
+
 //! @} objdetect
 }
 
diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp
index 5b4bb61e9e..5b86f74614 100644
--- a/modules/objdetect/src/qrcode.cpp
+++ b/modules/objdetect/src/qrcode.cpp
@@ -18,6 +18,7 @@
 #include <iostream>
 #include <queue>
 #include <limits>
+#include <map>
 
 namespace cv
 {
@@ -63,7 +64,40 @@ static void updatePointsResult(OutputArray points_, const vector<Point2f>& point
     }
 }
 
+static Point2f intersectionLines(Point2f a1, Point2f a2, Point2f b1, Point2f b2)
+{
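+    // Standard line-line intersection via determinants; when the lines are
+    // (nearly) parallel the divisor vanishes, so fall back to returning a2.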
+    const float divisor = (a1.x - a2.x) * (b1.y - b2.y) - (a1.y - a2.y) * (b1.x - b2.x);
+    const float eps = 0.001f;
+    if (abs(divisor) < eps)
+        return a2;
+    Point2f result_square_angle(
+                              ((a1.x * a2.y  -  a1.y * a2.x) * (b1.x - b2.x) -
+                               (b1.x * b2.y  -  b1.y * b2.x) * (a1.x - a2.x)) /
+                               divisor,
+                              ((a1.x * a2.y  -  a1.y * a2.x) * (b1.y - b2.y) -
+                               (b1.x * b2.y  -  b1.y * b2.x) * (a1.y - a2.y)) /
+                               divisor
+                              );
+    return result_square_angle;
+}
+
+//      / | b
+//     /  |
+//    /   |
+//  a/    | c
+
+static inline double getCosVectors(Point2f a, Point2f b, Point2f c)
+{
+    return ((a - b).x * (c - b).x + (a - b).y * (c - b).y) / (norm(a - b) * norm(c - b));
+}
 
+static bool arePointsNearest(Point2f a, Point2f b, float delta = 0.0)
+{
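+    // Axis-aligned proximity test: both coordinate deltas must be strictly
+    // below delta (so the default delta of 0 never matches).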
+    return (abs(a.x - b.x) < delta) && (abs(a.y - b.y) < delta);
+}
 
 class QRDetect
 {
@@ -74,15 +108,13 @@ public:
     Mat getBinBarcode() { return bin_barcode; }
     Mat getStraightBarcode() { return straight_barcode; }
     vector<Point2f> getTransformationPoints() { return transformation_points; }
-    static Point2f intersectionLines(Point2f a1, Point2f a2, Point2f b1, Point2f b2);
 protected:
     vector<Vec3d> searchHorizontalLines();
     vector<Point2f> separateVerticalLines(const vector<Vec3d> &list_lines);
     vector<Point2f> extractVerticalLines(const vector<Vec3d> &list_lines, double eps);
     void fixationPoints(vector<Point2f> &local_point);
     vector<Point2f> getQuadrilateral(vector<Point2f> angle_list);
-    bool testBypassRoute(vector<Point2f> hull, int start, int finish);
-    inline double getCosVectors(Point2f a, Point2f b, Point2f c);
+    bool testByPassRoute(vector<Point2f> hull, int start, int finish);
 
     Mat barcode, bin_barcode, resized_barcode, resized_bin_barcode, straight_barcode;
     vector<Point2f> localization_points, transformation_points;
@@ -361,7 +393,6 @@ void QRDetect::fixationPoints(vector<Point2f> &local_point)
                               Point2f(static_cast<float>(bin_barcode.cols - 1),
                                       static_cast<float>(bin_barcode.rows - 1))));
 
-
         vector<Point2f> list_area_pnt;
         list_area_pnt.push_back(current_point);
 
@@ -629,7 +660,6 @@ bool QRDetect::computeTransformationPoints()
     transformation_points.push_back(
         intersectionLines(down_left_edge_point, down_max_delta_point,
                           up_right_edge_point, up_max_delta_point));
-
     vector<Point2f> quadrilateral = getQuadrilateral(transformation_points);
     transformation_points = quadrilateral;
 
@@ -643,23 +673,8 @@ bool QRDetect::computeTransformationPoints()
     return true;
 }
 
-Point2f QRDetect::intersectionLines(Point2f a1, Point2f a2, Point2f b1, Point2f b2)
-{
-    Point2f result_square_angle(
-                              ((a1.x * a2.y  -  a1.y * a2.x) * (b1.x - b2.x) -
-                               (b1.x * b2.y  -  b1.y * b2.x) * (a1.x - a2.x)) /
-                              ((a1.x - a2.x) * (b1.y - b2.y) -
-                               (a1.y - a2.y) * (b1.x - b2.x)),
-                              ((a1.x * a2.y  -  a1.y * a2.x) * (b1.y - b2.y) -
-                               (b1.x * b2.y  -  b1.y * b2.x) * (a1.y - a2.y)) /
-                              ((a1.x - a2.x) * (b1.y - b2.y) -
-                               (a1.y - a2.y) * (b1.x - b2.x))
-                              );
-    return result_square_angle;
-}
-
 // test function (if true then ------> else <------ )
-bool QRDetect::testBypassRoute(vector<Point2f> hull, int start, int finish)
+bool QRDetect::testByPassRoute(vector<Point2f> hull, int start, int finish)
 {
     CV_TRACE_FUNCTION();
     int index_hull = start, next_index_hull, hull_size = (int)hull.size();
@@ -764,7 +779,7 @@ vector<Point2f> QRDetect::getQuadrilateral(vector<Point2f> angle_list)
     int index_hull, extra_index_hull, next_index_hull, extra_next_index_hull;
     Point result_side_begin[4], result_side_end[4];
 
-    bool bypass_orientation = testBypassRoute(hull, start_line[0], finish_line[0]);
+    bool bypass_orientation = testByPassRoute(hull, start_line[0], finish_line[0]);
 
     min_norm = std::numeric_limits<double>::max();
     index_hull = start_line[0];
@@ -805,7 +820,7 @@ vector<Point2f> QRDetect::getQuadrilateral(vector<Point2f> angle_list)
 
     min_norm = std::numeric_limits<double>::max();
     index_hull = start_line[1];
-    bypass_orientation = testBypassRoute(hull, start_line[1], finish_line[1]);
+    bypass_orientation = testByPassRoute(hull, start_line[1], finish_line[1]);
     do
     {
         if (bypass_orientation) { next_index_hull = index_hull + 1; }
@@ -840,8 +855,8 @@ vector<Point2f> QRDetect::getQuadrilateral(vector<Point2f> angle_list)
         result_side_end[1]   = angle_list[1];
     }
 
-    bypass_orientation = testBypassRoute(hull, start_line[0], unstable_pnt);
-    const bool extra_bypass_orientation = testBypassRoute(hull, finish_line[1], unstable_pnt);
+    bypass_orientation = testByPassRoute(hull, start_line[0], unstable_pnt);
+    const bool extra_bypass_orientation = testByPassRoute(hull, finish_line[1], unstable_pnt);
 
     vector<Point2f> result_angle_list(4), test_result_angle_list(4);
     double min_diff_area = std::numeric_limits<double>::max();
@@ -918,16 +933,6 @@ vector<Point2f> QRDetect::getQuadrilateral(vector<Point2f> angle_list)
     return result_angle_list;
 }
 
-//      / | b
-//     /  |
-//    /   |
-//  a/    | c
-
-inline double QRDetect::getCosVectors(Point2f a, Point2f b, Point2f c)
-{
-    return ((a - b).x * (c - b).x + (a - b).y * (c - b).y) / (norm(a - b) * norm(c - b));
-}
-
 struct QRCodeDetector::Impl
 {
 public:
@@ -975,17 +980,79 @@ public:
     Mat getStraightBarcode() { return straight; }
     size_t getVersion() { return version; }
     std::string getDecodeInformation() { return result_info; }
-    bool fullDecodingProcess();
+    bool straightDecodingProcess();
+    bool curvedDecodingProcess();
 protected:
     bool updatePerspective();
     bool versionDefinition();
     bool samplingForVersion();
     bool decodingProcess();
-    Mat original, no_border_intermediate, intermediate, straight;
+    inline double pointPosition(Point2f a, Point2f b, Point2f c);
+    float distancePointToLine(Point2f a, Point2f b, Point2f c);
+    void getPointsInsideQRCode(const vector<Point2f> &angle_list);
+    bool computeClosestPoints(const vector<Point> &result_integer_hull);
+    bool computeSidesPoints(const vector<Point> &result_integer_hull);
+    vector<Point> getPointsNearUnstablePoint(const vector<Point> &side, int start, int end, int step);
+    bool findAndAddStablePoint(const vector<Point> &result_integer_hull);
+    bool findIndexesCurvedSides();
+    bool findIncompleteIndexesCurvedSides();
+    Mat getPatternsMask();
+    Point findClosestZeroPoint(Point2f original_point);
+    bool findPatternsContours(vector<vector<Point> > &patterns_contours);
+    bool findPatternsVerticesPoints(vector<vector<Point> > &patterns_vertices_points);
+    bool findTempPatternsAddingPoints(vector<std::pair<int, vector<Point> > > &temp_patterns_add_points);
+    bool computePatternsAddingPoints(std::map<int, vector<Point> > &patterns_add_points);
+    bool addPointsToSides();
+    void completeAndSortSides();
+    vector<vector<float> > computeSpline(const vector<int> &x_arr, const vector<int> &y_arr);
+    bool createSpline(vector<vector<Point2f> > &spline_lines);
+    bool divideIntoEvenSegments(vector<vector<Point2f> > &segments_points);
+    bool straightenQRCodeInParts();
+    bool preparingCurvedQRCodes();
+
+    const static int NUM_SIDES = 2;
+    Mat original, bin_barcode, no_border_intermediate, intermediate, straight, curved_to_straight, test_image;
     vector<Point2f> original_points;
+    vector<Point2f> original_curved_points;
+    vector<Point> qrcode_locations;
+    vector<std::pair<size_t, Point> > closest_points;
+    vector<vector<Point> > sides_points;
+    std::pair<size_t, Point> unstable_pair;
+    vector<int> curved_indexes, curved_incomplete_indexes;
+    std::map<int, vector<Point> > complete_curved_sides;
     std::string result_info;
     uint8_t version, version_size;
     float test_perspective_size;
+    struct sortPairAsc
+    {
+        bool operator()(const std::pair<size_t, double> &a,
+                        const std::pair<size_t, double> &b) const
+        {
+            return a.second < b.second;
+        }
+    };
+    struct sortPairDesc
+    {
+        bool operator()(const std::pair<size_t, double> &a,
+                        const std::pair<size_t, double> &b) const
+        {
+            return a.second > b.second;
+        }
+    };
+    struct sortPointsByX
+    {
+        bool operator()(const Point &a, const Point &b) const
+        {
+            return a.x < b.x;
+        }
+    };
+    struct sortPointsByY
+    {
+        bool operator()(const Point &a, const Point &b) const
+        {
+            return a.y < b.y;
+        }
+    };
 };
 
 void QRDecode::init(const Mat &src, const vector<Point2f> &points)
@@ -993,6 +1060,8 @@ void QRDecode::init(const Mat &src, const vector<Point2f> &points)
     CV_TRACE_FUNCTION();
     vector<Point2f> bbox = points;
     original = src.clone();
+    test_image = src.clone();
+    adaptiveThreshold(original, bin_barcode, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 83, 2);
     intermediate = Mat::zeros(original.size(), CV_8UC1);
     original_points = bbox;
     version = 0;
@@ -1001,221 +1070,1392 @@ void QRDecode::init(const Mat &src, const vector<Point2f> &points)
     result_info = "";
 }
 
-bool QRDecode::updatePerspective()
+inline double QRDecode::pointPosition(Point2f a, Point2f b, Point2f c)
 {
-    CV_TRACE_FUNCTION();
-    const Point2f centerPt = QRDetect::intersectionLines(original_points[0], original_points[2],
-                                                         original_points[1], original_points[3]);
-    if (cvIsNaN(centerPt.x) || cvIsNaN(centerPt.y))
-        return false;
-
-    const Size temporary_size(cvRound(test_perspective_size), cvRound(test_perspective_size));
-
-    vector<Point2f> perspective_points;
-    perspective_points.push_back(Point2f(0.f, 0.f));
-    perspective_points.push_back(Point2f(test_perspective_size, 0.f));
-
-    perspective_points.push_back(Point2f(test_perspective_size, test_perspective_size));
-    perspective_points.push_back(Point2f(0.f, test_perspective_size));
-
-    perspective_points.push_back(Point2f(test_perspective_size * 0.5f, test_perspective_size * 0.5f));
-
-    vector<Point2f> pts = original_points;
-    pts.push_back(centerPt);
-
-    Mat H = findHomography(pts, perspective_points);
-    Mat bin_original;
-    adaptiveThreshold(original, bin_original, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 83, 2);
-    Mat temp_intermediate;
-    warpPerspective(bin_original, temp_intermediate, H, temporary_size, INTER_NEAREST);
-    no_border_intermediate = temp_intermediate(Range(1, temp_intermediate.rows), Range(1, temp_intermediate.cols));
-
-    const int border = cvRound(0.1 * test_perspective_size);
-    const int borderType = BORDER_CONSTANT;
-    copyMakeBorder(no_border_intermediate, intermediate, border, border, border, border, borderType, Scalar(255));
-    return true;
+    return (a.x - b.x) * (c.y - b.y) - (c.x - b.x) * (a.y - b.y);
 }
 
-inline Point computeOffset(const vector<Point>& v)
+float QRDecode::distancePointToLine(Point2f a, Point2f b, Point2f c)
 {
-    // compute the width/height of convex hull
-    Rect areaBox = boundingRect(v);
+    float A, B, C, result;
+    A = c.y - b.y;
+    B = c.x - b.x;
+    C = c.x * b.y - b.x * c.y;
+    float dist = sqrt(A*A + B*B);
+    if (dist == 0) return 0;
+    result = abs((A * a.x - B * a.y + C)) / dist;
 
-    // compute the good offset
-    // the box is consisted by 7 steps
-    // to pick the middle of the stripe, it needs to be 1/14 of the size
-    const int cStep = 7 * 2;
-    Point offset = Point(areaBox.width, areaBox.height);
-    offset /= cStep;
-    return offset;
+    return result;
 }
 
-bool QRDecode::versionDefinition()
+void QRDecode::getPointsInsideQRCode(const vector<Point2f> &angle_list)
 {
     CV_TRACE_FUNCTION();
-    LineIterator line_iter(intermediate, Point2f(0, 0), Point2f(test_perspective_size, test_perspective_size));
-    Point black_point = Point(0, 0);
-    for(int j = 0; j < line_iter.count; j++, ++line_iter)
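+    // Rasterize the quadrilateral outline into a mask, flood-fill it from the
+    // center to get the interior, then flood-fill every dark module found
+    // inside to collect qrcode_locations.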
+    size_t angle_size = angle_list.size();
+    Mat contour_mask = Mat::zeros(bin_barcode.size(), CV_8UC1);
+    for (size_t i = 0; i < angle_size; i++)
     {
-        const uint8_t value = intermediate.at<uint8_t>(line_iter.pos());
-        if (value == 0)
+        LineIterator line_iter(bin_barcode, angle_list[ i      % angle_size],
+                                            angle_list[(i + 1) % angle_size]);
+        for(int j = 0; j < line_iter.count; j++, ++line_iter)
         {
-            black_point = line_iter.pos();
-            break;
+            Point p = line_iter.pos();
+            contour_mask.at<uint8_t>(p + Point(1, 1)) = 255;
         }
     }
+    Point2f center_point = intersectionLines(angle_list[0], angle_list[2],
+                                             angle_list[1], angle_list[3]);
+    floodFill(contour_mask, center_point, 255, 0, Scalar(), Scalar(), FLOODFILL_FIXED_RANGE);
 
-    Mat mask = Mat::zeros(intermediate.rows + 2, intermediate.cols + 2, CV_8UC1);
-    floodFill(intermediate, mask, black_point, 255, 0, Scalar(), Scalar(), FLOODFILL_MASK_ONLY);
-
-    vector<Point> locations, non_zero_elem;
-    Mat mask_roi = mask(Range(1, intermediate.rows - 1), Range(1, intermediate.cols - 1));
-    findNonZero(mask_roi, non_zero_elem);
-    convexHull(non_zero_elem, locations);
-    Point offset = computeOffset(locations);
+    vector<Point> locations;
+    findNonZero(contour_mask, locations);
 
-    Point temp_remote = locations[0], remote_point;
-    const Point delta_diff = offset;
-    for (size_t i = 0; i < locations.size(); i++)
+    Mat fill_bin_barcode = bin_barcode.clone();
+    Mat qrcode_mask = Mat::zeros(bin_barcode.rows + 2, bin_barcode.cols + 2, CV_8UC1);
+    uint8_t value, mask_value;
+    for(size_t i = 0; i < locations.size(); i++)
     {
-        if (norm(black_point - temp_remote) <= norm(black_point - locations[i]))
+        value = bin_barcode.at<uint8_t>(locations[i]);
+        mask_value = qrcode_mask.at<uint8_t>(locations[i] + Point(1, 1));
+        if (value == 0 && mask_value == 0)
         {
-            const uint8_t value = intermediate.at<uint8_t>(temp_remote - delta_diff);
-            temp_remote = locations[i];
-            if (value == 0) { remote_point = temp_remote - delta_diff; }
-            else { remote_point = temp_remote - (delta_diff / 2); }
+            floodFill(fill_bin_barcode, qrcode_mask, locations[i], 255,
+                      0, Scalar(), Scalar(), FLOODFILL_MASK_ONLY);
         }
     }
+    Mat qrcode_mask_roi = qrcode_mask(Range(1, qrcode_mask.rows - 1), Range(1, qrcode_mask.cols - 1));
+    findNonZero(qrcode_mask_roi, qrcode_locations);
+}
 
-    size_t transition_x = 0 , transition_y = 0;
-
-    uint8_t future_pixel = 255;
-    const uint8_t *intermediate_row = intermediate.ptr<uint8_t>(remote_point.y);
-    for(int i = remote_point.x; i < intermediate.cols; i++)
+bool QRDecode::computeClosestPoints(const vector<Point> &result_integer_hull)
+{
+    CV_TRACE_FUNCTION();
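+    // For each corner of the located quadrangle find the closest hull point;
+    // the corner farthest from the hull is remembered as the unstable pair.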
+    double min_norm, max_norm = 0.0;
+    size_t idx_min;
+    for (size_t i = 0; i < original_points.size(); i++)
     {
-        if (intermediate_row[i] == future_pixel)
+        min_norm = std::numeric_limits<double>::max();
+
+        Point closest_pnt;
+        for (size_t j = 0; j < result_integer_hull.size(); j++)
         {
-            future_pixel = static_cast<uint8_t>(~future_pixel);
-            transition_x++;
+            Point integer_original_point = original_points[i];
+            double temp_norm = norm(integer_original_point - result_integer_hull[j]);
+            if (temp_norm < min_norm)
+            {
+                min_norm = temp_norm;
+                closest_pnt = result_integer_hull[j];
+                idx_min = j;
+            }
         }
+        if (min_norm > max_norm)
+        {
+            max_norm = min_norm;
+            unstable_pair = std::pair<size_t,Point>(i, closest_pnt);
+        }
+        closest_points.push_back(std::pair<size_t,Point>(idx_min, closest_pnt));
     }
 
-    future_pixel = 255;
-    for(int j = remote_point.y; j < intermediate.rows; j++)
+    if (closest_points.size() != 4)
     {
-        const uint8_t value = intermediate.at<uint8_t>(Point(j, remote_point.x));
-        if (value == future_pixel)
-        {
-            future_pixel = static_cast<uint8_t>(~future_pixel);
-            transition_y++;
-        }
+        return false;
     }
-    version = saturate_cast<uint8_t>((std::min(transition_x, transition_y) - 1) * 0.25 - 1);
-    if ( !(  0 < version && version <= 40 ) ) { return false; }
-    version_size = 21 + (version - 1) * 4;
+
     return true;
 }
 
-bool QRDecode::samplingForVersion()
+bool QRDecode::computeSidesPoints(const vector<Point> &result_integer_hull)
 {
-    CV_TRACE_FUNCTION();
-    const double multiplyingFactor = (version < 3)  ? 1 :
-                                     (version == 3) ? 1.5 :
-                                     version * (5 + version - 4);
-    const Size newFactorSize(
-                  cvRound(no_border_intermediate.size().width  * multiplyingFactor),
-                  cvRound(no_border_intermediate.size().height * multiplyingFactor));
-    Mat postIntermediate(newFactorSize, CV_8UC1);
-    resize(no_border_intermediate, postIntermediate, newFactorSize, 0, 0, INTER_AREA);
+    size_t num_closest_points = closest_points.size();
+    vector<Point> points;
 
-    const int delta_rows = cvRound((postIntermediate.rows * 1.0) / version_size);
-    const int delta_cols = cvRound((postIntermediate.cols * 1.0) / version_size);
-
-    vector<double> listFrequencyElem;
-    for (int r = 0; r < postIntermediate.rows; r += delta_rows)
+    for(size_t i = 0; i < num_closest_points; i++)
     {
-        for (int c = 0; c < postIntermediate.cols; c += delta_cols)
+        points.clear();
+        size_t start = closest_points[i].first,
+               end   = closest_points[(i + 1) % num_closest_points].first;
+        if (start < end)
         {
-            Mat tile = postIntermediate(
-                           Range(r, min(r + delta_rows, postIntermediate.rows)),
-                           Range(c, min(c + delta_cols, postIntermediate.cols)));
-            const double frequencyElem = (countNonZero(tile) * 1.0) / tile.total();
-            listFrequencyElem.push_back(frequencyElem);
+            points.insert(points.end(),
+                          result_integer_hull.begin() + start,
+                          result_integer_hull.begin() + end + 1);
         }
-    }
-
-    double dispersionEFE = std::numeric_limits<double>::max();
-    double experimentalFrequencyElem = 0;
-    for (double expVal = 0; expVal < 1; expVal+=0.001)
-    {
-        double testDispersionEFE = 0.0;
-        for (size_t i = 0; i < listFrequencyElem.size(); i++)
+        else
         {
-            testDispersionEFE += (listFrequencyElem[i] - expVal) *
-                                 (listFrequencyElem[i] - expVal);
+            points.insert(points.end(),
+                          result_integer_hull.begin() + start,
+                          result_integer_hull.end());
+            points.insert(points.end(),
+                          result_integer_hull.begin(),
+                          result_integer_hull.begin() + end + 1);
         }
-        testDispersionEFE /= (listFrequencyElem.size() - 1);
-        if (dispersionEFE > testDispersionEFE)
+        if (abs(result_integer_hull[start].x - result_integer_hull[end].x) >
+            abs(result_integer_hull[start].y - result_integer_hull[end].y))
         {
-            dispersionEFE = testDispersionEFE;
-            experimentalFrequencyElem = expVal;
+            if (points.front().x > points.back().x)
+            {
+                reverse(points.begin(), points.end());
+            }
+        }
+        else
+        {
+            if (points.front().y > points.back().y)
+            {
+                reverse(points.begin(), points.end());
+            }
+        }
+        if (points.empty())
+        {
+            return false;
         }
+        sides_points.push_back(points);
     }
 
-    straight = Mat(Size(version_size, version_size), CV_8UC1, Scalar(0));
-    for (int r = 0; r < version_size * version_size; r++)
-    {
-        int i   = r / straight.cols;
-        int j   = r % straight.cols;
-        straight.ptr<uint8_t>(i)[j] = (listFrequencyElem[r] < experimentalFrequencyElem) ? 0 : 255;
-    }
     return true;
 }
 
-bool QRDecode::decodingProcess()
+vector<Point> QRDecode::getPointsNearUnstablePoint(const vector<Point> &side, int start, int end, int step)
 {
-#ifdef HAVE_QUIRC
-    if (straight.empty()) { return false; }
+    vector<Point> points;
+    Point p1, p2, p3;
 
-    quirc_code qr_code;
-    memset(&qr_code, 0, sizeof(qr_code));
+    double max_neighbour_angle = 1.0;
+    int index_max_angle = start + step;
+    bool enough_points = true;
 
-    qr_code.size = straight.size().width;
-    for (int x = 0; x < qr_code.size; x++)
+    if(side.size() < 3)
     {
-        for (int y = 0; y < qr_code.size; y++)
+        points.insert(points.end(), side.begin(), side.end());
+        return points;
+    }
+    const double cos_angle_threshold = -0.97;
+    for (int i = start + step; i != end; i+= step)
+    {
+        p1 = side[i + step];
+        if (norm(p1 - side[i])        < 5) { continue; }
+        p2 = side[i];
+        if (norm(p2 - side[i - step]) < 5) { continue; }
+        p3 = side[i - step];
+
+        double neighbour_angle = getCosVectors(p1, p2, p3);
+        neighbour_angle = floor(neighbour_angle*1000)/1000;
+
+        if ((neighbour_angle <= max_neighbour_angle) && (neighbour_angle < cos_angle_threshold))
         {
-            int position = y * qr_code.size + x;
-            qr_code.cell_bitmap[position >> 3]
-                |= straight.ptr<uint8_t>(y)[x] ? 0 : (1 << (position & 7));
+            max_neighbour_angle = neighbour_angle;
+            index_max_angle = i;
+        }
+        else if (i == end - step)
+        {
+            enough_points = false;
+            index_max_angle = i;
         }
     }
 
-    quirc_data qr_code_data;
-    quirc_decode_error_t errorCode = quirc_decode(&qr_code, &qr_code_data);
-    if (errorCode != 0) { return false; }
+    if (enough_points)
+    {
+        p1 = side[index_max_angle + step];
+        p2 = side[index_max_angle];
+        p3 = side[index_max_angle - step];
 
-    for (int i = 0; i < qr_code_data.payload_len; i++)
+        points.push_back(p1);
+        points.push_back(p2);
+        points.push_back(p3);
+    }
+    else
     {
-        result_info += qr_code_data.payload[i];
+        p1 = side[index_max_angle];
+        p2 = side[index_max_angle - step];
+
+        points.push_back(p1);
+        points.push_back(p2);
     }
-    return true;
-#else
-    return false;
-#endif
 
+    return points;
 }
 
-bool QRDecode::fullDecodingProcess()
+bool QRDecode::findAndAddStablePoint(const vector<Point> &result_integer_hull)
 {
-#ifdef HAVE_QUIRC
-    if (!updatePerspective())  { return false; }
-    if (!versionDefinition())  { return false; }
-    if (!samplingForVersion()) { return false; }
-    if (!decodingProcess())    { return false; }
-    return true;
-#else
-    std::cout << "Library QUIRC is not linked. No decoding is performed. Take it to the OpenCV repository." << std::endl;
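+    // Rebuild the unstable corner as the intersection of its two adjacent
+    // sides and splice the stable point into both sides' point lists.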
+    size_t idx_unstable_point = unstable_pair.first;
+    Point unstable_point = unstable_pair.second;
+
+    vector<Point> current_side_points, next_side_points;
+    Point a1, a2, b1, b2;
+    int start_current, end_current, step_current, start_next, end_next, step_next;
+    vector<Point>::iterator it_a, it_b;
+
+    vector<Point> &current_side = sides_points[(idx_unstable_point + 3) % 4];
+    vector<Point> &next_side    = sides_points[idx_unstable_point];
+
+    if(current_side.size() < 2 || next_side.size() < 2)
+    {
+        return false;
+    }
+
+    if(arePointsNearest(unstable_point, current_side.front(), 3.0))
+    {
+        start_current = (int)current_side.size() - 1;
+        end_current = 0;
+        step_current = -1;
+        it_a = current_side.begin();
+    }
+    else if(arePointsNearest(unstable_point, current_side.back(), 3.0))
+    {
+        start_current = 0;
+        end_current = (int)current_side.size() - 1;
+        step_current = 1;
+        it_a = current_side.end() - 1;
+    }
+    else
+    {
+        return false;
+    }
+    if(arePointsNearest(unstable_point, next_side.front(), 3.0))
+    {
+        start_next = (int)next_side.size() - 1;
+        end_next = 0;
+        step_next = -1;
+        it_b = next_side.begin();
+    }
+    else if(arePointsNearest(unstable_point, next_side.back(), 3.0))
+    {
+        start_next = 0;
+        end_next = (int)next_side.size() - 1;
+        step_next = 1;
+        it_b = next_side.end() - 1;
+    }
+    else
+    {
+        return false;
+    }
+    current_side_points = getPointsNearUnstablePoint(current_side, start_current, end_current, step_current);
+    next_side_points    = getPointsNearUnstablePoint(next_side, start_next, end_next, step_next);
+
+    if (current_side_points.size() < 2 || next_side_points.size() < 2)
+    {
+        return false;
+    }
+
+    a1 = current_side_points[0];
+    a2 = current_side_points[1];
+
+    b1 = next_side_points[0];
+    b2 = next_side_points[1];
+
+    if(norm(a1 - b1) < 10 && next_side_points.size() > 2)
+    {
+        b1 = next_side_points[1];
+        b2 = next_side_points[2];
+    }
+
+    Point stable_point = intersectionLines(a1, a2, b1, b2);
+
+    const double max_side = std::max(bin_barcode.size().width, bin_barcode.size().height);
+    if ((abs(stable_point.x) > max_side) || (abs(stable_point.y) > max_side))
+    {
+        return false;
+    }
+
+    while (*it_a != a1)
+    {
+        it_a = current_side.erase(it_a);
+        if (it_a == current_side.end())
+        {
+            it_a -= step_current;
+        }
+        Point point_to_remove_from_current = *it_a;
+        if (point_to_remove_from_current.x > max_side || point_to_remove_from_current.y > max_side)
+        {
+            break;
+        }
+    }
+    while (*it_b != b1)
+    {
+        it_b = next_side.erase(it_b);
+        if (it_b == next_side.end())
+        {
+            it_b -= step_next;
+        }
+        Point point_to_remove_from_next = *it_b;
+        if (point_to_remove_from_next.x > max_side || point_to_remove_from_next.y > max_side)
+        {
+            break;
+        }
+    }
+
+    bool add_stable_point = true;
+
+    for (size_t i = 0; i < result_integer_hull.size(); i++)
+    {
+        if(arePointsNearest(stable_point, original_points[i], 3.0))
+        {
+            add_stable_point = false;
+            break;
+        }
+    }
+
+    if(add_stable_point)
+    {
+        current_side.insert(it_a, stable_point);
+        next_side.insert(it_b, stable_point);
+        closest_points[unstable_pair.first].second = stable_point;
+    }
+    else
+    {
+        stable_point = original_points[unstable_pair.first];
+        closest_points[unstable_pair.first].second = stable_point;
+        current_side.insert(it_a, stable_point);
+        next_side.insert(it_b, stable_point);
+    }
+
+    return true;
+}
+
+bool QRDecode::findIndexesCurvedSides()
+{
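+    // The side whose hull points deviate most (on average) from the straight
+    // chord between its corners is taken as curved, together with the
+    // opposite side.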
+    double max_dist_to_arc_side = 0.0;
+    size_t num_closest_points = closest_points.size();
+    int idx_curved_current = -1, idx_curved_opposite = -1;
+
+    for (size_t i = 0; i < num_closest_points; i++)
+    {
+        double dist_to_arc = 0.0;
+
+        Point arc_start = closest_points[i].second;
+        Point arc_end   = closest_points[(i + 1) % num_closest_points].second;
+
+        for (size_t j = 0; j < sides_points[i].size(); j++)
+        {
+            Point arc_point = sides_points[i][j];
+            double dist = distancePointToLine(arc_point, arc_start, arc_end);
+            dist_to_arc += dist;
+        }
+        dist_to_arc /= sides_points[i].size();
+
+        if (dist_to_arc > max_dist_to_arc_side)
+        {
+            max_dist_to_arc_side = dist_to_arc;
+            idx_curved_current = (int)i;
+            idx_curved_opposite = (int)(i + 2) % num_closest_points;
+        }
+    }
+    if (idx_curved_current == -1 || idx_curved_opposite == -1)
+    {
+        return false;
+    }
+
+    curved_indexes.push_back(idx_curved_current);
+    curved_indexes.push_back(idx_curved_opposite);
+
+    return true;
+}
+
+bool QRDecode::findIncompleteIndexesCurvedSides()
+{
+    int num_closest_points = (int)closest_points.size();
+
+    for (int i = 0; i < NUM_SIDES; i++)
+    {
+        int idx_side = curved_indexes[i];
+        int side_size = (int)sides_points[idx_side].size();
+
+        double max_norm = norm(closest_points[idx_side].second -
+                               closest_points[(idx_side + 1) % num_closest_points].second);
+        double real_max_norm = 0;
+
+        for (int j = 0; j < side_size - 1; j++)
+        {
+            double temp_norm = norm(sides_points[idx_side][j] -
+                                    sides_points[idx_side][j + 1]);
+            if (temp_norm > real_max_norm)
+            {
+                real_max_norm = temp_norm;
+            }
+        }
+        if (real_max_norm > (0.5 * max_norm))
+        {
+            curved_incomplete_indexes.push_back(curved_indexes[i]);
+        }
+
+    }
+
+    if (curved_incomplete_indexes.size() == 0)
+    {
+        return false;
+    }
+    return true;
+}
+
+Point QRDecode::findClosestZeroPoint(Point2f original_point)
+{
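+    // Scan the 5x5 neighborhood around the corner for a dark (zero) pixel to
+    // seed a flood fill (assumes the corner is at least `step` px from the border).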
+    int orig_x = static_cast<int>(original_point.x);
+    int orig_y = static_cast<int>(original_point.y);
+    uint8_t value;
+    Point zero_point;
+
+    const int step = 2;
+    for (int i = orig_x - step; i >= 0 && i <= orig_x + step; i++)
+    {
+        for (int j = orig_y - step; j >= 0 && j <= orig_y + step; j++)
+        {
+            Point p(i, j);
+            value = bin_barcode.at<uint8_t>(p);
+            if (value == 0) zero_point = p;
+        }
+    }
+
+    return zero_point;
+}
+
+Mat QRDecode::getPatternsMask()
+{
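+    // Flood-fill from three of the four corners (index 2, the corner without
+    // a finder pattern, is skipped) to build a mask covering the finder patterns.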
+    Mat mask(bin_barcode.rows + 2, bin_barcode.cols + 2, CV_8UC1, Scalar(0));
+    Mat patterns_mask(bin_barcode.rows + 2, bin_barcode.cols + 2, CV_8UC1, Scalar(0));
+    Mat fill_bin_barcode = bin_barcode.clone();
+    for (size_t i = 0; i < original_points.size(); i++)
+    {
+        if (i == 2) continue;
+        Point p = findClosestZeroPoint(original_points[i]);
+        floodFill(fill_bin_barcode, mask, p, 255,
+                        0, Scalar(), Scalar(), FLOODFILL_MASK_ONLY);
+        patterns_mask += mask;
+    }
+    Mat mask_roi = patterns_mask(Range(1, bin_barcode.rows - 1), Range(1, bin_barcode.cols - 1));
+
+    return mask_roi;
+}
+
+bool QRDecode::findPatternsContours(vector<vector<Point> > &patterns_contours)
+{
+    Mat patterns_mask = getPatternsMask();
+    findContours(patterns_mask, patterns_contours, RETR_EXTERNAL, CHAIN_APPROX_NONE, Point(0, 0));
+    if (patterns_contours.size() != 3) {  return false; }
+    return true;
+}
+
+bool QRDecode::findPatternsVerticesPoints(vector<vector<Point> > &patterns_vertices_points)
+{
+    vector<vector<Point> > patterns_contours;
+    if(!findPatternsContours(patterns_contours))
+    {
+        return false;
+    }
+    const int num_vertices = 4;
+    for(size_t i = 0; i < patterns_contours.size(); i++)
+    {
+        vector<Point> convexhull_contours, new_convexhull_contours;
+        convexHull(patterns_contours[i], convexhull_contours);
+
+        size_t number_pnts_in_hull = convexhull_contours.size();
+        vector<std::pair<size_t, double> > cos_angles_in_hull;
+        vector<size_t> min_angle_pnts_indexes;
+
+        for(size_t j = 1; j < number_pnts_in_hull + 1; j++)
+        {
+            double cos_angle = getCosVectors(convexhull_contours[(j - 1) % number_pnts_in_hull],
+                                             convexhull_contours[ j      % number_pnts_in_hull],
+                                             convexhull_contours[(j + 1) % number_pnts_in_hull]);
+            cos_angles_in_hull.push_back(std::pair<size_t, double>(j, cos_angle));
+        }
+
+        sort(cos_angles_in_hull.begin(), cos_angles_in_hull.end(), sortPairDesc());
+
+        for (size_t j = 0; j < cos_angles_in_hull.size(); j++)
+        {
+            bool add_edge = true;
+            for(size_t k = 0; k < min_angle_pnts_indexes.size(); k++)
+            {
+                if(norm(convexhull_contours[cos_angles_in_hull[j].first % number_pnts_in_hull] -
+                        convexhull_contours[min_angle_pnts_indexes[k]   % number_pnts_in_hull]) < 3)
+                {
+                    add_edge = false;
+                }
+            }
+            if (add_edge)
+            {
+                min_angle_pnts_indexes.push_back(cos_angles_in_hull[j].first % number_pnts_in_hull);
+            }
+            if ((int)min_angle_pnts_indexes.size() == num_vertices) { break; }
+        }
+        sort(min_angle_pnts_indexes.begin(), min_angle_pnts_indexes.end());
+
+        vector<Point> contour_vertices_points;
+
+        for (size_t k = 0; k < min_angle_pnts_indexes.size(); k++)
+        {
+            contour_vertices_points.push_back(convexhull_contours[min_angle_pnts_indexes[k]]);
+        }
+        patterns_vertices_points.push_back(contour_vertices_points);
+    }
+    if (patterns_vertices_points.size() != 3)
+    {
+        return false;
+    }
+
+    return true;
+}
+
+bool QRDecode::findTempPatternsAddingPoints(vector<std::pair<int, vector<Point> > > &temp_patterns_add_points)
+{
+    vector<vector<Point> > patterns_contours, patterns_vertices_points;
+    if(!findPatternsVerticesPoints(patterns_vertices_points))
+    {
+        return false;
+    }
+    if(!findPatternsContours(patterns_contours))
+    {
+        return false;
+    }
+
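+    // For every incomplete curved side, locate the finder patterns adjacent to its
+    // end points and extract the piece of the pattern contour running along that side.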
+    for (size_t i = 0; i < curved_incomplete_indexes.size(); i++)
+    {
+        int idx_curved_side = curved_incomplete_indexes[i];
+        Point close_transform_pnt_curr = original_points[idx_curved_side];
+        Point close_transform_pnt_next = original_points[(idx_curved_side + 1) % 4];
+
+        vector<size_t> patterns_indexes;
+
+        for (size_t j = 0; j < patterns_vertices_points.size(); j++)
+        {
+            for (size_t k = 0; k < patterns_vertices_points[j].size(); k++)
+            {
+                if (norm(close_transform_pnt_curr - patterns_vertices_points[j][k]) < 5)
+                {
+                    patterns_indexes.push_back(j);
+                    break;
+                }
+                if (norm(close_transform_pnt_next - patterns_vertices_points[j][k]) < 5)
+                {
+                    patterns_indexes.push_back(j);
+                    break;
+                }
+            }
+        }
+        for (size_t j = 0; j < patterns_indexes.size(); j++)
+        {
+            vector<Point> vertices = patterns_vertices_points[patterns_indexes[j]];
+            vector<std::pair<int, double> > vertices_dist_pair;
+            vector<Point> points;
+            for (size_t k = 0; k < vertices.size(); k++)
+            {
+                double dist_to_side = distancePointToLine(vertices[k], close_transform_pnt_curr,
+                                                                       close_transform_pnt_next);
+                vertices_dist_pair.push_back(std::pair<int, double>((int)k, dist_to_side));
+            }
+            if (vertices_dist_pair.size() == 0)
+            {
+                return false;
+            }
+            sort(vertices_dist_pair.begin(), vertices_dist_pair.end(), sortPairAsc());
+            Point p1, p2;
+            int index_p1_in_vertices = 0, index_p2_in_vertices = 0;
+            for (int k = 4; k > 0; k--)
+            {
+                if((vertices_dist_pair[0].first == k % 4) && (vertices_dist_pair[1].first == (k - 1) % 4))
+                {
+                    index_p1_in_vertices = vertices_dist_pair[0].first;
+                    index_p2_in_vertices = vertices_dist_pair[1].first;
+                }
+                else if((vertices_dist_pair[1].first == k % 4) && (vertices_dist_pair[0].first == (k - 1) % 4))
+                {
+                    index_p1_in_vertices = vertices_dist_pair[1].first;
+                    index_p2_in_vertices = vertices_dist_pair[0].first;
+                }
+            }
+            if (index_p1_in_vertices == index_p2_in_vertices) return false;
+
+            p1 = vertices[index_p1_in_vertices];
+            p2 = vertices[index_p2_in_vertices];
+
+            size_t index_p1_in_contour = 0, index_p2_in_contour = 0;
+            vector<Point> add_points = patterns_contours[patterns_indexes[j]];
+
+            for(size_t k = 0; k < add_points.size(); k++)
+            {
+                if (add_points[k] == p1)
+                {
+                    index_p1_in_contour = k;
+                }
+                if (add_points[k] == p2)
+                {
+                    index_p2_in_contour = k;
+                }
+            }
+
+            if (index_p1_in_contour > index_p2_in_contour)
+            {
+                for (size_t k = index_p1_in_contour; k < add_points.size(); k++)
+                {
+                    points.push_back(add_points[k]);
+                }
+                for (size_t k = 0; k <= index_p2_in_contour; k++)
+                {
+                    points.push_back(add_points[k]);
+                }
+            }
+            else if (index_p1_in_contour < index_p2_in_contour)
+            {
+                for (size_t k = index_p1_in_contour; k <= index_p2_in_contour; k++)
+                {
+                    points.push_back(add_points[k]);
+                }
+            }
+            else
+            {
+                return false;
+            }
+            if (abs(p1.x - p2.x) > abs(p1.y - p2.y))
+            {
+                sort(points.begin(), points.end(), sortPointsByX());
+            }
+            else
+            {
+                sort(points.begin(), points.end(), sortPointsByY());
+            }
+
+            temp_patterns_add_points.push_back(std::pair<int, vector<Point> >(idx_curved_side,points));
+        }
+    }
+
+    return true;
+}
+
+bool QRDecode::computePatternsAddingPoints(std::map<int, vector<Point> > &patterns_add_points)
+{
+    vector<std::pair<int, vector<Point> > > temp_patterns_add_points;
+    if(!findTempPatternsAddingPoints(temp_patterns_add_points))
+    {
+        return false;
+    }
+
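+    // Downsample each extracted pattern arc to a few representative points and
+    // merge arcs that belong to the same side into one list.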
+    const int num_points_in_pattern = 3;
+    for(size_t i = 0; i < temp_patterns_add_points.size(); i++)
+    {
+        int idx_side = temp_patterns_add_points[i].first;
+        int size = (int)temp_patterns_add_points[i].second.size();
+
+        float step = static_cast<float>(size) / num_points_in_pattern;
+        vector<Point> temp_points;
+        for (int j = 0; j < num_points_in_pattern; j++)
+        {
+            float val = j * step;
+            int idx = cvRound(val) >= size ? size - 1 : cvRound(val);
+            temp_points.push_back(temp_patterns_add_points[i].second[idx]);
+        }
+        temp_points.push_back(temp_patterns_add_points[i].second.back());
+        if(patterns_add_points.count(idx_side) == 1)
+        {
+            patterns_add_points[idx_side].insert(patterns_add_points[idx_side].end(),
+                                                 temp_points.begin(), temp_points.end());
+        }
+        else
+        {
+            patterns_add_points.insert(std::pair<int, vector<Point> >(idx_side, temp_points));
+        }
+
+    }
+    if (patterns_add_points.size() == 0)
+    {
+        return false;
+    }
+
+    return true;
+}
+
+bool QRDecode::addPointsToSides()
+{
+    if(!computePatternsAddingPoints(complete_curved_sides))
+    {
+        return false;
+    }
+    std::map<int, vector<Point> >::iterator it;
+    double mean_step = 0.0;
+    size_t num_points_at_side = 0;
+    for (it = complete_curved_sides.begin(); it != complete_curved_sides.end(); ++it)
+    {
+        int count = -1;
+        const size_t num_points_at_pattern = it->second.size();
+        for(size_t j = 0; j < num_points_at_pattern - 1; j++, count++)
+        {
+            if (count == 3) continue;
+            double temp_norm = norm(it->second[j] -
+                                    it->second[j + 1]);
+            mean_step += temp_norm;
+        }
+        num_points_at_side += num_points_at_pattern;
+    }
+    if (num_points_at_side == 0)
+    {
+        return false;
+    }
+    mean_step /= num_points_at_side;
+
+    const size_t num_incomplete_sides = curved_incomplete_indexes.size();
+    for (size_t i = 0; i < num_incomplete_sides; i++)
+    {
+        int idx = curved_incomplete_indexes[i];
+        vector<int> sides_points_indexes;
+
+        const int num_points_at_side_to_add = (int)sides_points[idx].size();
+        for (int j = 0; j < num_points_at_side_to_add; j++)
+        {
+            bool not_too_close = true;
+            const size_t num_points_at_side_exist = complete_curved_sides[idx].size();
+            for (size_t k = 0; k < num_points_at_side_exist; k++)
+            {
+                double temp_norm = norm(sides_points[idx][j] - complete_curved_sides[idx][k]);
+                if (temp_norm < mean_step)
+                {
+                    not_too_close = false;
+                    break;
+                }
+            }
+            if (not_too_close)
+            {
+                sides_points_indexes.push_back(j);
+            }
+        }
+
+        for (size_t j = 0; j < sides_points_indexes.size(); j++)
+        {
+            bool not_equal = true;
+            for (size_t k = 0; k < complete_curved_sides[idx].size(); k++)
+            {
+                if (sides_points[idx][sides_points_indexes[j]] ==
+                    complete_curved_sides[idx][k])
+                {
+                    not_equal = false;
+                }
+            }
+            if (not_equal)
+            {
+                complete_curved_sides[idx].push_back(sides_points[idx][sides_points_indexes[j]]);
+            }
+        }
+    }
+
+    return true;
+}
+
+void QRDecode::completeAndSortSides()
+{
+    if (complete_curved_sides.size() < 2)
+    {
+        for (int i = 0; i < NUM_SIDES; i++)
+        {
+            if(complete_curved_sides.count(curved_indexes[i]) == 0)
+            {
+                int idx_second_cur_side = curved_indexes[i];
+                complete_curved_sides.insert(std::pair<int,vector<Point> >(idx_second_cur_side, sides_points[idx_second_cur_side]));
+            }
+        }
+    }
+    std::map<int,vector<Point> >::iterator it;
+    for (it = complete_curved_sides.begin(); it != complete_curved_sides.end(); ++it)
+    {
+        Point p1 = it->second.front();
+        Point p2 = it->second.back();
+        if (abs(p1.x - p2.x) > abs(p1.y - p2.y))
+        {
+            sort(it->second.begin(), it->second.end(), sortPointsByX());
+        }
+        else
+        {
+            sort(it->second.begin(), it->second.end(), sortPointsByY());
+        }
+    }
+}
+
+vector<vector<float> > QRDecode::computeSpline(const vector<int> &x_arr, const vector<int> &y_arr)
+{
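+    // Natural cubic spline interpolation: y_arr supplies the knot positions and
+    // x_arr the values; the result S[i] stores the coefficients {a, b, c, d} of
+    // segment S_i(t) = a + b*(t - y_i) + c*(t - y_i)^2 + d*(t - y_i)^3.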
+    const int n = (int)x_arr.size();
+    vector<float> a, b(n - 1), d(n - 1), h(n - 1), alpha(n - 1), c(n), l(n), mu(n), z(n);
+
+    for (int i = 0; i < (int)y_arr.size(); i++)
+    {
+        a.push_back(static_cast<float>(x_arr[i]));
+    }
+    for (int i = 0; i < n - 1; i++)
+    {
+        h[i] = static_cast<float>(y_arr[i + 1] - y_arr[i]);
+    }
+    for (int i = 1; i < n - 1; i++)
+    {
+        alpha[i] = 3 / h[i] * (a[i + 1] - a[i]) - 3 / (h[i - 1]) * (a[i] - a[i - 1]);
+    }
+    l[0] = 1;
+    mu[0] = 0;
+    z[0] = 0;
+
+    for (int i = 1; i < n - 1; i++)
+    {
+        l[i] = 2 * (y_arr[i + 1] - y_arr[i - 1]) - h[i - 1] * mu[i - 1];
+        mu[i] = h[i] / l[i];
+        z[i] = (alpha[i] - h[i - 1] * z[i - 1]) / l[i];
+    }
+    l[n - 1] = 1;
+    z[n - 1] = 0;
+    c[n - 1] = 0;
+
+    for(int j = n - 2; j >= 0; j--)
+    {
+        c[j] = z[j] - mu[j] * c[j + 1];
+        b[j] = (a[j + 1] - a[j]) / h[j] - (h[j] * (c[j + 1] + 2 * c[j])) / 3;
+        d[j] = (c[j + 1] - c[j]) / (3 * h[j]);
+    }
+
+    vector<vector<float> > S(n - 1);
+    for (int i = 0; i < n - 1; i++)
+    {
+        S[i].push_back(a[i]);
+        S[i].push_back(b[i]);
+        S[i].push_back(c[i]);
+        S[i].push_back(d[i]);
+    }
+
+    return S;
+}
+
+bool QRDecode::createSpline(vector<vector<Point2f> > &spline_lines)
+{
+    int start, end;
+    vector<vector<float> > S;
+
+    for (int idx = 0; idx < NUM_SIDES; idx++)
+    {
+        int idx_curved_side = curved_indexes[idx];
+
+        vector<Point> spline_points = complete_curved_sides.find(idx_curved_side)->second;
+        vector<int> x_arr, y_arr;
+
+        for (size_t j = 0; j < spline_points.size(); j++)
+        {
+            x_arr.push_back(cvRound(spline_points[j].x));
+            y_arr.push_back(cvRound(spline_points[j].y));
+        }
+
+        bool horizontal_order = abs(x_arr.front() - x_arr.back()) > abs(y_arr.front() - y_arr.back());
+        vector<int>& second_arr = horizontal_order ? x_arr : y_arr;
+        vector<int>& first_arr  = horizontal_order ? y_arr : x_arr;
+
+        S = computeSpline(first_arr, second_arr);
+
+        int closest_point_first  = horizontal_order ? closest_points[idx_curved_side].second.x
+                                                    : closest_points[idx_curved_side].second.y;
+        int closest_point_second = horizontal_order ? closest_points[(idx_curved_side + 1) % 4].second.x
+                                                    : closest_points[(idx_curved_side + 1) % 4].second.y;
+
+        start = idx_curved_side;
+        end = (idx_curved_side + 1) % 4;
+        if(closest_point_first > closest_point_second)
+        {
+            start = (idx_curved_side + 1) % 4;
+            end = idx_curved_side;
+        }
+
+        int closest_point_start = horizontal_order ? closest_points[start].second.x : closest_points[start].second.y;
+        int closest_point_end   = horizontal_order ? closest_points[end].second.x   : closest_points[end].second.y;
+
+        for (int index = closest_point_start; index <= closest_point_end; index++)
+        {
+            if (index == second_arr.front())
+            {
+                spline_lines[idx].push_back(closest_points[start].second);
+            }
+            for (size_t i = 0; i < second_arr.size() - 1; i++)
+            {
+                if ((index > second_arr[i]) && (index <= second_arr[i + 1]))
+                {
+                    const float t = static_cast<float>(index - second_arr[i]);
+                    float val = S[i][0] + S[i][1] * t + S[i][2] * t * t + S[i][3] * t * t * t;
+                    spline_lines[idx].push_back(horizontal_order ? Point2f(static_cast<float>(index), val) : Point2f(val, static_cast<float>(index)));
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool QRDecode::divideIntoEvenSegments(vector<vector<Point2f> > &segments_points)
+{
+    vector<vector<Point2f> > spline_lines(NUM_SIDES);
+    if (!createSpline(spline_lines))
+    {
+        return false;
+    }
+    float mean_num_points_in_line = 0.0;
+    for (int i = 0; i < NUM_SIDES; i++)
+    {
+        mean_num_points_in_line += spline_lines[i].size();
+    }
+    mean_num_points_in_line /= NUM_SIDES;
+    const int min_num_points = 1, max_num_points = cvRound(mean_num_points_in_line / 2.0);
+    float linear_threshold = 0.5f;
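+    // Grow the number of segments per side until, on average, the chords deviate
+    // from the spline by less than linear_threshold.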
+    for (int num = min_num_points; num < max_num_points; num++)
+    {
+        for (int i = 0; i < NUM_SIDES; i++)
+        {
+            segments_points[i].clear();
+
+            int size = (int)spline_lines[i].size();
+            float step = static_cast<float>(size) / num;
+            for (int j = 0; j < num; j++)
+            {
+                float val = j * step;
+                int idx = cvRound(val) >= size ? size - 1 : cvRound(val);
+                segments_points[i].push_back(spline_lines[i][idx]);
+            }
+            segments_points[i].push_back(spline_lines[i].back());
+        }
+        float mean_of_two_sides = 0.0;
+        for (int i = 0; i < NUM_SIDES; i++)
+        {
+            float mean_dist_in_segment = 0.0;
+            for (size_t j = 0; j < segments_points[i].size() - 1; j++)
+            {
+                Point2f segment_start = segments_points[i][j];
+                Point2f segment_end   = segments_points[i][j + 1];
+                vector<Point2f>::iterator it_start, it_end, it;
+                it_start = find(spline_lines[i].begin(), spline_lines[i].end(), segment_start);
+                it_end   = find(spline_lines[i].begin(), spline_lines[i].end(), segment_end);
+                float max_dist_to_line = 0.0;
+                for (it = it_start; it != it_end; it++)
+                {
+                    float temp_dist = distancePointToLine(*it, segment_start, segment_end);
+                    if (temp_dist > max_dist_to_line)
+                    {
+                        max_dist_to_line = temp_dist;
+                    }
+                }
+                mean_dist_in_segment += max_dist_to_line;
+            }
+            mean_dist_in_segment /= segments_points[i].size();
+            mean_of_two_sides    += mean_dist_in_segment;
+        }
+        mean_of_two_sides /= NUM_SIDES;
+        if (mean_of_two_sides < linear_threshold)
+        {
+            break;
+        }
+    }
+
+    return true;
+}
+
+bool QRDecode::straightenQRCodeInParts()
+{
+    vector<vector<Point2f> > segments_points(NUM_SIDES);
+    if (!divideIntoEvenSegments(segments_points))
+    {
+        return false;
+    }
+    vector<Point2f> current_curved_side, opposite_curved_side;
+
+    for (int i = 0; i < NUM_SIDES; i++)
+    {
+        Point2f temp_point_start = segments_points[i].front();
+        Point2f temp_point_end   = segments_points[i].back();
+        bool horizontal_order = (abs(temp_point_start.x - temp_point_end.x) >
+                                 abs(temp_point_start.y - temp_point_end.y));
+        float compare_point_current  = horizontal_order ? segments_points[i].front().y
+                                                        : segments_points[(i + 1) % 2].front().x;
+        float compare_point_opposite = horizontal_order ? segments_points[(i + 1) % 2].front().y
+                                                        : segments_points[i].front().x;
+
+        if (compare_point_current > compare_point_opposite)
+        {
+            current_curved_side  = segments_points[i];
+            opposite_curved_side = segments_points[(i + 1) % 2];
+        }
+    }
+    if (current_curved_side.size() != opposite_curved_side.size())
+    {
+        return false;
+    }
+    size_t number_pnts_to_cut = current_curved_side.size();
+    if (number_pnts_to_cut == 0)
+    {
+        return false;
+    }
+    float perspective_curved_size = 251.0;
+    const Size temporary_size(cvRound(perspective_curved_size), cvRound(perspective_curved_size));
+
+    float dist = perspective_curved_size / (number_pnts_to_cut - 1);
+    Mat perspective_result = Mat::zeros(temporary_size, CV_8UC1);
+    vector<Point2f> curved_parts_points;
+
+    float start_cut = 0.0;
+    vector<Point2f> temp_closest_points(4);
+
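+    // Cut the code into quadrilateral bands between consecutive segment points and
+    // warp each band onto its own horizontal strip of the perspective image.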
+    for (size_t i = 1; i < number_pnts_to_cut; i++)
+    {
+        curved_parts_points.clear();
+        Mat test_mask = Mat::zeros(bin_barcode.size(), CV_8UC1);
+
+        Point2f start_point = current_curved_side[i];
+        Point2f prev_start_point = current_curved_side[i - 1];
+        Point2f finish_point = opposite_curved_side[i];
+        Point2f prev_finish_point = opposite_curved_side[i - 1];
+
+        for (size_t j = 0; j < qrcode_locations.size(); j++)
+        {
+            if ((pointPosition(start_point, finish_point, qrcode_locations[j]) >= 0) &&
+                (pointPosition(prev_start_point, prev_finish_point, qrcode_locations[j]) <= 0))
+            {
+                test_mask.at<uint8_t>(qrcode_locations[j]) = 255;
+            }
+        }
+
+        vector<Point2f> perspective_points;
+
+        perspective_points.push_back(Point2f(0.0, start_cut));
+        perspective_points.push_back(Point2f(perspective_curved_size, start_cut));
+
+        perspective_points.push_back(Point2f(perspective_curved_size, start_cut + dist));
+        perspective_points.push_back(Point2f(0.0, start_cut + dist));
+
+        perspective_points.push_back(Point2f(perspective_curved_size * 0.5f, start_cut + dist * 0.5f));
+
+        if (i == 1)
+        {
+            for (size_t j = 0; j < closest_points.size(); j++)
+            {
+                if (arePointsNearest(closest_points[j].second, prev_start_point, 3.0))
+                {
+                    temp_closest_points[j] = perspective_points[0];
+                }
+                else if (arePointsNearest(closest_points[j].second, prev_finish_point, 3.0))
+                {
+                    temp_closest_points[j] = perspective_points[1];
+                }
+            }
+        }
+        if (i == number_pnts_to_cut - 1)
+        {
+            for (size_t j = 0; j < closest_points.size(); j++)
+            {
+                if (arePointsNearest(closest_points[j].second, finish_point, 3.0))
+                {
+                    temp_closest_points[j] = perspective_points[2];
+                }
+                else if (arePointsNearest(closest_points[j].second, start_point, 3.0))
+                {
+                    temp_closest_points[j] = perspective_points[3];
+                }
+            }
+        }
+        start_cut += dist;
+
+        curved_parts_points.push_back(prev_start_point);
+        curved_parts_points.push_back(prev_finish_point);
+        curved_parts_points.push_back(finish_point);
+        curved_parts_points.push_back(start_point);
+
+        Point2f center_point = intersectionLines(curved_parts_points[0], curved_parts_points[2],
+                                                 curved_parts_points[1], curved_parts_points[3]);
+        if (cvIsNaN(center_point.x) || cvIsNaN(center_point.y))
+            return false;
+
+        vector<Point2f> pts = curved_parts_points;
+        pts.push_back(center_point);
+
+        Mat H = findHomography(pts, perspective_points);
+        Mat temp_intermediate(temporary_size, CV_8UC1);
+        warpPerspective(test_mask, temp_intermediate, H, temporary_size, INTER_NEAREST);
+        perspective_result += temp_intermediate;
+
+    }
+    Mat white_mask = Mat(temporary_size, CV_8UC1, Scalar(255));
+    Mat inversion = white_mask - perspective_result;
+    Mat temp_result;
+
+    original_curved_points = temp_closest_points;
+
+    Point2f original_center_point = intersectionLines(original_curved_points[0], original_curved_points[2],
+                                                      original_curved_points[1], original_curved_points[3]);
+
+    original_curved_points.push_back(original_center_point);
+
+    for (size_t i = 0; i < original_curved_points.size(); i++)
+    {
+        if (cvIsNaN(original_curved_points[i].x) || cvIsNaN(original_curved_points[i].y))
+            return false;
+    }
+
+    vector<Point2f> perspective_straight_points;
+    perspective_straight_points.push_back(Point2f(0.f, 0.f));
+    perspective_straight_points.push_back(Point2f(perspective_curved_size, 0.f));
+
+    perspective_straight_points.push_back(Point2f(perspective_curved_size, perspective_curved_size));
+    perspective_straight_points.push_back(Point2f(0.f, perspective_curved_size));
+
+    perspective_straight_points.push_back(Point2f(perspective_curved_size * 0.5f, perspective_curved_size * 0.5f));
+
+    Mat H = findHomography(original_curved_points, perspective_straight_points);
+    warpPerspective(inversion, temp_result, H, temporary_size, INTER_NEAREST, BORDER_REPLICATE);
+
+    no_border_intermediate = temp_result(Range(1, temp_result.rows), Range(1, temp_result.cols));
+    const int border = cvRound(0.1 * perspective_curved_size);
+    const int borderType = BORDER_CONSTANT;
+    copyMakeBorder(no_border_intermediate, curved_to_straight, border, border, border, border, borderType, Scalar(255));
+    intermediate = curved_to_straight;
+
+    return true;
+}
+
+bool QRDecode::preparingCurvedQRCodes()
+{
+    vector<Point> result_integer_hull;
+    getPointsInsideQRCode(original_points);
+    if (qrcode_locations.size() == 0)
+        return false;
+    convexHull(qrcode_locations, result_integer_hull);
+    if (!computeClosestPoints(result_integer_hull))
+        return false;
+    if (!computeSidesPoints(result_integer_hull))
+        return false;
+    if (!findAndAddStablePoint(result_integer_hull))
+        return false;
+    if (!findIndexesCurvedSides())
+        return false;
+    if (findIncompleteIndexesCurvedSides())
+    {
+        if(!addPointsToSides())
+            return false;
+    }
+    completeAndSortSides();
+    if (!straightenQRCodeInParts())
+        return false;
+
+    return true;
+}
+
+bool QRDecode::updatePerspective()
+{
+    CV_TRACE_FUNCTION();
+    const Point2f centerPt = intersectionLines(original_points[0], original_points[2],
+                                               original_points[1], original_points[3]);
+    if (cvIsNaN(centerPt.x) || cvIsNaN(centerPt.y))
+        return false;
+
+    const Size temporary_size(cvRound(test_perspective_size), cvRound(test_perspective_size));
+
+    vector<Point2f> perspective_points;
+    perspective_points.push_back(Point2f(0.f, 0.f));
+    perspective_points.push_back(Point2f(test_perspective_size, 0.f));
+
+    perspective_points.push_back(Point2f(test_perspective_size, test_perspective_size));
+    perspective_points.push_back(Point2f(0.f, test_perspective_size));
+
+    perspective_points.push_back(Point2f(test_perspective_size * 0.5f, test_perspective_size * 0.5f));
+
+    vector<Point2f> pts = original_points;
+    pts.push_back(centerPt);
+
+    Mat H = findHomography(pts, perspective_points);
+    Mat bin_original;
+    adaptiveThreshold(original, bin_original, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 83, 2);
+    Mat temp_intermediate;
+    warpPerspective(bin_original, temp_intermediate, H, temporary_size, INTER_NEAREST);
+    no_border_intermediate = temp_intermediate(Range(1, temp_intermediate.rows), Range(1, temp_intermediate.cols));
+
+    const int border = cvRound(0.1 * test_perspective_size);
+    const int borderType = BORDER_CONSTANT;
+    copyMakeBorder(no_border_intermediate, intermediate, border, border, border, border, borderType, Scalar(255));
+    return true;
+}
+
+inline Point computeOffset(const vector<Point>& v)
+{
+    // compute the width/height of convex hull
+    Rect areaBox = boundingRect(v);
+
+    // compute a suitable offset:
+    // the finder pattern is 7 modules wide,
+    // so the middle of its outer stripe lies at 1/14 of the bounding-box size
+    const int cStep = 7 * 2;
+    Point offset = Point(areaBox.width, areaBox.height);
+    offset /= cStep;
+    return offset;
+}
+
+bool QRDecode::versionDefinition()
+{
+    CV_TRACE_FUNCTION();
+    LineIterator line_iter(intermediate, Point2f(0, 0), Point2f(test_perspective_size, test_perspective_size));
+    Point black_point = Point(0, 0);
+    for(int j = 0; j < line_iter.count; j++, ++line_iter)
+    {
+        const uint8_t value = intermediate.at<uint8_t>(line_iter.pos());
+        if (value == 0)
+        {
+            black_point = line_iter.pos();
+            break;
+        }
+    }
+
+    Mat mask = Mat::zeros(intermediate.rows + 2, intermediate.cols + 2, CV_8UC1);
+    floodFill(intermediate, mask, black_point, 255, 0, Scalar(), Scalar(), FLOODFILL_MASK_ONLY);
+
+    vector<Point> locations, non_zero_elem;
+    Mat mask_roi = mask(Range(1, intermediate.rows - 1), Range(1, intermediate.cols - 1));
+    findNonZero(mask_roi, non_zero_elem);
+    convexHull(non_zero_elem, locations);
+    Point offset = computeOffset(locations);
+
+    Point temp_remote = locations[0], remote_point;
+    const Point delta_diff = offset;
+    for (size_t i = 0; i < locations.size(); i++)
+    {
+        if (norm(black_point - temp_remote) <= norm(black_point - locations[i]))
+        {
+            const uint8_t value = intermediate.at<uint8_t>(temp_remote - delta_diff);
+            temp_remote = locations[i];
+            if (value == 0) { remote_point = temp_remote - delta_diff; }
+            else { remote_point = temp_remote - (delta_diff / 2); }
+        }
+    }
+
+    size_t transition_x = 0, transition_y = 0;
+
+    uint8_t future_pixel = 255;
+    const uint8_t *intermediate_row = intermediate.ptr<uint8_t>(remote_point.y);
+    for(int i = remote_point.x; i < intermediate.cols; i++)
+    {
+        if (intermediate_row[i] == future_pixel)
+        {
+            future_pixel = static_cast<uint8_t>(~future_pixel);
+            transition_x++;
+        }
+    }
+
+    future_pixel = 255;
+    for(int j = remote_point.y; j < intermediate.rows; j++)
+    {
+        const uint8_t value = intermediate.at<uint8_t>(Point(remote_point.x, j));
+        if (value == future_pixel)
+        {
+            future_pixel = static_cast<uint8_t>(~future_pixel);
+            transition_y++;
+        }
+    }
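+    // Estimate the version from the module transitions: a version-v symbol is
+    // 21 + 4*(v - 1) modules wide, so the transition count along a scan line
+    // grows linearly with the version.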
+    version = saturate_cast<uint8_t>((std::min(transition_x, transition_y) - 1) * 0.25 - 1);
+    if (!(0 < version && version <= 40)) { return false; }
+    version_size = 21 + (version - 1) * 4;
+    return true;
+}
+
+bool QRDecode::samplingForVersion()
+{
+    CV_TRACE_FUNCTION();
+    const double multiplyingFactor = (version < 3)  ? 1 :
+                                     (version == 3) ? 1.5 :
+                                     version * (version + 1);
+    const Size newFactorSize(
+                  cvRound(no_border_intermediate.size().width  * multiplyingFactor),
+                  cvRound(no_border_intermediate.size().height * multiplyingFactor));
+    Mat postIntermediate(newFactorSize, CV_8UC1);
+    resize(no_border_intermediate, postIntermediate, newFactorSize, 0, 0, INTER_AREA);
+
+    const int delta_rows = cvRound((postIntermediate.rows * 1.0) / version_size);
+    const int delta_cols = cvRound((postIntermediate.cols * 1.0) / version_size);
+
+    vector<double> listFrequencyElem;
+    for (int r = 0; r < postIntermediate.rows; r += delta_rows)
+    {
+        for (int c = 0; c < postIntermediate.cols; c += delta_cols)
+        {
+            Mat tile = postIntermediate(
+                           Range(r, min(r + delta_rows, postIntermediate.rows)),
+                           Range(c, min(c + delta_cols, postIntermediate.cols)));
+            const double frequencyElem = (countNonZero(tile) * 1.0) / tile.total();
+            listFrequencyElem.push_back(frequencyElem);
+        }
+    }
+
+    double dispersionEFE = std::numeric_limits<double>::max();
+    double experimentalFrequencyElem = 0;
+    for (double expVal = 0; expVal < 1; expVal+=0.001)
+    {
+        double testDispersionEFE = 0.0;
+        for (size_t i = 0; i < listFrequencyElem.size(); i++)
+        {
+            testDispersionEFE += (listFrequencyElem[i] - expVal) *
+                                 (listFrequencyElem[i] - expVal);
+        }
+        testDispersionEFE /= (listFrequencyElem.size() - 1);
+        if (dispersionEFE > testDispersionEFE)
+        {
+            dispersionEFE = testDispersionEFE;
+            experimentalFrequencyElem = expVal;
+        }
+    }
+
+    straight = Mat(Size(version_size, version_size), CV_8UC1, Scalar(0));
+    for (int r = 0; r < version_size * version_size; r++)
+    {
+        int i   = r / straight.cols;
+        int j   = r % straight.cols;
+        straight.ptr<uint8_t>(i)[j] = (listFrequencyElem[r] < experimentalFrequencyElem) ? 0 : 255;
+    }
+    return true;
+}
+
+bool QRDecode::decodingProcess()
+{
+#ifdef HAVE_QUIRC
+    if (straight.empty()) { return false; }
+
+    quirc_code qr_code;
+    memset(&qr_code, 0, sizeof(qr_code));
+
+    qr_code.size = straight.size().width;
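+    // quirc stores the symbol as a row-major bitmap with one bit per module,
+    // where a set bit marks a dark (zero-valued) module.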
+    for (int x = 0; x < qr_code.size; x++)
+    {
+        for (int y = 0; y < qr_code.size; y++)
+        {
+            int position = y * qr_code.size + x;
+            qr_code.cell_bitmap[position >> 3]
+                |= straight.ptr<uint8_t>(y)[x] ? 0 : (1 << (position & 7));
+        }
+    }
+
+    quirc_data qr_code_data;
+    quirc_decode_error_t errorCode = quirc_decode(&qr_code, &qr_code_data);
+    if (errorCode != 0) { return false; }
+
+    for (int i = 0; i < qr_code_data.payload_len; i++)
+    {
+        result_info += qr_code_data.payload[i];
+    }
+    return true;
+#else
+    return false;
+#endif
+
+}
+
+bool QRDecode::straightDecodingProcess()
+{
+#ifdef HAVE_QUIRC
+    if (!updatePerspective())  { return false; }
+    if (!versionDefinition())  { return false; }
+    if (!samplingForVersion()) { return false; }
+    if (!decodingProcess())    { return false; }
+    return true;
+#else
+    std::cout << "Library QUIRC is not linked. No decoding is performed. Rebuild OpenCV with the QUIRC library to enable decoding." << std::endl;
+    return false;
+#endif
+}
+
+bool QRDecode::curvedDecodingProcess()
+{
+#ifdef HAVE_QUIRC
+    if (!preparingCurvedQRCodes()) { return false; }
+    if (!versionDefinition())  { return false; }
+    if (!samplingForVersion()) { return false; }
+    if (!decodingProcess())    { return false; }
+    return true;
+#else
+    std::cout << "Library QUIRC is not linked. No decoding is performed. Rebuild OpenCV with the QUIRC library to enable decoding." << std::endl;
     return false;
 #endif
 }
@@ -1227,6 +2467,13 @@ bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, O
     return !decoded_info.empty();
 }
 
+bool decodeCurvedQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode)
+{
+    QRCodeDetector qrcode;
+    decoded_info = qrcode.decodeCurved(in, points, straight_qrcode);
+    return !decoded_info.empty();
+}
+
 cv::String QRCodeDetector::decode(InputArray in, InputArray points,
                                   OutputArray straight_qrcode)
 {
@@ -1241,7 +2488,35 @@ cv::String QRCodeDetector::decode(InputArray in, InputArray points,
 
     QRDecode qrdec;
     qrdec.init(inarr, src_points);
-    bool ok = qrdec.fullDecodingProcess();
+    bool ok = qrdec.straightDecodingProcess();
+
+    std::string decoded_info = qrdec.getDecodeInformation();
+
+    if (ok && straight_qrcode.needed())
+    {
+        qrdec.getStraightBarcode().convertTo(straight_qrcode,
+                                             straight_qrcode.fixedType() ?
+                                             straight_qrcode.type() : CV_32FC2);
+    }
+
+    return ok ? decoded_info : std::string();
+}
+
+cv::String QRCodeDetector::decodeCurved(InputArray in, InputArray points,
+                                        OutputArray straight_qrcode)
+{
+    Mat inarr;
+    if (!checkQRInputImage(in, inarr))
+        return std::string();
+
+    vector<Point2f> src_points;
+    points.copyTo(src_points);
+    CV_Assert(src_points.size() == 4);
+    CV_CheckGT(contourArea(src_points), 0.0, "Invalid QR code source points");
+
+    QRDecode qrdec;
+    qrdec.init(inarr, src_points);
+    bool ok = qrdec.curvedDecodingProcess();
 
     std::string decoded_info = qrdec.getDecodeInformation();
 
@@ -1278,6 +2553,29 @@ cv::String QRCodeDetector::detectAndDecode(InputArray in,
     return decoded_info;
 }
 
+cv::String QRCodeDetector::detectAndDecodeCurved(InputArray in,
+                                                 OutputArray points_,
+                                                 OutputArray straight_qrcode)
+{
+    Mat inarr;
+    if (!checkQRInputImage(in, inarr))
+    {
+        points_.release();
+        return std::string();
+    }
+
+    vector<Point2f> points;
+    bool ok = detect(inarr, points);
+    if (!ok)
+    {
+        points_.release();
+        return std::string();
+    }
+    updatePointsResult(points_, points);
+    std::string decoded_info = decodeCurved(inarr, points, straight_qrcode);
+    return decoded_info;
+}
+
 class QRDetectMulti : public QRDetect
 {
 public:
@@ -1510,7 +2808,6 @@ void QRDetectMulti::fixationPoints(vector<Point2f> &local_point)
                 Point2f(static_cast<float>(bin_barcode_temp.cols - 1),
                         static_cast<float>(bin_barcode_temp.rows - 1))));
 
-
         vector<Point2f> list_area_pnt;
         list_area_pnt.push_back(current_point);
 
@@ -2241,7 +3538,7 @@ public:
         for (int i = range.start; i < range.end; i++)
         {
             qrdec[i].init(inarr, src_points[i]);
-            bool ok = qrdec[i].fullDecodingProcess();
+            bool ok = qrdec[i].straightDecodingProcess();
             if (ok)
             {
                 decoded_info[i] = qrdec[i].getDecodeInformation();
@@ -2261,7 +3558,7 @@ public:
                     src_points[i][j] /= static_cast<float>(coeff_expansion);
                 }
                 qrdec[i].init(inarr2, src_points[i]);
-                ok = qrdec[i].fullDecodingProcess();
+                ok = qrdec[i].straightDecodingProcess();
                 if (ok)
                 {
                     decoded_info[i] = qrdec[i].getDecodeInformation();
diff --git a/modules/objdetect/test/test_qrcode.cpp b/modules/objdetect/test/test_qrcode.cpp
index a716c837ee..c26cd8a4f2 100644
--- a/modules/objdetect/test/test_qrcode.cpp
+++ b/modules/objdetect/test/test_qrcode.cpp
@@ -21,6 +21,9 @@ std::string qrcode_images_close[] = {
 std::string qrcode_images_monitor[] = {
   "monitor_1.png", "monitor_2.png", "monitor_3.png", "monitor_4.png", "monitor_5.png"
 };
+std::string qrcode_images_curved[] = {
+  "curved_1.jpg", "curved_2.jpg", "curved_3.jpg", "curved_4.jpg", "curved_5.jpg", "curved_6.jpg", "curved_7.jpg", "curved_8.jpg"
+};
 std::string qrcode_images_multiple[] = {
   "2_qrcodes.png", "3_close_qrcodes.png", "3_qrcodes.png", "4_qrcodes.png",
   "5_qrcodes.png", "6_qrcodes.png", "7_qrcodes.png", "8_close_qrcodes.png"
@@ -137,7 +140,38 @@ TEST(Objdetect_QRCode_Monitor, generate_test_data)
     file_config << "]";
     file_config.release();
 }
+
+TEST(Objdetect_QRCode_Curved, generate_test_data)
+{
+    const std::string root = "qrcode/curved/";
+    const std::string dataset_config = findDataFile(root + "dataset_config.json");
+    FileStorage file_config(dataset_config, FileStorage::WRITE);
 
+    file_config << "test_images" << "[";
+    size_t images_count = sizeof(qrcode_images_curved) / sizeof(qrcode_images_curved[0]);
+    for (size_t i = 0; i < images_count; i++)
+    {
+        file_config << "{:" << "image_name" << qrcode_images_curved[i];
+        std::string image_path = findDataFile(root + qrcode_images_curved[i]);
+        std::vector<Point> corners;
+        Mat src = imread(image_path, IMREAD_GRAYSCALE), straight_barcode;
+        std::string decoded_info;
+        ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path;
+        EXPECT_TRUE(detectQRCode(src, corners));
+#ifdef HAVE_QUIRC
+        EXPECT_TRUE(decodeCurvedQRCode(src, corners, decoded_info, straight_barcode));
+#endif
+        file_config << "x" << "[:";
+        for (size_t j = 0; j < corners.size(); j++) { file_config << corners[j].x; }
+        file_config << "]";
+        file_config << "y" << "[:";
+        for (size_t j = 0; j < corners.size(); j++) { file_config << corners[j].y; }
+        file_config << "]";
+        file_config << "info" << decoded_info;
+        file_config << "}";
+    }
+    file_config << "]";
+    file_config.release();
+}
+
 TEST(Objdetect_QRCode_Multi, generate_test_data)
 {
     const std::string root = "qrcode/multiple/";
@@ -390,6 +424,66 @@ TEST_P(Objdetect_QRCode_Monitor, regression)
     }
 }
 
+typedef testing::TestWithParam< std::string > Objdetect_QRCode_Curved;
+TEST_P(Objdetect_QRCode_Curved, regression)
+{
+    const std::string name_current_image = GetParam();
+    const std::string root = "qrcode/curved/";
+    const int pixels_error = 3;
+
+    std::string image_path = findDataFile(root + name_current_image);
+    Mat src = imread(image_path, IMREAD_GRAYSCALE), straight_barcode;
+    ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path;
+
+    std::vector<Point> corners;
+    std::string decoded_info;
+    QRCodeDetector qrcode;
+#ifdef HAVE_QUIRC
+    decoded_info = qrcode.detectAndDecodeCurved(src, corners, straight_barcode);
+    ASSERT_FALSE(corners.empty());
+    ASSERT_FALSE(decoded_info.empty());
+#else
+    ASSERT_TRUE(qrcode.detect(src, corners));
+#endif
+
+    const std::string dataset_config = findDataFile(root + "dataset_config.json");
+    FileStorage file_config(dataset_config, FileStorage::READ);
+    ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config;
+    {
+        FileNode images_list = file_config["test_images"];
+        size_t images_count = static_cast<size_t>(images_list.size());
+        ASSERT_GT(images_count, 0u) << "Can't find validation data entries in 'test_images': " << dataset_config;
+
+        for (size_t index = 0; index < images_count; index++)
+        {
+            FileNode config = images_list[(int)index];
+            std::string name_test_image = config["image_name"];
+            if (name_test_image == name_current_image)
+            {
+                for (int i = 0; i < 4; i++)
+                {
+                    int x = config["x"][i];
+                    int y = config["y"][i];
+                    EXPECT_NEAR(x, corners[i].x, pixels_error);
+                    EXPECT_NEAR(y, corners[i].y, pixels_error);
+                }
+
+#ifdef HAVE_QUIRC
+                std::string original_info = config["info"];
+                EXPECT_EQ(decoded_info, original_info);
+#endif
+
+                return; // done
+            }
+        }
+        std::cerr
+            << "Not found results for '" << name_current_image
+            << "' image in config file:" << dataset_config << std::endl
+            << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data."
+            << std::endl;
+    }
+}
+
 typedef testing::TestWithParam < std::string > Objdetect_QRCode_Multi;
 TEST_P(Objdetect_QRCode_Multi, regression)
 {
@@ -478,6 +572,7 @@ TEST_P(Objdetect_QRCode_Multi, regression)
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode, testing::ValuesIn(qrcode_images_name));
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Close, testing::ValuesIn(qrcode_images_close));
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Monitor, testing::ValuesIn(qrcode_images_monitor));
+INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Curved, testing::ValuesIn(qrcode_images_curved));
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Multi, testing::ValuesIn(qrcode_images_multiple));
 
 TEST(Objdetect_QRCode_decodeMulti, decode_regression_16491)

From 36598677cffe5ab7a22ffb4af3a2e2a73fe55db4 Mon Sep 17 00:00:00 2001
From: Quentin Chateau <quentin.chateau@gmail.com>
Date: Sun, 25 Oct 2020 16:58:27 +0100
Subject: [PATCH 050/152] Merge pull request #18646 from qchateau:wave-auto

* stitching: add WAVE_CORRECT_AUTO

* stitching: use CV_EXPORTS
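
A minimal usage sketch (the function and variable names here are illustrative,
not part of the patch): passing the new WAVE_CORRECT_AUTO value lets
waveCorrect() pick the correction axis from the camera rotations via
autoDetectWaveCorrectKind():

```
#include <opencv2/stitching/detail/motion_estimators.hpp>

// rmats: one CV_32F rotation matrix per camera, e.g. collected from
// Stitcher::cameras() after estimateTransform().
void applyAutoWaveCorrection(std::vector<cv::Mat>& rmats)
{
    // WAVE_CORRECT_AUTO defers to autoDetectWaveCorrectKind(), which picks
    // vertical correction when the projected views spread mostly vertically,
    // and horizontal correction otherwise.
    cv::detail::waveCorrect(rmats, cv::detail::WAVE_CORRECT_AUTO);
}
```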
---
 .../stitching/detail/motion_estimators.hpp    | 12 ++++-
 modules/stitching/src/motion_estimators.cpp   | 45 +++++++++++++++++
 modules/stitching/test/test_precomp.hpp       |  1 +
 .../stitching/test/test_wave_correction.cpp   | 50 +++++++++++++++++++
 4 files changed, 107 insertions(+), 1 deletion(-)
 create mode 100644 modules/stitching/test/test_wave_correction.cpp

diff --git a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
index ff05af1814..ad21ee1277 100644
--- a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
+++ b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
@@ -328,9 +328,19 @@ private:
 enum WaveCorrectKind
 {
     WAVE_CORRECT_HORIZ,
-    WAVE_CORRECT_VERT
+    WAVE_CORRECT_VERT,
+    WAVE_CORRECT_AUTO
 };
 
+/** @brief Tries to detect the wave correction kind depending
+on whether a panorama spans horizontally or vertically
+
+@param rmats Camera rotation matrices.
+@return The correction kind to use for this panorama
+ */
+CV_EXPORTS
+WaveCorrectKind autoDetectWaveCorrectKind(const std::vector<Mat> &rmats);
+
 /** @brief Tries to make panorama more horizontal (or vertical).
 
 @param rmats Camera rotation matrices.
diff --git a/modules/stitching/src/motion_estimators.cpp b/modules/stitching/src/motion_estimators.cpp
index d9848dbe7f..c0b46b101d 100644
--- a/modules/stitching/src/motion_estimators.cpp
+++ b/modules/stitching/src/motion_estimators.cpp
@@ -886,6 +886,45 @@ void BundleAdjusterAffinePartial::calcJacobian(Mat &jac)
 
 //////////////////////////////////////////////////////////////////////////////
 
+WaveCorrectKind autoDetectWaveCorrectKind(const std::vector<Mat> &rmats)
+{
+    std::vector<float> xs, ys;
+    xs.reserve(rmats.size());
+    ys.reserve(rmats.size());
+
+    // Project the homogeneous point [0, 0, 1, 1] into the camera image frame.
+    // Intrinsic parameters and camera translation are ignored, as they have
+    // little influence on the spanning direction of the panorama.
+    // This also means we can simply use "rmat.col(2)" as the homogeneous
+    // coordinate of the projected point.
+    for (const Mat& rmat: rmats)
+    {
+        CV_Assert(rmat.type() == CV_32F);
+        xs.push_back(rmat.at<float>(0, 2) / rmat.at<float>(2, 2));
+        ys.push_back(rmat.at<float>(1, 2) / rmat.at<float>(2, 2));
+    }
+
+    // Calculate the delta between the max and min values for
+    // both the X and Y axis
+    auto min_max_x = std::minmax_element(xs.begin(), xs.end());
+    auto min_max_y = std::minmax_element(ys.begin(), ys.end());
+    double delta_x = *min_max_x.second - *min_max_x.first;
+    double delta_y = *min_max_y.second - *min_max_y.first;
+
+    // If the Y delta is the biggest, it means the images
+    // mostly span along the vertical axis: correct this axis
+    if (delta_y > delta_x)
+    {
+        LOGLN("  using vertical wave correction");
+        return WAVE_CORRECT_VERT;
+    }
+    else
+    {
+        LOGLN("  using horizontal wave correction");
+        return WAVE_CORRECT_HORIZ;
+    }
+}
+
 void waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind)
 {
     LOGLN("Wave correcting...");
@@ -898,12 +937,18 @@ void waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind)
         return;
     }
 
+    if (kind == WAVE_CORRECT_AUTO)
+    {
+        kind = autoDetectWaveCorrectKind(rmats);
+    }
+
     Mat moment = Mat::zeros(3, 3, CV_32F);
     for (size_t i = 0; i < rmats.size(); ++i)
     {
         Mat col = rmats[i].col(0);
         moment += col * col.t();
     }
+
     Mat eigen_vals, eigen_vecs;
     eigen(moment, eigen_vals, eigen_vecs);
 
diff --git a/modules/stitching/test/test_precomp.hpp b/modules/stitching/test/test_precomp.hpp
index 8e7709a7ec..e761fb1fb0 100644
--- a/modules/stitching/test/test_precomp.hpp
+++ b/modules/stitching/test/test_precomp.hpp
@@ -6,6 +6,7 @@
 
 #include "opencv2/ts.hpp"
 #include "opencv2/stitching.hpp"
+#include "opencv2/stitching/detail/motion_estimators.hpp"
 #include "opencv2/stitching/detail/matchers.hpp"
 #include "opencv2/stitching/detail/blenders.hpp"
 #include "opencv2/stitching/detail/exposure_compensate.hpp"
diff --git a/modules/stitching/test/test_wave_correction.cpp b/modules/stitching/test/test_wave_correction.cpp
new file mode 100644
index 0000000000..1ac8ff07aa
--- /dev/null
+++ b/modules/stitching/test/test_wave_correction.cpp
@@ -0,0 +1,50 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "test_precomp.hpp"
+
+namespace opencv_test {
+namespace {
+
+detail::WaveCorrectKind correctionKind(const std::vector<UMat>& images)
+{
+
+    Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
+    stitcher->estimateTransform(images);
+
+    std::vector<Mat> rmats;
+    auto cameras = stitcher->cameras();
+    for (const auto& camera: cameras)
+        rmats.push_back(camera.R);
+
+    return detail::autoDetectWaveCorrectKind(rmats);
+}
+
+TEST(WaveCorrection, AutoWaveCorrection)
+{
+    std::vector<UMat> images(2);
+    imread(cvtest::TS::ptr()->get_data_path() + "stitching/s1.jpg").copyTo(images[0]);
+    imread(cvtest::TS::ptr()->get_data_path() + "stitching/s2.jpg").copyTo(images[1]);
+
+    EXPECT_EQ(detail::WAVE_CORRECT_HORIZ, correctionKind(images));
+
+    std::vector<UMat> rotated_images(2);
+    rotate(images[0], rotated_images[0], cv::ROTATE_90_CLOCKWISE);
+    rotate(images[1], rotated_images[1], cv::ROTATE_90_CLOCKWISE);
+
+    EXPECT_EQ(detail::WAVE_CORRECT_VERT, correctionKind(rotated_images));
+
+    rotate(images[0], rotated_images[0], cv::ROTATE_90_COUNTERCLOCKWISE);
+    rotate(images[1], rotated_images[1], cv::ROTATE_90_COUNTERCLOCKWISE);
+
+    EXPECT_EQ(detail::WAVE_CORRECT_VERT, correctionKind(rotated_images));
+
+    rotate(images[0], rotated_images[0], cv::ROTATE_180);
+    rotate(images[1], rotated_images[1], cv::ROTATE_180);
+
+    EXPECT_EQ(detail::WAVE_CORRECT_HORIZ, correctionKind(rotated_images));
+}
+
+} // namespace
+} // namespace opencv_test

From afbf383ba3ccb75964206b316f120e9675f314e0 Mon Sep 17 00:00:00 2001
From: Ruslan Garnov <ruslan.garnov@intel.com>
Date: Mon, 26 Oct 2020 02:07:03 +0300
Subject: [PATCH 051/152] Minor s11n and RMat improvements:

 - Changed descr_of(RMat) to use canDescribeHelper to correctly handle the planar case
 - Added export of createMat
 - Added setting of Storage::INPUT and Storage::OUTPUT in the deserialization routine of GComputation
---
 modules/gapi/src/api/gmat.cpp                 |  2 +-
 modules/gapi/src/backends/common/gbackend.hpp |  2 +-
 .../src/backends/common/serialization.cpp     | 21 ++++++++++++-------
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/modules/gapi/src/api/gmat.cpp b/modules/gapi/src/api/gmat.cpp
index d9f135222b..08bb170a86 100644
--- a/modules/gapi/src/api/gmat.cpp
+++ b/modules/gapi/src/api/gmat.cpp
@@ -144,7 +144,7 @@ bool GMatDesc::canDescribe(const cv::Mat& mat) const
 
 bool GMatDesc::canDescribe(const cv::RMat& mat) const
 {
-    return *this == mat.desc();
+    return canDescribeHelper(*this, mat);
 }
 
 }// namespace cv
diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp
index e96d2b0776..8c1749377e 100644
--- a/modules/gapi/src/backends/common/gbackend.hpp
+++ b/modules/gapi/src/backends/common/gbackend.hpp
@@ -134,7 +134,7 @@ inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
     return cv::gapi::getCompileArg<T>(args);
 }
 
-void createMat(const cv::GMatDesc& desc, cv::Mat& mat);
+void GAPI_EXPORTS createMat(const cv::GMatDesc& desc, cv::Mat& mat);
 
 }} // cv::gimpl
 
diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp
index 2b23b33cc8..bb1864823f 100644
--- a/modules/gapi/src/backends/common/serialization.cpp
+++ b/modules/gapi/src/backends/common/serialization.cpp
@@ -94,13 +94,14 @@ void linkNodes(ade::Graph& g) {
 }
 
 void relinkProto(ade::Graph& g) {
+    using namespace cv::gimpl;
     // identify which node handles map to the protocol
     // input/output object in the reconstructed graph
-    using S = std::set<cv::gimpl::RcDesc>;                  // FIXME: use ...
-    using M = std::map<cv::gimpl::RcDesc, ade::NodeHandle>; // FIXME: unordered!
+    using S = std::set<RcDesc>;                  // FIXME: use ...
+    using M = std::map<RcDesc, ade::NodeHandle>; // FIXME: unordered!
 
-    cv::gimpl::GModel::Graph gm(g);
-    auto &proto = gm.metadata().get<cv::gimpl::Protocol>();
+    GModel::Graph gm(g);
+    auto &proto = gm.metadata().get<Protocol>();
 
     const S set_in(proto.inputs.begin(), proto.inputs.end());
     const S set_out(proto.outputs.begin(), proto.outputs.end());
@@ -108,9 +109,9 @@ void relinkProto(ade::Graph& g) {
 
     // Associate the protocol node handles with their resource identifiers
     for (auto &&nh : gm.nodes()) {
-        if (gm.metadata(nh).get<cv::gimpl::NodeType>().t == cv::gimpl::NodeType::DATA) {
-            const auto &d = gm.metadata(nh).get<cv::gimpl::Data>();
-            const auto rc = cv::gimpl::RcDesc{d.rc, d.shape, d.ctor};
+        if (gm.metadata(nh).get<NodeType>().t == NodeType::DATA) {
+            const auto &d = gm.metadata(nh).get<Data>();
+            const auto rc = RcDesc{d.rc, d.shape, d.ctor};
             if (set_in.count(rc) > 0) {
                 GAPI_DbgAssert(set_out.count(rc) == 0);
                 map_in[rc] = nh;
@@ -128,6 +129,12 @@ void relinkProto(ade::Graph& g) {
     proto.out_nhs.clear();
     for (auto &rc : proto.inputs)  { proto.in_nhs .push_back(map_in .at(rc)); }
     for (auto &rc : proto.outputs) { proto.out_nhs.push_back(map_out.at(rc)); }
+
+    // If a subgraph is being serialized it's possible that
+    // some of its in/out nodes are INTERNAL in the full graph.
+    // Set their storage appropriately
+    for (auto &nh : proto.in_nhs)  { gm.metadata(nh).get<Data>().storage = Data::Storage::INPUT; }
+    for (auto &nh : proto.out_nhs) { gm.metadata(nh).get<Data>().storage = Data::Storage::OUTPUT; }
 }
 
 } // anonymous namespace

From 93c3775927024166e59c467bc99302aac15e833b Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Mon, 26 Oct 2020 22:02:03 +0300
Subject: [PATCH 052/152] Merge pull request #18491 from
 TolyaTalamanov:at/wrap-inference

[G-API] Wrap cv::gapi::infer<Generic> into python

* Introduce generic infer

* Move Generic to infer.hpp

* Remove num_outs

* Fix windows warnings

* Address review comments

* Fix doxygen

* Add comment

* Address review comments

* Wrap inference to python

* Add default ctor to Params

* Add test

* Fix clang build

* Implement GInferInputs/GInferOutputs as Pimpl

* Add checkIETarget to infer test

* Fix path

* Suppress warning

* Use getAvailableDevices instead of checkIETarget

* Move PyParams to bindings_ie

* Add namespace

* Update CMakeLists.txt
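
A minimal sketch of the generic-infer C++ API that this change wraps for
Python (the tag "my-net" and the layer names "data"/"prob" are placeholders
for a real network tag and its layer names):

```
#include <opencv2/gapi/infer.hpp>

cv::GMat genericInfer(const cv::GMat& in)
{
    cv::GInferInputs inputs;
    inputs["data"] = in;                 // or: inputs.setInput("data", in);
    cv::GInferOutputs outputs = cv::gapi::infer<cv::gapi::Generic>("my-net", inputs);
    return outputs.at("prob");
}
```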
---
 modules/gapi/CMakeLists.txt                   |  3 +
 modules/gapi/include/opencv2/gapi/infer.hpp   | 29 ++++++---
 .../opencv2/gapi/infer/bindings_ie.hpp        | 56 +++++++++++++++++
 .../gapi/include/opencv2/gapi/infer/ie.hpp    |  2 +-
 modules/gapi/misc/python/pyopencv_gapi.hpp    |  2 +
 modules/gapi/misc/python/shadow_gapi.hpp      | 10 +--
 .../gapi/misc/python/test/test_gapi_infer.py  | 62 +++++++++++++++++++
 modules/gapi/src/api/ginfer.cpp               | 47 ++++++++++----
 modules/gapi/src/backends/ie/bindings_ie.cpp  | 39 ++++++++++++
 9 files changed, 223 insertions(+), 27 deletions(-)
 create mode 100644 modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp
 create mode 100644 modules/gapi/misc/python/test/test_gapi_infer.py
 create mode 100644 modules/gapi/src/backends/ie/bindings_ie.cpp

diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index 88ddeead16..137894cb8f 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -145,6 +145,9 @@ set(gapi_srcs
     # Serialization API and routines
     src/api/s11n.cpp
     src/backends/common/serialization.cpp
+
+    # Python bridge
+    src/backends/ie/bindings_ie.cpp
     )
 
 ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2)
diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp
index 4fdd2df875..9b4580ec6b 100644
--- a/modules/gapi/include/opencv2/gapi/infer.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer.hpp
@@ -133,14 +133,18 @@ struct InOutInfo
  * @{
  * @brief G-API object used to collect network inputs
  */
-class GAPI_EXPORTS GInferInputs
+class GAPI_EXPORTS_W_SIMPLE GInferInputs
 {
+using Map = std::unordered_map<std::string, GMat>;
 public:
+    GAPI_WRAP GInferInputs();
+    GAPI_WRAP void setInput(const std::string& name, const cv::GMat& value);
+
     cv::GMat& operator[](const std::string& name);
-    const std::unordered_map<std::string, cv::GMat>& getBlobs() const;
+    const Map& getBlobs() const;
 
 private:
-    std::unordered_map<std::string, cv::GMat> in_blobs;
+    std::shared_ptr<Map> in_blobs;
 };
 /** @} */
 
@@ -148,16 +152,16 @@ private:
  * @{
  * @brief G-API object used to collect network outputs
  */
-struct GAPI_EXPORTS GInferOutputs
+struct GAPI_EXPORTS_W_SIMPLE GInferOutputs
 {
 public:
+    GAPI_WRAP GInferOutputs() = default;
     GInferOutputs(std::shared_ptr<cv::GCall> call);
-    cv::GMat at(const std::string& name);
+    GAPI_WRAP cv::GMat at(const std::string& name);
 
 private:
-    std::shared_ptr<cv::GCall> m_call;
-    InOutInfo* m_info = nullptr;
-    std::unordered_map<std::string, cv::GMat> out_blobs;
+    struct Priv;
+    std::shared_ptr<Priv> m_priv;
 };
 /** @} */
 
@@ -333,6 +337,11 @@ infer(const std::string& tag, const GInferInputs& inputs)
     return GInferOutputs{std::move(call)};
 }
 
+GAPI_EXPORTS_W inline GInferOutputs infer(const String& name, const GInferInputs& inputs)
+{
+    return infer<Generic>(name, inputs);
+}
+
 } // namespace gapi
 } // namespace cv
 
@@ -361,8 +370,8 @@ struct GAPI_EXPORTS GNetParam {
  *
  * @sa cv::gapi::networks
  */
-struct GAPI_EXPORTS GNetPackage {
-    GNetPackage() : GNetPackage({}) {}
+struct GAPI_EXPORTS_W_SIMPLE GNetPackage {
+    GAPI_WRAP GNetPackage() : GNetPackage({}) {}
     explicit GNetPackage(std::initializer_list<GNetParam> &&ii);
     std::vector<GBackend> backends() const;
     std::vector<GNetParam> networks;
diff --git a/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp b/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp
new file mode 100644
index 0000000000..fdd4128b1a
--- /dev/null
+++ b/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp
@@ -0,0 +1,56 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP
+#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP
+
+#include <opencv2/gapi/util/any.hpp>
+#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
+#include <opencv2/gapi/gkernel.hpp>     // GKernelPackage
+#include <opencv2/gapi/infer/ie.hpp>    // Params
+
+#include <string>
+
+namespace cv {
+namespace gapi {
+namespace ie {
+
+// NB: Used by python wrapper
+// This class can be marked as SIMPLE, because it's implemented as pimpl
+class GAPI_EXPORTS_W_SIMPLE PyParams {
+public:
+    PyParams() = default;
+
+    PyParams(const std::string &tag,
+             const std::string &model,
+             const std::string &weights,
+             const std::string &device);
+
+    PyParams(const std::string &tag,
+             const std::string &model,
+             const std::string &device);
+
+    GBackend      backend() const;
+    std::string   tag()     const;
+    cv::util::any params()  const;
+
+private:
+    std::shared_ptr<Params<cv::gapi::Generic>> m_priv;
+};
+
+GAPI_EXPORTS_W PyParams params(const std::string &tag,
+                               const std::string &model,
+                               const std::string &weights,
+                               const std::string &device);
+
+GAPI_EXPORTS_W PyParams params(const std::string &tag,
+                               const std::string &model,
+                               const std::string &device);
+} // namespace ie
+} // namespace gapi
+} // namespace cv
+
+#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP
diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
index dd2459da08..a8bc0bb05d 100644
--- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
@@ -162,4 +162,4 @@ protected:
 } // namespace gapi
 } // namespace cv
 
-#endif // OPENCV_GAPI_INFER_HPP
+#endif // OPENCV_GAPI_INFER_IE_HPP
diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp
index 0e862a4010..57c0b3db4f 100644
--- a/modules/gapi/misc/python/pyopencv_gapi.hpp
+++ b/modules/gapi/misc/python/pyopencv_gapi.hpp
@@ -5,6 +5,8 @@
 
 // NB: Python wrapper replaces :: with _ for classes
 using gapi_GKernelPackage = cv::gapi::GKernelPackage;
+using gapi_GNetPackage = cv::gapi::GNetPackage;
+using gapi_ie_PyParams = cv::gapi::ie::PyParams;
 using gapi_wip_IStreamSource_Ptr = cv::Ptr<cv::gapi::wip::IStreamSource>;
 
 // FIXME: Python wrapper generate code without namespace std,
diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp
index 72d7686eeb..0fac222212 100644
--- a/modules/gapi/misc/python/shadow_gapi.hpp
+++ b/modules/gapi/misc/python/shadow_gapi.hpp
@@ -6,23 +6,25 @@ namespace cv
    struct GAPI_EXPORTS_W_SIMPLE GCompileArg { };
 
    GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg);
+   GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GNetPackage pkg);
 
    // NB: This classes doesn't exist in *.so
    // HACK: Mark them as a class to force python wrapper generate code for this entities
    class GAPI_EXPORTS_W_SIMPLE GProtoArg { };
    class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { };
    class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { };
-   class GAPI_EXPORTS_W_SIMPLE GRunArg {  };
-   class GAPI_EXPORTS_W_SIMPLE GMetaArg {  };
+   class GAPI_EXPORTS_W_SIMPLE GRunArg { };
+   class GAPI_EXPORTS_W_SIMPLE GMetaArg { };
 
    using GProtoInputArgs  = GIOProtoArgs<In_Tag>;
    using GProtoOutputArgs = GIOProtoArgs<Out_Tag>;
 
    namespace gapi
    {
+       GAPI_EXPORTS_W gapi::GNetPackage networks(const cv::gapi::ie::PyParams& params);
        namespace wip
        {
            class GAPI_EXPORTS_W IStreamSource { };
-       }
-   }
+       } // namespace wip
+   } // namespace gapi
 } // namespace cv
diff --git a/modules/gapi/misc/python/test/test_gapi_infer.py b/modules/gapi/misc/python/test/test_gapi_infer.py
new file mode 100644
index 0000000000..a6fabf7253
--- /dev/null
+++ b/modules/gapi/misc/python/test/test_gapi_infer.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+import numpy as np
+import cv2 as cv
+import os
+
+from tests_common import NewOpenCVTests
+
+
+class test_gapi_infer(NewOpenCVTests):
+
+    def test_getAvailableTargets(self):
+        targets = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_OPENCV)
+        self.assertTrue(cv.dnn.DNN_TARGET_CPU in targets)
+
+
+    def test_age_gender_infer(self):
+
+        # NB: Check IE
+        if cv.dnn.DNN_TARGET_CPU not in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
+            return
+
+        root_path    = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
+        model_path   = self.find_file(root_path + '.xml',   [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
+        weights_path = self.find_file(root_path + '.bin',   [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
+        img_path     = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        device_id    = 'CPU'
+        img          = cv.resize(cv.imread(img_path), (62,62))
+
+        # OpenCV DNN
+        net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
+        net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
+        net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
+
+        blob = cv.dnn.blobFromImage(img)
+
+        net.setInput(blob)
+        dnn_age, dnn_gender = net.forward(net.getUnconnectedOutLayersNames())
+
+        # OpenCV G-API
+        g_in   = cv.GMat()
+        inputs = cv.GInferInputs()
+        inputs.setInput('data', g_in)
+
+        outputs  = cv.gapi.infer("net", inputs)
+        age_g    = outputs.at("age_conv3")
+        gender_g = outputs.at("prob")
+
+        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
+        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
+
+        nets = cv.gapi.networks(pp)
+        args = cv.compile_args(nets)
+        gapi_age, gapi_gender = comp.apply(cv.gin(img), args=args)
+
+        # Check
+        self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
+        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
+
+
+if __name__ == '__main__':
+    NewOpenCVTests.bootstrap()
diff --git a/modules/gapi/src/api/ginfer.cpp b/modules/gapi/src/api/ginfer.cpp
index 31d851b8e6..20511a4aaf 100644
--- a/modules/gapi/src/api/ginfer.cpp
+++ b/modules/gapi/src/api/ginfer.cpp
@@ -29,29 +29,52 @@ std::vector<cv::gapi::GBackend> cv::gapi::GNetPackage::backends() const {
 // FIXME: Inference API is currently only available in full mode
 #if !defined(GAPI_STANDALONE)
 
+cv::GInferInputs::GInferInputs()
+    : in_blobs(std::make_shared<Map>())
+{
+}
+
 cv::GMat& cv::GInferInputs::operator[](const std::string& name) {
-    return in_blobs[name];
+    return (*in_blobs)[name];
 }
 
-const std::unordered_map<std::string, cv::GMat>& cv::GInferInputs::getBlobs() const {
-    return in_blobs;
+const cv::GInferInputs::Map& cv::GInferInputs::getBlobs() const {
+    return *in_blobs;
 }
 
-cv::GInferOutputs::GInferOutputs(std::shared_ptr<cv::GCall> call)
-    : m_call(std::move(call)), m_info(cv::util::any_cast<InOutInfo>(&m_call->params()))
+void cv::GInferInputs::setInput(const std::string& name, const cv::GMat& value) {
+    in_blobs->emplace(name, value);
+}
+
+struct cv::GInferOutputs::Priv
 {
+    Priv(std::shared_ptr<cv::GCall>);
+
+    std::shared_ptr<cv::GCall> call;
+    InOutInfo* info = nullptr;
+    std::unordered_map<std::string, cv::GMat> out_blobs;
 };
 
+cv::GInferOutputs::Priv::Priv(std::shared_ptr<cv::GCall> c)
+    : call(std::move(c)), info(cv::util::any_cast<InOutInfo>(&call->params()))
+{
+}
+
+cv::GInferOutputs::GInferOutputs(std::shared_ptr<cv::GCall> call)
+    : m_priv(std::make_shared<cv::GInferOutputs::Priv>(std::move(call)))
+{
+}
+
 cv::GMat cv::GInferOutputs::at(const std::string& name)
 {
-    auto it = out_blobs.find(name);
-    if (it == out_blobs.end()) {
+    auto it = m_priv->out_blobs.find(name);
+    if (it == m_priv->out_blobs.end()) {
         // FIXME: Avoid modifying GKernel
-        m_call->kernel().outShapes.push_back(cv::GShape::GMAT);
-        int out_idx = static_cast<int>(out_blobs.size());
-        it = out_blobs.emplace(name, m_call->yield(out_idx)).first;
-        m_info->out_names.push_back(name);
+        m_priv->call->kernel().outShapes.push_back(cv::GShape::GMAT);
+        int out_idx = static_cast<int>(m_priv->out_blobs.size());
+        it = m_priv->out_blobs.emplace(name, m_priv->call->yield(out_idx)).first;
+        m_priv->info->out_names.push_back(name);
     }
     return it->second;
-};
+}
 #endif // GAPI_STANDALONE
diff --git a/modules/gapi/src/backends/ie/bindings_ie.cpp b/modules/gapi/src/backends/ie/bindings_ie.cpp
new file mode 100644
index 0000000000..35191d7bcb
--- /dev/null
+++ b/modules/gapi/src/backends/ie/bindings_ie.cpp
@@ -0,0 +1,39 @@
+#include <opencv2/gapi/infer/bindings_ie.hpp>
+
+cv::gapi::ie::PyParams::PyParams(const std::string &tag,
+                                 const std::string &model,
+                                 const std::string &weights,
+                                 const std::string &device)
+    : m_priv(std::make_shared<Params<cv::gapi::Generic>>(tag, model, weights, device)) {
+}
+
+cv::gapi::ie::PyParams::PyParams(const std::string &tag,
+                                 const std::string &model,
+                                 const std::string &device)
+    : m_priv(std::make_shared<Params<cv::gapi::Generic>>(tag, model, device)) {
+}
+
+cv::gapi::GBackend cv::gapi::ie::PyParams::backend() const {
+    return m_priv->backend();
+}
+
+std::string cv::gapi::ie::PyParams::tag() const {
+    return m_priv->tag();
+}
+
+cv::util::any cv::gapi::ie::PyParams::params() const {
+    return m_priv->params();
+}
+
+cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag,
+                                            const std::string &model,
+                                            const std::string &weights,
+                                            const std::string &device) {
+    return {tag, model, weights, device};
+}
+
+cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag,
+                                            const std::string &model,
+                                            const std::string &device) {
+    return {tag, model, device};
+}

From 3d4563913d59c66b9fbf9a3ff98c1b31fbb1839a Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Mon, 26 Oct 2020 22:55:43 +0300
Subject: [PATCH 053/152] Merge pull request #18600 from
 TolyaTalamanov:at/implement-render-using-stateful

[G-API] Implement render using stateful kernel

* Implement render using stateful kernel

* Move freetype to backends folder

* Fix freetype compilation

* Fix freetype smoke test

* Fix comments

* Refactoring
---
 modules/gapi/CMakeLists.txt                   |   3 +-
 modules/gapi/src/api/render_ocv.cpp           |  24 +--
 modules/gapi/src/api/render_ocv.hpp           |   6 +-
 .../{api => backends/render}/ft_render.cpp    |  26 ++-
 .../{api => backends/render}/ft_render.hpp    |   8 -
 .../render}/ft_render_priv.hpp                |   2 +-
 .../gapi/src/backends/render/grenderocv.cpp   |  54 +++++-
 .../gapi/src/backends/render/grenderocv.hpp   |  55 ------
 .../src/backends/render/grenderocvbackend.cpp | 161 ------------------
 .../src/backends/render/grenderocvbackend.hpp |  73 --------
 modules/gapi/test/render/ftp_render_test.cpp  |   2 +-
 .../test/render/gapi_render_tests_ocv.cpp     |   1 -
 12 files changed, 83 insertions(+), 332 deletions(-)
 rename modules/gapi/src/{api => backends/render}/ft_render.cpp (92%)
 rename modules/gapi/src/{api => backends/render}/ft_render.hpp (91%)
 rename modules/gapi/src/{api => backends/render}/ft_render_priv.hpp (96%)
 delete mode 100644 modules/gapi/src/backends/render/grenderocv.hpp
 delete mode 100644 modules/gapi/src/backends/render/grenderocvbackend.cpp
 delete mode 100644 modules/gapi/src/backends/render/grenderocvbackend.hpp

diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index 137894cb8f..82b719ad4e 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -77,7 +77,6 @@ set(gapi_srcs
     src/api/render.cpp
     src/api/render_ocv.cpp
     src/api/ginfer.cpp
-    src/api/ft_render.cpp
     src/api/media.cpp
     src/api/rmat.cpp
 
@@ -131,8 +130,8 @@ set(gapi_srcs
     src/backends/ie/giebackend/giewrapper.cpp
 
     # Render Backend.
-    src/backends/render/grenderocvbackend.cpp
     src/backends/render/grenderocv.cpp
+    src/backends/render/ft_render.cpp
 
     #PlaidML Backend
     src/backends/plaidml/gplaidmlcore.cpp
diff --git a/modules/gapi/src/api/render_ocv.cpp b/modules/gapi/src/api/render_ocv.cpp
index a298a958bd..5ab2e1dd07 100644
--- a/modules/gapi/src/api/render_ocv.cpp
+++ b/modules/gapi/src/api/render_ocv.cpp
@@ -2,7 +2,7 @@
 #include <opencv2/gapi/render/render.hpp> // Kernel API's
 
 #include "api/render_ocv.hpp"
-#include "api/ft_render.hpp"
+#include "backends/render/ft_render.hpp"
 
 namespace cv
 {
@@ -146,12 +146,8 @@ struct EmptyConverter
 template <typename ColorConverter>
 void drawPrimitivesOCV(cv::Mat& in,
                        const cv::gapi::wip::draw::Prims& prims,
-                       cv::gapi::wip::draw::FTTextRender* ftpr)
+                       std::shared_ptr<cv::gapi::wip::draw::FTTextRender>& ftpr)
 {
-#ifndef HAVE_FREETYPE
-    cv::util::suppress_unused_warning(ftpr);
-#endif
-
     using namespace cv::gapi::wip::draw;
 
     ColorConverter converter;
@@ -177,7 +173,6 @@ void drawPrimitivesOCV(cv::Mat& in,
 
             case Prim::index_of<FText>():
             {
-#ifdef HAVE_FREETYPE
                 const auto& ftp  = cv::util::get<FText>(p);
                 const auto color = converter.cvtColor(ftp.color);
 
@@ -196,9 +191,6 @@ void drawPrimitivesOCV(cv::Mat& in,
                 cv::Point tl(ftp.org.x, ftp.org.y - mask.size().height + baseline);
 
                 blendTextMask(in, mask, tl, color);
-#else
-                cv::util::throw_error(std::runtime_error("FreeType not found !"));
-#endif
                 break;
             }
 
@@ -251,16 +243,16 @@ void drawPrimitivesOCV(cv::Mat& in,
     }
 }
 
-void drawPrimitivesOCVBGR(cv::Mat &in,
-                          const cv::gapi::wip::draw::Prims &prims,
-                          cv::gapi::wip::draw::FTTextRender* ftpr)
+void drawPrimitivesOCVBGR(cv::Mat                                                  &in,
+                          const cv::gapi::wip::draw::Prims                         &prims,
+                          std::shared_ptr<cv::gapi::wip::draw::FTTextRender> &ftpr)
 {
     drawPrimitivesOCV<EmptyConverter>(in, prims, ftpr);
 }
 
-void drawPrimitivesOCVYUV(cv::Mat &in,
-                          const cv::gapi::wip::draw::Prims &prims,
-                          cv::gapi::wip::draw::FTTextRender* ftpr)
+void drawPrimitivesOCVYUV(cv::Mat                                                  &in,
+                          const cv::gapi::wip::draw::Prims                         &prims,
+                          std::shared_ptr<cv::gapi::wip::draw::FTTextRender> &ftpr)
 {
     drawPrimitivesOCV<BGR2YUVConverter>(in, prims, ftpr);
 }
diff --git a/modules/gapi/src/api/render_ocv.hpp b/modules/gapi/src/api/render_ocv.hpp
index 91194dcdc1..a9a98f93fb 100644
--- a/modules/gapi/src/api/render_ocv.hpp
+++ b/modules/gapi/src/api/render_ocv.hpp
@@ -1,6 +1,6 @@
 #include <vector>
 #include "render_priv.hpp"
-#include "ft_render.hpp"
+#include "backends/render/ft_render.hpp"
 
 #ifndef OPENCV_RENDER_OCV_HPP
 #define OPENCV_RENDER_OCV_HPP
@@ -15,8 +15,8 @@ namespace draw
 {
 
 // FIXME only for tests
-void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc);
-void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc);
+void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, std::shared_ptr<cv::gapi::wip::draw::FTTextRender>& mc);
+void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, std::shared_ptr<cv::gapi::wip::draw::FTTextRender>& mc);
 
 } // namespace draw
 } // namespace wip
diff --git a/modules/gapi/src/api/ft_render.cpp b/modules/gapi/src/backends/render/ft_render.cpp
similarity index 92%
rename from modules/gapi/src/api/ft_render.cpp
rename to modules/gapi/src/backends/render/ft_render.cpp
index 7561dff833..fcf84713ff 100644
--- a/modules/gapi/src/api/ft_render.cpp
+++ b/modules/gapi/src/backends/render/ft_render.cpp
@@ -5,11 +5,11 @@
 // Copyright (C) 2019 Intel Corporation
 
 #include "precomp.hpp"
+#include "ft_render.hpp"
 
 #ifdef HAVE_FREETYPE
 
-#include "api/ft_render.hpp"
-#include "api/ft_render_priv.hpp"
+#include "ft_render_priv.hpp"
 
 #include <opencv2/gapi/util/throw.hpp>
 #include <opencv2/gapi/own/assert.hpp>
@@ -166,6 +166,11 @@ void cv::gapi::wip::draw::FTTextRender::Priv::putText(cv::Mat& mat,
                     "Failed to load char");
         FT_Bitmap *bitmap = &(m_face->glyph->bitmap);
 
+        // FIXME: Skip glyph, if size is 0
+        if (bitmap->rows == 0 || bitmap->width == 0) {
+            continue;
+        }
+
         cv::Mat glyph(bitmap->rows, bitmap->width, CV_8UC1, bitmap->buffer, bitmap->pitch);
 
         int left    = m_face->glyph->bitmap_left;
@@ -211,4 +216,21 @@ void cv::gapi::wip::draw::FTTextRender::putText(cv::Mat& mat,
     m_priv->putText(mat, text, org, fh);
 }
 
+#else
+
+cv::Size cv::gapi::wip::draw::FTTextRender::getTextSize(const std::wstring&, int, int*)
+{
+    cv::util::throw_error(std::runtime_error("Freetype not found"));
+}
+
+void cv::gapi::wip::draw::FTTextRender::putText(cv::Mat&, const std::wstring&, const cv::Point&, int)
+{
+    cv::util::throw_error(std::runtime_error("Freetype not found"));
+}
+
+cv::gapi::wip::draw::FTTextRender::FTTextRender(const std::string&)
+{
+    cv::util::throw_error(std::runtime_error("Freetype not found"));
+}
+
 #endif // HAVE_FREETYPE
diff --git a/modules/gapi/src/api/ft_render.hpp b/modules/gapi/src/backends/render/ft_render.hpp
similarity index 91%
rename from modules/gapi/src/api/ft_render.hpp
rename to modules/gapi/src/backends/render/ft_render.hpp
index 2556c7269c..068c0d4d3f 100644
--- a/modules/gapi/src/api/ft_render.hpp
+++ b/modules/gapi/src/backends/render/ft_render.hpp
@@ -23,8 +23,6 @@ namespace wip
 namespace draw
 {
 
-#ifdef HAVE_FREETYPE
-
 class GAPI_EXPORTS FTTextRender
 {
 public:
@@ -38,12 +36,6 @@ private:
     std::shared_ptr<Priv> m_priv;
 };
 
-#else
-
-class GAPI_EXPORTS FTTextRender {};
-
-#endif // HAVE_FREETYPE
-
 } // namespace draw
 } // namespace wip
 } // namespace gapi
diff --git a/modules/gapi/src/api/ft_render_priv.hpp b/modules/gapi/src/backends/render/ft_render_priv.hpp
similarity index 96%
rename from modules/gapi/src/api/ft_render_priv.hpp
rename to modules/gapi/src/backends/render/ft_render_priv.hpp
index 5a0679dd99..903f439b96 100644
--- a/modules/gapi/src/api/ft_render_priv.hpp
+++ b/modules/gapi/src/backends/render/ft_render_priv.hpp
@@ -10,7 +10,7 @@
 #ifndef OPENCV_FT_RENDER_PRIV_HPP
 #define OPENCV_FT_RENDER_PRIV_HPP
 
-#include "api/ft_render.hpp"
+#include "ft_render.hpp"
 
 #include <ft2build.h>
 #include FT_FREETYPE_H
diff --git a/modules/gapi/src/backends/render/grenderocv.cpp b/modules/gapi/src/backends/render/grenderocv.cpp
index cb4fd1be3a..71be889d79 100644
--- a/modules/gapi/src/backends/render/grenderocv.cpp
+++ b/modules/gapi/src/backends/render/grenderocv.cpp
@@ -1,16 +1,21 @@
 #include <opencv2/imgproc.hpp>
 
 #include "api/render_ocv.hpp"
-#include "backends/render/grenderocv.hpp"
 
 #include <opencv2/gapi/cpu/gcpukernel.hpp>
+#include <opencv2/gapi/fluid/core.hpp>
 
-GAPI_RENDER_OCV_KERNEL(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR)
+struct RenderOCVState
+{
+    std::shared_ptr<cv::gapi::wip::draw::FTTextRender> ftpr;
+};
+
+GAPI_OCV_KERNEL_ST(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR, RenderOCVState)
 {
     static void run(const cv::Mat& in,
                     const cv::gapi::wip::draw::Prims& prims,
-                    cv::gapi::wip::draw::FTTextRender* ftpr,
-                    cv::Mat& out)
+                    cv::Mat& out,
+                    RenderOCVState& state)
     {
         // NB: If in and out cv::Mats are the same object
         // we can avoid copy and render on out cv::Mat
@@ -19,18 +24,33 @@ GAPI_RENDER_OCV_KERNEL(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR)
             in.copyTo(out);
         }
 
-        cv::gapi::wip::draw::drawPrimitivesOCVBGR(out, prims, ftpr);
+        cv::gapi::wip::draw::drawPrimitivesOCVBGR(out, prims, state.ftpr);
+    }
+
+    static void setup(const cv::GMatDesc& /* in */,
+                      const cv::GArrayDesc& /* prims */,
+                      std::shared_ptr<RenderOCVState>& state,
+                      const cv::GCompileArgs& args)
+    {
+        using namespace cv::gapi::wip::draw;
+        auto opt_freetype_font = cv::gapi::getCompileArg<freetype_font>(args);
+        state = std::make_shared<RenderOCVState>();
+
+        if (opt_freetype_font.has_value())
+        {
+            state->ftpr = std::make_shared<FTTextRender>(opt_freetype_font->path);
+        }
     }
 };
 
-GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12)
+GAPI_OCV_KERNEL_ST(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12, RenderOCVState)
 {
     static void run(const cv::Mat& in_y,
                     const cv::Mat& in_uv,
                     const cv::gapi::wip::draw::Prims& prims,
-                    cv::gapi::wip::draw::FTTextRender* ftpr,
                     cv::Mat& out_y,
-                    cv::Mat& out_uv)
+                    cv::Mat& out_uv,
+                    RenderOCVState& state)
     {
         // NB: If in and out cv::Mats are the same object
         // we can avoid copy and render on out cv::Mat
@@ -67,7 +87,7 @@ GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12)
         cv::resize(in_uv, upsample_uv, in_uv.size() * 2, cv::INTER_LINEAR);
         cv::merge(std::vector<cv::Mat>{in_y, upsample_uv}, yuv);
 
-        cv::gapi::wip::draw::drawPrimitivesOCVYUV(yuv, prims, ftpr);
+        cv::gapi::wip::draw::drawPrimitivesOCVYUV(yuv, prims, state.ftpr);
 
         // YUV -> NV12
         cv::Mat out_u, out_v, uv_plane;
@@ -76,6 +96,22 @@ GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12)
         cv::merge(std::vector<cv::Mat>{chs[1], chs[2]}, uv_plane);
         cv::resize(uv_plane, out_uv, uv_plane.size() / 2, cv::INTER_LINEAR);
     }
+
+    static void setup(const cv::GMatDesc&   /* in_y  */,
+                      const cv::GMatDesc&   /* in_uv */,
+                      const cv::GArrayDesc& /* prims */,
+                      std::shared_ptr<RenderOCVState>& state,
+                      const cv::GCompileArgs& args)
+    {
+        using namespace cv::gapi::wip::draw;
+        auto has_freetype_font = cv::gapi::getCompileArg<freetype_font>(args);
+        state = std::make_shared<RenderOCVState>();
+
+        if (has_freetype_font)
+        {
+            state->ftpr = std::make_shared<FTTextRender>(has_freetype_font->path);
+        }
+    }
 };
 
 cv::gapi::GKernelPackage cv::gapi::render::ocv::kernels()
diff --git a/modules/gapi/src/backends/render/grenderocv.hpp b/modules/gapi/src/backends/render/grenderocv.hpp
deleted file mode 100644
index e5091042b2..0000000000
--- a/modules/gapi/src/backends/render/grenderocv.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2019 Intel Corporation
-
-#ifndef OPENCV_GAPI_GRENDEROCV_HPP
-#define OPENCV_GAPI_GRENDEROCV_HPP
-
-#include <opencv2/gapi/cpu/gcpukernel.hpp>
-#include "api/render_priv.hpp"
-#include "api/ft_render.hpp"
-
-namespace cv
-{
-namespace gapi
-{
-namespace render
-{
-namespace ocv
-{
-
-GAPI_EXPORTS cv::gapi::GBackend backend();
-
-template<typename, typename>
-struct add_type_to_tuple;
-
-template<typename P, typename ...Ts>
-struct add_type_to_tuple<P, std::tuple<Ts...>>
-{
-    using type = std::tuple<Ts..., P>;
-};
-
-template<class Impl, class K>
-class GRenderKernelImpl: public cv::detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>,
-                         public cv::detail::KernelTag
-{
-    using InArgs = typename add_type_to_tuple<cv::gapi::wip::draw::FTTextRender*, typename K::InArgs>::type;
-    using P      = detail::OCVCallHelper<Impl, InArgs, typename K::OutArgs>;
-
-public:
-    using API = K;
-
-    static cv::gapi::GBackend backend()  { return cv::gapi::render::ocv::backend(); }
-    static cv::GCPUKernel     kernel()   { return GCPUKernel(&P::call);             }
-};
-
-#define GAPI_RENDER_OCV_KERNEL(Name, API) struct Name: public cv::gapi::render::ocv::GRenderKernelImpl<Name, API>
-
-} // namespace ocv
-} // namespace render
-} // namespace gapi
-} // namespace cv
-
-#endif // OPENCV_GAPI_GRENDEROCV_HPP
diff --git a/modules/gapi/src/backends/render/grenderocvbackend.cpp b/modules/gapi/src/backends/render/grenderocvbackend.cpp
deleted file mode 100644
index 413d0c3f9c..0000000000
--- a/modules/gapi/src/backends/render/grenderocvbackend.cpp
+++ /dev/null
@@ -1,161 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2018-2020 Intel Corporation
-
-#include "precomp.hpp"
-
-#include <functional>
-#include <unordered_set>
-
-#include <ade/util/algorithm.hpp>
-
-#include <ade/util/range.hpp>
-#include <ade/util/zip_range.hpp>
-#include <ade/util/chain_range.hpp>
-#include <ade/typed_graph.hpp>
-
-#include <opencv2/gapi/gcommon.hpp>
-#include <opencv2/gapi/garray.hpp>
-#include <opencv2/gapi/util/any.hpp>
-#include <opencv2/gapi/gtype_traits.hpp>
-
-#include "compiler/gobjref.hpp"
-#include "compiler/gmodel.hpp"
-
-#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
-#include "api/render_ocv.hpp"
-
-#include "backends/render/grenderocvbackend.hpp"
-
-#include <opencv2/gapi/render/render.hpp>
-#include "api/ocv_mask_creator.hpp"
-#include "api/ft_render.hpp"
-
-
-using GRenderModel = ade::TypedGraph
-    < cv::gimpl::render::ocv::RenderUnit
-    >;
-
-// FIXME: Same issue with Typed and ConstTyped
-using GConstRenderModel = ade::ConstTypedGraph
-    < cv::gimpl::render::ocv::RenderUnit
-    >;
-
-cv::gimpl::render::ocv::GRenderExecutable::GRenderExecutable(const ade::Graph &g,
-                                                             const std::vector<ade::NodeHandle> &nodes,
-                                                             std::unique_ptr<cv::gapi::wip::draw::FTTextRender>&& ftpr)
-    : m_g(g), m_gm(m_g), m_ftpr(std::move(ftpr)) {
-        GConstRenderModel gcm(m_g);
-
-        auto is_op = [&](ade::NodeHandle nh) {
-            return m_gm.metadata(nh).get<NodeType>().t == NodeType::OP;
-        };
-
-        auto it = ade::util::find_if(nodes, is_op);
-
-        GAPI_Assert(it != nodes.end());
-        this_nh = *it;
-
-        if (!std::none_of(std::next(it), nodes.end(), is_op)) {
-            util::throw_error(std::logic_error("Multi-node rendering is not supported!"));
-        }
-}
-
-void cv::gimpl::render::ocv::GRenderExecutable::run(std::vector<InObj>  &&input_objs,
-                                                    std::vector<OutObj> &&output_objs) {
-    GConstRenderModel gcm(m_g);
-
-    for (auto& it : input_objs)   magazine::bindInArg (m_res, it.first, it.second);
-    for (auto& it : output_objs)  magazine::bindOutArg(m_res, it.first, it.second);
-
-    const auto &op = m_gm.metadata(this_nh).get<Op>();
-
-    // Initialize kernel's execution context:
-    // - Input parameters
-    GCPUContext context;
-    context.m_args.reserve(op.args.size());
-    using namespace std::placeholders;
-    ade::util::transform(op.args,
-                          std::back_inserter(context.m_args),
-                          std::bind(&GRenderExecutable::packArg, this, _1));
-
-    // - Output parameters.
-    for (const auto &out_it : ade::util::indexed(op.outs)) {
-        // FIXME: Can the same GArg type resolution mechanism be reused here?
-        const auto out_port  = ade::util::index(out_it);
-        const auto out_desc  = ade::util::value(out_it);
-        context.m_results[out_port] = magazine::getObjPtr(m_res, out_desc);
-    }
-
-    auto k = gcm.metadata(this_nh).get<RenderUnit>().k;
-
-    context.m_args.emplace_back(m_ftpr.get());
-
-    k.m_runF(context);
-
-    for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
-
-    // In/Out args clean-up is mandatory now with RMat
-    for (auto &it : input_objs) magazine::unbind(m_res, it.first);
-    for (auto &it : output_objs) magazine::unbind(m_res, it.first);
-}
-
-cv::GArg cv::gimpl::render::ocv::GRenderExecutable::packArg(const cv::GArg &arg) {
-    // No API placeholders allowed at this point
-    // FIXME: this check has to be done somewhere in compilation stage.
-    GAPI_Assert(   arg.kind != cv::detail::ArgKind::GMAT
-                && arg.kind != cv::detail::ArgKind::GSCALAR
-                && arg.kind != cv::detail::ArgKind::GARRAY);
-
-    if (arg.kind != cv::detail::ArgKind::GOBJREF) {
-        util::throw_error(std::logic_error("Render supports G-types ONLY!"));
-    }
-    GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);
-
-    const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
-    switch (ref.shape)
-    {
-    case GShape::GMAT:   return GArg(m_res.slot<cv::Mat>()[ref.id]);
-    case GShape::GARRAY: return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));
-    default:
-        util::throw_error(std::logic_error("Unsupported GShape type"));
-        break;
-    }
-}
-
-namespace {
-    class GRenderBackendImpl final: public cv::gapi::GBackend::Priv {
-        virtual void unpackKernel(ade::Graph &gr,
-                                  const ade::NodeHandle &op_node,
-                                  const cv::GKernelImpl &impl) override {
-            GRenderModel rm(gr);
-            auto render_impl = cv::util::any_cast<cv::GCPUKernel>(impl.opaque);
-            rm.metadata(op_node).set(cv::gimpl::render::ocv::RenderUnit{render_impl});
-        }
-
-        virtual EPtr compile(const ade::Graph &graph,
-                             const cv::GCompileArgs& args,
-                             const std::vector<ade::NodeHandle> &nodes) const override {
-
-            using namespace cv::gapi::wip::draw;
-            auto has_freetype_font = cv::gapi::getCompileArg<freetype_font>(args);
-            std::unique_ptr<FTTextRender> ftpr;
-            if (has_freetype_font)
-            {
-#ifndef HAVE_FREETYPE
-                throw std::runtime_error("Freetype not found");
-#else
-                ftpr.reset(new FTTextRender(has_freetype_font.value().path));
-#endif
-            }
-            return EPtr{new cv::gimpl::render::ocv::GRenderExecutable(graph, nodes, std::move(ftpr))};
-        }
-    };
-}
-
-cv::gapi::GBackend cv::gapi::render::ocv::backend() {
-    static cv::gapi::GBackend this_backend(std::make_shared<GRenderBackendImpl>());
-    return this_backend;
-}
diff --git a/modules/gapi/src/backends/render/grenderocvbackend.hpp b/modules/gapi/src/backends/render/grenderocvbackend.hpp
deleted file mode 100644
index 69d388ffe6..0000000000
--- a/modules/gapi/src/backends/render/grenderocvbackend.hpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2019 Intel Corporation
-
-#ifndef OPENCV_GAPI_GRENDEROCVBACKEND_HPP
-#define OPENCV_GAPI_GRENDEROCVBACKEND_HPP
-
-#include <opencv2/gapi/garg.hpp>
-#include <opencv2/gapi/gproto.hpp>
-#include <opencv2/gapi/render/render.hpp>
-
-#include "api/gorigin.hpp"
-#include "backends/common/gbackend.hpp"
-#include "compiler/gislandmodel.hpp"
-
-#include "backends/render/grenderocv.hpp"
-
-#include <opencv2/gapi/cpu/gcpukernel.hpp>
-
-namespace cv
-{
-namespace gimpl
-{
-namespace render
-{
-namespace ocv
-{
-
-struct RenderUnit
-{
-    static const char *name() { return "RenderUnit"; }
-    GCPUKernel k;
-};
-
-class GRenderExecutable final: public GIslandExecutable
-{
-    const ade::Graph &m_g;
-    GModel::ConstGraph m_gm;
-    std::unique_ptr<cv::gapi::wip::draw::FTTextRender> m_ftpr;
-
-    // The only executable stuff in this graph
-    // (assuming it is always single-op)
-    ade::NodeHandle this_nh;
-
-    //// Actual data of all resources in graph (both internal and external)
-    Mag m_res;
-
-    //// Execution helpers
-    GArg packArg(const GArg &arg);
-
-public:
-    GRenderExecutable(const ade::Graph                   &graph,
-                      const std::vector<ade::NodeHandle> &nodes,
-                      std::unique_ptr<cv::gapi::wip::draw::FTTextRender>&& ftpr);
-
-    virtual inline bool canReshape() const override { return false; }
-
-    virtual inline void reshape(ade::Graph&, const GCompileArgs&) override {
-        GAPI_Assert(false); // Not implemented yet
-    }
-
-    virtual void run(std::vector<InObj>  &&input_objs,
-                     std::vector<OutObj> &&output_objs) override;
-};
-
-} // namespace ocv
-} // namespace render
-} // namespace gimpl
-} // namespace cv
-
-#endif // OPENCV_GAPI_GRENDEROCVBACKEND_HPP
diff --git a/modules/gapi/test/render/ftp_render_test.cpp b/modules/gapi/test/render/ftp_render_test.cpp
index 5bdbb74e30..af9c5c6f13 100644
--- a/modules/gapi/test/render/ftp_render_test.cpp
+++ b/modules/gapi/test/render/ftp_render_test.cpp
@@ -13,7 +13,7 @@
 
 #include <opencv2/core/utils/configuration.private.hpp>
 
-#include "api/ft_render.hpp"
+#include "backends/render/ft_render.hpp"
 
 namespace opencv_test
 {
diff --git a/modules/gapi/test/render/gapi_render_tests_ocv.cpp b/modules/gapi/test/render/gapi_render_tests_ocv.cpp
index f727d977aa..88b5d88075 100644
--- a/modules/gapi/test/render/gapi_render_tests_ocv.cpp
+++ b/modules/gapi/test/render/gapi_render_tests_ocv.cpp
@@ -95,7 +95,6 @@ TEST_P(RenderNV12OCVTestFTexts, AccuracyTest)
                                 cv::compile_args(cv::gapi::wip::draw::freetype_font{
                                 "/usr/share/fonts/truetype/wqy/wqy-microhei.ttc"
                                 })));
-
 }
 
 static std::wstring to_wstring(const char* bytes)

From 1fe276d0418b2edff077b3e9ccff2aac4d10ec14 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 26 Oct 2020 22:58:30 +0000
Subject: [PATCH 054/152] core: move inline code from mat.inl.hpp (OpenCV 4.x
 additions)

base commit: aac7c5465ba6ccfe0dc665ab0bae87f765e616ba
---
 modules/core/include/opencv2/core/mat.inl.hpp | 121 ------------------
 modules/core/src/matrix.cpp                   |  60 +++++++++
 modules/core/src/umatrix.cpp                  |  59 +++++++++
 3 files changed, 119 insertions(+), 121 deletions(-)

diff --git a/modules/core/include/opencv2/core/mat.inl.hpp b/modules/core/include/opencv2/core/mat.inl.hpp
index 36593563a9..d6296f8e2e 100644
--- a/modules/core/include/opencv2/core/mat.inl.hpp
+++ b/modules/core/include/opencv2/core/mat.inl.hpp
@@ -1112,67 +1112,6 @@ void Mat::push_back(const std::vector<_Tp>& v)
     push_back(Mat(v));
 }
 
-inline
-Mat::Mat(Mat&& m)
-    : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
-      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
-      u(m.u), size(&rows)
-{
-    if (m.dims <= 2)  // move new step/size info
-    {
-        step[0] = m.step[0];
-        step[1] = m.step[1];
-    }
-    else
-    {
-        CV_DbgAssert(m.step.p != m.step.buf);
-        step.p = m.step.p;
-        size.p = m.size.p;
-        m.step.p = m.step.buf;
-        m.size.p = &m.rows;
-    }
-    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
-    m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;
-    m.allocator = NULL;
-    m.u = NULL;
-}
-
-inline
-Mat& Mat::operator = (Mat&& m)
-{
-    if (this == &m)
-      return *this;
-
-    release();
-    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data;
-    datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator;
-    u = m.u;
-    if (step.p != step.buf) // release self step/size
-    {
-        fastFree(step.p);
-        step.p = step.buf;
-        size.p = &rows;
-    }
-    if (m.dims <= 2) // move new step/size info
-    {
-        step[0] = m.step[0];
-        step[1] = m.step[1];
-    }
-    else
-    {
-        CV_DbgAssert(m.step.p != m.step.buf);
-        step.p = m.step.p;
-        size.p = m.size.p;
-        m.step.p = m.step.buf;
-        m.size.p = &m.rows;
-    }
-    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
-    m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;
-    m.allocator = NULL;
-    m.u = NULL;
-    return *this;
-}
-
 
 ///////////////////////////// MatSize ////////////////////////////
 
@@ -3342,66 +3281,6 @@ size_t UMat::step1(int i) const
     return step.p[i] / elemSize1();
 }
 
-inline
-UMat::UMat(UMat&& m)
-: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
-  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
-{
-    if (m.dims <= 2)  // move new step/size info
-    {
-        step[0] = m.step[0];
-        step[1] = m.step[1];
-    }
-    else
-    {
-        CV_DbgAssert(m.step.p != m.step.buf);
-        step.p = m.step.p;
-        size.p = m.size.p;
-        m.step.p = m.step.buf;
-        m.size.p = &m.rows;
-    }
-    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
-    m.allocator = NULL;
-    m.u = NULL;
-    m.offset = 0;
-}
-
-inline
-UMat& UMat::operator = (UMat&& m)
-{
-    if (this == &m)
-      return *this;
-    release();
-    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols;
-    allocator = m.allocator; usageFlags = m.usageFlags;
-    u = m.u;
-    offset = m.offset;
-    if (step.p != step.buf) // release self step/size
-    {
-        fastFree(step.p);
-        step.p = step.buf;
-        size.p = &rows;
-    }
-    if (m.dims <= 2) // move new step/size info
-    {
-        step[0] = m.step[0];
-        step[1] = m.step[1];
-    }
-    else
-    {
-        CV_DbgAssert(m.step.p != m.step.buf);
-        step.p = m.step.p;
-        size.p = m.size.p;
-        m.step.p = m.step.buf;
-        m.size.p = &m.rows;
-    }
-    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
-    m.allocator = NULL;
-    m.u = NULL;
-    m.offset = 0;
-    return *this;
-}
-
 
 inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; }
 inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; }
diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp
index 6c874832c9..122b383379 100644
--- a/modules/core/src/matrix.cpp
+++ b/modules/core/src/matrix.cpp
@@ -599,6 +599,66 @@ size_t Mat::total(int startDim, int endDim) const
 }
 
 
+Mat::Mat(Mat&& m)
+    : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
+      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
+      u(m.u), size(&rows)
+{
+    if (m.dims <= 2)  // move new step/size info
+    {
+        step[0] = m.step[0];
+        step[1] = m.step[1];
+    }
+    else
+    {
+        CV_Assert(m.step.p != m.step.buf);
+        step.p = m.step.p;
+        size.p = m.size.p;
+        m.step.p = m.step.buf;
+        m.size.p = &m.rows;
+    }
+    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
+    m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;
+    m.allocator = NULL;
+    m.u = NULL;
+}
+
+
+Mat& Mat::operator=(Mat&& m)
+{
+    if (this == &m)
+      return *this;
+
+    release();
+    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data;
+    datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator;
+    u = m.u;
+    if (step.p != step.buf) // release self step/size
+    {
+        fastFree(step.p);
+        step.p = step.buf;
+        size.p = &rows;
+    }
+    if (m.dims <= 2) // move new step/size info
+    {
+        step[0] = m.step[0];
+        step[1] = m.step[1];
+    }
+    else
+    {
+        CV_Assert(m.step.p != m.step.buf);
+        step.p = m.step.p;
+        size.p = m.size.p;
+        m.step.p = m.step.buf;
+        m.size.p = &m.rows;
+    }
+    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
+    m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;
+    m.allocator = NULL;
+    m.u = NULL;
+    return *this;
+}
+
 
 void Mat::create(int d, const int* _sizes, int _type)
 {
diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp
index 0a2de6017f..0ec6270a70 100644
--- a/modules/core/src/umatrix.cpp
+++ b/modules/core/src/umatrix.cpp
@@ -375,6 +375,65 @@ size_t UMat::total() const
 }
 
 
+UMat::UMat(UMat&& m)
+: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
+  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
+{
+    if (m.dims <= 2)  // move new step/size info
+    {
+        step[0] = m.step[0];
+        step[1] = m.step[1];
+    }
+    else
+    {
+        CV_DbgAssert(m.step.p != m.step.buf);
+        step.p = m.step.p;
+        size.p = m.size.p;
+        m.step.p = m.step.buf;
+        m.size.p = &m.rows;
+    }
+    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
+    m.allocator = NULL;
+    m.u = NULL;
+    m.offset = 0;
+}
+
+UMat& UMat::operator=(UMat&& m)
+{
+    if (this == &m)
+      return *this;
+    release();
+    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols;
+    allocator = m.allocator; usageFlags = m.usageFlags;
+    u = m.u;
+    offset = m.offset;
+    if (step.p != step.buf) // release self step/size
+    {
+        fastFree(step.p);
+        step.p = step.buf;
+        size.p = &rows;
+    }
+    if (m.dims <= 2) // move new step/size info
+    {
+        step[0] = m.step[0];
+        step[1] = m.step[1];
+    }
+    else
+    {
+        CV_DbgAssert(m.step.p != m.step.buf);
+        step.p = m.step.p;
+        size.p = m.size.p;
+        m.step.p = m.step.buf;
+        m.size.p = &m.rows;
+    }
+    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
+    m.allocator = NULL;
+    m.u = NULL;
+    m.offset = 0;
+    return *this;
+}
+
+
 MatAllocator* UMat::getStdAllocator()
 {
 #ifdef HAVE_OPENCL

From d011383a3d1b6430ade24857c5e74942246b89c4 Mon Sep 17 00:00:00 2001
From: Mikkel Green <ackbar345@hotmail.com>
Date: Mon, 26 Oct 2020 16:04:55 -0700
Subject: [PATCH 055/152] Bugfix: Manual page sizes now override the default
 page size when both are specified

Remove the redundant .upper() call; one-to-one key matching is already
enforced by the argparse choices.
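
For reference, a standalone sketch (not part of the patch) of the
argparse.SUPPRESS behavior the new check relies on; add_help=False below
mirrors the script's reuse of -h for the height flag:

```
import argparse

parser = argparse.ArgumentParser(add_help=False)
# With default=argparse.SUPPRESS, the attribute only appears in the
# parsed namespace when the user actually passes the flag.
parser.add_argument("-w", "--page_width", default=argparse.SUPPRESS, type=float)
parser.add_argument("-h", "--page_height", default=argparse.SUPPRESS, type=float)

args = parser.parse_args(["-w", "216", "-h", "279"])
assert 'page_width' in args and 'page_height' in args   # both flags given

args = parser.parse_args([])
assert 'page_width' not in args                         # omitted -> absent
```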

---
 doc/pattern_tools/gen_pattern.py | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/doc/pattern_tools/gen_pattern.py b/doc/pattern_tools/gen_pattern.py
index 1f90615736..a6ffc7ca7e 100755
--- a/doc/pattern_tools/gen_pattern.py
+++ b/doc/pattern_tools/gen_pattern.py
@@ -92,11 +92,11 @@ def main():
                         dest="square_size", type=float)
     parser.add_argument("-R", "--radius_rate", help="circles_radius = square_size/radius_rate", default="5.0",
                         action="store", dest="radius_rate", type=float)
-    parser.add_argument("-w", "--page_width", help="page width in units", default="216", action="store",
+    parser.add_argument("-w", "--page_width", help="page width in units", default=argparse.SUPPRESS, action="store",
                         dest="page_width", type=float)
-    parser.add_argument("-h", "--page_height", help="page height in units", default="279", action="store",
-                        dest="page_width", type=float)
-    parser.add_argument("-a", "--page_size", help="page size, supersedes -h -w arguments", default="A4", action="store",
+    parser.add_argument("-h", "--page_height", help="page height in units", default=argparse.SUPPRESS, action="store",
+                        dest="page_height", type=float)
+    parser.add_argument("-a", "--page_size", help="page size, superseded if -h and -w are set", default="A4", action="store",
                         dest="page_size", choices=["A0", "A1", "A2", "A3", "A4", "A5"])
     args = parser.parse_args()
 
@@ -111,12 +111,16 @@ def main():
     units = args.units
     square_size = args.square_size
     radius_rate = args.radius_rate
-    page_size = args.page_size
-    # page size dict (ISO standard, mm) for easy lookup. format - size: [width, height]
-    page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297],
-                  "A5": [148, 210]}
-    page_width = page_sizes[page_size.upper()][0]
-    page_height = page_sizes[page_size.upper()][1]
+    if 'page_width' in args and 'page_height' in args:
+        page_width = args.page_width
+        page_height = args.page_height
+    else:
+        page_size = args.page_size
+        # page size dict (ISO standard, mm) for easy lookup. format - size: [width, height]
+        page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297],
+                      "A5": [148, 210]}
+        page_width = page_sizes[page_size][0]
+        page_height = page_sizes[page_size][1]
     pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height)
     # dict for easy lookup of pattern type
     mp = {"circles": pm.make_circles_pattern, "acircles": pm.make_acircles_pattern,

From 0f7b2eb79f8a2069177cb657e394093ef2ca7c5d Mon Sep 17 00:00:00 2001
From: APrigarina <ann73617@gmail.com>
Date: Wed, 28 Oct 2020 12:48:42 +0300
Subject: [PATCH 056/152] fix curved QR code decoding
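
A hedged usage sketch of the affected code path; detectAndDecodeCurved is
assumed to be the Python entry point that ends up in
QRDecode::preparingCurvedQRCodes:

```
import cv2 as cv

img = cv.imread('curved_qr.jpg')   # placeholder image with a QR code on a curved surface
detector = cv.QRCodeDetector()
data, points, straight = detector.detectAndDecodeCurved(img)
if data:
    print('decoded:', data)        # 'points' holds the four detected corners
```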

---
 modules/objdetect/src/qrcode.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp
index 5b86f74614..449e6e6d32 100644
--- a/modules/objdetect/src/qrcode.cpp
+++ b/modules/objdetect/src/qrcode.cpp
@@ -993,7 +993,7 @@ protected:
     bool computeClosestPoints(const vector<Point> &result_integer_hull);
     bool computeSidesPoints(const vector<Point> &result_integer_hull);
     vector<Point> getPointsNearUnstablePoint(const vector<Point> &side, int start, int end, int step);
-    bool findAndAddStablePoint(const vector<Point> &result_integer_hull);
+    bool findAndAddStablePoint();
     bool findIndexesCurvedSides();
     bool findIncompleteIndexesCurvedSides();
     Mat getPatternsMask();
@@ -1274,7 +1274,7 @@ vector<Point> QRDecode::getPointsNearUnstablePoint(const vector<Point> &side, in
     return points;
 }
 
-bool QRDecode::findAndAddStablePoint(const vector<Point> &result_integer_hull)
+bool QRDecode::findAndAddStablePoint()
 {
     size_t idx_unstable_point = unstable_pair.first;
     Point unstable_point = unstable_pair.second;
@@ -1385,7 +1385,7 @@ bool QRDecode::findAndAddStablePoint(const vector<Point> &result_integer_hull)
 
     bool add_stable_point = true;
 
-    for (size_t i = 0; i < result_integer_hull.size(); i++)
+    for (size_t i = 0; i < original_points.size(); i++)
     {
         if(arePointsNearest(stable_point, original_points[i], 3.0))
         {
@@ -2211,7 +2211,7 @@ bool QRDecode::preparingCurvedQRCodes()
         return false;
     if (!computeSidesPoints(result_integer_hull))
         return false;
-    if (!findAndAddStablePoint(result_integer_hull))
+    if (!findAndAddStablePoint())
         return false;
     if (!findIndexesCurvedSides())
         return false;

From 364702b1c98943e4e306e745389d3f464010f069 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 27 Oct 2020 19:00:25 +0000
Subject: [PATCH 057/152] cmake(3rdparty): use EXCLUDE_FROM_ALL

---
 3rdparty/carotene/CMakeLists.txt      | 4 ++--
 3rdparty/cpufeatures/CMakeLists.txt   | 4 ++--
 3rdparty/ippicv/CMakeLists.txt        | 4 ++--
 3rdparty/ittnotify/CMakeLists.txt     | 4 ++--
 3rdparty/libjasper/CMakeLists.txt     | 4 ++--
 3rdparty/libjpeg-turbo/CMakeLists.txt | 4 ++--
 3rdparty/libjpeg/CMakeLists.txt       | 4 ++--
 3rdparty/libpng/CMakeLists.txt        | 4 ++--
 3rdparty/libtiff/CMakeLists.txt       | 4 ++--
 3rdparty/libwebp/CMakeLists.txt       | 4 ++--
 3rdparty/openexr/CMakeLists.txt       | 4 ++--
 3rdparty/protobuf/CMakeLists.txt      | 4 ++--
 3rdparty/quirc/CMakeLists.txt         | 4 ++--
 3rdparty/tbb/CMakeLists.txt           | 3 ++-
 3rdparty/zlib/CMakeLists.txt          | 2 +-
 cmake/OpenCVUtils.cmake               | 6 ++++++
 16 files changed, 35 insertions(+), 28 deletions(-)

diff --git a/3rdparty/carotene/CMakeLists.txt b/3rdparty/carotene/CMakeLists.txt
index 4319815708..bd26a2d7ef 100644
--- a/3rdparty/carotene/CMakeLists.txt
+++ b/3rdparty/carotene/CMakeLists.txt
@@ -27,7 +27,7 @@ if(CMAKE_COMPILER_IS_GNUCC)
     endif()
 endif()
 
-add_library(carotene_objs OBJECT
+add_library(carotene_objs OBJECT EXCLUDE_FROM_ALL
   ${carotene_headers}
   ${carotene_sources}
 )
@@ -41,4 +41,4 @@ if(WITH_NEON)
 endif()
 
 # we add dummy file to fix XCode build
-add_library(carotene STATIC EXCLUDE_FROM_ALL "$<TARGET_OBJECTS:carotene_objs>" "${CAROTENE_SOURCE_DIR}/dummy.cpp")
+add_library(carotene STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} "$<TARGET_OBJECTS:carotene_objs>" "${CAROTENE_SOURCE_DIR}/dummy.cpp")
diff --git a/3rdparty/cpufeatures/CMakeLists.txt b/3rdparty/cpufeatures/CMakeLists.txt
index 92bce6abf8..bf7af0ecde 100644
--- a/3rdparty/cpufeatures/CMakeLists.txt
+++ b/3rdparty/cpufeatures/CMakeLists.txt
@@ -14,7 +14,7 @@ if(NOT DEFINED CPUFEATURES_SOURCES)
 endif()
 
 include_directories(${CPUFEATURES_INCLUDE_DIRS})
-add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${CPUFEATURES_SOURCES})
+add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${CPUFEATURES_SOURCES})
 
 set_target_properties(${OPENCV_CPUFEATURES_TARGET_NAME}
   PROPERTIES OUTPUT_NAME cpufeatures
@@ -29,7 +29,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(cpufeatures LICENSE README.md)
diff --git a/3rdparty/ippicv/CMakeLists.txt b/3rdparty/ippicv/CMakeLists.txt
index 7931832737..43ad806dd7 100644
--- a/3rdparty/ippicv/CMakeLists.txt
+++ b/3rdparty/ippicv/CMakeLists.txt
@@ -17,7 +17,7 @@ file(GLOB lib_hdrs ${IPP_IW_PATH}/include/*.h ${IPP_IW_PATH}/include/iw/*.h ${IP
 #         Define the library target:
 # ----------------------------------------------------------------------------------
 
-add_library(${IPP_IW_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
+add_library(${IPP_IW_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
 
 if(UNIX)
   if(CV_GCC OR CV_CLANG OR CV_ICC)
@@ -41,5 +41,5 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${IPP_IW_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${IPP_IW_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
diff --git a/3rdparty/ittnotify/CMakeLists.txt b/3rdparty/ittnotify/CMakeLists.txt
index c2caf76723..a227aff88e 100644
--- a/3rdparty/ittnotify/CMakeLists.txt
+++ b/3rdparty/ittnotify/CMakeLists.txt
@@ -37,7 +37,7 @@ set(ITT_SRCS
     src/ittnotify/jitprofiling.c
 )
 
-add_library(${ITT_LIBRARY} STATIC ${ITT_SRCS} ${ITT_PUBLIC_HDRS} ${ITT_PRIVATE_HDRS})
+add_library(${ITT_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${ITT_SRCS} ${ITT_PUBLIC_HDRS} ${ITT_PRIVATE_HDRS})
 
 if(NOT WIN32)
   if(HAVE_DL_LIBRARY)
@@ -60,7 +60,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${ITT_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${ITT_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(ittnotify src/ittnotify/LICENSE.BSD src/ittnotify/LICENSE.GPL)
diff --git a/3rdparty/libjasper/CMakeLists.txt b/3rdparty/libjasper/CMakeLists.txt
index 897b6ae606..9f05d89733 100644
--- a/3rdparty/libjasper/CMakeLists.txt
+++ b/3rdparty/libjasper/CMakeLists.txt
@@ -17,7 +17,7 @@ file(GLOB lib_ext_hdrs jasper/*.h)
 #         Define the library target:
 # ----------------------------------------------------------------------------------
 
-add_library(${JASPER_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs})
+add_library(${JASPER_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs})
 
 if(WIN32 AND NOT MINGW)
   add_definitions(-DJAS_WIN_MSVC_BUILD)
@@ -46,7 +46,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(jasper LICENSE README copyright)
diff --git a/3rdparty/libjpeg-turbo/CMakeLists.txt b/3rdparty/libjpeg-turbo/CMakeLists.txt
index 374d7875de..8da98b6020 100644
--- a/3rdparty/libjpeg-turbo/CMakeLists.txt
+++ b/3rdparty/libjpeg-turbo/CMakeLists.txt
@@ -106,7 +106,7 @@ set(JPEG_SOURCES ${JPEG_SOURCES} jsimd_none.c)
 
 ocv_list_add_prefix(JPEG_SOURCES src/)
 
-add_library(${JPEG_LIBRARY} STATIC ${JPEG_SOURCES} ${SIMD_OBJS})
+add_library(${JPEG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${JPEG_SOURCES} ${SIMD_OBJS})
 
 set_target_properties(${JPEG_LIBRARY}
   PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY}
@@ -121,7 +121,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(libjpeg-turbo README.md LICENSE.md README.ijg)
diff --git a/3rdparty/libjpeg/CMakeLists.txt b/3rdparty/libjpeg/CMakeLists.txt
index b50fc09840..c0524cc38a 100644
--- a/3rdparty/libjpeg/CMakeLists.txt
+++ b/3rdparty/libjpeg/CMakeLists.txt
@@ -19,7 +19,7 @@ endif()
 #         Define the library target:
 # ----------------------------------------------------------------------------------
 
-add_library(${JPEG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
+add_library(${JPEG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
 
 if(CV_GCC OR CV_CLANG)
   set_source_files_properties(jcdctmgr.c PROPERTIES COMPILE_FLAGS "-O1")
@@ -42,7 +42,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(libjpeg README)
diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt
index 31e77676e8..efa59627eb 100644
--- a/3rdparty/libpng/CMakeLists.txt
+++ b/3rdparty/libpng/CMakeLists.txt
@@ -74,7 +74,7 @@ if(MSVC)
   add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
 endif(MSVC)
 
-add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
+add_library(${PNG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
 target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARIES})
 
 ocv_warnings_disable(CMAKE_C_FLAGS -Wundef -Wcast-align -Wimplicit-fallthrough -Wunused-parameter -Wsign-compare)
@@ -92,7 +92,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(libpng LICENSE README)
diff --git a/3rdparty/libtiff/CMakeLists.txt b/3rdparty/libtiff/CMakeLists.txt
index 16cb598955..61e40b2885 100644
--- a/3rdparty/libtiff/CMakeLists.txt
+++ b/3rdparty/libtiff/CMakeLists.txt
@@ -462,7 +462,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4456 /wd4457 /wd4312) # vs2015
 
 ocv_warnings_disable(CMAKE_C_FLAGS /wd4267 /wd4244 /wd4018 /wd4311 /wd4312)
 
-add_library(${TIFF_LIBRARY} STATIC ${lib_srcs})
+add_library(${TIFF_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs})
 target_link_libraries(${TIFF_LIBRARY} ${ZLIB_LIBRARIES})
 
 set_target_properties(${TIFF_LIBRARY}
@@ -479,7 +479,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(libtiff COPYRIGHT)
diff --git a/3rdparty/libwebp/CMakeLists.txt b/3rdparty/libwebp/CMakeLists.txt
index 83884c9d4d..80ab0b86ab 100644
--- a/3rdparty/libwebp/CMakeLists.txt
+++ b/3rdparty/libwebp/CMakeLists.txt
@@ -34,7 +34,7 @@ endif()
 
 add_definitions(-DWEBP_USE_THREAD)
 
-add_library(${WEBP_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
+add_library(${WEBP_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
 if(ANDROID)
   target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES})
 endif()
@@ -59,6 +59,6 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${WEBP_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${WEBP_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
diff --git a/3rdparty/openexr/CMakeLists.txt b/3rdparty/openexr/CMakeLists.txt
index 2ee5146a3d..88f60b23c0 100644
--- a/3rdparty/openexr/CMakeLists.txt
+++ b/3rdparty/openexr/CMakeLists.txt
@@ -125,7 +125,7 @@ if(MSVC AND CV_ICC)
   set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Qrestrict")
 endif()
 
-add_library(IlmImf STATIC ${lib_hdrs} ${lib_srcs})
+add_library(IlmImf STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_hdrs} ${lib_srcs})
 target_link_libraries(IlmImf ${ZLIB_LIBRARIES})
 
 set_target_properties(IlmImf
@@ -142,7 +142,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(openexr LICENSE AUTHORS.ilmbase AUTHORS.openexr)
diff --git a/3rdparty/protobuf/CMakeLists.txt b/3rdparty/protobuf/CMakeLists.txt
index fc9497f928..c71bf9faff 100644
--- a/3rdparty/protobuf/CMakeLists.txt
+++ b/3rdparty/protobuf/CMakeLists.txt
@@ -141,7 +141,7 @@ append_if_exist(Protobuf_SRCS
 )
 
 include_directories(BEFORE "${PROTOBUF_ROOT}/src")  # ensure use of own headers: https://github.com/opencv/opencv/issues/13328
-add_library(libprotobuf STATIC ${Protobuf_SRCS})
+add_library(libprotobuf STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${Protobuf_SRCS})
 target_include_directories(libprotobuf SYSTEM PUBLIC $<BUILD_INTERFACE:${PROTOBUF_ROOT}/src>)
 set_target_properties(libprotobuf
     PROPERTIES
@@ -157,7 +157,7 @@ get_protobuf_version(Protobuf_VERSION "${PROTOBUF_ROOT}/src")
 set(Protobuf_VERSION ${Protobuf_VERSION} CACHE INTERNAL "" FORCE)
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(libprotobuf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(libprotobuf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(protobuf LICENSE README.md)
diff --git a/3rdparty/quirc/CMakeLists.txt b/3rdparty/quirc/CMakeLists.txt
index 7a6b2bb222..c0464c16ae 100644
--- a/3rdparty/quirc/CMakeLists.txt
+++ b/3rdparty/quirc/CMakeLists.txt
@@ -8,7 +8,7 @@ ocv_include_directories(${CURR_INCLUDE_DIR})
 file(GLOB_RECURSE quirc_headers RELATIVE "${CMAKE_CURRENT_LIST_DIR}" "include/*.h")
 file(GLOB_RECURSE quirc_sources RELATIVE "${CMAKE_CURRENT_LIST_DIR}" "src/*.c")
 
-add_library(${PROJECT_NAME} STATIC ${quirc_headers} ${quirc_sources})
+add_library(${PROJECT_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${quirc_headers} ${quirc_sources})
 ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-variable -Wshadow)
 
 set_target_properties(${PROJECT_NAME}
@@ -24,7 +24,7 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 if(NOT BUILD_SHARED_LIBS)
-  ocv_install_target(${PROJECT_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
+  ocv_install_target(${PROJECT_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
 endif()
 
 ocv_install_3rdparty_licenses(${PROJECT_NAME} LICENSE)
diff --git a/3rdparty/tbb/CMakeLists.txt b/3rdparty/tbb/CMakeLists.txt
index 2aa9127da0..a085b0f3ca 100644
--- a/3rdparty/tbb/CMakeLists.txt
+++ b/3rdparty/tbb/CMakeLists.txt
@@ -108,7 +108,7 @@ set(tbb_version_file "version_string.ver")
 configure_file("${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}.cmakein" "${CMAKE_CURRENT_BINARY_DIR}/${tbb_version_file}" @ONLY)
 list(APPEND TBB_SOURCE_FILES "${CMAKE_CURRENT_BINARY_DIR}/${tbb_version_file}")
 
-add_library(tbb ${TBB_SOURCE_FILES})
+add_library(tbb ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${TBB_SOURCE_FILES})
 target_compile_definitions(tbb PUBLIC
     TBB_USE_GCC_BUILTINS=1
     __TBB_GCC_BUILTIN_ATOMICS_PRESENT=1
@@ -165,6 +165,7 @@ ocv_install_target(tbb EXPORT OpenCVModules
     RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs
     LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs
     ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev
+    OPTIONAL
     )
 
 ocv_install_3rdparty_licenses(tbb "${tbb_src_dir}/LICENSE" "${tbb_src_dir}/README")
diff --git a/3rdparty/zlib/CMakeLists.txt b/3rdparty/zlib/CMakeLists.txt
index 553700bacc..9758861a6b 100644
--- a/3rdparty/zlib/CMakeLists.txt
+++ b/3rdparty/zlib/CMakeLists.txt
@@ -76,7 +76,7 @@ set(ZLIB_SRCS
     zutil.c
 )
 
-add_library(${ZLIB_LIBRARY} STATIC ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
+add_library(${ZLIB_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
 set_target_properties(${ZLIB_LIBRARY} PROPERTIES DEFINE_SYMBOL ZLIB_DLL)
 
 ocv_warnings_disable(CMAKE_C_FLAGS -Wshorten-64-to-32 -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wshift-negative-value
diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake
index 610f0e6437..693a840ffe 100644
--- a/cmake/OpenCVUtils.cmake
+++ b/cmake/OpenCVUtils.cmake
@@ -1890,3 +1890,9 @@ function(ocv_update_file filepath content)
     file(WRITE "${filepath}" "${content}")
   endif()
 endfunction()
+
+if(NOT BUILD_SHARED_LIBS AND (CMAKE_VERSION VERSION_LESS "3.14.0"))
+  ocv_update(OPENCV_3RDPARTY_EXCLUDE_FROM_ALL "")  # avoid CMake warnings: https://gitlab.kitware.com/cmake/cmake/-/issues/18938
+else()
+  ocv_update(OPENCV_3RDPARTY_EXCLUDE_FROM_ALL "EXCLUDE_FROM_ALL")
+endif()

From 56d2b7137ccf5258f533db9db7b3d8e60084041c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jack=C2=B7Boos=C2=B7Yu?=
 <47264268+JackBoosY@users.noreply.github.com>
Date: Thu, 29 Oct 2020 06:50:25 -0700
Subject: [PATCH 058/152] Merge pull request #18658 from JackBoosY:master

* Fix CMake configure error

* Check the CMake version

* Add comments
---
 modules/videoio/cmake/init.cmake | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/modules/videoio/cmake/init.cmake b/modules/videoio/cmake/init.cmake
index 1efef12c5e..81d5d9fe87 100644
--- a/modules/videoio/cmake/init.cmake
+++ b/modules/videoio/cmake/init.cmake
@@ -12,8 +12,16 @@ function(ocv_add_external_target name inc link def)
   set_target_properties(ocv.3rdparty.${name} PROPERTIES
     INTERFACE_INCLUDE_DIRECTORIES "${inc}"
     INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${inc}"
-    INTERFACE_LINK_LIBRARIES "${link}"
     INTERFACE_COMPILE_DEFINITIONS "${def}")
+  # With CMake >= 3.11, setting the INTERFACE_LINK_LIBRARIES property directly
+  # no longer applies to an interface library, so use target_link_libraries() instead.
+  # See https://github.com/opencv/opencv/pull/18658
+  if (CMAKE_VERSION VERSION_LESS 3.11)
+    set_target_properties(ocv.3rdparty.${name} PROPERTIES
+      INTERFACE_LINK_LIBRARIES "${link}")
+  else()
+    target_link_libraries(ocv.3rdparty.${name} INTERFACE ${link})
+  endif()
   if(NOT BUILD_SHARED_LIBS)
     install(TARGETS ocv.3rdparty.${name} EXPORT OpenCVModules)
   endif()

From ca8bb8d0532c3070075db66947b58e8894650fdc Mon Sep 17 00:00:00 2001
From: Dmitry Matveev <dmitry.matveev@intel.com>
Date: Wed, 18 Mar 2020 02:38:24 +0300
Subject: [PATCH 059/152] G-API: Introduce streaming::desync and infer(ROI)

- desync() is a new (and, for now, the only) intrinsic
  which splits the graph execution into asynchronous parts
  when running in Streaming mode (see the sketch after this list);
- desync() has no effect when compiling in Traditional mode;
- Added tests covering desync() in various scenarios;
- Extended GStreamingExecutor to support desync(); also extended
  GStreamingCompiled with a new version of pull() returning a
  vector of optional values;
- Fixed various issues with storing the type information & proper
  construction callbacks for GArray<> and GOpaque<>;

- Introduced a new infer(ROI, GMat) overload with a sample;

- Introduced an internal API for Islands to control the fusion
  procedure (to fuse or not to fuse);
- Introduced a handleStopStream() callback for island executables;
- Added GCompileArgs to the graph metadata (required for other
  features).
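
A quick illustration (not part of the patch itself): a graph with a
desynchronized branch is constructed like this. The Blur parameters
are arbitrary and includes are omitted:

```cpp
cv::GMat in;
cv::GMat main_path = cv::gapi::copy(in);                // synchronous path
cv::GMat d         = cv::gapi::streaming::desync(in);   // fork a desync path
cv::GMat slow_path = cv::gapi::blur(d, cv::Size(5,5));  // may lag behind

auto sc = cv::GComputation(cv::GIn(in), cv::GOut(main_path, slow_path))
              .compileStreaming();
```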
---
 modules/gapi/CMakeLists.txt                   |   2 +
 modules/gapi/include/opencv2/gapi.hpp         |   4 +
 modules/gapi/include/opencv2/gapi/garray.hpp  |  23 +-
 modules/gapi/include/opencv2/gapi/gkernel.hpp |  59 ++-
 modules/gapi/include/opencv2/gapi/gopaque.hpp |  26 +-
 .../gapi/include/opencv2/gapi/gstreaming.hpp  | 122 +++++
 .../include/opencv2/gapi/gtype_traits.hpp     |  23 +
 modules/gapi/include/opencv2/gapi/infer.hpp   |  63 ++-
 .../include/opencv2/gapi/streaming/desync.hpp |  84 +++
 modules/gapi/samples/infer_single_roi.cpp     | 264 ++++++++++
 modules/gapi/src/api/gbackend.cpp             |  15 +
 modules/gapi/src/api/gbackend_priv.hpp        |  18 +-
 modules/gapi/src/api/ginfer.cpp               |   3 +
 modules/gapi/src/api/kernels_streaming.cpp    |  74 +++
 modules/gapi/src/backends/ie/giebackend.cpp   |  60 +++
 modules/gapi/src/backends/ocl/goclbackend.cpp |   4 +
 modules/gapi/src/compiler/gcompiler.cpp       |   8 +
 modules/gapi/src/compiler/gislandmodel.cpp    |  20 +-
 modules/gapi/src/compiler/gislandmodel.hpp    |  21 +
 modules/gapi/src/compiler/gmodel.cpp          |  19 +-
 modules/gapi/src/compiler/gmodel.hpp          |  70 ++-
 modules/gapi/src/compiler/gmodelbuilder.cpp   |   9 +-
 modules/gapi/src/compiler/gobjref.hpp         |  12 +-
 modules/gapi/src/compiler/gstreaming.cpp      |  11 +
 modules/gapi/src/compiler/gstreaming_priv.hpp |   1 +
 modules/gapi/src/compiler/passes/exec.cpp     | 113 ++--
 modules/gapi/src/compiler/passes/intrin.cpp   | 305 +++++++++++
 modules/gapi/src/compiler/passes/kernels.cpp  |  48 +-
 modules/gapi/src/compiler/passes/passes.hpp   |   9 +-
 modules/gapi/src/compiler/transactions.hpp    |  83 ++-
 modules/gapi/src/executor/conc_queue.hpp      |   3 +-
 .../gapi/src/executor/gstreamingexecutor.cpp  | 375 +++++++++++--
 .../gapi/src/executor/gstreamingexecutor.hpp  |  82 ++-
 modules/gapi/src/executor/last_value.hpp      | 105 ++++
 .../internal/gapi_int_gmodel_builder_test.cpp |  10 +-
 .../internal/gapi_int_island_fusion_tests.cpp |  60 ++-
 .../test/internal/gapi_transactions_test.cpp  | 161 +++++-
 modules/gapi/test/own/conc_queue_tests.cpp    |   6 +-
 .../test/own/last_written_value_tests.cpp     | 156 ++++++
 .../test/streaming/gapi_streaming_tests.cpp   | 491 +++++++++++++++++-
 40 files changed, 2827 insertions(+), 195 deletions(-)
 create mode 100644 modules/gapi/include/opencv2/gapi/streaming/desync.hpp
 create mode 100644 modules/gapi/samples/infer_single_roi.cpp
 create mode 100644 modules/gapi/src/api/kernels_streaming.cpp
 create mode 100644 modules/gapi/src/compiler/passes/intrin.cpp
 create mode 100644 modules/gapi/src/executor/last_value.hpp
 create mode 100644 modules/gapi/test/own/last_written_value_tests.cpp

diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index 82b719ad4e..acfbd1d70e 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -74,6 +74,7 @@ set(gapi_srcs
     src/api/kernels_imgproc.cpp
     src/api/kernels_video.cpp
     src/api/kernels_nnparsers.cpp
+    src/api/kernels_streaming.cpp
     src/api/render.cpp
     src/api/render_ocv.cpp
     src/api/ginfer.cpp
@@ -97,6 +98,7 @@ set(gapi_srcs
     src/compiler/passes/pattern_matching.cpp
     src/compiler/passes/perform_substitution.cpp
     src/compiler/passes/streaming.cpp
+    src/compiler/passes/intrin.cpp
 
     # Executor
     src/executor/gexecutor.cpp
diff --git a/modules/gapi/include/opencv2/gapi.hpp b/modules/gapi/include/opencv2/gapi.hpp
index c6ab3f13fd..8445746710 100644
--- a/modules/gapi/include/opencv2/gapi.hpp
+++ b/modules/gapi/include/opencv2/gapi.hpp
@@ -33,4 +33,8 @@
 #include <opencv2/gapi/gkernel.hpp>
 #include <opencv2/gapi/operators.hpp>
 
+// Include this file here to avoid cyclic dependency between
+// Desync & GKernel & GComputation & GStreamingCompiled.
+#include <opencv2/gapi/streaming/desync.hpp>
+
 #endif // OPENCV_GAPI_HPP
diff --git a/modules/gapi/include/opencv2/gapi/garray.hpp b/modules/gapi/include/opencv2/gapi/garray.hpp
index 9118f4de98..0798655666 100644
--- a/modules/gapi/include/opencv2/gapi/garray.hpp
+++ b/modules/gapi/include/opencv2/gapi/garray.hpp
@@ -284,6 +284,14 @@ namespace detail
             return static_cast<VectorRefT<T>&>(*m_ref).rref();
         }
 
+        // Check if this ref was created for/from std::vector<T>
+        template <typename T> bool holds() const
+        {
+            if (!m_ref) return false;
+            using U = typename std::decay<T>::type;
+            return dynamic_cast<VectorRefT<U>*>(m_ref.get()) != nullptr;
+        }
+
         void mov(VectorRef &v)
         {
             m_ref->mov(*v.m_ref);
@@ -341,15 +349,18 @@ public:
     explicit GArray(detail::GArrayU &&ref) // GArrayU-based constructor
         : m_ref(ref) { putDetails(); }     //   (used by GCall, not for users)
 
-    detail::GArrayU strip() const { return m_ref; }
-
-private:
-    static void VCTor(detail::VectorRef& vref) {
+    /// @private
+    detail::GArrayU strip() const {
+        return m_ref;
+    }
+    /// @private
+    static void VCtor(detail::VectorRef& vref) {
         vref.reset<HT>();
-        vref.storeKind<HT>();
     }
+
+private:
     void putDetails() {
-        m_ref.setConstructFcn(&VCTor);
+        m_ref.setConstructFcn(&VCtor);
         m_ref.specifyType<HT>();  // FIXME: to unify those 2 to avoid excessive dynamic_cast
         m_ref.storeKind<HT>();    //
     }
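
The new holds<T>() check can be exercised on a VectorRef directly; a
minimal sketch (VectorRef is an internal detail type, shown here for
illustration only):

```cpp
cv::detail::VectorRef vref;
vref.reset<int>();                // back the ref with a std::vector<int>
CV_Assert( vref.holds<int>());    // created for std::vector<int>...
CV_Assert(!vref.holds<float>());  // ...but not for std::vector<float>
```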
diff --git a/modules/gapi/include/opencv2/gapi/gkernel.hpp b/modules/gapi/include/opencv2/gapi/gkernel.hpp
index b04cedecad..d4c3e6c634 100644
--- a/modules/gapi/include/opencv2/gapi/gkernel.hpp
+++ b/modules/gapi/include/opencv2/gapi/gkernel.hpp
@@ -28,6 +28,7 @@ namespace cv {
 
 using GShapes = std::vector<GShape>;
 using GKinds = std::vector<cv::detail::OpaqueKind>;
+using GCtors  = std::vector<detail::HostCtor>;
 
 // GKernel describes kernel API to the system
 // FIXME: add attributes of a kernel, (e.g. number and types
@@ -41,6 +42,7 @@ struct GAPI_EXPORTS GKernel
     M           outMeta;    // generic adaptor to API::outMeta(...)
     GShapes     outShapes;  // types (shapes) kernel's outputs
     GKinds      inKinds;    // kinds of kernel's inputs (fixme: below)
+    GCtors      outCtors;   // captured constructors for template output types
 };
 // TODO: It's questionable if inKinds should really be here. Instead,
 // this information could come from meta.
@@ -60,30 +62,27 @@ namespace detail
     // yield() is used in graph construction time as a generic method to obtain
     // lazy "return value" of G-API operations
     //
-    namespace
+    template<typename T> struct Yield;
+    template<> struct Yield<cv::GMat>
     {
-        template<typename T> struct Yield;
-        template<> struct Yield<cv::GMat>
-        {
-            static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); }
-        };
-        template<> struct Yield<cv::GMatP>
-        {
-            static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); }
-        };
-        template<> struct Yield<cv::GScalar>
-        {
-            static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); }
-        };
-        template<typename U> struct Yield<cv::GArray<U> >
-        {
-            static inline cv::GArray<U> yield(cv::GCall &call, int i) { return call.yieldArray<U>(i); }
-        };
-        template<typename U> struct Yield<cv::GOpaque<U> >
-        {
-            static inline cv::GOpaque<U> yield(cv::GCall &call, int i) { return call.yieldOpaque<U>(i); }
-        };
-    } // anonymous namespace
+        static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); }
+    };
+    template<> struct Yield<cv::GMatP>
+    {
+        static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); }
+    };
+    template<> struct Yield<cv::GScalar>
+    {
+        static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); }
+    };
+    template<typename U> struct Yield<cv::GArray<U> >
+    {
+        static inline cv::GArray<U> yield(cv::GCall &call, int i) { return call.yieldArray<U>(i); }
+    };
+    template<typename U> struct Yield<cv::GOpaque<U> >
+    {
+        static inline cv::GOpaque<U> yield(cv::GCall &call, int i) { return call.yieldOpaque<U>(i); }
+    };
 
     ////////////////////////////////////////////////////////////////////////////
     // Helper classes which brings outputMeta() marshalling to kernel
@@ -215,7 +214,8 @@ public:
                               , K::tag()
                               , &K::getOutMeta
                               , {detail::GTypeTraits<R>::shape...}
-                              , {detail::GTypeTraits<Args>::op_kind...}});
+                              , {detail::GTypeTraits<Args>::op_kind...}
+                              , {detail::GObtainCtor<R>::get()...}});
         call.pass(args...); // TODO: std::forward() here?
         return yield(call, typename detail::MkSeq<sizeof...(R)>::type());
     }
@@ -240,7 +240,8 @@ public:
                               , K::tag()
                               , &K::getOutMeta
                               , {detail::GTypeTraits<R>::shape}
-                              , {detail::GTypeTraits<Args>::op_kind...}});
+                              , {detail::GTypeTraits<Args>::op_kind...}
+                              , {detail::GObtainCtor<R>::get()}});
         call.pass(args...);
         return detail::Yield<R>::yield(call, 0);
     }
@@ -459,11 +460,6 @@ namespace gapi {
         std::vector<GTransform> m_transformations;
 
     protected:
-        /// @private
-        // Check if package contains ANY implementation of a kernel API
-        // by API textual id.
-        bool includesAPI(const std::string &id) const;
-
         /// @private
         // Remove ALL implementations of the given API (identified by ID)
         void removeAPI(const std::string &id);
@@ -566,6 +562,9 @@ namespace gapi {
             return includesAPI(KAPI::id());
         }
 
+        /// @private
+        bool includesAPI(const std::string &id) const;
+
         // FIXME: The below comment is wrong, and who needs this function?
         /**
          * @brief Find a kernel (by its API)
diff --git a/modules/gapi/include/opencv2/gapi/gopaque.hpp b/modules/gapi/include/opencv2/gapi/gopaque.hpp
index 3d1394473b..6ab28910d6 100644
--- a/modules/gapi/include/opencv2/gapi/gopaque.hpp
+++ b/modules/gapi/include/opencv2/gapi/gopaque.hpp
@@ -295,25 +295,27 @@ namespace detail
 template<typename T> class GOpaque
 {
 public:
-    GOpaque() { putDetails(); }              // Empty constructor
-    explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor
-        : m_ref(ref) { putDetails(); }       // (used by GCall, not for users)
-
-    detail::GOpaqueU strip() const { return m_ref; }
-
-private:
     // Host type (or Flat type) - the type this GOpaque is actually
     // specified to.
     using HT = typename detail::flatten_g<util::decay_t<T>>::type;
 
-    static void CTor(detail::OpaqueRef& ref) {
+    GOpaque() { putDetails(); }              // Empty constructor
+    explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor
+        : m_ref(ref) { putDetails(); }       // (used by GCall, not for users)
+
+    /// @private
+    detail::GOpaqueU strip() const {
+        return m_ref;
+    }
+    /// @private
+    static void Ctor(detail::OpaqueRef& ref) {
         ref.reset<HT>();
-        ref.storeKind<HT>();
     }
+private:
     void putDetails() {
-        m_ref.setConstructFcn(&CTor);
-        m_ref.specifyType<HT>(); // FIXME: to unify those 2 to avoid excessive dynamic_cast
-        m_ref.storeKind<HT>();   //
+        m_ref.setConstructFcn(&Ctor);
+        m_ref.specifyType<HT>();
+        m_ref.storeKind<HT>();
     }
 
     detail::GOpaqueU m_ref;
diff --git a/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/modules/gapi/include/opencv2/gapi/gstreaming.hpp
index 037fa94452..e09cf8d0f7 100644
--- a/modules/gapi/include/opencv2/gapi/gstreaming.hpp
+++ b/modules/gapi/include/opencv2/gapi/gstreaming.hpp
@@ -8,15 +8,99 @@
 #ifndef OPENCV_GAPI_GSTREAMING_COMPILED_HPP
 #define OPENCV_GAPI_GSTREAMING_COMPILED_HPP
 
+#include <memory>
 #include <vector>
 
 #include <opencv2/gapi/opencv_includes.hpp>
 #include <opencv2/gapi/own/assert.hpp>
+#include <opencv2/gapi/util/optional.hpp>
 #include <opencv2/gapi/garg.hpp>
 #include <opencv2/gapi/streaming/source.hpp>
 
 namespace cv {
 
+template<class T> using optional = cv::util::optional<T>;
+
+namespace detail {
+template<typename T> struct wref_spec {
+    using type = T;
+};
+template<typename T> struct wref_spec<std::vector<T> > {
+    using type = T;
+};
+
+template<typename RefHolder>
+struct OptRef {
+    struct OptHolder {
+        virtual void mov(RefHolder &h) = 0;
+        virtual void reset() = 0;
+        virtual ~OptHolder() = default;
+        using Ptr = std::shared_ptr<OptHolder>;
+    };
+    template<class T> struct Holder final: OptHolder {
+        std::reference_wrapper<cv::optional<T> > m_opt_ref;
+
+        explicit Holder(cv::optional<T>& opt) : m_opt_ref(std::ref(opt)) {
+        }
+        virtual void mov(RefHolder &h) override {
+            using U = typename wref_spec<T>::type;
+            m_opt_ref.get() = cv::util::make_optional(std::move(h.template wref<U>()));
+        }
+        virtual void reset() override {
+            m_opt_ref.get().reset();
+        }
+    };
+    template<class T>
+    explicit OptRef(cv::optional<T>& t) : m_opt{new Holder<T>(t)} {}
+    void mov(RefHolder &h) { m_opt->mov(h); }
+    void reset()           { m_opt->reset();}
+private:
+    typename OptHolder::Ptr m_opt;
+};
+using OptionalVectorRef = OptRef<cv::detail::VectorRef>;
+using OptionalOpaqueRef = OptRef<cv::detail::OpaqueRef>;
+} // namespace detail
+
+// TODO: Keep it in sync with GRunArgP (derive the type automatically?)
+using GOptRunArgP = util::variant<
+    optional<cv::Mat>*,
+    optional<cv::RMat>*,
+    optional<cv::Scalar>*,
+    cv::detail::OptionalVectorRef,
+    cv::detail::OptionalOpaqueRef
+>;
+using GOptRunArgsP = std::vector<GOptRunArgP>;
+
+namespace detail {
+
+template<typename T> inline GOptRunArgP wrap_opt_arg(optional<T>& arg) {
+    // By default, T goes to an OpaqueRef. All other types are specialized
+    return GOptRunArgP{OptionalOpaqueRef(arg)};
+}
+
+template<typename T> inline GOptRunArgP wrap_opt_arg(optional<std::vector<T> >& arg) {
+    return GOptRunArgP{OptionalVectorRef(arg)};
+}
+
+template<> inline GOptRunArgP wrap_opt_arg(optional<cv::Mat> &m) {
+    return GOptRunArgP{&m};
+}
+
+template<> inline GOptRunArgP wrap_opt_arg(optional<cv::Scalar> &s) {
+    return GOptRunArgP{&s};
+}
+
+} // namespace detail
+
+// Now cv::gout() may produce an empty vector (see "dynamic graphs"), so
+// these two overloads could conflict. State here that the Opt version
+// _must_ take at least one input to disambiguate the overload.
+template<typename T, typename... Ts>
+inline GOptRunArgsP gout(optional<T>&arg, optional<Ts>&... args)
+{
+    return GOptRunArgsP{ detail::wrap_opt_arg(arg), detail::wrap_opt_arg(args)... };
+}
+
 /**
  * \addtogroup gapi_main_classes
  * @{
@@ -169,6 +253,44 @@ public:
     // NB: Used from python
     GAPI_WRAP std::tuple<bool, cv::GRunArgs> pull();
 
+    /**
+     * @brief Get some next available data from the pipeline.
+     *
+     * This method takes a vector of cv::optional objects. An object
+     * is assigned some value if that value is available (ready) at
+     * the time of the call, and is reset to empty() otherwise.
+     *
+     * This is a blocking method which guarantees that some data has
+     * been written to the output vector on return.
+     *
+     * Using this method only makes sense if the graph has
+     * desynchronized parts (see cv::gapi::desync). If there are no
+     * desynchronized parts in the graph, the behavior of this
+     * method is identical to the regular pull() (all data objects are
+     * produced synchronously in the output vector).
+     *
+     * Use gout() to create an output parameter vector.
+     *
+     * The output vector must have the same number of elements as defined
+     * in the cv::GComputation protocol (at the moment of its
+     * construction). Element shapes must also conform to the protocol
+     * (e.g. cv::optional<cv::Mat> needs to be passed where cv::GMat
+     * has been declared as output, and so on). A run-time exception is
+     * generated on type mismatch.
+     *
+     * This method writes new data into the objects passed via the
+     * output vector. If no data is ready yet, this method blocks. Use
+     * try_pull() if you need a non-blocking version.
+     *
+     * @param outs vector of output parameters to obtain.
+     * @return true if the next result has been obtained;
+     *    false marks the end of the stream.
+     *
+     * @sa cv::gapi::desync
+     */
+    bool pull(cv::GOptRunArgsP &&outs);
+
     /**
      * @brief Try to get the next processed frame from the pipeline.
      *
diff --git a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp
index c9800b2b16..2e8dcb1aec 100644
--- a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp
+++ b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp
@@ -191,6 +191,29 @@ namespace detail
 
     template<typename T> using wrap_gapi_helper = WrapValue<typename std::decay<T>::type>;
     template<typename T> using wrap_host_helper = WrapValue<typename std::decay<g_type_of_t<T> >::type>;
+
+// Union type for various user-defined type constructors (GArray<T>,
+// GOpaque<T>, etc)
+//
+// TODO: Replace construct-only API with a more generic one (probably
+//    with bits of introspection)
+//
+// Not required for non-user-defined types (GMat, GScalar, etc)
+using HostCtor = util::variant
+    < util::monostate
+    , detail::ConstructVec
+    , detail::ConstructOpaque
+    >;
+
+template<typename T> struct GObtainCtor {
+    static HostCtor get() { return HostCtor{}; }
+};
+template<typename T> struct GObtainCtor<GArray<T> > {
+    static HostCtor get() { return HostCtor{ConstructVec{&GArray<T>::VCtor}}; };
+};
+template<typename T> struct GObtainCtor<GOpaque<T> > {
+    static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque<T>::Ctor}}; };
+};
 } // namespace detail
 } // namespace cv
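
The captured constructor can later be invoked to materialize the host
object; a sketch using these internal types (illustration only, and it
assumes ConstructVec is callable with a VectorRef&):

```cpp
// Obtain the type-erased constructor captured for GArray<int>, then
// invoke it to default-construct the underlying std::vector<int>.
cv::detail::HostCtor ctor = cv::detail::GObtainCtor<cv::GArray<int> >::get();
cv::detail::VectorRef vref;
cv::util::get<cv::detail::ConstructVec>(ctor)(vref);
CV_Assert(vref.holds<int>());  // vref is now backed by std::vector<int>
```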
 
diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp
index 9b4580ec6b..b850775a62 100644
--- a/modules/gapi/include/opencv2/gapi/infer.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer.hpp
@@ -2,7 +2,7 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 //
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2019-2020 Intel Corporation
 
 
 #ifndef OPENCV_GAPI_INFER_HPP
@@ -77,6 +77,9 @@ public:
 
     using ResultL = std::tuple< cv::GArray<R>... >;
     using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
+
+    // FIXME: Args... must be limited to a single GMat
+    using APIRoi = std::function<Result(cv::GOpaque<cv::Rect>, Args...)>;
 };
 
 // Single-return-value network definition (specialized base class)
@@ -92,6 +95,9 @@ public:
 
     using ResultL = cv::GArray<R>;
     using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
+
+    // FIXME: Args... must be limited to a single GMat
+    using APIRoi = std::function<Result(cv::GOpaque<cv::Rect>, Args...)>;
 };
 
 // APIList2 is also template to allow different calling options
@@ -114,10 +120,10 @@ struct InferAPIList2 {
 // a particular backend, not by a network itself.
 struct GInferBase {
     static constexpr const char * id() {
-        return "org.opencv.dnn.infer";     // Universal stub
+        return "org.opencv.dnn.infer";            // Universal stub
     }
     static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
-        return GMetaArgs{};                // One more universal stub
+        return GMetaArgs{};                       // One more universal stub
     }
 };
 
@@ -164,15 +170,25 @@ private:
     std::shared_ptr<Priv> m_priv;
 };
 /** @} */
+// Base "InferROI" kernel.
+// All notes from "Infer" kernel apply here as well.
+struct GInferROIBase {
+    static constexpr const char * id() {
+        return "org.opencv.dnn.infer-roi";        // Universal stub
+    }
+    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
+        return GMetaArgs{};                       // One more universal stub
+    }
+};
 
 // Base "Infer list" kernel.
 // All notes from "Infer" kernel apply here as well.
 struct GInferListBase {
     static constexpr const char * id() {
-        return "org.opencv.dnn.infer-roi";      // Universal stub
+        return "org.opencv.dnn.infer-roi-list-1"; // Universal stub
     }
     static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
-        return GMetaArgs{};                     // One more universal stub
+        return GMetaArgs{};                       // One more universal stub
     }
 };
 
@@ -180,10 +196,10 @@ struct GInferListBase {
 // All notes from "Infer" kernel apply here as well.
 struct GInferList2Base {
     static constexpr const char * id() {
-        return "org.opencv.dnn.infer-roi-list"; // Universal stub
+        return "org.opencv.dnn.infer-roi-list-2"; // Universal stub
     }
     static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
-        return GMetaArgs{};                     // One more universal stub
+        return GMetaArgs{};                       // One more universal stub
     }
 };
 
@@ -200,6 +216,19 @@ struct GInfer final
     static constexpr const char* tag() { return Net::tag(); }
 };
 
+// A specific roi-inference kernel. API (::on()) is fixed here and
+// verified against Net.
+template<typename Net>
+struct GInferROI final
+    : public GInferROIBase
+    , public detail::KernelTypeMedium< GInferROI<Net>
+                                     , typename Net::APIRoi > {
+    using GInferROIBase::getOutMeta; // FIXME: name lookup conflict workaround?
+
+    static constexpr const char* tag() { return Net::tag(); }
+};
+
+
 // A generic roi-list inference kernel. API (::on()) is derived from
 // the Net template parameter (see more in infer<> overload).
 template<typename Net>
@@ -238,6 +267,23 @@ struct GInferList2 final
 namespace cv {
 namespace gapi {
 
+/** @brief Calculates response for the specified network (template
+ *     parameter) for the specified region in the source image.
+ *     Currently expects a single-input network only.
+ *
+ * @tparam Net Network type defined with the G_API_NET() macro.
+ * @param roi an object describing the region of interest
+ *   in the source image. May be calculated in the same graph dynamically.
+ * @param in input image to take the ROI from.
+ * @return an object of return type as defined in G_API_NET().
+ *   If a network has multiple return values (defined with a tuple), a tuple of
+ *   objects of appropriate type is returned.
+ * @sa  G_API_NET()
+ */
+template<typename Net>
+typename Net::Result infer(cv::GOpaque<cv::Rect> roi, cv::GMat in) {
+    return GInferROI<Net>::on(roi, in);
+}
 
 /** @brief Calculates responses for the specified network (template
  *     parameter) for every region in the source image.
@@ -328,7 +374,8 @@ infer(const std::string& tag, const GInferInputs& inputs)
                 tag,
                 GInferBase::getOutMeta,
                 {}, // outShape will be filled later
-                std::move(kinds)
+                std::move(kinds),
+                {}, // outCtors will be filled later
             });
 
     call->setArgs(std::move(input_args));
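
In short, the new overload is called like the other infer flavors; a
minimal sketch (the complete sample added by this patch,
infer_single_roi.cpp, follows below):

```cpp
G_API_NET(FaceDetector, <cv::GMat(cv::GMat)>, "face-detector");

cv::GMat in;
cv::GOpaque<cv::Rect> roi;  // may also be computed inside the graph
cv::GMat blob = cv::gapi::infer<FaceDetector>(roi, in);
```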
diff --git a/modules/gapi/include/opencv2/gapi/streaming/desync.hpp b/modules/gapi/include/opencv2/gapi/streaming/desync.hpp
new file mode 100644
index 0000000000..86de279fe9
--- /dev/null
+++ b/modules/gapi/include/opencv2/gapi/streaming/desync.hpp
@@ -0,0 +1,84 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+
+#ifndef OPENCV_GAPI_GSTREAMING_DESYNC_HPP
+#define OPENCV_GAPI_GSTREAMING_DESYNC_HPP
+
+#include <tuple>
+
+#include <opencv2/gapi/util/util.hpp>
+#include <opencv2/gapi/gtype_traits.hpp>
+#include <opencv2/gapi/garg.hpp>
+#include <opencv2/gapi/gcall.hpp>
+#include <opencv2/gapi/gkernel.hpp>
+
+namespace cv {
+namespace gapi {
+namespace streaming {
+
+namespace detail {
+struct GDesync {
+    static const char *id() {
+        return "org.opencv.streaming.desync";
+    }
+
+    // A universal yield for desync.
+    // Yields output objects according to the input Types...
+    // Reuses gkernel machinery.
+    // FIXME: This function can be generic and declared in gkernel.hpp
+    //        (it is there already, but as a part of GKernelType[M])
+    template<typename... R, int... IIs>
+    static std::tuple<R...> yield(cv::GCall &call, cv::detail::Seq<IIs...>) {
+        return std::make_tuple(cv::detail::Yield<R>::yield(call, IIs)...);
+    }
+};
+
+template<typename G>
+G desync(const G &g) {
+    cv::GKernel k{
+          GDesync::id()                                     // kernel id
+        , ""                                                // kernel tag
+        , [](const GMetaArgs &a, const GArgs &) {return a;} // outMeta callback
+        , {cv::detail::GTypeTraits<G>::shape}               // output Shape
+        , {cv::detail::GTypeTraits<G>::op_kind}             // input data kinds
+        , {cv::detail::GObtainCtor<G>::get()}               // output template ctors
+    };
+    cv::GCall call(std::move(k));
+    call.pass(g);
+    return std::get<0>(GDesync::yield<G>(call, cv::detail::MkSeq<1>::type()));
+}
+} // namespace detail
+
+/**
+ * @brief Starts a desynchronized branch in the graph.
+ *
+ * This operation takes a single G-API data object and returns a
+ * graph-level "duplicate" of this object.
+ *
+ * Operations which use this data object can be desynchronized
+ * from the rest of the graph.
+ *
+ * This operation has no effect when a GComputation is compiled with
+ * regular cv::GComputation::compile(), since cv::GCompiled objects
+ * always produce their full output vectors.
+ *
+ * This operation only makes sense when a GComputation is compiled in
+ * streaming mode with cv::GComputation::compileStreaming(). If this
+ * operation is used and there are desynchronized outputs, the user
+ * should use a special version of cv::GStreamingCompiled::pull()
+ * which produces an array of cv::util::optional<> objects.
+ *
+ * @note This feature is highly experimental now and is currently
+ * limited to a single GMat argument only.
+ */
+GAPI_EXPORTS GMat desync(const GMat &g);
+
+} // namespace streaming
+} // namespace gapi
+} // namespace cv
+
+#endif // OPENCV_GAPI_GSTREAMING_DESYNC_HPP
diff --git a/modules/gapi/samples/infer_single_roi.cpp b/modules/gapi/samples/infer_single_roi.cpp
new file mode 100644
index 0000000000..6054a3f4a6
--- /dev/null
+++ b/modules/gapi/samples/infer_single_roi.cpp
@@ -0,0 +1,264 @@
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/gapi.hpp>
+#include <opencv2/gapi/core.hpp>
+#include <opencv2/gapi/imgproc.hpp>
+#include <opencv2/gapi/infer.hpp>
+#include <opencv2/gapi/render.hpp>
+#include <opencv2/gapi/infer/ie.hpp>
+#include <opencv2/gapi/cpu/gcpukernel.hpp>
+#include <opencv2/gapi/streaming/cap.hpp>
+#include <opencv2/highgui.hpp>
+
+const std::string keys =
+    "{ h help |                              | Print this help message }"
+    "{ input  |                              | Path to the input video file }"
+    "{ facem  | face-detection-adas-0001.xml | Path to OpenVINO IE face detection model (.xml) }"
+    "{ faced  | CPU                          | Target device for face detection model (e.g. CPU, GPU, VPU, ...) }"
+    "{ r roi  | -1,-1,-1,-1                  | Region of interest (ROI) to use for inference. Identified automatically when not set }";
+
+namespace {
+
+std::string weights_path(const std::string &model_path) {
+    const auto EXT_LEN = 4u;
+    const auto sz = model_path.size();
+    CV_Assert(sz > EXT_LEN);
+
+    auto ext = model_path.substr(sz - EXT_LEN);
+    std::transform(ext.begin(), ext.end(), ext.begin(), [](unsigned char c){
+            return static_cast<unsigned char>(std::tolower(c));
+        });
+    CV_Assert(ext == ".xml");
+    return model_path.substr(0u, sz - EXT_LEN) + ".bin";
+}
+
+cv::util::optional<cv::Rect> parse_roi(const std::string &rc) {
+    cv::Rect rv;
+    char delim[3];
+
+    std::stringstream is(rc);
+    is >> rv.x >> delim[0] >> rv.y >> delim[1] >> rv.width >> delim[2] >> rv.height;
+    if (is.fail()) {  // fail() also catches malformed input, unlike bad()
+        return cv::util::optional<cv::Rect>(); // empty value
+    }
+    const auto is_delim = [](char c) {
+        return c == ',';
+    };
+    if (!std::all_of(std::begin(delim), std::end(delim), is_delim)) {
+        return cv::util::optional<cv::Rect>(); // empty value
+    }
+    if (rv.x < 0 || rv.y < 0 || rv.width <= 0 || rv.height <= 0) {
+        return cv::util::optional<cv::Rect>(); // empty value
+    }
+    return cv::util::make_optional(std::move(rv));
+}
+
+} // namespace
+
+namespace custom {
+
+G_API_NET(FaceDetector,   <cv::GMat(cv::GMat)>, "face-detector");
+
+using GDetections = cv::GArray<cv::Rect>;
+using GRect       = cv::GOpaque<cv::Rect>;
+using GSize       = cv::GOpaque<cv::Size>;
+using GPrims      = cv::GArray<cv::gapi::wip::draw::Prim>;
+
+G_API_OP(GetSize, <GSize(cv::GMat)>, "sample.custom.get-size") {
+    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
+        return cv::empty_gopaque_desc();
+    }
+};
+
+G_API_OP(LocateROI, <GRect(cv::GMat)>, "sample.custom.locate-roi") {
+    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
+        return cv::empty_gopaque_desc();
+    }
+};
+
+G_API_OP(ParseSSD, <GDetections(cv::GMat, GRect, GSize)>, "sample.custom.parse-ssd") {
+    static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &, const cv::GOpaqueDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+G_API_OP(BBoxes, <GPrims(GDetections, GRect)>, "sample.custom.b-boxes") {
+    static cv::GArrayDesc outMeta(const cv::GArrayDesc &, const cv::GOpaqueDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+GAPI_OCV_KERNEL(OCVGetSize, GetSize) {
+    static void run(const cv::Mat &in, cv::Size &out) {
+        out = {in.cols, in.rows};
+    }
+};
+
+GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) {
+    // This is the place where we can run extra analytics
+    // on the input image frame and select the ROI (region
+    // of interest) where we want to detect our objects (or
+    // run any other inference).
+    //
+    // Currently it doesn't do anything intelligent,
+    // but only crops the input image to a square (this is
+    // the most convenient aspect ratio for detectors to use).
+
+    static void run(const cv::Mat &in_mat, cv::Rect &out_rect) {
+
+        // Identify the central point & square size (- some padding)
+        const auto center = cv::Point{in_mat.cols/2, in_mat.rows/2};
+        auto sqside = std::min(in_mat.cols, in_mat.rows);
+
+        // Now build the central square ROI
+        out_rect = cv::Rect{ center.x - sqside/2
+                           , center.y - sqside/2
+                           , sqside
+                           , sqside
+                           };
+    }
+};
+
+GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
+    static void run(const cv::Mat &in_ssd_result,
+                    const cv::Rect &in_roi,
+                    const cv::Size &in_parent_size,
+                    std::vector<cv::Rect> &out_objects) {
+        const auto &in_ssd_dims = in_ssd_result.size;
+        CV_Assert(in_ssd_dims.dims() == 4u);
+
+        const int MAX_PROPOSALS = in_ssd_dims[2];
+        const int OBJECT_SIZE   = in_ssd_dims[3];
+        CV_Assert(OBJECT_SIZE  == 7); // fixed SSD object size
+
+        const cv::Size up_roi = in_roi.size();
+        const cv::Rect surface({0,0}, in_parent_size);
+
+        out_objects.clear();
+
+        const float *data = in_ssd_result.ptr<float>();
+        for (int i = 0; i < MAX_PROPOSALS; i++) {
+            const float image_id   = data[i * OBJECT_SIZE + 0];
+            const float label      = data[i * OBJECT_SIZE + 1];
+            const float confidence = data[i * OBJECT_SIZE + 2];
+            const float rc_left    = data[i * OBJECT_SIZE + 3];
+            const float rc_top     = data[i * OBJECT_SIZE + 4];
+            const float rc_right   = data[i * OBJECT_SIZE + 5];
+            const float rc_bottom  = data[i * OBJECT_SIZE + 6];
+            (void) label; // unused
+
+            if (image_id < 0.f) {
+                break;    // marks end-of-detections
+            }
+            if (confidence < 0.5f) {
+                continue; // skip objects with low confidence
+            }
+
+            // map relative coordinates to the original image scale
+            // taking the ROI into account
+            cv::Rect rc;
+            rc.x      = static_cast<int>(rc_left   * up_roi.width);
+            rc.y      = static_cast<int>(rc_top    * up_roi.height);
+            rc.width  = static_cast<int>(rc_right  * up_roi.width)  - rc.x;
+            rc.height = static_cast<int>(rc_bottom * up_roi.height) - rc.y;
+            rc.x += in_roi.x;
+            rc.y += in_roi.y;
+            out_objects.emplace_back(rc & surface);
+        }
+    }
+};
+
+GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
+    // This kernel converts the rectangles into G-API's
+    // rendering primitives
+    static void run(const std::vector<cv::Rect> &in_face_rcs,
+                    const             cv::Rect  &in_roi,
+                          std::vector<cv::gapi::wip::draw::Prim> &out_prims) {
+        out_prims.clear();
+        const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) {
+            return cv::gapi::wip::draw::Rect(rc, clr, 2);
+        };
+        out_prims.emplace_back(cvt(in_roi, CV_RGB(0,255,255))); // cyan
+        for (auto &&rc : in_face_rcs) {
+            out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0)));   // green
+        }
+    }
+};
+
+} // namespace custom
+
+int main(int argc, char *argv[])
+{
+    cv::CommandLineParser cmd(argc, argv, keys);
+    if (cmd.has("help")) {
+        cmd.printMessage();
+        return 0;
+    }
+
+    // Prepare parameters first
+    const std::string input = cmd.get<std::string>("input");
+    const auto opt_roi = parse_roi(cmd.get<std::string>("roi"));
+
+    const auto face_model_path = cmd.get<std::string>("facem");
+    auto face_net = cv::gapi::ie::Params<custom::FaceDetector> {
+        face_model_path,                 // path to topology IR
+        weights_path(face_model_path),   // path to weights
+        cmd.get<std::string>("faced"),   // device specifier
+    };
+    auto kernels = cv::gapi::kernels
+        < custom::OCVGetSize
+        , custom::OCVLocateROI
+        , custom::OCVParseSSD
+        , custom::OCVBBoxes>();
+    auto networks = cv::gapi::networks(face_net);
+
+    // Now build the graph. The graph structure may vary
+    // based on the input parameters
+    cv::GStreamingCompiled pipeline;
+    auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));
+
+    if (opt_roi.has_value()) {
+        // Use the value provided by user
+        std::cout << "Will run inference for static region "
+                  << opt_roi.value()
+                  << " only"
+                  << std::endl;
+        cv::GMat in;
+        cv::GOpaque<cv::Rect> in_roi;
+        auto blob = cv::gapi::infer<custom::FaceDetector>(in_roi, in);
+        auto  rcs = custom::ParseSSD::on(blob, in_roi, custom::GetSize::on(in));
+        auto  out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, in_roi));
+        pipeline  = cv::GComputation(cv::GIn(in, in_roi), cv::GOut(out))
+            .compileStreaming(cv::compile_args(kernels, networks));
+
+        // Since the ROI to detect is manual, make it part of the input vector
+        inputs.push_back(cv::gin(opt_roi.value())[0]);
+    } else {
+        // Automatically detect ROI to infer. Make it output parameter
+        std::cout << "ROI is not set or invalid. Locating it automatically"
+                  << std::endl;
+        cv::GMat in;
+        cv::GOpaque<cv::Rect> roi = custom::LocateROI::on(in);
+        auto blob = cv::gapi::infer<custom::FaceDetector>(roi, in);
+        auto  rcs = custom::ParseSSD::on(blob, roi, custom::GetSize::on(in));
+        auto  out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, roi));
+        pipeline  = cv::GComputation(cv::GIn(in), cv::GOut(out))
+            .compileStreaming(cv::compile_args(kernels, networks));
+    }
+
+    // The execution part
+    pipeline.setSource(std::move(inputs));
+    pipeline.start();
+
+    cv::Mat out;
+    while (pipeline.pull(cv::gout(out))) {
+        cv::imshow("Out", out);
+        cv::waitKey(1);
+    }
+    return 0;
+}
diff --git a/modules/gapi/src/api/gbackend.cpp b/modules/gapi/src/api/gbackend.cpp
index 600e5cc84d..6b8d0fcbee 100644
--- a/modules/gapi/src/api/gbackend.cpp
+++ b/modules/gapi/src/api/gbackend.cpp
@@ -67,6 +67,21 @@ cv::gapi::GKernelPackage cv::gapi::GBackend::Priv::auxiliaryKernels() const
     return {};
 }
 
+bool cv::gapi::GBackend::Priv::controlsMerge() const
+{
+    return false;
+}
+
+bool cv::gapi::GBackend::Priv::allowsMerge(const cv::gimpl::GIslandModel::Graph &,
+                                           const ade::NodeHandle &,
+                                           const ade::NodeHandle &,
+                                           const ade::NodeHandle &) const
+{
+    GAPI_Assert(controlsMerge());
+    return true;
+}
+
+
 // GBackend public implementation //////////////////////////////////////////////
 cv::gapi::GBackend::GBackend()
 {
diff --git a/modules/gapi/src/api/gbackend_priv.hpp b/modules/gapi/src/api/gbackend_priv.hpp
index 13f39acc86..45237514a5 100644
--- a/modules/gapi/src/api/gbackend_priv.hpp
+++ b/modules/gapi/src/api/gbackend_priv.hpp
@@ -19,7 +19,7 @@
 #include "opencv2/gapi/gkernel.hpp"
 
 #include "compiler/gmodel.hpp"
-
+#include "compiler/gislandmodel.hpp"
 
 namespace cv
 {
@@ -68,6 +68,22 @@ public:
 
     virtual cv::gapi::GKernelPackage auxiliaryKernels() const;
 
+    // Ask the backend if it has custom control over the island fusion process.
+    // This method is quite redundant, but nothing better fits
+    // the current fusion process. By default, [existing] backends don't
+    // control the merge.
+    // FIXME: Refactor to a single entity?
+    virtual bool controlsMerge() const;
+
+    // Ask the backend if it is OK to merge these two islands connected
+    // via a data slot. By default, [existing] backends allow merging everything.
+    // FIXME: Refactor to a single entity?
+    // FIXME: Strip down the type details from the graph? (make it ade::Graph?)
+    virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &g,
+                             const ade::NodeHandle &a_nh,
+                             const ade::NodeHandle &slot_nh,
+                             const ade::NodeHandle &b_nh) const;
+
     virtual ~Priv() = default;
 };
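
A backend that wants to control fusion would override both hooks; a
hypothetical sketch (MyBackendPriv is not part of this patch):

```cpp
class MyBackendPriv final : public cv::gapi::GBackend::Priv {
    // Opt in to the fusion decision process...
    virtual bool controlsMerge() const override {
        return true;
    }
    // ...then veto or allow every particular merge individually.
    virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &,
                             const ade::NodeHandle &,
                             const ade::NodeHandle &,
                             const ade::NodeHandle &) const override {
        return false;  // e.g. never fuse islands across this backend
    }
};
```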
 
diff --git a/modules/gapi/src/api/ginfer.cpp b/modules/gapi/src/api/ginfer.cpp
index 20511a4aaf..156f8938c4 100644
--- a/modules/gapi/src/api/ginfer.cpp
+++ b/modules/gapi/src/api/ginfer.cpp
@@ -70,7 +70,10 @@ cv::GMat cv::GInferOutputs::at(const std::string& name)
     auto it = m_priv->out_blobs.find(name);
     if (it == m_priv->out_blobs.end()) {
         // FIXME: Avoid modifying GKernel
+        // Expect the output to always be a GMat...
         m_priv->call->kernel().outShapes.push_back(cv::GShape::GMAT);
+        // ...so an _empty_ constructor is passed here.
+        m_priv->call->kernel().outCtors.emplace_back(cv::util::monostate{});
         int out_idx = static_cast<int>(m_priv->out_blobs.size());
         it = m_priv->out_blobs.emplace(name, m_priv->call->yield(out_idx)).first;
         m_priv->info->out_names.push_back(name);
diff --git a/modules/gapi/src/api/kernels_streaming.cpp b/modules/gapi/src/api/kernels_streaming.cpp
new file mode 100644
index 0000000000..af7bd19dd1
--- /dev/null
+++ b/modules/gapi/src/api/kernels_streaming.cpp
@@ -0,0 +1,74 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "precomp.hpp"
+
+#include <opencv2/gapi/streaming/desync.hpp>
+#include <opencv2/gapi/core.hpp>
+
+cv::GMat cv::gapi::streaming::desync(const cv::GMat &g) {
+    // FIXME: this is a limited implementation of desync
+    // The real implementation must be generic (template) and
+    // reside in desync.hpp (and it is detail::desync<>())
+
+    // FIXME: Put a copy here to solve the below problem
+    // FIXME: Because of the copy, the desync functionality is limited
+    // to GMat only (we don't have generic copy kernel for other
+    // object types)
+    return cv::gapi::copy(detail::desync(g));
+
+    // FIXME
+    //
+    // If consumed by multiple different islands (OCV and Fluid, for
+    // example), an object needs to be desynchronized individually
+    // for every path.
+    //
+    // This is a limitation of the current implementation. It works
+    // this way: every "desync" link from the main path to a new
+    // desync path gets its own "DesyncQueue" object which stores only
+    // the last value written to the desync object (DO) it consumes
+    // (a container of the "last written value", or LWV, type).
+    //
+    //                         LWV
+    // [Sync path] -> desync() - - > DO -> [ISL0 @ Desync path #1]
+    //
+    // At the same time, generally, every island in the streaming
+    // graph gets its individual input as a queue (so normally, a
+    // writer pushes the same output MULTIPLE TIMES if it has multiple
+    // readers):
+    //
+    //                         LWV
+    // [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1]
+    //                       : LWV
+    //                       ' - - > DO2 -> [ISL1 @ Desync path #1]
+    //
+    // For users, it may seem legit to use desync here only once, and
+    // it MUST BE legit once the problem is fixed.
+    // But the problem with the current implementation is that islands
+    // on the same desync path get different desync queues and in fact
+    // stay desynchronized between each other. One shouldn't consider
+    // this as a single desync path anymore.
+    // If these two ISLs are then merged e.g. with add(a,b), the
+    // results will be inconsistent, given that the latency of ISL0
+    // and ISL1 may be different. This is not the same frame anymore
+    // coming as `a` and `b` to add(a,b) because of it.
+    //
+    // To make things clear, we forbid this now and require calling
+    // desync one more time to allow that. It is bad since the graph
+    // structure and island layout depend on the kernel packages used,
+    // not on the GComputation structure alone. This needs to be fixed!
+    // Here's the working configuration:
+    //
+    //                         LWV
+    // [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1]
+    //            :            LWV
+    //            '-> desync() - - > DO2 -> [ISL1 @ Desync path #2] <-(!)
+    //
+    // Putting an operation right after desync() is a quick workaround for
+    // this synchronization problem. There will be one "last_written_value"
+    // connected to a desynchronized data object, and this sole last_written_value
+    // object will feed both branches of the streaming executable.
+}
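
The "last written value" (LWV) container referenced above is added by
this patch as src/executor/last_value.hpp; conceptually it is a
single-slot cell like the sketch below (illustrative only, not the
actual implementation):

```cpp
#include <condition_variable>
#include <mutex>
#include <utility>

#include <opencv2/gapi/util/optional.hpp>

template<typename T> class LastValueSketch {
    std::mutex m_mtx;
    std::condition_variable m_cond;
    cv::util::optional<T> m_slot;
public:
    void push(T &&t) {  // writer never blocks: it simply overwrites the slot
        std::lock_guard<std::mutex> lk(m_mtx);
        m_slot = cv::util::make_optional(std::move(t));
        m_cond.notify_one();
    }
    T pop() {           // reader blocks until at least one value was written
        std::unique_lock<std::mutex> lk(m_mtx);
        m_cond.wait(lk, [this]{ return m_slot.has_value(); });
        return std::move(m_slot.value());
    }
};
```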
diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp
index 08836163a7..c66fa44361 100644
--- a/modules/gapi/src/backends/ie/giebackend.cpp
+++ b/modules/gapi/src/backends/ie/giebackend.cpp
@@ -519,6 +519,65 @@ struct Infer: public cv::detail::KernelTag {
     }
 };
 
+struct InferROI: public cv::detail::KernelTag {
+    using API = cv::GInferROIBase;
+    static cv::gapi::GBackend backend()  { return cv::gapi::ie::backend(); }
+    static KImpl kernel()                { return KImpl{outMeta, run}; }
+
+    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
+                                 const ade::NodeHandle &nh,
+                                 const cv::GMetaArgs   &in_metas,
+                                 const cv::GArgs       &/*in_args*/) {
+        cv::GMetaArgs result;
+
+        GConstGIEModel gm(gr);
+        const auto &uu = gm.metadata(nh).get<IEUnit>();
+
+        // Initialize input information
+        // FIXME: So far it is pretty limited
+        GAPI_Assert(1u == uu.params.input_names.size());
+        GAPI_Assert(2u == in_metas.size());
+
+        // 0th is ROI, 1st is the input image
+        auto       &&ii = uu.inputs.at(uu.params.input_names.at(0));
+        const auto &meta = util::get<cv::GMatDesc>(in_metas.at(1));
+        ii->setPrecision(toIE(meta.depth));
+        ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+
+        // FIXME: It would be nice to have the exact number of the network's
+        // input/output parameters here. Probably GCall should store it for us.
+        // It doesn't, as far as I know.
+        for (const auto &out_name : uu.params.output_names) {
+            // NOTE: our output_names vector follows the API order
+            // of this operation's outputs
+            const IE::DataPtr& ie_out = uu.outputs.at(out_name);
+            const IE::SizeVector dims = ie_out->getTensorDesc().getDims();
+
+            cv::GMatDesc outm(toCV(ie_out->getPrecision()),
+                              toCV(dims));
+            result.emplace_back(outm);
+        }
+        return result;
+    }
+
+    static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
+        // non-generic version for now, per the InferROI's definition
+        GAPI_Assert(uu.params.num_in == 1);
+        const auto& this_roi = ctx.inArg<cv::detail::OpaqueRef>(0).rref<cv::Rect>();
+        const auto  this_mat = ctx.inMat(1);
+        IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
+        IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(this_roi));
+        iec.this_request.SetBlob(*uu.params.input_names.begin(), roi_blob);
+        iec.this_request.Infer();
+        for (auto i : ade::util::iota(uu.params.num_out)) {
+            cv::Mat& out_mat = ctx.outMatR(i);
+            IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
+            copyFromIE(out_blob, out_mat);
+        }
+    }
+};
+
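+// NB: A hedged sketch of how this kernel is reached from the user
+// API (assuming the ROI-based infer overload; `Net` is a hypothetical
+// network type defined via G_API_NET):
+//
+//   cv::GMat in;
+//   cv::GOpaque<cv::Rect> roi;
+//   cv::GMat out = cv::gapi::infer<Net>(roi, in);
+//
+// The 0th input is the ROI and the 1st is the image, matching the
+// order asserted in outMeta()/run() above.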
+
 struct InferList: public cv::detail::KernelTag {
     using API = cv::GInferListBase;
     static cv::gapi::GBackend backend()  { return cv::gapi::ie::backend(); }
@@ -780,6 +839,7 @@ namespace {
 
         virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
             return cv::gapi::kernels< cv::gimpl::ie::Infer
+                                    , cv::gimpl::ie::InferROI
                                     , cv::gimpl::ie::InferList
                                     , cv::gimpl::ie::InferList2
                                     >();
diff --git a/modules/gapi/src/backends/ocl/goclbackend.cpp b/modules/gapi/src/backends/ocl/goclbackend.cpp
index 34dba01afe..847b802fd2 100644
--- a/modules/gapi/src/backends/ocl/goclbackend.cpp
+++ b/modules/gapi/src/backends/ocl/goclbackend.cpp
@@ -272,4 +272,8 @@ void cv::gimpl::GOCLExecutable::run(std::vector<InObj>  &&input_objs,
             GAPI_Assert((out_arg_data == (mag_mat.getMat(ACCESS_RW).data)) && " data for output parameters was reallocated ?");
         }
     }
+
+    // In/Out args clean-up is mandatory now with RMat
+    for (auto &it : input_objs) magazine::unbind(m_res, it.first);
+    for (auto &it : output_objs) magazine::unbind(m_res, it.first);
 }
diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp
index 76c40ddca0..eb75f44e0e 100644
--- a/modules/gapi/src/compiler/gcompiler.cpp
+++ b/modules/gapi/src/compiler/gcompiler.cpp
@@ -238,6 +238,11 @@ cv::gimpl::GCompiler::GCompiler(const cv::GComputation &c,
                                                       // (no compound backend present here)
     m_e.addPass("kernels", "check_islands_content", passes::checkIslandsContent);
 
+    // Special stage for intrinsics handling
+    m_e.addPassStage("intrin");
+    m_e.addPass("intrin", "desync",         passes::intrinDesync);
+    m_e.addPass("intrin", "finalizeIntrin", passes::intrinFinalize);
+
     //Input metas may be empty when a graph is compiled for streaming
     m_e.addPassStage("meta");
     if (!m_metas.empty())
@@ -384,6 +389,9 @@ cv::gimpl::GCompiler::GPtr cv::gimpl::GCompiler::generateGraph()
     {
         GModel::Graph(*g).metadata().set(OriginalInputMeta{m_metas});
     }
+    // FIXME: remove m_args, remove GCompileArgs from backends' method signatures,
+    // rework backends to access GCompileArgs from graph metadata
+    GModel::Graph(*g).metadata().set(CompileArgs{m_args});
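+    // NB: With CompileArgs in the metadata, a backend could (in
+    // principle) query the arguments itself, e.g.:
+    //   const auto &args = GModel::ConstGraph(*g).metadata().get<CompileArgs>().args;
+    // -- a sketch only; backends still take GCompileArgs explicitly for now.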
     return g;
 }
 
diff --git a/modules/gapi/src/compiler/gislandmodel.cpp b/modules/gapi/src/compiler/gislandmodel.cpp
index aee0477e08..9ffc605372 100644
--- a/modules/gapi/src/compiler/gislandmodel.cpp
+++ b/modules/gapi/src/compiler/gislandmodel.cpp
@@ -175,13 +175,26 @@ void GIslandModel::generateInitial(GIslandModel::Graph &g,
         {
             auto src_data_nh = in_edge->srcNode();
             auto isl_slot_nh = data_to_slot.at(src_data_nh);
-            g.link(isl_slot_nh, nh); // no other data stored yet
+            auto isl_new_eh  = g.link(isl_slot_nh, nh); // no other data stored yet
+            // Propagate some special metadata from the GModel to GIslandModel
+            // TODO: Make it a single place (a function) for both inputs/outputs?
+            // (since it is duplicated in the below code block)
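+            // A hedged sketch of such a helper (hypothetical name, same
+            // logic as this block and the one below):
+            //
+            //   static void copyDesyncMeta(const GModel::ConstGraph &src_g,
+            //                              const ade::EdgeHandle    &src_eh,
+            //                              GIslandModel::Graph      &dst_g,
+            //                              const ade::EdgeHandle    &dst_eh) {
+            //       if (src_g.metadata(src_eh).contains<DesyncEdge>()) {
+            //           dst_g.metadata(dst_eh).set(DesyncIslEdge{
+            //               src_g.metadata(src_eh).get<DesyncEdge>().index});
+            //       }
+            //   }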
+            if (src_g.metadata(in_edge).contains<DesyncEdge>())
+            {
+                const auto idx = src_g.metadata(in_edge).get<DesyncEdge>().index;
+                g.metadata(isl_new_eh).set(DesyncIslEdge{idx});
+            }
         }
         for (auto out_edge : src_op_nh->outEdges())
         {
             auto dst_data_nh = out_edge->dstNode();
             auto isl_slot_nh = data_to_slot.at(dst_data_nh);
-            g.link(nh, isl_slot_nh);
+            auto isl_new_eh  = g.link(nh, isl_slot_nh);
+            if (src_g.metadata(out_edge).contains<DesyncEdge>())
+            {
+                const auto idx = src_g.metadata(out_edge).get<DesyncEdge>().index;
+                g.metadata(isl_new_eh).set(DesyncIslEdge{idx});
+            }
         }
     } // for(all_operations)
 }
@@ -254,6 +267,9 @@ void GIslandModel::syncIslandTags(Graph &g, ade::Graph &orig_g)
 void GIslandModel::compileIslands(Graph &g, const ade::Graph &orig_g, const GCompileArgs &args)
 {
     GModel::ConstGraph gm(orig_g);
+    if (gm.metadata().contains<HasIntrinsics>()) {
+        util::throw_error(std::logic_error("FATAL: The graph has unresolved intrinsics"));
+    }
 
     auto original_sorted = gm.metadata().get<ade::passes::TopologicalSortData>();
     for (auto nh : g.nodes())
diff --git a/modules/gapi/src/compiler/gislandmodel.hpp b/modules/gapi/src/compiler/gislandmodel.hpp
index 6cf8f98667..c2e7b96d45 100644
--- a/modules/gapi/src/compiler/gislandmodel.hpp
+++ b/modules/gapi/src/compiler/gislandmodel.hpp
@@ -142,6 +142,14 @@ public:
     // at that stage.
     virtual void handleNewStream() {}; // do nothing here by default
 
+    // This method is called for every IslandExecutable when
+    // the stream-based execution is stopped.
+    // All processing is guaranteed to be stopped by this moment,
+    // with no pending or running 'run()' calls left in the background.
+    // FIXME: This method is tightly bound to the GStreamingExecutor
+    // now.
+    virtual void handleStopStream() {} // do nothing here by default
+
     virtual ~GIslandExecutable() = default;
 };
 
@@ -222,8 +230,19 @@ struct IslandsCompiled
     static const char *name() { return "IslandsCompiled"; }
 };
 
+// This flag marks an edge in a GIslandModel as "desynchronized",
+// i.e. it starts a new desynchronized subgraph
+struct DesyncIslEdge
+{
+    static const char *name() { return "DesynchronizedIslandEdge"; }
+
+    // Projection from GModel/DesyncEdge.index
+    int index;
+};
+
 namespace GIslandModel
 {
+
     using Graph = ade::TypedGraph
         < NodeKind
         , FusedIsland
@@ -232,6 +251,7 @@ namespace GIslandModel
         , Emitter
         , Sink
         , IslandsCompiled
+        , DesyncIslEdge
         , ade::passes::TopologicalSortData
         >;
 
@@ -244,6 +264,7 @@ namespace GIslandModel
         , Emitter
         , Sink
         , IslandsCompiled
+        , DesyncIslEdge
         , ade::passes::TopologicalSortData
         >;
 
diff --git a/modules/gapi/src/compiler/gmodel.cpp b/modules/gapi/src/compiler/gmodel.cpp
index b5b76fd1c9..ea4eb880a4 100644
--- a/modules/gapi/src/compiler/gmodel.cpp
+++ b/modules/gapi/src/compiler/gmodel.cpp
@@ -77,7 +77,7 @@ ade::NodeHandle GModel::mkDataNode(GModel::Graph &g, const GShape shape)
     return data_h;
 }
 
-void GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t in_port)
+ade::EdgeHandle GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t in_port)
 {
     // Check if input is already connected
     for (const auto& in_e : opH->inEdges())
@@ -96,9 +96,11 @@ void GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::si
 
     // Replace an API object with a REF (G* -> GOBJREF)
     op.args[in_port] = cv::GArg(RcDesc{gm.rc, gm.shape, {}});
+
+    return eh;
 }
 
-void GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t out_port)
+ade::EdgeHandle GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t out_port)
 {
     // FIXME: check validity using kernel prototype
 
@@ -121,6 +123,8 @@ void GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::s
     const auto min_out_size = std::max(op.outs.size(), storage_with_port);
     op.outs.resize(min_out_size, RcDesc{-1,GShape::GMAT,{}}); // FIXME: Invalid shape instead?
     op.outs[out_port] = RcDesc{gm.rc, gm.shape, {}};
+
+    return eh;
 }
 
 std::vector<ade::NodeHandle> GModel::orderedInputs(const ConstGraph &g, ade::NodeHandle nh)
@@ -210,26 +214,29 @@ ade::NodeHandle GModel::detail::dataNodeOf(const ConstLayoutGraph &g, const GOri
     return g.metadata().get<Layout>().object_nodes.at(origin);
 }
 
-void GModel::redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to)
+std::vector<ade::EdgeHandle> GModel::redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to)
 {
     std::vector<ade::EdgeHandle> ehh(from->outEdges().begin(), from->outEdges().end());
+    std::vector<ade::EdgeHandle> ohh;
+    ohh.reserve(ehh.size());
     for (auto e : ehh)
     {
         auto dst = e->dstNode();
         auto input = g.metadata(e).get<Input>();
         g.erase(e);
-        linkIn(g, dst, to, input.port);
+        ohh.push_back(linkIn(g, dst, to, input.port));
     }
+    return ohh;
 }
 
-void GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to)
+ade::EdgeHandle GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to)
 {
     GAPI_Assert(from->inEdges().size() == 1);
     auto e = from->inEdges().front();
     auto op = e->srcNode();
     auto output = g.metadata(e).get<Output>();
     g.erase(e);
-    linkOut(g, op, to, output.port);
+    return linkOut(g, op, to, output.port);
 }
 
 GMetaArgs GModel::collectInputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node)
diff --git a/modules/gapi/src/compiler/gmodel.hpp b/modules/gapi/src/compiler/gmodel.hpp
index 5f02e58354..d016766fb5 100644
--- a/modules/gapi/src/compiler/gmodel.hpp
+++ b/modules/gapi/src/compiler/gmodel.hpp
@@ -211,6 +211,58 @@ struct CustomMetaFunction
     CM customOutMeta;
 };
 
+// This is a general flag indicating that this GModel has intrinsics.
+// In the beginning of the compilation, it is a quick check to
+// indicate there are intrinsics.
+//
+// In the end of the compilation, having this flag is fatal -- all
+// intrinsics must be resolved.
+struct HasIntrinsics
+{
+    static const char *name() { return "HasIntrinsicsFlag"; }
+};
+
+// This is a special tag for both DATA and OP nodes indicating
+// which desynchronized path this node belongs to.
+// This tag is set by a special complex pass intrinDesync/accept.
+struct DesyncPath
+{
+    static const char *name() { return "DesynchronizedPath"; }
+
+    // A zero-based index of the desynchronized path in the graph.
+    // Set by intrinDesync() compiler pass
+    int index;
+};
+
+// This is a special tag for graph Edges indicating that this
+// particular edge starts a desynchronized path in the graph.
+// At the execution stage, the data coming "through" these edges
+// (virtually, of course, since our GModel edges never transfer the
+// actual data, they just represent these transfers) is desynchronized
+// from the rest of the pipeline, i.e. may be "lost" (stay unconsumed
+// and then overwritten with some new data when streaming).
+struct DesyncEdge
+{
+    static const char *name() { return "DesynchronizedEdge"; }
+
+    // A zero-based index of the desynchronized path in the graph.
+    // Set by intrinDesync/apply() compiler pass
+    int index;
+};
+
+// This flag marks the island graph as "desynchronized"
+struct Desynchronized
+{
+    static const char *name() { return "Desynchronized"; }
+};
+
+// Reference to compile args of the computation
+struct CompileArgs
+{
+    static const char *name() { return "CompileArgs"; }
+    GCompileArgs args;
+};
+
 namespace GModel
 {
     using Graph = ade::TypedGraph
@@ -232,6 +284,11 @@ namespace GModel
         , CustomMetaFunction
         , Streaming
         , Deserialized
+        , HasIntrinsics
+        , DesyncPath
+        , DesyncEdge
+        , Desynchronized
+        , CompileArgs
         >;
 
     // FIXME: How to define it based on GModel???
@@ -254,6 +311,11 @@ namespace GModel
         , CustomMetaFunction
         , Streaming
         , Deserialized
+        , HasIntrinsics
+        , DesyncPath
+        , DesyncEdge
+        , Desynchronized
+        , CompileArgs
         >;
 
     // FIXME:
@@ -278,11 +340,11 @@ namespace GModel
     // Clears logged messages of a node.
     GAPI_EXPORTS void log_clear(Graph &g, ade::NodeHandle node);
 
-    GAPI_EXPORTS void linkIn   (Graph &g, ade::NodeHandle op,     ade::NodeHandle obj, std::size_t in_port);
-    GAPI_EXPORTS void linkOut  (Graph &g, ade::NodeHandle op,     ade::NodeHandle obj, std::size_t out_port);
+    GAPI_EXPORTS ade::EdgeHandle linkIn   (Graph &g, ade::NodeHandle op,     ade::NodeHandle obj, std::size_t in_port);
+    GAPI_EXPORTS ade::EdgeHandle linkOut  (Graph &g, ade::NodeHandle op,     ade::NodeHandle obj, std::size_t out_port);
 
-    GAPI_EXPORTS void redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to);
-    GAPI_EXPORTS void redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to);
+    GAPI_EXPORTS std::vector<ade::EdgeHandle> redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to);
+    GAPI_EXPORTS             ade::EdgeHandle  redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to);
 
     GAPI_EXPORTS std::vector<ade::NodeHandle> orderedInputs (const ConstGraph &g, ade::NodeHandle nh);
     GAPI_EXPORTS std::vector<ade::NodeHandle> orderedOutputs(const ConstGraph &g, ade::NodeHandle nh);
diff --git a/modules/gapi/src/compiler/gmodelbuilder.cpp b/modules/gapi/src/compiler/gmodelbuilder.cpp
index 80abadd9c6..5f8f3518fc 100644
--- a/modules/gapi/src/compiler/gmodelbuilder.cpp
+++ b/modules/gapi/src/compiler/gmodelbuilder.cpp
@@ -134,12 +134,19 @@ cv::gimpl::Unrolled cv::gimpl::unrollExpr(const GProtoArgs &ins,
 
                 // Put the outputs object description of the node
                 // so that they are not lost if they are not consumed by other operations
+                GAPI_Assert(call_p.m_k.outCtors.size() == call_p.m_k.outShapes.size());
                 for (const auto &it : ade::util::indexed(call_p.m_k.outShapes))
                 {
                     std::size_t port  = ade::util::index(it);
                     GShape shape      = ade::util::value(it);
 
-                    GOrigin org { shape, node, port, {}, origin.kind };
+                    // FIXME: eventually use ade::util::zip here instead of indexing
+                    HostCtor ctor     = call_p.m_k.outCtors[port];
+
+                    // NB: Probably this fixes all other "missing host ctor"
+                    // problems.
+                    // TODO: Clean up the old workarounds if it really does.
+                    GOrigin org {shape, node, port, std::move(ctor), origin.kind};
                     origins.insert(org);
                 }
 
diff --git a/modules/gapi/src/compiler/gobjref.hpp b/modules/gapi/src/compiler/gobjref.hpp
index dd0939c439..bca6fa525e 100644
--- a/modules/gapi/src/compiler/gobjref.hpp
+++ b/modules/gapi/src/compiler/gobjref.hpp
@@ -16,15 +16,9 @@ namespace cv
 
 namespace gimpl
 {
-    // Union type for various user-defined type constructors (GArray<T>, GOpaque<T>, etc)
-    // FIXME: Replace construct-only API with a more generic one
-    //    (probably with bits of introspection)
-    // Not required for non-user-defined types (GMat, GScalar, etc)
-    using HostCtor = util::variant
-    < util::monostate
-    , detail::ConstructVec
-    , detail::ConstructOpaque
-    >;
+    // HostCtor used to be defined here, but was then moved to the public API.
+    // Redeclare the alias here to avoid changing tons of code.
+    using HostCtor = cv::detail::HostCtor;
 
     using ConstVal = util::variant
     < util::monostate
diff --git a/modules/gapi/src/compiler/gstreaming.cpp b/modules/gapi/src/compiler/gstreaming.cpp
index 29c98ddfd4..eb06f3f6f2 100644
--- a/modules/gapi/src/compiler/gstreaming.cpp
+++ b/modules/gapi/src/compiler/gstreaming.cpp
@@ -69,6 +69,11 @@ bool cv::GStreamingCompiled::Priv::pull(cv::GRunArgsP &&outs)
     return m_exec->pull(std::move(outs));
 }
 
+bool cv::GStreamingCompiled::Priv::pull(cv::GOptRunArgsP &&outs)
+{
+    return m_exec->pull(std::move(outs));
+}
+
 bool cv::GStreamingCompiled::Priv::try_pull(cv::GRunArgsP &&outs)
 {
     return m_exec->try_pull(std::move(outs));
@@ -113,6 +118,7 @@ bool cv::GStreamingCompiled::pull(cv::GRunArgsP &&outs)
 
 std::tuple<bool, cv::GRunArgs> cv::GStreamingCompiled::pull()
 {
+    // FIXME: Why is it not implemented in Priv?
     GRunArgs run_args;
     GRunArgsP outs;
     const auto& out_shapes = m_priv->outShapes();
@@ -144,6 +150,11 @@ std::tuple<bool, cv::GRunArgs> cv::GStreamingCompiled::pull()
     return std::make_tuple(is_over, run_args);
 }
 
+bool cv::GStreamingCompiled::pull(cv::GOptRunArgsP &&outs)
+{
+    return m_priv->pull(std::move(outs));
+}
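+
+// NB: A hedged usage sketch of the desynchronized pull (assuming the
+// cv::gout() overload for cv::optional<> outputs that accompanies this
+// API; `cc` is a started GStreamingCompiled):
+//
+//   cv::optional<cv::Mat> out;
+//   while (cc.pull(cv::gout(out))) {
+//       if (out.has_value()) { /* a fresh frame for this desync path */ }
+//       else { /* nothing new on this path in this iteration */ }
+//   }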
+
 bool cv::GStreamingCompiled::try_pull(cv::GRunArgsP &&outs)
 {
     return m_priv->try_pull(std::move(outs));
diff --git a/modules/gapi/src/compiler/gstreaming_priv.hpp b/modules/gapi/src/compiler/gstreaming_priv.hpp
index 73ca002f85..2f195ca226 100644
--- a/modules/gapi/src/compiler/gstreaming_priv.hpp
+++ b/modules/gapi/src/compiler/gstreaming_priv.hpp
@@ -42,6 +42,7 @@ public:
     void setSource(GRunArgs &&args);
     void start();
     bool pull(cv::GRunArgsP &&outs);
+    bool pull(cv::GOptRunArgsP &&outs);
     bool try_pull(cv::GRunArgsP &&outs);
     void stop();
 
diff --git a/modules/gapi/src/compiler/passes/exec.cpp b/modules/gapi/src/compiler/passes/exec.cpp
index 0eb8352b76..f6a73489eb 100644
--- a/modules/gapi/src/compiler/passes/exec.cpp
+++ b/modules/gapi/src/compiler/passes/exec.cpp
@@ -20,6 +20,7 @@
 #include <opencv2/gapi/util/optional.hpp>  // util::optional
 #include "logger.hpp"    // GAPI_LOG
 
+#include "api/gbackend_priv.hpp" // for canMerge()
 #include "compiler/gmodel.hpp"
 #include "compiler/gislandmodel.hpp"
 #include "compiler/passes/passes.hpp"
@@ -54,11 +55,28 @@ namespace
         // Also check the cases backend can't handle
         // (e.x. GScalar connecting two fluid ops should split the graph)
         const GModel::ConstGraph g(src_graph);
+        if (g.metadata().contains<Desynchronized>()) {
+            // Fusion of a graph having a desynchronized path is
+            // definitely non-trivial
+            return false;
+        }
         const auto& active_backends = g.metadata().get<ActiveBackends>().backends;
-        return active_backends.size() == 1 &&
-                ade::util::all_of(g.nodes(), [&](ade::NodeHandle nh) {
-            return !g.metadata(nh).contains<Island>();
-        });
+        if (active_backends.size() != 1u) {
+            // More than 1 backend involved - non-trivial
+            return false;
+        }
+        const auto has_island_tags = [&](ade::NodeHandle nh) {
+            return g.metadata(nh).contains<Island>();
+        };
+        if (ade::util::any_of(g.nodes(), has_island_tags)) {
+            // There are user-defined islands - non-trivial
+            return false;
+        }
+        if (active_backends.begin()->priv().controlsMerge()) {
+            // If the only backend controls Island Fusion on its own - non-trivial
+            return false;
+        }
+        return true;
     }
 
     void fuseTrivial(GIslandModel::Graph &g, const ade::Graph &src_graph)
@@ -125,9 +143,9 @@ namespace
     };
 
     bool canMerge(const GIslandModel::Graph &g,
-                  const ade::NodeHandle a_nh,
-                  const ade::NodeHandle /*slot_nh*/,
-                  const ade::NodeHandle b_nh,
+                  const ade::NodeHandle &a_nh,
+                  const ade::NodeHandle &slot_nh,
+                  const ade::NodeHandle &b_nh,
                   const MergeContext &ctx = MergeContext())
     {
         auto a_ptr = g.metadata(a_nh).get<FusedIsland>().object;
@@ -142,8 +160,8 @@ namespace
         // Islands which cause a cycle can't be merged as well
         // (since the flag is set, the procedure already tried to
         // merge these islands in the past)
-        if (ade::util::contains(ctx.cycle_causers, std::make_pair(a_ptr, b_ptr))||
-            ade::util::contains(ctx.cycle_causers, std::make_pair(b_ptr, a_ptr)))
+        if (   ade::util::contains(ctx.cycle_causers, std::make_pair(a_ptr, b_ptr))
+            || ade::util::contains(ctx.cycle_causers, std::make_pair(b_ptr, a_ptr)))
             return false;
 
         // There may be user-defined islands. Initially user-defined
@@ -163,7 +181,13 @@ namespace
                 return false;
         }
 
-        // FIXME: add a backend-specified merge checker
+        // If available, run the backend-specified merge checker
+        const auto &this_backend_p = a_ptr->backend().priv();
+        if (    this_backend_p.controlsMerge()
+            && !this_backend_p.allowsMerge(g, a_nh, slot_nh, b_nh))
+        {
+            return false;
+        }
         return true;
     }
 
@@ -205,10 +229,31 @@ namespace
     {
         using namespace std::placeholders;
 
+        // Before checking for candidates, find and ban neighbor nodes
+        // (input or outputs) which are connected via desynchronized
+        // edges.
+        GIsland::node_set nodes_with_desync_edges;
+        for (const auto& in_eh : nh->inEdges()) {
+            if (g.metadata(in_eh).contains<DesyncIslEdge>()) {
+                nodes_with_desync_edges.insert(in_eh->srcNode());
+            }
+        }
+        for (const auto& output_data_nh : nh->outNodes()) {
+            for (const auto &out_reader_eh : output_data_nh->outEdges()) {
+                if (g.metadata(out_reader_eh).contains<DesyncIslEdge>()) {
+                    nodes_with_desync_edges.insert(out_reader_eh->dstNode());
+                }
+            }
+        }
+
         // Find a first matching candidate GIsland for merge
         // among inputs
-        for (const auto& input_data_nh : nh->inNodes())
+        for (const auto& in_eh : nh->inEdges())
         {
+            if (ade::util::contains(nodes_with_desync_edges, in_eh->srcNode())) {
+                continue; // desync edges can never be fused
+            }
+            const auto& input_data_nh = in_eh->srcNode();
             if (input_data_nh->inNodes().size() != 0)
             {
                 // Data node must have a single producer only
@@ -224,14 +269,17 @@ namespace
         // Ok, now try to find it among the outputs
         for (const auto& output_data_nh : nh->outNodes())
         {
-            auto mergeTest = [&](ade::NodeHandle cons_nh) -> bool {
-                return canMerge(g, nh, output_data_nh, cons_nh, ctx);
+            auto mergeTest = [&](ade::EdgeHandle cons_eh) -> bool {
+                if (ade::util::contains(nodes_with_desync_edges, cons_eh->dstNode())) {
+                    return false;  // desync edges can never be fused
+                }
+                return canMerge(g, nh, output_data_nh, cons_eh->dstNode(), ctx);
             };
-            auto cand_it = std::find_if(output_data_nh->outNodes().begin(),
-                                        output_data_nh->outNodes().end(),
+            auto cand_it = std::find_if(output_data_nh->outEdges().begin(),
+                                        output_data_nh->outEdges().end(),
                                         mergeTest);
-            if (cand_it != output_data_nh->outNodes().end())
-                return std::make_tuple(*cand_it,
+            if (cand_it != output_data_nh->outEdges().end())
+                return std::make_tuple((*cand_it)->dstNode(),
                                        output_data_nh,
                                        Direction::Out);
         } // for(outNodes)
@@ -251,6 +299,7 @@ namespace
         ade::NodeHandle m_slot;
         ade::NodeHandle m_cons;
 
+        using Change = ChangeT<DesyncIslEdge>;
         Change::List m_changes;
 
         struct MergeObjects
@@ -423,10 +472,10 @@ namespace
         auto backend = m_gim.metadata(m_prod).get<FusedIsland>()
             .object->backend();
         auto merged = std::make_shared<GIsland>(backend,
-                                                           std::move(mo.all),
-                                                           std::move(mo.in_ops),
-                                                           std::move(mo.out_ops),
-                                                           std::move(maybe_user_tag));
+                                                std::move(mo.all),
+                                                std::move(mo.in_ops),
+                                                std::move(mo.out_ops),
+                                                std::move(maybe_user_tag));
         // FIXME: move this debugging to some user-controllable log-level
 #ifdef DEBUG_MERGE
         merged->debug();
@@ -440,7 +489,9 @@ namespace
                                                  m_prod->inEdges().end());
         for (auto in_edge : input_edges)
         {
-            m_changes.enqueue<Change::NewLink>(m_g, in_edge->srcNode(), new_nh);
+            // FIXME: Introduce a Relink primitive instead?
+            // (combining both actions into one?)
+            m_changes.enqueue<Change::NewLink>(m_g, in_edge->srcNode(), new_nh, in_edge);
             m_changes.enqueue<Change::DropLink>(m_g, m_prod, in_edge);
         }
 
@@ -450,7 +501,7 @@ namespace
                                                   m_cons->outEdges().end());
         for (auto out_edge : output_edges)
         {
-            m_changes.enqueue<Change::NewLink>(m_g, new_nh, out_edge->dstNode());
+            m_changes.enqueue<Change::NewLink>(m_g, new_nh, out_edge->dstNode(), out_edge);
             m_changes.enqueue<Change::DropLink>(m_g, m_cons, out_edge);
         }
 
@@ -491,6 +542,10 @@ namespace
                     m_changes.enqueue<Change::DropLink>(m_g, non_opt_slot_nh, eh);
                 }
             }
+            // FIXME: No metadata copied here (from where??)
+            // For DesyncIslEdges it still works, as these tags are
+            // placed on Data->Op edges and this one is an Op->Data
+            // edge.
             m_changes.enqueue<Change::NewLink>(m_g, new_nh, non_opt_slot_nh);
         }
 
@@ -502,7 +557,7 @@ namespace
              m_prod->outEdges().end());
         for (auto extra_out : prod_extra_out_edges)
         {
-            m_changes.enqueue<Change::NewLink>(m_g, new_nh, extra_out->dstNode());
+            m_changes.enqueue<Change::NewLink>(m_g, new_nh, extra_out->dstNode(), extra_out);
             m_changes.enqueue<Change::DropLink>(m_g, m_prod, extra_out);
         }
 
@@ -514,7 +569,7 @@ namespace
              m_cons->inEdges().end());
         for (auto extra_in : cons_extra_in_edges)
         {
-            m_changes.enqueue<Change::NewLink>(m_g, extra_in->srcNode(), new_nh);
+            m_changes.enqueue<Change::NewLink>(m_g, extra_in->srcNode(), new_nh, extra_in);
             m_changes.enqueue<Change::DropLink>(m_g, m_cons, extra_in);
         }
 
@@ -557,10 +612,10 @@ namespace
             there_was_a_merge = false;
 
             // FIXME: move this debugging to some user-controllable log level
-    #ifdef DEBUG_MERGE
+#ifdef DEBUG_MERGE
             GAPI_LOG_INFO(NULL, "Before next merge attempt " << iteration << "...");
             merge_debug(g, iteration);
-    #endif
+#endif
             iteration++;
             auto sorted = pass_helpers::topoSort(im);
             for (auto nh : sorted)
@@ -600,9 +655,9 @@ namespace
                                           "merge(" << l_obj->name() << "," << r_obj->name() <<
                                           ") was successful!");
                             action.commit();
-    #ifdef DEBUG_MERGE
+#ifdef DEBUG_MERGE
                             GIslandModel::syncIslandTags(gim, g);
-    #endif
+#endif
                             there_was_a_merge = true;
                             break; // start do{}while from the beginning
                         }
diff --git a/modules/gapi/src/compiler/passes/intrin.cpp b/modules/gapi/src/compiler/passes/intrin.cpp
new file mode 100644
index 0000000000..5d2707570a
--- /dev/null
+++ b/modules/gapi/src/compiler/passes/intrin.cpp
@@ -0,0 +1,305 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+
+#include "precomp.hpp"
+
+#include <ade/util/algorithm.hpp>
+#include <ade/util/zip_range.hpp>
+#include <opencv2/gapi/streaming/desync.hpp> // GDesync intrinsic
+
+#include "compiler/gmodel.hpp"
+#include "compiler/passes/passes.hpp"
+
+namespace desync {
+namespace {
+
+// Drop the desynchronized node `nh` from the graph and reconnect the
+// graph structure properly. This is a helper function which is used
+// in both drop(g) and apply(g) passes.
+//
+// @return a vector of new edge handles connecting the "main" graph
+// with its desynchronized part.
+std::vector<ade::EdgeHandle> drop(cv::gimpl::GModel::Graph &g,
+                                  ade::NodeHandle nh) {
+    using namespace cv::gimpl;
+
+    // What we need to do here:
+    // 1. Connect the readers of its produced data objects
+    //    to the input data objects of desync;
+    // 2. Drop the data objects it produces;
+    // 3. Drop the desync operation itself.
+    std::vector<ade::NodeHandle> in_data_objs = GModel::orderedInputs(g, nh);
+    std::vector<ade::NodeHandle> out_data_objs = GModel::orderedOutputs(g, nh);
+    std::vector<ade::EdgeHandle> new_links;
+    GAPI_Assert(in_data_objs.size() == out_data_objs.size());
+    GAPI_DbgAssert(ade::util::all_of
+                   (out_data_objs,
+                    [&](const ade::NodeHandle &oh) {
+                       return g.metadata(oh).contains<Data>();
+                   }));
+    // (1)
+    for (auto &&it: ade::util::zip(ade::util::toRange(in_data_objs),
+                                   ade::util::toRange(out_data_objs))) {
+        auto these_new_links = GModel::redirectReaders(g,
+                                                       std::get<1>(it),
+                                                       std::get<0>(it));
+        new_links.insert(new_links.end(),
+                         these_new_links.begin(),
+                         these_new_links.end());
+    }
+    // (2)
+    for (auto &&old_out_nh : out_data_objs) {
+        g.erase(old_out_nh);
+    }
+    // (3)
+    g.erase(nh);
+
+    return new_links;
+}
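+
+// A hedged before/after sketch of what drop() does for a one-input
+// desync (data/op names are illustrative):
+//
+//   before:  ... -> GMat_0 -> desync() -> GMat_0' -> Op1()
+//   after:   ... -> GMat_0 ------------------------> Op1()
+//
+// The new GMat_0 -> Op1() edges are the ones returned to the caller
+// (apply() below tags them as DesyncEdge).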
+
+// Tracing a desynchronizing subgraph is somewhat tricky and happens
+// in both directions: downwards and upwards.
+//
+// The downward process is the basic one: we start with a "desync"
+// OP node and go down to the graph using the "output" edges. We check
+// if all nodes on this path [can] belong to this desynchronized path
+// and don't overlap with others.
+//
+// An important contract to maintain is that the desynchronized part
+// can't have any input references from the "main" graph part or any
+// other desynchronized part in the graph. This contract is validated
+// by checking every node's input which must belong to the same
+// desynchronized part.
+//
+// Here is the pitfall of this check:
+//
+//       v
+//     GMat_0
+//       v
+//   +----------+
+//   | desync() |      <- This point originates the traceDown process
+//   +----------+
+//       v
+//     GMat_0'         <- This node will be tagged for this desync at
+//       :--------.       step 0/1
+//       v        :    <- The order in which output nodes are visited is
+//   +----------+ :       not specified; we can visit Op2() first (as there
+//   | Op1()    | :       is a direct link) bypassing visiting and tagging
+//   +----------+ :       Op1() and GMat_1
+//       v        :
+//     GMat_1     :
+//       :    .---'
+//       v    v        <- When we visit Op2() via the 2nd edge on this
+//   +----------+         graph, we check if all inputs belong to the same
+//   | Op2()    |         desynchronized graph and GMat_1 fails this check
+//   +----------+         (since the traceDown() process hasn't visited
+//                        it yet).
+//
+// Cases like this originate the traceUp() process: if we find an
+// input node in our desynchronized path which doesn't belong to this
+// path YET, it is not 100% a problem, and we need to trace it back
+// (upwards) to see if it is really a case.
+
+// This recursive function checks the desync_id in the graph upwards.
+// The process stops at nodes which already have a valid desync_id and
+// only continues for nodes which have no desync_id assigned yet.
+// If there are no such nodes anymore, the procedure is considered
+// complete and a list of nodes to tag is returned to the caller.
+//
+// If NO inputs of this node have a valid desync_id, the desync
+// invariant is broken and the function throws.
+void traceUp(cv::gimpl::GModel::Graph &g,
+             const ade::NodeHandle &nh,
+             int desync_id,
+             std::vector<ade::NodeHandle> &path) {
+    using namespace cv::gimpl;
+
+    GAPI_Assert(!nh->inNodes().empty()
+                && "traceUp: a desynchronized part of the graph is not isolated?");
+
+    if (g.metadata(nh).contains<DesyncPath>()) {
+        // We may face nodes which have DesyncPath set already (e.g.
+        // visited during this recursive process via some other output
+        // or branch in the subgraph)
+        if (g.metadata(nh).get<DesyncPath>().index != desync_id) {
+            GAPI_Assert(false && "Desynchronization can't be nested!");
+        }
+        return; // This object belongs to the desync path - exit early.
+    }
+
+    // Regardless of the result, put this nh to the path
+    path.push_back(nh);
+
+    // Check if the input nodes are OK
+    std::vector<ade::NodeHandle> nodes_to_trace;
+    nodes_to_trace.reserve(nh->inNodes().size());
+    for (auto &&in_nh : nh->inNodes()) {
+        if (g.metadata(in_nh).contains<DesyncPath>()) {
+            // We may face nodes which have DesyncPath set already (e.g.
+            // visited during this recursive process via some other output
+            // or branch in the subgraph)
+            GAPI_Assert(g.metadata(in_nh).get<DesyncPath>().index == desync_id
+                        && "Desynchronization can't be nested!");
+        } else {
+            nodes_to_trace.push_back(in_nh);
+        }
+    }
+
+    // If there are nodes to trace, continue the recursion
+    for (auto &&up_nh : nodes_to_trace) {
+        traceUp(g, up_nh, desync_id, path);
+    }
+}
+
+// This recursive function propagates the desync_id down to the graph
+// starting at nh, and also checks:
+// - if this desync path is isolated;
+// - if this desync path is not overlapped.
+// It also originates the traceUp() process at the points of
+// uncertainty (as described in the comment above).
+void traceDown(cv::gimpl::GModel::Graph &g,
+               const ade::NodeHandle &nh,
+               int desync_id) {
+    using namespace cv::gimpl;
+
+    if (g.metadata(nh).contains<DesyncPath>()) {
+        // We may face nodes which have DesyncPath set already (e.g.
+        // visited during this recursive process via some other output
+        // or branch in the subgraph)
+        GAPI_Assert(g.metadata(nh).get<DesyncPath>().index == desync_id
+                    && "Desynchronization can't be nested!");
+    } else {
+        g.metadata(nh).set(DesyncPath{desync_id});
+    }
+
+    // All inputs of this data object must belong to the same
+    // desync path.
+    for (auto &&in_nh : nh->inNodes()) {
+        // If an input object is not assigned to this desync path,
+        // it does not mean that the object doesn't belong to
+        // this path. Check it.
+        std::vector<ade::NodeHandle> path_up;
+        traceUp(g, in_nh, desync_id, path_up);
+        // We get here on success. Just set the proper tags for
+        // the identified input path.
+        for (auto &&up_nh : path_up) {
+            g.metadata(up_nh).set(DesyncPath{desync_id});
+        }
+    }
+
+    // Propagate the tag & check down
+    for (auto &&out_nh : nh->outNodes()) {
+        traceDown(g, out_nh, desync_id);
+    }
+}
+
+// Streaming case: ensure the graph has proper isolation of the
+// desynchronized parts, set proper Edge metadata hints for
+// GStreamingExecutable
+void apply(cv::gimpl::GModel::Graph &g) {
+    using namespace cv::gimpl;
+
+    // Stage 0. Trace down the desync operations in the graph.
+    // Tag them with their unique (per graph) identifiers.
+    int total_desync = 0;
+    for (auto &&nh : g.nodes()) {
+        if (g.metadata(nh).get<NodeType>().t == NodeType::OP) {
+            const auto &op = g.metadata(nh).get<Op>();
+            if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) {
+                GAPI_Assert(!g.metadata(nh).contains<DesyncPath>()
+                            && "Desynchronization can't be nested!");
+                const int this_desync_id = total_desync++;
+                g.metadata(nh).set(DesyncPath{this_desync_id});
+                for (auto &&out_nh: nh->outNodes()) {
+                    traceDown(g, out_nh, this_desync_id);
+                }
+            } // if (desync)
+        } // if(OP)
+    } // for(nodes)
+
+    // Tracing is done for all desync ops in the graph now.
+    // Stage 1. Drop the desync operations from the graph, but mark
+    // the desynchronized edges in a special way.
+    // A desynchronized edge is an edge which connects a main-subgraph
+    // data object with a desynchronized-subgraph one.
+    std::vector<ade::NodeHandle> nodes(g.nodes().begin(), g.nodes().end());
+    for (auto &&nh : nodes) {
+        if (nh == nullptr) {
+            // Some nodes could have been dropped already during the procedure;
+            // thanks to ADE, their NodeHandles are updated (nullified) automatically
+            continue;
+        }
+        if (g.metadata(nh).get<NodeType>().t == NodeType::OP) {
+            const auto &op = g.metadata(nh).get<Op>();
+            if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) {
+                auto index = g.metadata(nh).get<DesyncPath>().index;
+                auto new_links = drop(g, nh);
+                for (auto &&eh : new_links) {
+                    g.metadata(eh).set(DesyncEdge{index});
+                }
+            } // if (desync)
+        } // if (Op)
+    } // for(nodes)
+
+    // Stage 2. Put the Desynchronized tag on the graph if changes were applied
+    if (total_desync > 0) {
+        g.metadata().set(Desynchronized{});
+    }
+}
+
+// Probably the simplest case: desync makes no sense in the regular
+// compilation process, so just drop all its occurrences in the graph,
+// reconnecting nodes properly.
+void drop(cv::gimpl::GModel::Graph &g) {
+    // FIXME: LOG here that we're dropping the desync operations as
+    // they make no sense when compiling in the regular mode.
+    using namespace cv::gimpl;
+    std::vector<ade::NodeHandle> nodes(g.nodes().begin(), g.nodes().end());
+    for (auto &&nh : nodes) {
+        if (nh == nullptr) {
+            // Some nodes could have been dropped already during the procedure;
+            // thanks to ADE, their NodeHandles are updated (nullified) automatically
+            continue;
+        }
+        if (g.metadata(nh).get<NodeType>().t == NodeType::OP) {
+            const auto &op = g.metadata(nh).get<Op>();
+            if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) {
+                drop(g, nh);
+            } // if (desync)
+        } // if (Op)
+    } // for(nodes)
+}
+
+} // anonymous namespace
+} // namespace desync
+
+void cv::gimpl::passes::intrinDesync(ade::passes::PassContext &ctx) {
+    GModel::Graph gr(ctx.graph);
+    if (!gr.metadata().contains<HasIntrinsics>())
+        return;
+
+    gr.metadata().contains<Streaming>()
+        ? desync::apply(gr) // Streaming compilation
+        : desync::drop(gr); // Regular compilation
+}
+
+// Clears the HasIntrinsics flag if all intrinsics have been handled.
+void cv::gimpl::passes::intrinFinalize(ade::passes::PassContext &ctx) {
+    GModel::Graph gr(ctx.graph);
+    for (auto &&nh : gr.nodes()) {
+        if (gr.metadata(nh).get<NodeType>().t == NodeType::OP) {
+            const auto &op = gr.metadata(nh).get<Op>();
+            if (is_intrinsic(op.k.name)) {
+                return;
+            }
+        }
+    }
+    // If reached here, really clear the flag
+    gr.metadata().erase<HasIntrinsics>();
+}
diff --git a/modules/gapi/src/compiler/passes/kernels.cpp b/modules/gapi/src/compiler/passes/kernels.cpp
index 69b339fb1e..100a32ec57 100644
--- a/modules/gapi/src/compiler/passes/kernels.cpp
+++ b/modules/gapi/src/compiler/passes/kernels.cpp
@@ -14,6 +14,7 @@
 #include <opencv2/gapi/gcompoundkernel.hpp> // compound::backend()
 #include <opencv2/gapi/gkernel.hpp>         // GKernelPackage
 #include <opencv2/gapi/infer.hpp>           // GNetPackage
+#include <opencv2/gapi/streaming/desync.hpp> // GDesync intrinsic
 
 #include "compiler/gmodel.hpp"
 #include "compiler/passes/passes.hpp"
@@ -24,6 +25,20 @@
 #include "logger.hpp"    // GAPI_LOG
 #include "api/gproto_priv.hpp" // is_dynamic, rewrap
 
+namespace
+{
+    // FIXME: This may not be the right design choice, but so far it works
+    const std::vector<std::string> known_intrinsics = {
+        cv::gapi::streaming::detail::GDesync::id()
+    };
+}
+bool cv::gimpl::is_intrinsic(const std::string &s) {
+    // FIXME: This linear search over strings might be made faster in time
+    return std::find(known_intrinsics.begin(),
+                     known_intrinsics.end(),
+                     s) != known_intrinsics.end();
+}
+
 namespace
 {
     struct ImplInfo
@@ -130,8 +145,13 @@ void cv::gimpl::passes::bindNetParams(ade::passes::PassContext &ctx,
     }
 }
 
-// This pass, given the kernel package, selects a kernel implementation
-// for every operation in the graph
+// This pass, given the kernel package, selects a kernel
+// implementation for every operation in the graph.
+//
+// Starting with OpenCV 4.3, G-API may have some special "intrinsic"
+// operations. Those can be implemented by backends as regular
+// kernels, but if not, they are handled by the framework itself in
+// its optimization/execution passes.
 void cv::gimpl::passes::resolveKernels(ade::passes::PassContext   &ctx,
                                        const gapi::GKernelPackage &kernels)
 {
@@ -142,7 +162,25 @@ void cv::gimpl::passes::resolveKernels(ade::passes::PassContext   &ctx,
     {
         if (gr.metadata(nh).get<NodeType>().t == NodeType::OP)
         {
+            // If the operation is known to be an intrinsic and is NOT
+            // implemented in the package, just skip it - there should
+            // be some pass which handles it.
             auto &op = gr.metadata(nh).get<Op>();
+            if (is_intrinsic(op.k.name) && !kernels.includesAPI(op.k.name)) {
+                gr.metadata().set(HasIntrinsics{});
+                continue;
+            }
+            // FIXME: And this logic is terribly wrong. The right
+            // thing is to assign an intrinsic to a particular island
+            // if and only if it is:
+            // (a) surrounded by nodes of backend X, AND
+            // (b) supported by backend X.
+            // Here we may have multiple backends supporting an
+            // intrinsic but only one of those gets selected. And
+            // this is exactly the situation where we need multiple
+            // versions of the same kernel to be present in the
+            // kernel package (as it was designed originally).
+
             cv::gapi::GBackend selected_backend;
             cv::GKernelImpl    selected_impl;
             std::tie(selected_backend, selected_impl) = kernels.lookup(op.k.name);
@@ -181,6 +219,12 @@ void cv::gimpl::passes::expandKernels(ade::passes::PassContext &ctx, const gapi:
             if (gr.metadata(nh).get<NodeType>().t == NodeType::OP)
             {
                 const auto& op = gr.metadata(nh).get<Op>();
+                // FIXME: Essentially the same problem as in the above resolveKernels
+                if (is_intrinsic(op.k.name) && !kernels.includesAPI(op.k.name)) {
+                    // Note: There's no need to set the HasIntrinsics flag here
+                    // since resolveKernels would do it later.
+                    continue;
+                }
 
                 cv::gapi::GBackend selected_backend;
                 cv::GKernelImpl    selected_impl;
diff --git a/modules/gapi/src/compiler/passes/passes.hpp b/modules/gapi/src/compiler/passes/passes.hpp
index 84142fc055..8f187f6bb7 100644
--- a/modules/gapi/src/compiler/passes/passes.hpp
+++ b/modules/gapi/src/compiler/passes/passes.hpp
@@ -31,7 +31,11 @@ namespace gapi {
     struct GNetPackage;
 }  // namespace gapi
 
-namespace gimpl { namespace passes {
+namespace gimpl {
+
+bool is_intrinsic(const std::string &op_name);
+
+namespace passes {
 
 void dumpDot(const ade::Graph &g, std::ostream& os);
 void dumpDot(ade::passes::PassContext &ctx, std::ostream& os);
@@ -66,6 +70,9 @@ void applyTransformations(ade::passes::PassContext &ctx,
 
 void addStreaming(ade::passes::PassContext &ctx);
 
+void intrinDesync(ade::passes::PassContext &ctx);
+void intrinFinalize(ade::passes::PassContext &ctx);
+
 }} // namespace gimpl::passes
 
 } // namespace cv
diff --git a/modules/gapi/src/compiler/transactions.hpp b/modules/gapi/src/compiler/transactions.hpp
index 54af8a6e69..bdc1723e19 100644
--- a/modules/gapi/src/compiler/transactions.hpp
+++ b/modules/gapi/src/compiler/transactions.hpp
@@ -14,6 +14,7 @@
 
 #include <ade/graph.hpp>
 
+#include "opencv2/gapi/util/util.hpp" // Seq
 #include "opencv2/gapi/own/assert.hpp"
 
 enum class Direction: int {Invalid, In, Out};
@@ -21,8 +22,50 @@ enum class Direction: int {Invalid, In, Out};
 ////////////////////////////////////////////////////////////////////////////
 ////
 // TODO: Probably it can be moved to ADE
-
-namespace Change
+template<class H, class... Metatypes>
+class Preserved
+{
+    using S = typename cv::detail::MkSeq<sizeof...(Metatypes)>::type;
+    std::tuple<cv::util::optional<Metatypes>...> m_data;
+
+    template<class T>
+    cv::util::optional<T> get(ade::ConstTypedGraph<Metatypes...> g, H h) {
+        return g.metadata(h).template contains<T>()
+            ? cv::util::make_optional(g.metadata(h).template get<T>())
+            : cv::util::optional<T>{};
+    }
+    template<std::size_t Id>
+    int set(ade::TypedGraph<Metatypes...> &g, H &h) {
+        const auto &opt = std::get<Id>(m_data);
+        if (opt.has_value())
+            g.metadata(h).set(opt.value());
+        return 0;
+    }
+    template<int... IIs>
+    void copyTo_impl(ade::TypedGraph<Metatypes...> &g, H h, cv::detail::Seq<IIs...>) {
+        int unused[] = {0, set<IIs>(g, h)...};
+        (void) unused;
+    }
+public:
+    Preserved(const ade::Graph &g, H h) {
+        ade::ConstTypedGraph<Metatypes...> tg(g);
+        m_data = std::make_tuple(get<Metatypes>(tg, h)...);
+    }
+    void copyTo(ade::Graph &g, H h) {
+        ade::TypedGraph<Metatypes...> tg(g);
+        copyTo_impl(tg, h, S{});
+    }
+};
+// Do nothing if there's no metadata
+template<class H>
+class Preserved<H> {
+public:
+    Preserved(const ade::Graph &, H) {}
+    void copyTo(ade::Graph &, H) {}
+};
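+
+// A hedged usage sketch: preserving edge metadata across a re-link
+// (assuming a graph `g` typed with DesyncIslEdge):
+//
+//   Preserved<ade::EdgeHandle, DesyncIslEdge> meta(g, old_eh);
+//   auto new_eh = g.link(src_nh, dst_nh);
+//   meta.copyTo(g, new_eh); // DesyncIslEdge (if any) survives the re-link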
+
+template<class... Metatypes>
+struct ChangeT
 {
     struct Base
     {
@@ -31,6 +74,8 @@ namespace Change
         virtual ~Base() = default;
     };
 
+    template<typename H> using Preserved = ::Preserved<H, Metatypes...>;
+
     class NodeCreated final: public Base
     {
         ade::NodeHandle m_node;
@@ -39,11 +84,7 @@ namespace Change
         virtual void rollback(ade::Graph &g) override { g.erase(m_node); }
     };
 
-    // NB: Drops all metadata stored in the EdgeHandle,
-    // which is not restored even in the rollback
-
-    // FIXME: either add a way for users to preserve meta manually
-    // or extend ADE to manipulate with meta such way
+    // FIXME: maybe extend ADE to clone/copy the whole metadata?
     class DropLink final: public Base
     {
         ade::NodeHandle m_node;
@@ -51,13 +92,15 @@ namespace Change
 
         ade::NodeHandle m_sibling;
 
+        Preserved<ade::EdgeHandle> m_meta;
+
     public:
         DropLink(ade::Graph &g,
                  const ade::NodeHandle &node,
                  const ade::EdgeHandle &edge)
-            : m_node(node), m_dir(node == edge->srcNode()
-                                  ? Direction::Out
-                                  : Direction::In)
+            : m_node(node)
+            , m_dir(node == edge->srcNode() ? Direction::Out : Direction::In)
+            , m_meta(g, edge)
         {
             m_sibling = (m_dir == Direction::In
                          ? edge->srcNode()
@@ -67,12 +110,17 @@ namespace Change
 
         virtual void rollback(ade::Graph &g) override
         {
+            // NB: Edge metadata must be preserved here --
+            // GIslandModel edges now have metadata (restored via
+            // m_meta.copyTo() below)
+            ade::EdgeHandle eh;
             switch(m_dir)
             {
-            case Direction::In:  g.link(m_sibling, m_node); break;
-            case Direction::Out: g.link(m_node, m_sibling); break;
+            case Direction::In:  eh = g.link(m_sibling, m_node); break;
+            case Direction::Out: eh = g.link(m_node, m_sibling); break;
             default: GAPI_Assert(false);
             }
+            GAPI_Assert(eh != nullptr);
+            m_meta.copyTo(g, eh);
         }
     };
 
@@ -82,10 +130,15 @@ namespace Change
 
     public:
         NewLink(ade::Graph &g,
-                  const ade::NodeHandle &prod,
-                  const ade::NodeHandle &cons)
+                const ade::NodeHandle &prod,
+                const ade::NodeHandle &cons,
+                const ade::EdgeHandle &copy_from = ade::EdgeHandle())
             : m_edge(g.link(prod, cons))
         {
+            if (copy_from != nullptr)
+            {
+                Preserved<ade::EdgeHandle>(g, copy_from).copyTo(g, m_edge);
+            }
         }
 
         virtual void rollback(ade::Graph &g) override
@@ -141,7 +194,7 @@ namespace Change
             }
         }
     };
-} // namespace Change
+}; // struct ChangeT
 ////////////////////////////////////////////////////////////////////////////
 
 #endif // OPENCV_GAPI_COMPILER_TRANSACTIONS_HPP
diff --git a/modules/gapi/src/executor/conc_queue.hpp b/modules/gapi/src/executor/conc_queue.hpp
index 5de50ef34b..9875e8245a 100644
--- a/modules/gapi/src/executor/conc_queue.hpp
+++ b/modules/gapi/src/executor/conc_queue.hpp
@@ -119,8 +119,7 @@ void concurrent_bounded_queue<T>::set_capacity(std::size_t capacity) {
 // Clear the queue. Similar to the TBB version, this method is not
 // thread-safe.
 template<typename T>
-void concurrent_bounded_queue<T>::clear()
-{
+void concurrent_bounded_queue<T>::clear() {
     m_data = std::queue<T>{};
 }
 
diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp
index afdebee020..41cb83f710 100644
--- a/modules/gapi/src/executor/gstreamingexecutor.cpp
+++ b/modules/gapi/src/executor/gstreamingexecutor.cpp
@@ -6,6 +6,7 @@
 
 #include "precomp.hpp"
 
+#include <memory> // make_shared
 #include <iostream>
 
 #include <ade/util/zip_range.hpp>
@@ -60,14 +61,23 @@ public:
 
 struct DataQueue {
     static const char *name() { return "StreamingDataQueue"; }
+    enum tag { DESYNC }; // Enum of 1 element: purely syntax sugar
 
     explicit DataQueue(std::size_t capacity) {
-        if (capacity) {
-            q.set_capacity(capacity);
+        // Note: `ptr` is a shared<SyncQueue>, while `q` is a shared<Q>
+        auto ptr = std::make_shared<cv::gimpl::stream::SyncQueue>();
+        if (capacity != 0) {
+            ptr->set_capacity(capacity);
         }
+        q = std::move(ptr);
+    }
+    explicit DataQueue(tag t)
+        : q(new cv::gimpl::stream::DesyncQueue()) {
+        GAPI_Assert(t == DESYNC);
     }
 
-    cv::gimpl::stream::Q q;
+    // FIXME: ADE metadata requires types to be copiable
+    std::shared_ptr<cv::gimpl::stream::Q> q;
 };
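+
+// NB: A hedged sketch of how the DESYNC tag is meant to be used when
+// wiring queues (hypothetical call site; `qgr` is a typed graph view
+// over the island graph and `eh` a desynchronized edge):
+//
+//   qgr.metadata(eh).set(DataQueue(DataQueue::DESYNC));
+//
+// i.e. a desynchronized edge gets a DesyncQueue (the "last written
+// value" container) instead of a bounded SyncQueue.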
 
 std::vector<cv::gimpl::stream::Q*> reader_queues(      ade::Graph &g,
@@ -77,7 +87,7 @@ std::vector<cv::gimpl::stream::Q*> reader_queues(      ade::Graph &g,
     std::vector<cv::gimpl::stream::Q*> result;
     for (auto &&out_eh : obj->outEdges())
     {
-        result.push_back(&qgr.metadata(out_eh).get<DataQueue>().q);
+        result.push_back(qgr.metadata(out_eh).get<DataQueue>().q.get());
     }
     return result;
 }
@@ -90,7 +100,7 @@ std::vector<cv::gimpl::stream::Q*> input_queues(      ade::Graph &g,
     for (auto &&in_eh : obj->inEdges())
     {
         result.push_back(qgr.metadata(in_eh).contains<DataQueue>()
-                         ? &qgr.metadata(in_eh).get<DataQueue>().q
+                         ? qgr.metadata(in_eh).get<DataQueue>().q.get()
                          : nullptr);
     }
     return result;
@@ -133,6 +143,77 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs)
     }
 }
 
+// FIXME: Is there a way to derive this function from its GRunArgsP version?
+template<class C> using O = cv::util::optional<C>;
+void sync_data(cv::gimpl::stream::Result &r, cv::GOptRunArgsP &outputs)
+{
+    namespace own = cv::gapi::own;
+
+    for (auto && it : ade::util::zip(ade::util::toRange(outputs),
+                                     ade::util::toRange(r.args),
+                                     ade::util::toRange(r.flags)))
+    {
+        auto &out_obj  = std::get<0>(it);
+        auto &res_obj  = std::get<1>(it);
+        bool available = std::get<2>(it);
+
+        using T = cv::GOptRunArgP;
+#define HANDLE_CASE(Type)                                               \
+        case T::index_of<O<Type>*>():                                   \
+            if (available) {                                            \
+                *cv::util::get<O<Type>*>(out_obj)                       \
+                    = cv::util::make_optional(std::move(cv::util::get<Type>(res_obj))); \
+            } else {                                                    \
+                cv::util::get<O<Type>*>(out_obj)->reset();              \
+            }
+
+        // FIXME: this conversion should be unified
+        switch (out_obj.index())
+        {
+            HANDLE_CASE(cv::Scalar); break;
+            HANDLE_CASE(cv::RMat);   break;
+
+        case T::index_of<O<cv::Mat>*>(): {
+            // Mat: special handling.
+            auto &mat_opt = *cv::util::get<O<cv::Mat>*>(out_obj);
+            if (available) {
+                auto q_map = cv::util::get<cv::RMat>(res_obj).access(cv::RMat::Access::R);
+                // FIXME: Copy! Maybe we could do some optimization for this case!
+                // e.g. don't handle RMat for the last island in the graph.
+                // It is not always possible though.
+                mat_opt = cv::util::make_optional(cv::gimpl::asMat(q_map).clone());
+            } else {
+                mat_opt.reset();
+            }
+        } break;
+        case T::index_of<cv::detail::OptionalVectorRef>(): {
+            // std::vector<>: special handling
+            auto &vec_opt = cv::util::get<cv::detail::OptionalVectorRef>(out_obj);
+            if (available) {
+                vec_opt.mov(cv::util::get<cv::detail::VectorRef>(res_obj));
+            } else {
+                vec_opt.reset();
+            }
+        } break;
+        case T::index_of<cv::detail::OptionalOpaqueRef>(): {
+            // cv::GOpaque<>: special handling
+            auto &opq_opt = cv::util::get<cv::detail::OptionalOpaqueRef>(out_obj);
+            if (available) {
+                opq_opt.mov(cv::util::get<cv::detail::OpaqueRef>(res_obj));
+            } else {
+                opq_opt.reset();
+            }
+        } break;
+        default:
+            // ...maybe because of STANDALONE mode.
+            GAPI_Assert(false && "This value type is not supported!");
+            break;
+        }
+    }
+#undef HANDLE_CASE
+}
+
+
 // Pops an item from every input queue and combine it to the final
 // result.  Blocks the current thread.  Returns true if the vector has
 // been obtained successfully and false if a Stop message has been
@@ -206,12 +287,39 @@ class QueueReader
     bool m_finishing = false; // Set to true once a "soft" stop is received
     std::vector<Cmd> m_cmd;
 
+    void rewindToStop(std::vector<Q*>   &in_queues,
+                      const std::size_t  this_id);
+
 public:
-    bool getInputVector(std::vector<Q*> &in_queues,
-                        cv::GRunArgs    &in_constants,
-                        cv::GRunArgs    &isl_inputs);
+    bool getInputVector  (std::vector<Q*>   &in_queues,
+                          cv::GRunArgs      &in_constants,
+                          cv::GRunArgs      &isl_inputs);
+
+    bool getResultsVector(std::vector<Q*>         &in_queues,
+                          const std::vector<int>  &in_mapping,
+                          const std::size_t        out_size,
+                          cv::GRunArgs            &out_results);
 };
 
+// This method handles a stop sign received from some input
+// island. Reiterate through all _remaining valid_ queues (some of
+// them may be set to nullptr already -- see the handling in
+// getInputVector) and rewind data to every Stop sign per queue.
+void QueueReader::rewindToStop(std::vector<Q*>   &in_queues,
+                               const std::size_t  this_id)
+{
+    for (auto &&qit : ade::util::indexed(in_queues))
+    {
+        auto id2 = ade::util::index(qit);
+        auto &q2 = ade::util::value(qit);
+        if (this_id == id2) continue;
+
+        Cmd cmd;
+        while (q2 && !cv::util::holds_alternative<Stop>(cmd))
+            q2->pop(cmd);
+    }
+}
+
 bool QueueReader::getInputVector(std::vector<Q*> &in_queues,
                                  cv::GRunArgs    &in_constants,
                                  cv::GRunArgs    &isl_inputs)
@@ -271,20 +379,7 @@ bool QueueReader::getInputVector(std::vector<Q*> &in_queues,
             else
             {
                 GAPI_Assert(stop.kind == Stop::Kind::HARD);
-                // Just got a stop sign. Reiterate through all
-                // _remaining valid_ queues (some of them can be
-                // set to nullptr already -- see above) and rewind
-                // data to every Stop sign per queue
-                for (auto &&qit : ade::util::indexed(in_queues))
-                {
-                    auto id2 = ade::util::index(qit);
-                    auto &q2 = ade::util::value(qit);
-                    if (id == id2) continue;
-
-                    Cmd cmd2;
-                    while (q2 && !cv::util::holds_alternative<Stop>(cmd2))
-                        q2->pop(cmd2);
-                }
+                rewindToStop(in_queues, id);
                 // After queues are read to the proper indicator,
                 // indicate end-of-stream
                 return false;
@@ -303,6 +398,60 @@ bool QueueReader::getInputVector(std::vector<Q*> &in_queues,
     return true; // A regular case - there is data to process.
 }
 
+// This is a special method to obtain a result vector
+// for the entire pipeline's outputs.
+//
+// After introducing desync(), the pipeline output vector
+// may be produced only partially. Also, if a desynchronized
+// path contributes multiple outputs to the pipeline, _these_ outputs
+// should still come synchronized to the end user (via pull()).
+//
+// This method handles all of this.
+// It takes a number of input queues, which may or may not be
+// equal to the number of pipeline outputs (<=).
+// It also takes indexes saying which queue produces which
+// output in the resulting pipeline.
+//
+// `out_results` is always produced with the size of the full output
+// vector. In the desync case, the number of in_queues will
+// be less than this size and some of the items won't be produced.
+// In the sync case, there is a 1:1 mapping.
+//
+// In the desync case, there _will be_ multiple collector threads
+// calling this method, each pushing its whole-pipeline outputs
+// (possibly partially filled) to the same final output queue.
+// The receiver part at the GStreamingExecutor level won't change
+// because of that.
+bool QueueReader::getResultsVector(std::vector<Q*>   &in_queues,
+                                   const std::vector<int>  &in_mapping,
+                                   const std::size_t  out_size,
+                                   cv::GRunArgs      &out_results)
+{
+    m_cmd.resize(out_size);
+    for (auto &&it : ade::util::indexed(in_queues))
+    {
+        auto ii = ade::util::index(it);
+        auto oi = in_mapping[ii];
+        auto &q = ade::util::value(it);
+        q->pop(m_cmd[oi]);
+        if (!cv::util::holds_alternative<Stop>(m_cmd[oi]))
+        {
+            out_results[oi] = std::move(cv::util::get<cv::GRunArg>(m_cmd[oi]));
+        }
+        else // A Stop sign
+        {
+            // In theory, a CNST (const-value) Stop should never reach here.
+            // Collector thread never handles the inputs directly
+            // (collector's input queues are always produced by
+            // islands in the graph).
+            rewindToStop(in_queues, ii);
+            return false;
+        } // if(Stop)
+    } // for(in_queues)
+    return true;
+}
+
 
 // This thread is a plain dumb source actor. What it does is just:
 // - Check input queue (the only one) for a control command
@@ -603,22 +752,78 @@ void islandActorThread(std::vector<cv::gimpl::RcDesc> in_rcs,                //
 // and then put the resulting vector into one single queue.  While it
 // looks redundant, it dramatically simplifies the way try_pull()
 // is implemented - we need to check one queue instead of many.
-void collectorThread(std::vector<Q*> in_queues,
-                     Q&              out_queue)
+//
+// After desync() is added, there may be multiple collector threads
+// running, each producing its own part of the partial
+// pipeline output (optional<T>...). All partial outputs are pushed
+// to the same output queue and then picked up by GStreamingExecutor
+// in the end.
+void collectorThread(std::vector<Q*>   in_queues,
+                     std::vector<int>  in_mapping,
+                     const std::size_t out_size,
+                     Q&                out_queue)
 {
+    // These flags are static now: regardless of whether a sync or
+    // desync branch is collected by this thread, all in_queue
+    // data should come in sync.
+    std::vector<bool> flags(out_size, false);
+    for (auto idx : in_mapping) {
+        flags[idx] = true;
+    }
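+    // E.g. (illustrative): with out_size == 3 and in_mapping == {0, 2},
+    // flags == {true, false, true} for every Result this thread emits.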
+
     QueueReader qr;
     while (true)
     {
-        cv::GRunArgs this_result(in_queues.size());
-        cv::GRunArgs this_const(in_queues.size());
-        if (!qr.getInputVector(in_queues, this_const, this_result))
+        cv::GRunArgs this_result(out_size);
+        if (!qr.getResultsVector(in_queues, in_mapping, out_size, this_result))
         {
             out_queue.push(Cmd{Stop{}});
             return;
         }
-        out_queue.push(Cmd{this_result});
+        out_queue.push(Cmd{Result{std::move(this_result), flags}});
     }
 }
+
+void check_DesyncObjectConsumedByMultipleIslands(const cv::gimpl::GIslandModel::Graph &gim) {
+    using namespace cv::gimpl;
+
+    // Since the limitation exists only in this particular
+    // implementation, the check is also done only here, not at the
+    // graph compiler level.
+    //
+    // See comment in desync(GMat) src/api/kernels_streaming.cpp for details.
+    for (auto &&nh : gim.nodes()) {
+        if (gim.metadata(nh).get<NodeKind>().k == NodeKind::SLOT) {
+            // SLOTs are read by ISLANDs, so look for the metadata
+            // of the outbound edges
+            std::unordered_map<int, GIsland*> out_desync_islands;
+            for (auto &&out_eh : nh->outEdges()) {
+                if (gim.metadata(out_eh).contains<DesyncIslEdge>()) {
+                    // This is a desynchronized edge
+                    // Look what Island it leads to
+                    const auto out_desync_idx = gim.metadata(out_eh)
+                        .get<DesyncIslEdge>().index;
+                    const auto out_island = gim.metadata(out_eh->dstNode())
+                        .get<FusedIsland>().object;
+
+                    auto it = out_desync_islands.find(out_desync_idx);
+                    if (it != out_desync_islands.end()) {
+                        // If there's already an edge with this desync
+                        // id, it must point to the same island object
+                        GAPI_Assert(it->second == out_island.get()
+                                    && "A single desync object may only be used by a single island!");
+                    } else {
+                        // Store the island pointer for the further check
+                        out_desync_islands[out_desync_idx] = out_island.get();
+                    }
+                } // if(desync)
+            } // for(out_eh)
+            // There must be only one backend at the end of the day
+            // (under this desync path)
+        } // if(SLOT)
+    } // for(nodes)
+}
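+
+// An illustrative failing case (assuming the check above): a SLOT with
+// two outbound DesyncIslEdges sharing the same desync index but leading
+// to two different fused islands would trip the GAPI_Assert above.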
+
 } // anonymous namespace
 
 // GStreamingExecutor expects compile arguments as input to have possibility to do
@@ -630,20 +835,28 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr<ade::Graph> &&
                      .get<IslandModel>().model)
     , m_comp_args(comp_args)
     , m_gim(*m_island_graph)
+    , m_desync(GModel::Graph(*m_orig_graph).metadata()
+               .contains<Desynchronized>())
 {
     GModel::Graph gm(*m_orig_graph);
     // NB: Right now GIslandModel is acyclic, and all the below code assumes that.
-    // NB: This naive execution code is taken from GExecutor nearly "as-is"
+    // NB: This naive execution code is taken from GExecutor nearly
+    // "as-is"
+
+    if (m_desync) {
+        check_DesyncObjectConsumedByMultipleIslands(m_gim);
+    }
 
     const auto proto = gm.metadata().get<Protocol>();
     m_emitters      .resize(proto.in_nhs.size());
     m_emitter_queues.resize(proto.in_nhs.size());
     m_sinks         .resize(proto.out_nhs.size());
-    m_sink_queues   .resize(proto.out_nhs.size());
+    m_sink_queues   .resize(proto.out_nhs.size(), nullptr);
+    m_sink_sync     .resize(proto.out_nhs.size(), -1);
 
     // Very rough estimation to limit internal queue sizes.
     // Pipeline depth is equal to number of its (pipeline) steps.
-    const auto queue_capacity = std::count_if
+    const auto queue_capacity = 3*std::count_if
         (m_gim.nodes().begin(),
          m_gim.nodes().end(),
          [&](ade::NodeHandle nh) {
@@ -728,8 +941,12 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr<ade::Graph> &&
                 {
                     // ...only if the data is not compile-const
                     if (const_ins.count(eh->srcNode()) == 0) {
-                        qgr.metadata(eh).set(DataQueue(queue_capacity));
-                        m_internal_queues.insert(&qgr.metadata(eh).get<DataQueue>().q);
+                        if (m_gim.metadata(eh).contains<DesyncIslEdge>()) {
+                            qgr.metadata(eh).set(DataQueue(DataQueue::DESYNC));
+                        } else {
+                            qgr.metadata(eh).set(DataQueue(queue_capacity));
+                        }
+                        m_internal_queues.insert(qgr.metadata(eh).get<DataQueue>().q.get());
                     }
                 }
             }
@@ -760,7 +977,14 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr<ade::Graph> &&
                 ade::TypedGraph<DataQueue> qgr(*m_island_graph);
                 GAPI_Assert(nh->inEdges().size() == 1u);
                 qgr.metadata(nh->inEdges().front()).set(DataQueue(queue_capacity));
-                m_sink_queues[sink_idx] = &qgr.metadata(nh->inEdges().front()).get<DataQueue>().q;
+                m_sink_queues[sink_idx] = qgr.metadata(nh->inEdges().front()).get<DataQueue>().q.get();
+
+                // Assign a desync tag
+                const auto sink_out_nh = gm.metadata().get<Protocol>().out_nhs[sink_idx];
+                if (gm.metadata(sink_out_nh).contains<DesyncPath>()) {
+                    // metadata().get_or<> could make this thing better
+                    m_sink_sync[sink_idx] = gm.metadata(sink_out_nh).get<DesyncPath>().index;
+                }
             }
             break;
         default:
@@ -768,7 +992,23 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr<ade::Graph> &&
             break;
         } // switch(kind)
     } // for(gim nodes)
-    m_out_queue.set_capacity(queue_capacity);
+
+    // If there are desynchronized parts in the graph, there may be
+    // multiple threads polling every separate (desynchronized)
+    // branch in the graph individually. Prepare the mapping information
+    // for every such thread
+    for (auto &&idx : ade::util::iota(m_sink_queues.size())) {
+        auto  path_id = m_sink_sync[idx];
+        auto &info    = m_collector_map[path_id];
+        info.queues.push_back(m_sink_queues[idx]);
+        info.mapping.push_back(static_cast<int>(idx));
+    }
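+    // E.g. (illustrative): with m_sink_sync == {-1, 2, 2}, m_collector_map
+    // gets two entries: one for the synchronized sink #0 (path id -1) and
+    // one for the desynchronized sinks #1 and #2 (path id 2).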
+
+    // Reserve space in the final queue based on the number
+    // of desync parts (they can generate output individually
+    // for the same input frame, so the output traffic multiplies)
+    GAPI_Assert(m_collector_map.size() > 0u);
+    m_out_queue.set_capacity(queue_capacity * m_collector_map.size());
 }
 
 cv::gimpl::GStreamingExecutor::~GStreamingExecutor()
@@ -938,6 +1178,9 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
                                real_video_completion_cb);
     }
 
+    for (auto &&op : m_ops) {
+        op.isl_exec->handleNewStream();
+    }
 
     // Now do this for every island (in a topological order)
     for (auto &&op : m_ops)
@@ -974,10 +1217,17 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
                                out_queues);
     }
 
-    // Finally, start a collector thread.
-    m_threads.emplace_back(collectorThread,
-                           m_sink_queues,
-                           std::ref(m_out_queue));
+    // Finally, start collector thread(s).
+    // If there are desynchronized parts in the graph, there may be
+    // multiple threads polling every separate (desynchronized)
+    // branch in the graph individually.
+    for (auto &&info : m_collector_map) {
+        m_threads.emplace_back(collectorThread,
+                               info.second.queues,
+                               info.second.mapping,
+                               m_sink_queues.size(),
+                               std::ref(m_out_queue));
+    }
     state = State::READY;
 }
 
@@ -1018,15 +1268,25 @@ void cv::gimpl::GStreamingExecutor::wait_shutdown()
     for (auto &q : m_internal_queues) q->clear();
     m_out_queue.clear();
 
+    for (auto &&op : m_ops) {
+        op.isl_exec->handleStopStream();
+    }
+
     state = State::STOPPED;
 }
 
 bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs)
 {
+    // This pull() can only be called when there are no desynchronized
+    // parts in the graph.
+    GAPI_Assert(!m_desync &&
+                "This graph has desynchronized parts! Please use another pull()");
+
     if (state == State::STOPPED)
         return false;
     GAPI_Assert(state == State::RUNNING);
-    GAPI_Assert(m_sink_queues.size() == outs.size());
+    GAPI_Assert(m_sink_queues.size() == outs.size() &&
+                "Number of data objects in cv::gout() must match the number of graph outputs in cv::GOut()");
 
     Cmd cmd;
     m_out_queue.pop(cmd);
@@ -1036,12 +1296,39 @@ bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs)
         return false;
     }
 
-    GAPI_Assert(cv::util::holds_alternative<cv::GRunArgs>(cmd));
-    cv::GRunArgs &this_result = cv::util::get<cv::GRunArgs>(cmd);
+    GAPI_Assert(cv::util::holds_alternative<Result>(cmd));
+    cv::GRunArgs &this_result = cv::util::get<Result>(cmd).args;
     sync_data(this_result, outs);
     return true;
 }
 
+bool cv::gimpl::GStreamingExecutor::pull(cv::GOptRunArgsP &&outs)
+{
+    // Unlike the pull() above, this one can be called in both cases:
+    // whether there are desynchronized parts in the graph or not.
+
+    // FIXME: so far it is a full duplicate of standard pull except
+    // the sync_data version called.
+    if (state == State::STOPPED)
+        return false;
+    GAPI_Assert(state == State::RUNNING);
+    GAPI_Assert(m_sink_queues.size() == outs.size() &&
+                "Number of data objects in cv::gout() must match the number of graph outputs in cv::GOut()");
+
+    Cmd cmd;
+    m_out_queue.pop(cmd);
+    if (cv::util::holds_alternative<Stop>(cmd))
+    {
+        wait_shutdown();
+        return false;
+    }
+
+    GAPI_Assert(cv::util::holds_alternative<Result>(cmd));
+    sync_data(cv::util::get<Result>(cmd), outs);
+    return true;
+}
+
+
 bool cv::gimpl::GStreamingExecutor::try_pull(cv::GRunArgsP &&outs)
 {
     if (state == State::STOPPED)
@@ -1059,8 +1346,8 @@ bool cv::gimpl::GStreamingExecutor::try_pull(cv::GRunArgsP &&outs)
         return false;
     }
 
-    GAPI_Assert(cv::util::holds_alternative<cv::GRunArgs>(cmd));
-    cv::GRunArgs &this_result = cv::util::get<cv::GRunArgs>(cmd);
+    GAPI_Assert(cv::util::holds_alternative<Result>(cmd));
+    cv::GRunArgs &this_result = cv::util::get<Result>(cmd).args;
     sync_data(this_result, outs);
     return true;
 }
diff --git a/modules/gapi/src/executor/gstreamingexecutor.hpp b/modules/gapi/src/executor/gstreamingexecutor.hpp
index d10f9eddd0..b6093ac1ef 100644
--- a/modules/gapi/src/executor/gstreamingexecutor.hpp
+++ b/modules/gapi/src/executor/gstreamingexecutor.hpp
@@ -14,6 +14,8 @@
 
 #include <memory> // unique_ptr, shared_ptr
 #include <thread> // thread
+#include <vector>
+#include <unordered_map>
 
 #if defined(HAVE_TBB)
 #  include <tbb/concurrent_queue.h> // FIXME: drop it from here!
@@ -22,6 +24,7 @@ template<typename T> using QueueClass = tbb::concurrent_bounded_queue<T>;
 #  include "executor/conc_queue.hpp"
 template<typename T> using QueueClass = cv::gapi::own::concurrent_bounded_queue<T>;
 #endif // TBB
+#include "executor/last_value.hpp"
 
 #include <ade/graph.hpp>
 
@@ -40,14 +43,61 @@ struct Stop {
     cv::GRunArg cdata; // const data for CNST stop
 };
 
+struct Result {
+    cv::GRunArgs      args;  // Full results vector
+    std::vector<bool> flags; // Availability flags (in case of desync)
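+    // NB: flags[i] == false means args[i] was not produced by this
+    // partial result (it belongs to another desynchronized path).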
+};
+
 using Cmd = cv::util::variant
     < cv::util::monostate
     , Start        // Tells emitters to start working. Not broadcasted to workers.
     , Stop         // Tells emitters to stop working. Broadcasted to workers.
     , cv::GRunArg  // Workers data payload to process.
-    , cv::GRunArgs // Full results vector
+    , Result       // Pipeline's data for gout()
     >;
-using Q = QueueClass<Cmd>;
+
+// Interface over a queue. The underlying queue implementation may be
+// different. This class is mainly introduced to bring some
+// abstraction over the real queues (bounded, in-order) and the
+// desynchronized data slots (required to implement
+// cv::gapi::desync).
+
+class Q {
+public:
+    virtual void push(const Cmd &cmd) = 0;
+    virtual void pop(Cmd &cmd) = 0;
+    virtual bool try_pop(Cmd &cmd) = 0;
+    virtual void clear() = 0;
+    virtual ~Q() = default;
+};
+
+// A regular queue implementation
+class SyncQueue final: public Q {
+    QueueClass<Cmd> m_q;    // FIXME: OWN or WRAP??
+
+public:
+    virtual void push(const Cmd &cmd) override { m_q.push(cmd); }
+    virtual void pop(Cmd &cmd)        override { m_q.pop(cmd);  }
+    virtual bool try_pop(Cmd &cmd)    override { return m_q.try_pop(cmd); }
+    virtual void clear()              override { m_q.clear(); }
+
+    void set_capacity(std::size_t c) { m_q.set_capacity(c); }
+};
+
+// Desynchronized "queue" implementation
+// Every push overwrites the value which has not been popped yet.
+// This container can hold 0 or 1 element
+// Special handling for Stop is implemented (FIXME: not really)
+class DesyncQueue final: public Q {
+    cv::gapi::own::last_written_value<Cmd> m_v;
+
+public:
+    virtual void push(const Cmd &cmd) override { m_v.push(cmd); }
+    virtual void pop(Cmd &cmd)        override { m_v.pop(cmd);  }
+    virtual bool try_pop(Cmd &cmd)    override { return m_v.try_pop(cmd); }
+    virtual void clear()              override { m_v.clear(); }
+};
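+
+// Behavior sketch (illustrative, single producer/consumer):
+//   DesyncQueue q;
+//   q.push(Cmd{...}); q.push(Cmd{...}); // the first Cmd is overwritten
+//   Cmd c; q.pop(c);                    // c holds the latest Cmd pushed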
+
 } // namespace stream
 
 // FIXME: Currently all GExecutor comments apply also
@@ -87,6 +137,7 @@ protected:
     util::optional<bool> m_reshapable;
 
     cv::gimpl::GIslandModel::Graph m_gim; // FIXME: make const?
+    const bool m_desync;
 
     // FIXME: Naive executor details are here for now
     // but then it should be moved to another place
@@ -117,11 +168,27 @@ protected:
     std::vector<ade::NodeHandle> m_sinks;
 
     std::vector<std::thread> m_threads;
-    std::vector<stream::Q>   m_emitter_queues;
-    std::vector<stream::Q*>  m_const_emitter_queues; // a view over m_emitter_queues
-    std::vector<stream::Q*>  m_sink_queues;
-    std::unordered_set<stream::Q*> m_internal_queues;
-    stream::Q m_out_queue;
+    std::vector<stream::SyncQueue>   m_emitter_queues;
+
+    // a view over m_emitter_queues
+    std::vector<stream::SyncQueue*>  m_const_emitter_queues;
+
+    std::vector<stream::Q*>          m_sink_queues;
+
+    // desync path tags for outputs. -1 means that output
+    // doesn't belong to a desync path
+    std::vector<int>                 m_sink_sync;
+
+    std::unordered_set<stream::Q*>   m_internal_queues;
+    stream::SyncQueue m_out_queue;
+
+    // Describes mapping from desync paths to collector threads
+    struct CollectorThreadInfo {
+        std::vector<stream::Q*>  queues;
+        std::vector<int> mapping;
+    };
+    std::unordered_map<int, CollectorThreadInfo> m_collector_map;
+
 
     void wait_shutdown();
 
@@ -132,6 +199,7 @@ public:
     void setSource(GRunArgs &&args);
     void start();
     bool pull(cv::GRunArgsP &&outs);
+    bool pull(cv::GOptRunArgsP &&outs);
     bool try_pull(cv::GRunArgsP &&outs);
     void stop();
     bool running() const;
diff --git a/modules/gapi/src/executor/last_value.hpp b/modules/gapi/src/executor/last_value.hpp
new file mode 100644
index 0000000000..152449a879
--- /dev/null
+++ b/modules/gapi/src/executor/last_value.hpp
@@ -0,0 +1,105 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#ifndef OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP
+#define OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP
+
+#include <mutex>
+#include <condition_variable>
+
+#include <opencv2/gapi/util/optional.hpp>
+#include <opencv2/gapi/own/assert.hpp>
+
+namespace cv {
+namespace gapi {
+namespace own {
+
+// This class implements a "Last Written Value" thing.  Writer threads
+// (in our case, it is just one) can write as many values there as it
+// can.
+//
+// The reader thread gets only a value it gets at the time (or blocks
+// if there was no value written since the last read).
+//
+// Again, the implementation is highly inefficient right now.
+template<class T>
+class last_written_value {
+    cv::util::optional<T> m_data;
+
+    std::mutex m_mutex;
+    std::condition_variable m_cond_empty;
+
+    void unsafe_pop(T &t);
+
+public:
+    last_written_value() {}
+    last_written_value(const last_written_value<T> &cc)
+        : m_data(cc.m_data) {
+        // FIXME: what to do with all that locks, etc?
+    }
+    last_written_value(last_written_value<T> &&cc)
+        : m_data(std::move(cc.m_data)) {
+        // FIXME: what to do with all that locks, etc?
+    }
+
+    // FIXME: && versions
+    void push(const T &t);
+    void pop(T &t);
+    bool try_pop(T &t);
+
+    // Not thread-safe
+    void clear();
+};
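+
+// Usage sketch (illustrative):
+//   last_written_value<int> v;
+//   v.push(1); v.push(2);  // the value 1 is overwritten
+//   int x = 0;
+//   v.pop(x);              // x == 2; a subsequent pop() would block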
+
+// Internal: do shared pop things assuming the lock is already there
+template<typename T>
+void last_written_value<T>::unsafe_pop(T &t) {
+    GAPI_Assert(m_data.has_value());
+    t = std::move(m_data.value());
+    m_data.reset();
+}
+
+// Write a value: overwrite the one stored (if any). Never blocks.
+template<typename T>
+void last_written_value<T>::push(const T& t) {
+    std::unique_lock<std::mutex> lock(m_mutex);
+    m_data = cv::util::make_optional(t);
+    lock.unlock();
+    m_cond_empty.notify_one();
+}
+
+// Pop the value. Blocks if no value has been written yet.
+template<typename T>
+void last_written_value<T>::pop(T &t) {
+    std::unique_lock<std::mutex> lock(m_mutex);
+    if (!m_data.has_value()) {
+        // if there is no data, wait
+        m_cond_empty.wait(lock, [&](){return m_data.has_value();});
+    }
+    unsafe_pop(t);
+}
+
+// Try to pop the value. Returns false if there is no value stored.
+template<typename T>
+bool last_written_value<T>::try_pop(T &t) {
+    std::unique_lock<std::mutex> lock(m_mutex);
+    if (!m_data.has_value()) {
+        // if there is no data, return
+        return false;
+    }
+    unsafe_pop(t);
+    return true;
+}
+
+// Clear the value holder. This method is not thread-safe.
+template<typename T>
+void last_written_value<T>::clear() {
+    m_data.reset();
+}
+
+}}} // namespace cv::gapi::own
+
+#endif // OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP
diff --git a/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp b/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp
index f6543e59f7..c9d9926542 100644
--- a/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp
+++ b/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp
@@ -2,7 +2,7 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 //
-// Copyright (C) 2018 Intel Corporation
+// Copyright (C) 2018-2020 Intel Corporation
 
 
 #include "../test_precomp.hpp"
@@ -29,7 +29,9 @@ namespace
                                     , ""
                                     , nullptr
                                     , { GShape::GMAT }
-                                    , { D::OpaqueKind::CV_UNKNOWN } }).pass(m).yield(0);
+                                    , { D::OpaqueKind::CV_UNKNOWN }
+                                    , { cv::detail::HostCtor{cv::util::monostate{}} }
+                                    }).pass(m).yield(0);
     }
 
     cv::GMat binaryOp(cv::GMat m1, cv::GMat m2)
@@ -38,7 +40,9 @@ namespace
                                     , ""
                                     , nullptr
                                     , { GShape::GMAT }
-                                    , { D::OpaqueKind::CV_UNKNOWN, D::OpaqueKind::CV_UNKNOWN } }).pass(m1, m2).yield(0);
+                                    , { D::OpaqueKind::CV_UNKNOWN, D::OpaqueKind::CV_UNKNOWN }
+                                    , { cv::detail::HostCtor{cv::util::monostate{}} }
+                                    }).pass(m1, m2).yield(0);
     }
 
     std::vector<ade::NodeHandle> collectOperations(const cv::gimpl::GModel::Graph& gr)
diff --git a/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp b/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp
index c247cc7b79..723e42a6df 100644
--- a/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp
+++ b/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp
@@ -513,7 +513,65 @@ TEST(IslandFusion, Regression_ShouldFuseAll)
     EXPECT_EQ(1u, isl_nhs.size());  // 1 island
 }
 
-// FIXME: add more tests on mixed (hetero) graphs
+TEST(IslandFusion, Test_Desync_NoFuse)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = in*0.5f;
+    cv::GMat tmp2 = tmp1 + in;
+
+    cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat tmp4 = tmp3*0.1f;
+
+    const auto in_meta = cv::GMetaArg(cv::GMatDesc{CV_8U,1,cv::Size(32,32)});
+    cv::GComputation comp(cv::GIn(in), cv::GOut(tmp2, tmp4));
+
+    //////////////////////////////////////////////////////////////////
+    // Compile the graph in "regular" mode, it should produce a single island
+    {
+        using namespace cv::gimpl;
+
+        GCompiler compiler(comp, {in_meta}, cv::compile_args());
+        GCompiler::GPtr graph = compiler.generateGraph();
+        compiler.runPasses(*graph);
+
+        auto isl_model = GModel::ConstGraph(*graph).metadata()
+            .get<IslandModel>().model;
+        GIslandModel::ConstGraph gim(*isl_model);
+
+        const auto is_island = [&](ade::NodeHandle nh) {
+            return (NodeKind::ISLAND == gim.metadata(nh).get<NodeKind>().k);
+        };
+        const auto num_isl = std::count_if(gim.nodes().begin(),
+                                           gim.nodes().end(),
+                                           is_island);
+        EXPECT_EQ(1, num_isl);
+    }
+    //////////////////////////////////////////////////////////////////
+    // Now compile the graph in the streaming mode.
+    // It has to produce two islands
+    {
+        using namespace cv::gimpl;
+
+        GCompiler compiler(comp, {in_meta}, cv::compile_args());
+        GCompiler::GPtr graph = compiler.generateGraph();
+        GModel::Graph(*graph).metadata().set(Streaming{});
+        compiler.runPasses(*graph);
+
+        auto isl_model = GModel::ConstGraph(*graph).metadata()
+             .get<IslandModel>().model;
+        GIslandModel::ConstGraph gim(*isl_model);
+
+        const auto is_island = [&](ade::NodeHandle nh) {
+            return (NodeKind::ISLAND == gim.metadata(nh).get<NodeKind>().k);
+        };
+        const auto num_isl = std::count_if(gim.nodes().begin(),
+                                           gim.nodes().end(),
+                                           is_island);
+        EXPECT_EQ(2, num_isl);
+    }
+}
+
+// FIXME: add more tests on mixed (hetero) graphs
 // ADE-222, ADE-223
 
 // FIXME: add test on combination of user-specified island
diff --git a/modules/gapi/test/internal/gapi_transactions_test.cpp b/modules/gapi/test/internal/gapi_transactions_test.cpp
index ac77c33d13..9d36401a71 100644
--- a/modules/gapi/test/internal/gapi_transactions_test.cpp
+++ b/modules/gapi/test/internal/gapi_transactions_test.cpp
@@ -2,11 +2,14 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 //
-// Copyright (C) 2018 Intel Corporation
+// Copyright (C) 2018 - 2020 Intel Corporation
 
 
 #include "../test_precomp.hpp"
+
 #include <ade/graph.hpp>
+#include <ade/typed_graph.hpp>
+
 #include "compiler/transactions.hpp"
 
 namespace opencv_test
@@ -33,10 +36,11 @@ struct SimpleGraph
 
     enum { node_nums = 5 };
     ade::Graph        graph;
-    ade::NodeHandle   fused_nh;                     /* For check that fusion  node is connected to the
-                                                               inputs of the prod and the outputs of the cons */
+    ade::NodeHandle   fused_nh;  // To check that the fusion node is connected to
+                                 // the inputs of the producer and the outputs of the consumer
     std::array<ade::NodeHandle, node_nums>     nhs;
     std::array<ade::EdgeHandle, node_nums - 1> ehs;
+    using Change = ChangeT<>;
     Change::List changes;
 
     SimpleGraph()
@@ -192,8 +196,6 @@ TEST_F(Transactions, DropNode_Commit)
 
 TEST_F(Transactions, Fusion_Commit)
 {
-    namespace C = Change;
-
     fuse();
     commit();
 
@@ -204,8 +206,6 @@ TEST_F(Transactions, Fusion_Commit)
 
 TEST_F(Transactions, Fusion_RollBack)
 {
-    namespace C = Change;
-
     fuse();
     rollback();
 
@@ -219,4 +219,151 @@ TEST_F(Transactions, Fusion_RollBack)
     }
 }
 
+namespace
+{
+    struct MetaInt {
+        static const char *name() { return "int_meta"; }
+        int x;
+    };
+
+    struct MetaStr {
+        static const char *name() { return "string_meta"; }
+        std::string s;
+    };
+}
+
+TEST(PreservedMeta, TestMetaCopy_Full)
+{
+    ade::Graph g;
+    ade::TypedGraph<MetaInt, MetaStr> tg(g);
+
+    auto src_nh = tg.createNode();
+    tg.metadata(src_nh).set(MetaInt{42});
+    tg.metadata(src_nh).set(MetaStr{"hi"});
+
+    auto dst_nh = tg.createNode();
+
+    EXPECT_FALSE(tg.metadata(dst_nh).contains<MetaInt>());
+    EXPECT_FALSE(tg.metadata(dst_nh).contains<MetaStr>());
+
+    // Here we specify all the meta types we know about the src node.
+    // Assume Preserved copies it all for us
+    Preserved<ade::NodeHandle, MetaInt, MetaStr>(g, src_nh).copyTo(g, dst_nh);
+
+    ASSERT_TRUE(tg.metadata(dst_nh).contains<MetaInt>());
+    ASSERT_TRUE(tg.metadata(dst_nh).contains<MetaStr>());
+
+    EXPECT_EQ(42,   tg.metadata(dst_nh).get<MetaInt>().x);
+    EXPECT_EQ("hi", tg.metadata(dst_nh).get<MetaStr>().s);
+}
+
+
+TEST(PreservedMeta, TestMetaCopy_Partial_Dst)
+{
+    ade::Graph g;
+    ade::TypedGraph<MetaInt, MetaStr> tg(g);
+
+    auto tmp_nh1 = tg.createNode();
+    auto tmp_nh2 = tg.createNode();
+    auto src_eh  = tg.link(tmp_nh1, tmp_nh2);
+
+    tg.metadata(src_eh).set(MetaInt{42});
+    tg.metadata(src_eh).set(MetaStr{"hi"});
+
+    auto tmp_nh3 = tg.createNode();
+    auto tmp_nh4 = tg.createNode();
+    auto dst_eh  = tg.link(tmp_nh3, tmp_nh4);
+
+    EXPECT_FALSE(tg.metadata(dst_eh).contains<MetaInt>());
+    EXPECT_FALSE(tg.metadata(dst_eh).contains<MetaStr>());
+
+    // Here we specify just a single meta type for the src node
+    // Assume Preserved copies only this type and nothing else
+    Preserved<ade::EdgeHandle, MetaStr>(g, src_eh).copyTo(g, dst_eh);
+
+    ASSERT_FALSE(tg.metadata(dst_eh).contains<MetaInt>());
+    ASSERT_TRUE (tg.metadata(dst_eh).contains<MetaStr>());
+
+    EXPECT_EQ("hi", tg.metadata(dst_eh).get<MetaStr>().s);
+}
+
+TEST(PreservedMeta, TestMetaCopy_Partial_Src)
+{
+    ade::Graph g;
+    ade::TypedGraph<MetaInt, MetaStr> tg(g);
+
+    auto src_nh = tg.createNode();
+    tg.metadata(src_nh).set(MetaInt{42});
+
+    auto dst_nh = tg.createNode();
+
+    EXPECT_FALSE(tg.metadata(dst_nh).contains<MetaInt>());
+    EXPECT_FALSE(tg.metadata(dst_nh).contains<MetaStr>());
+
+    // Here we specify all the meta types we know about the src node
+    // but the src node has just one of them.
+    // A valid situation, only MetaInt to be copied.
+    Preserved<ade::NodeHandle, MetaInt, MetaStr>(g, src_nh).copyTo(g, dst_nh);
+
+    ASSERT_TRUE (tg.metadata(dst_nh).contains<MetaInt>());
+    ASSERT_FALSE(tg.metadata(dst_nh).contains<MetaStr>());
+
+    EXPECT_EQ(42, tg.metadata(dst_nh).get<MetaInt>().x);
+}
+
+TEST(PreservedMeta, TestMetaCopy_Nothing)
+{
+    ade::Graph g;
+    ade::TypedGraph<MetaInt, MetaStr> tg(g);
+
+    auto src_nh = tg.createNode();
+    auto dst_nh = tg.createNode();
+
+    EXPECT_FALSE(tg.metadata(src_nh).contains<MetaInt>());
+    EXPECT_FALSE(tg.metadata(src_nh).contains<MetaStr>());
+
+    EXPECT_FALSE(tg.metadata(dst_nh).contains<MetaInt>());
+    EXPECT_FALSE(tg.metadata(dst_nh).contains<MetaStr>());
+
+    // Here we specify all the meta types we know about the src node
+    // but the src node has none of them, so nothing should be copied.
+    Preserved<ade::NodeHandle, MetaInt, MetaStr>(g, src_nh).copyTo(g, dst_nh);
+
+    ASSERT_FALSE(tg.metadata(dst_nh).contains<MetaInt>());
+    ASSERT_FALSE(tg.metadata(dst_nh).contains<MetaStr>());
+}
+
+TEST(PreservedMeta, DropEdge)
+{
+    ade::Graph g;
+    ade::TypedGraph<MetaInt, MetaStr> tg(g);
+
+    auto nh1 = tg.createNode();
+    auto nh2 = tg.createNode();
+    auto eh  = tg.link(nh1, nh2);
+
+    tg.metadata(eh).set(MetaInt{42});
+    tg.metadata(eh).set(MetaStr{"hi"});
+
+    // Drop an edge using the transaction API
+    using Change = ChangeT<MetaInt, MetaStr>;
+    Change::List changes;
+    changes.enqueue<Change::DropLink>(g, nh1, eh);
+
+    EXPECT_EQ(0u,      nh1->outNodes().size());
+    EXPECT_EQ(nullptr, eh);
+
+    // Now restore the edge and check if its meta was restored
+    changes.rollback(g);
+
+    ASSERT_EQ(1u,      nh1->outNodes().size());
+    eh = *nh1->outEdges().begin();
+
+    ASSERT_TRUE(tg.metadata(eh).contains<MetaInt>());
+    ASSERT_TRUE(tg.metadata(eh).contains<MetaStr>());
+
+    EXPECT_EQ(42,   tg.metadata(eh).get<MetaInt>().x);
+    EXPECT_EQ("hi", tg.metadata(eh).get<MetaStr>().s);
+}
+
 } // opencv_test
diff --git a/modules/gapi/test/own/conc_queue_tests.cpp b/modules/gapi/test/own/conc_queue_tests.cpp
index c3e6fd6e08..6e268f318c 100644
--- a/modules/gapi/test/own/conc_queue_tests.cpp
+++ b/modules/gapi/test/own/conc_queue_tests.cpp
@@ -55,7 +55,7 @@ TEST(ConcQueue, Clear)
     EXPECT_FALSE(q.try_pop(x));
 }
 
-// In this test, every writer thread produce its own range of integer
+// In this test, every writer thread produces its own range of integer
 // numbers, writing those to a shared queue.
 //
 // Every reader thread pops elements from the queue (until -1 is
@@ -64,12 +64,12 @@ TEST(ConcQueue, Clear)
 // Finally, the master thread waits for completion of all other
 // threads and verifies that all the necessary data is
 // produced/obtained.
+namespace
+{
 using StressParam = std::tuple<int           // Num writer threads
                               ,int           // Num elements per writer
                               ,int           // Num reader threads
                               ,std::size_t>; // Queue capacity
-namespace
-{
 constexpr int STOP_SIGN = -1;
 constexpr int BASE      = 1000;
 }
diff --git a/modules/gapi/test/own/last_written_value_tests.cpp b/modules/gapi/test/own/last_written_value_tests.cpp
new file mode 100644
index 0000000000..4bfb27f15f
--- /dev/null
+++ b/modules/gapi/test/own/last_written_value_tests.cpp
@@ -0,0 +1,156 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "../test_precomp.hpp"
+
+#include <unordered_set>
+#include <thread>
+
+#include "executor/last_value.hpp"
+
+namespace opencv_test {
+using namespace cv::gapi;
+
+TEST(LastValue, PushPop) {
+    own::last_written_value<int> v;
+    for (int i = 0; i < 100; i++) {
+        v.push(i);
+
+        int x = 1;
+        v.pop(x);
+        EXPECT_EQ(x, i);
+    }
+}
+
+TEST(LastValue, TryPop) {
+    own::last_written_value<int> v;
+    int x = 0;
+    EXPECT_FALSE(v.try_pop(x));
+
+    v.push(1);
+    EXPECT_TRUE(v.try_pop(x));
+    EXPECT_EQ(1, x);
+}
+
+TEST(LastValue, Clear) {
+    own::last_written_value<int> v;
+    v.push(42);
+    v.clear();
+
+    int x = 0;
+    EXPECT_FALSE(v.try_pop(x));
+}
+
+TEST(LastValue, Overwrite) {
+    own::last_written_value<int> v;
+    v.push(42);
+    v.push(0);
+
+    int x = -1;
+    EXPECT_TRUE(v.try_pop(x));
+    EXPECT_EQ(0, x);
+}
+
+// In this test, every writer thread produces its own range of integer
+// numbers, writing those to a shared last_written_value slot.
+//
+// Every reader thread pops elements from the slot (until -1 is
+// reached) and stores those in its own associated set.
+//
+// Finally, the master thread waits for completion of all other
+// threads and verifies that all the necessary data is
+// produced/obtained.
+namespace {
+using StressParam = std::tuple<int   // Num writer threads
+                              ,int   // Num elements per writer
+                              ,int>; // Num reader threads
+constexpr int STOP_SIGN = -1;
+constexpr int BASE      = 1000;
+}
+struct LastValue_: public ::testing::TestWithParam<StressParam> {
+    using V = own::last_written_value<int>;
+    using S = std::unordered_set<int>;
+
+    static void writer(int base, int writes, V& v) {
+        for (int i = 0; i < writes; i++) {
+            if (i % 2) {
+                std::this_thread::sleep_for(std::chrono::milliseconds{1});
+            }
+            v.push(base + i);
+        }
+        v.push(STOP_SIGN);
+    }
+
+    static void reader(V& v, S& s) {
+        int x = 0;
+        while (true) {
+            v.pop(x);
+            if (x == STOP_SIGN) {
+                // If this thread was lucky enough to read this STOP_SIGN,
+                // push it back to v so that other readers can
+                // read it too (note: due to the last_written_value
+                // semantics, a STOP_SIGN could simply be lost, i.e.
+                // overwritten).
+                v.push(STOP_SIGN);
+                return;
+            }
+            s.insert(x);
+        }
+    }
+};
+
+TEST_P(LastValue_, Test)
+{
+    int num_writers = 0;
+    int num_writes  = 0;
+    int num_readers = 0;
+    std::tie(num_writers, num_writes, num_readers) = GetParam();
+
+    CV_Assert(num_writers <   20);
+    CV_Assert(num_writes  < BASE);
+
+    V v;
+
+    // Start reader threads
+    std::vector<S> storage(num_readers);
+    std::vector<std::thread> readers;
+    for (S& s : storage) {
+        readers.emplace_back(reader, std::ref(v), std::ref(s));
+    }
+
+    // Start writer threads, also pre-generate reference numbers
+    S reference;
+    std::vector<std::thread> writers;
+    for (int w = 0; w < num_writers; w++) {
+        writers.emplace_back(writer, w*BASE, num_writes, std::ref(v));
+        for (int r = 0; r < num_writes; r++) {
+            reference.insert(w*BASE + r);
+        }
+    }
+
+    // Wait for completions
+    for (auto &t : readers) t.join();
+    for (auto &t : writers) t.join();
+
+    // Validate the result. Some values are read, and the values are
+    // correct (i.e. such values have been written)
+    std::size_t num_values_read = 0u;
+    for (const auto &s : storage) {
+        num_values_read += s.size();
+        for (auto &x : s) {
+            EXPECT_TRUE(reference.count(x) > 0);
+        }
+    }
+    // NOTE: Some combinations may end up with 0 values read;
+    // that is normal, the main thing is that the test shouldn't hang!
+    EXPECT_LE(0u, num_values_read);
+}
+
+INSTANTIATE_TEST_CASE_P(LastValueStress, LastValue_,
+                        Combine( Values(1, 2, 4, 8, 16)  // writers
+                               , Values(32, 96, 256)     // writes
+                               , Values(1, 2, 10)));     // readers
+} // namespace opencv_test
diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
index dfd2331bfd..69b85c0d34 100644
--- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp
+++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
@@ -2,11 +2,13 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 //
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2019-2020 Intel Corporation
 
 
 #include "../test_precomp.hpp"
 
+#include <thread> // sleep_for (Delay)
+
 #include <opencv2/gapi/cpu/core.hpp>
 #include <opencv2/gapi/cpu/imgproc.hpp>
 
@@ -18,6 +20,7 @@
 #include <opencv2/gapi/ocl/imgproc.hpp>
 
 #include <opencv2/gapi/streaming/cap.hpp>
+#include <opencv2/gapi/streaming/desync.hpp>
 
 namespace opencv_test
 {
@@ -100,6 +103,16 @@ struct GAPI_Streaming: public ::testing::TestWithParam<KernelPackage> {
     }
 };
 
+G_API_OP(Delay, <cv::GMat(cv::GMat, int)>, "org.opencv.test.delay") {
+    static cv::GMatDesc outMeta(const cv::GMatDesc &in, int) { return in; }
+};
+GAPI_OCV_KERNEL(OCVDelay, Delay) {
+    static void run(const cv::Mat &in, int ms, cv::Mat &out) {
+        std::this_thread::sleep_for(std::chrono::milliseconds{ms});
+        in.copyTo(out);
+    }
+};
+
 } // anonymous namespace
 
 TEST_P(GAPI_Streaming, SmokeTest_ConstInput_GMat)
@@ -794,6 +807,104 @@ TEST(GAPI_Streaming_Types, OutputVector)
     EXPECT_LT(0u, num_frames);
 }
 
+G_API_OP(DimsChans,
+         <std::tuple<cv::GArray<int>, cv::GOpaque<int>>(cv::GMat)>,
+         "test.streaming.dims_chans") {
+    static std::tuple<cv::GArrayDesc, cv::GOpaqueDesc> outMeta(const cv::GMatDesc &) {
+        return std::make_tuple(cv::empty_array_desc(),
+                               cv::empty_gopaque_desc());
+    }
+};
+
+GAPI_OCV_KERNEL(OCVDimsChans, DimsChans) {
+    static void run(const cv::Mat &in, std::vector<int> &ov, int &oi) {
+        ov = {in.cols, in.rows};
+        oi = in.channels();
+    }
+};
+
+struct GAPI_Streaming_TemplateTypes: ::testing::Test {
+    // There was a problem in GStreamingExecutor
+    // when some outputs were formally not used by the graph
+    // but still had to be in place, as the operation needs
+    // to produce them, and the host data type constructors
+    // were missing for GArray and GOpaque in this case.
+    // This test covers exactly that.
+
+    GAPI_Streaming_TemplateTypes() {
+        // Prepare everything for the test:
+        // Graph itself
+        blur = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+        cv::GMat blur_d = cv::gapi::streaming::desync(blur);
+        std::tie(vec, opq) = DimsChans::on(blur_d);
+
+        // Kernel package
+        pkg = cv::gapi::kernels<OCVDimsChans>();
+
+        // Input mat
+        in_mat = cv::Mat::eye(cv::Size(320,240), CV_8UC3);
+    }
+
+    cv::GMat in;
+    cv::GMat blur;
+    cv::GArray<int> vec;
+    cv::GOpaque<int> opq;
+    cv::gapi::GKernelPackage pkg;
+    cv::Mat in_mat;
+};
+
+TEST_F(GAPI_Streaming_TemplateTypes, UnusedVectorIsOK)
+{
+    // Declare graph without listing vec as output
+    auto sc = cv::GComputation(cv::GIn(in), cv::GOut(blur, opq))
+        .compileStreaming(cv::compile_args(pkg));
+    sc.setSource(cv::gin(in_mat));
+    sc.start();
+
+    cv::optional<cv::Mat> out_mat;
+    cv::optional<int> out_int;
+
+    int counter = 0;
+    while (sc.pull(cv::gout(out_mat, out_int))) {
+        if (counter++ == 10) {
+            // Stop the test after 10 iterations
+            sc.stop();
+            break;
+        }
+        GAPI_Assert(out_mat || out_int);
+        if (out_int) {
+            EXPECT_EQ(  3, out_int.value());
+        }
+    }
+}
+
+TEST_F(GAPI_Streaming_TemplateTypes, UnusedOpaqueIsOK)
+{
+    // Declare graph without listing opq as output
+    auto sc = cv::GComputation(cv::GIn(in), cv::GOut(blur, vec))
+        .compileStreaming(cv::compile_args(pkg));
+    sc.setSource(cv::gin(in_mat));
+    sc.start();
+
+    cv::optional<cv::Mat> out_mat;
+    cv::optional<std::vector<int> > out_vec;
+
+    int counter = 0;
+    while (sc.pull(cv::gout(out_mat, out_vec))) {
+        if (counter++ == 10) {
+            // Stop the test after 10 iterations
+            sc.stop();
+            break;
+        }
+        GAPI_Assert(out_mat || out_vec);
+        if (out_vec) {
+            EXPECT_EQ(320, out_vec.value()[0]);
+            EXPECT_EQ(240, out_vec.value()[1]);
+        }
+    }
+}
+
 struct GAPI_Streaming_Unit: public ::testing::Test {
     cv::Mat m;
 
@@ -882,7 +993,7 @@ TEST_F(GAPI_Streaming_Unit, StartStopStart_NoSetSource)
     EXPECT_NO_THROW(sc.setSource(cv::gin(m, m)));
     EXPECT_NO_THROW(sc.start());
     EXPECT_NO_THROW(sc.stop());
-    EXPECT_ANY_THROW(sc.start()); // Should fails since setSource was not called
+    EXPECT_ANY_THROW(sc.start()); // Should fail since setSource was not called
 }
 
 TEST_F(GAPI_Streaming_Unit, StartStopStress_Const)
@@ -1018,4 +1129,380 @@ TEST(Streaming, Python_Pull_Overload)
     EXPECT_FALSE(ccomp.running());
 }
 
+TEST(GAPI_Streaming_Desync, SmokeTest_Regular)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+    cv::GMat out1 = cv::gapi::Canny(tmp1, 32, 128, 3);
+
+    // FIXME: Unary desync should not require tie!
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out2 = tmp2 / cv::gapi::Sobel(tmp2, CV_8U, 1, 1);
+
+    cv::Mat test_in = cv::Mat::eye(cv::Size(32,32), CV_8UC3);
+    cv::Mat test_out1, test_out2;
+    cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+        .apply(cv::gin(test_in), cv::gout(test_out1, test_out2));
+}
+
+TEST(GAPI_Streaming_Desync, SmokeTest_Streaming)
+{
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+    cv::GMat out1 = cv::gapi::Canny(tmp1, 32, 128, 3);
+
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out2 = Delay::on(tmp2,10) / cv::gapi::Sobel(tmp2, CV_8U, 1, 1);
+
+    auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+        .compileStreaming(cv::compile_args(cv::gapi::kernels<OCVDelay>()));
+    auto sc_file = findDataFile("cv/video/768x576.avi");
+    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
+    sc.setSource(cv::gin(sc_src));
+    sc.start();
+
+    std::size_t out1_hits = 0u;
+    std::size_t out2_hits = 0u;
+    cv::optional<cv::Mat> test_out1, test_out2;
+    while (sc.pull(cv::gout(test_out1, test_out2))) {
+        GAPI_Assert(test_out1 || test_out2);
+        if (test_out1) out1_hits++;
+        if (test_out2) out2_hits++;
+    }
+    EXPECT_EQ(100u, out1_hits);      // out1 must be available for all frames
+    EXPECT_LE(out2_hits, out1_hits); // out2 must appear fewer times than out1
+    std::cout << "Got " << out1_hits << " out1's and " << out2_hits << " out2's" << std::endl;
+}
+
+TEST(GAPI_Streaming_Desync, SmokeTest_Streaming_TwoParts)
+{
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+    cv::GMat out1 = cv::gapi::Canny(tmp1, 32, 128, 3);
+
+    // Desynchronized path 1
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out2 = tmp2 / cv::gapi::Sobel(tmp2, CV_8U, 1, 1);
+
+    // Desynchronized path 2
+    cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out3 = 0.5*tmp3 + 0.5*cv::gapi::medianBlur(tmp3, 7);
+
+    // The code should compile and execute well (desynchronized parts don't cross)
+    auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2, out3))
+        .compileStreaming();
+    auto sc_file = findDataFile("cv/video/768x576.avi");
+    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
+    sc.setSource(cv::gin(sc_src));
+    sc.start();
+
+    std::size_t test_frames = 0u;
+    cv::optional<cv::Mat> test_out1, test_out2, test_out3;
+    while (sc.pull(cv::gout(test_out1, test_out2, test_out3))) {
+        GAPI_Assert(test_out1 || test_out2 || test_out3);
+        if (test_out1) {
+            // count frames only for synchronized output
+            test_frames++;
+        }
+    }
+    EXPECT_EQ(100u, test_frames);
+}
+
+TEST(GAPI_Streaming_Desync, Negative_NestedDesync_Tier0)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    // Desynchronized path 1
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out1 = cv::gapi::medianBlur(tmp2, 3);
+
+    // Desynchronized path 2, nested from 1 (directly from desync)
+    cv::GMat tmp3 = cv::gapi::streaming::desync(tmp2);
+    cv::GMat out2 = 0.5*tmp3;
+
+    // This shouldn't compile
+    EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+                     .compileStreaming());
+}
+
+TEST(GAPI_Streaming_Desync, Negative_NestedDesync_Tier1)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    // Desynchronized path 1
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out1 = cv::gapi::medianBlur(tmp2, 3);
+
+    // Desynchronized path 2, nested from 1 (indirectly from desync)
+    cv::GMat tmp3 = cv::gapi::streaming::desync(out1);
+    cv::GMat out2 = 0.5*tmp3;
+
+    // This shouldn't compile
+    EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+                     .compileStreaming());
+}
+
+TEST(GAPI_Streaming_Desync, Negative_CrossMainPart_Tier0)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    // Desynchronized path: depends on both tmp1 and tmp2
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out1 = 0.5*tmp1 + 0.5*tmp2;
+
+    // This shouldn't compile
+    EXPECT_ANY_THROW(cv::GComputation(in, out1).compileStreaming());
+}
+
+TEST(GAPI_Streaming_Desync, Negative_CrossMainPart_Tier1)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    // Desynchronized path: depends on both tmp1 and tmp2
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out1 = 0.5*tmp1 + 0.5*cv::gapi::medianBlur(tmp2, 3);
+
+    // This shouldn't compile
+    EXPECT_ANY_THROW(cv::GComputation(in, out1).compileStreaming());
+}
+
+TEST(GAPI_Streaming_Desync, Negative_CrossOtherDesync_Tier0)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    // Desynchronized path 1
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out1 = 0.5*tmp2;
+
+    // Desynchronized path 2 (depends on 1)
+    cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out2 = 0.5*tmp3 + tmp2;
+
+    // This shouldn't compile
+    EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+                     .compileStreaming());
+}
+
+TEST(GAPI_Streaming_Desync, Negative_CrossOtherDesync_Tier1)
+{
+    cv::GMat in;
+    cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    // Desynchronized path 1
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out1 = 0.5*tmp2;
+
+    // Desynchronized path 2 (depends on 1)
+    cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1);
+    cv::GMat out2 = 0.5*cv::gapi::medianBlur(tmp3,3) + 1.0*tmp2;
+
+    // This shouldn't compile
+    EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+                     .compileStreaming());
+}
+
+TEST(GAPI_Streaming_Desync, Negative_SynchronizedPull)
+{
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GMat out1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    cv::GMat tmp1 = cv::gapi::streaming::desync(out1);
+    cv::GMat out2 = 0.5*tmp1;
+
+    auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+        .compileStreaming();
+
+    auto sc_file = findDataFile("cv/video/768x576.avi");
+    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
+    sc.setSource(cv::gin(sc_src));
+    sc.start();
+
+    cv::Mat o1, o2;
+    EXPECT_ANY_THROW(sc.pull(cv::gout(o1, o2)));
+}
+
+TEST(GAPI_Streaming_Desync, UseSpecialPull)
+{
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GMat out1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    cv::GMat tmp1 = cv::gapi::streaming::desync(out1);
+    cv::GMat out2 = 0.5*tmp1;
+
+    auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
+        .compileStreaming();
+
+    auto sc_file = findDataFile("cv/video/768x576.avi");
+    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
+    sc.setSource(cv::gin(sc_src));
+    sc.start();
+
+    cv::optional<cv::Mat> o1, o2;
+    std::size_t num_frames = 0u;
+
+    while (sc.pull(cv::gout(o1, o2))) {
+        if (o1) num_frames++;
+    }
+    EXPECT_EQ(100u, num_frames);
+}
+
+G_API_OP(ProduceVector, <cv::GArray<int>(cv::GMat)>, "test.desync.vector") {
+    static cv::GArrayDesc outMeta(const cv::GMatDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+G_API_OP(ProduceOpaque, <cv::GOpaque<int>(cv::GMat)>, "test.desync.opaque") {
+    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
+        return cv::empty_gopaque_desc();
+    }
+};
+
+GAPI_OCV_KERNEL(OCVVector, ProduceVector) {
+    static void run(const cv::Mat& in, std::vector<int> &out) {
+        out = {in.cols, in.rows};
+    }
+};
+
+GAPI_OCV_KERNEL(OCVOpaque, ProduceOpaque) {
+    static void run(const cv::Mat &in, int &v) {
+        v = in.channels();
+    }
+};
+
+namespace {
+cv::GStreamingCompiled desyncTestObject() {
+    cv::GMat in;
+    cv::GMat blur = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    cv::GMat blur_d = cv::gapi::copy(cv::gapi::streaming::desync(blur));
+    cv::GMat d1 = Delay::on(blur_d, 10);
+    cv::GMat d2 = Delay::on(blur_d, 30);
+
+    cv::GArray<int>  vec = ProduceVector::on(d1);
+    cv::GOpaque<int> opq = ProduceOpaque::on(d2);
+
+    auto pkg = cv::gapi::kernels<OCVDelay, OCVVector, OCVOpaque>();
+    return cv::GComputation(cv::GIn(in), cv::GOut(blur, vec, opq))
+        .compileStreaming(cv::compile_args(pkg));
+}
+} // anonymous namespace
+
+TEST(GAPI_Streaming_Desync, MultipleDesyncOutputs_1) {
+    auto sc = desyncTestObject();
+    const cv::Mat in_mat = cv::Mat::eye(cv::Size(320,240), CV_8UC3);
+
+    sc.setSource(cv::gin(in_mat));
+    sc.start();
+
+    cv::optional<cv::Mat> out_mat;
+    cv::optional<std::vector<int> > out_vec;
+    cv::optional<int> out_int;
+
+    int counter = 0;
+    while (sc.pull(cv::gout(out_mat, out_vec, out_int))) {
+        if (counter++ == 1000) {
+            // Stop the test after 1000 iterations
+            sc.stop();
+            break;
+        }
+        GAPI_Assert(out_mat || out_vec || out_int);
+
+        // out_vec and out_int are on the same desynchronized path,
+        // so they MUST arrive together. If one is available, the other
+        // must be available as well.
+        if (out_vec) { ASSERT_TRUE(out_int.has_value()); }
+        if (out_int) { ASSERT_TRUE(out_vec.has_value()); }
+
+        if (out_vec || out_int) {
+            EXPECT_EQ(320, out_vec.value()[0]);
+            EXPECT_EQ(240, out_vec.value()[1]);
+            EXPECT_EQ(  3, out_int.value());
+        }
+    }
+}
+
+TEST(GAPI_Streaming_Desync, StartStop_Stress) {
+    auto sc = desyncTestObject();
+    const cv::Mat in_mat = cv::Mat::eye(cv::Size(320,240), CV_8UC3);
+
+    cv::optional<cv::Mat> out_mat;
+    cv::optional<std::vector<int> > out_vec;
+    cv::optional<int> out_int;
+
+    for (int i = 0; i < 10; i++) {
+        sc.setSource(cv::gin(in_mat));
+        sc.start();
+        int counter = 0;
+        while (counter++ < 100) {
+            sc.pull(cv::gout(out_mat, out_vec, out_int));
+            GAPI_Assert(out_mat || out_vec || out_int);
+            if (out_vec) { ASSERT_TRUE(out_int.has_value()); }
+            if (out_int) { ASSERT_TRUE(out_vec.has_value()); }
+        }
+        sc.stop();
+    }
+}
+
+GAPI_FLUID_KERNEL(FluidCopy, cv::gapi::core::GCopy, false) {
+    static const int Window = 1;
+
+    static void run(const cv::gapi::fluid::View &in,
+                          cv::gapi::fluid::Buffer &out) {
+        const uint8_t *in_ptr = in.InLineB(0);
+        uint8_t *out_ptr = out.OutLineB(0);
+
+        const auto in_type = CV_MAKETYPE(in.meta().depth, in.meta().chan);
+        const auto out_type = CV_MAKETYPE(out.meta().depth, out.meta().chan);
+        GAPI_Assert(in_type == out_type);
+        std::copy_n(in_ptr, in.length()*CV_ELEM_SIZE(in_type), out_ptr);
+    }
+};
+
+
+TEST(GAPI_Streaming_Desync, DesyncObjectConsumedByTwoIslandsViaSeparateDesync) {
+    // See comment in the implementation of cv::gapi::streaming::desync (.cpp)
+    cv::GMat in;
+    cv::GMat tmp = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    cv::GMat tmp1 = cv::gapi::streaming::desync(tmp);
+    cv::GMat out1 = cv::gapi::copy(tmp1); // run via the Fluid backend
+
+    cv::GMat tmp2 = cv::gapi::streaming::desync(tmp);
+    cv::GMat out2 = tmp2 * 0.5;           // run via the OCV backend
+
+    auto c = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2));
+    auto p = cv::gapi::kernels<FluidCopy>();
+
+    EXPECT_NO_THROW(c.compileStreaming(cv::compile_args(p)));
+}
+
+TEST(GAPI_Streaming_Desync, DesyncObjectConsumedByTwoIslandsViaSameDesync) {
+    // See comment in the implementation of cv::gapi::streaming::desync (.cpp)
+    cv::GMat in;
+    cv::GMat tmp = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
+
+    cv::GMat tmp1 = cv::gapi::streaming::desync(tmp);
+    cv::GMat out1 = cv::gapi::copy(tmp1); // run via the Fluid backend
+    cv::GMat out2 = out1 - 0.5*tmp1;      // run via the OCV backend
+
+    auto c = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2));
+    auto p = cv::gapi::kernels<FluidCopy>();
+
+    EXPECT_NO_THROW(c.compileStreaming(cv::compile_args(p)));
+}
+
 } // namespace opencv_test

From 48ccbe39b4024190c2be362f800363ad5160f912 Mon Sep 17 00:00:00 2001
From: AsyaPronina <155jj@mail.ru>
Date: Mon, 2 Nov 2020 18:54:19 +0300
Subject: [PATCH 060/152] Changed behaviour of cv::gapi::serialize,
 cv::gapi::deserialize for GCompileArgs

- cv::gapi::serialize bypasses (skips) compile arguments whose underlying types have no S11N specialization with serialize/deserialize callbacks
- cv::gapi::deserialize can accept an arbitrary number of serialized compile args in a stream, but returns only those requested by the user via the template parameter pack, provided they are present in the stream; requested types which are missing from the stream are silently skipped
- cv::gapi::deserialize accepts only types which can be deserialized (i.e. which have an S11N<T> specialization with user-provided callbacks)
- Added the cv::gapi::s11n::detail::has_S11N_spec<T> trait to distinguish compile arguments which have an S11N<T> specialization with user-provided callbacks
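
A rough round-trip sketch of the new behaviour (modelled on the new tests;
`MyType` is a hypothetical user type with CompileArgTag<> and S11N<>
specializations, `NoS11NType` is a hypothetical type with a CompileArgTag<>
only):

```
// Serialization silently bypasses NoS11NType (no S11N specialization),
// so only MyType's payload ends up in the byte stream.
std::vector<char> bytes =
    cv::gapi::serialize(cv::compile_args(MyType{}, NoS11NType{}));

// Deserialization returns only the requested types which are actually
// present in the stream -- here, just MyType.
cv::GCompileArgs args = cv::gapi::deserialize<cv::GCompileArgs, MyType>(bytes);
auto opt = cv::gapi::getCompileArg<MyType>(args); // opt.has_value() == true
```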
---
 modules/gapi/include/opencv2/gapi/gcommon.hpp |  13 +-
 modules/gapi/include/opencv2/gapi/s11n.hpp    |  48 +++-
 .../gapi/include/opencv2/gapi/s11n/base.hpp   |  16 +-
 .../src/backends/common/serialization.cpp     |   7 +-
 modules/gapi/test/s11n/gapi_s11n_tests.cpp    | 271 ++++++++++++++++--
 5 files changed, 310 insertions(+), 45 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/gcommon.hpp b/modules/gapi/include/opencv2/gapi/gcommon.hpp
index 2b260ed07c..0242020f6a 100644
--- a/modules/gapi/include/opencv2/gapi/gcommon.hpp
+++ b/modules/gapi/include/opencv2/gapi/gcommon.hpp
@@ -161,7 +161,9 @@ public:
     template<typename T, typename std::enable_if<!detail::is_compile_arg<T>::value, int>::type = 0>
     explicit GCompileArg(T &&t)
         : tag(detail::CompileArgTag<typename std::decay<T>::type>::tag())
-        , serializeF(&cv::gapi::s11n::detail::wrap_serialize<T>::serialize)
+        , serializeF(cv::gapi::s11n::detail::has_S11N_spec<T>::value ?
+                     &cv::gapi::s11n::detail::wrap_serialize<T>::serialize :
+                     nullptr)
         , arg(t)
     {
     }
@@ -178,7 +180,10 @@ public:
 
     void serialize(cv::gapi::s11n::IOStream& os) const
     {
-        serializeF(os, *this);
+        if (serializeF)
+        {
+            serializeF(os, *this);
+        }
     }
 
 private:
@@ -222,8 +227,8 @@ template<typename T> struct wrap_serialize
 {
     static void serialize(IOStream& os, const GCompileArg& arg)
     {
-        using decayed_type = typename std::decay<T>::type;
-        S11N<decayed_type>::serialize(os, arg.get<decayed_type>());
+        using DT = typename std::decay<T>::type;
+        S11N<DT>::serialize(os, arg.get<DT>());
     }
 };
 } // namespace detail
diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp
index 2fa4e51176..0e3e382328 100644
--- a/modules/gapi/include/opencv2/gapi/s11n.hpp
+++ b/modules/gapi/include/opencv2/gapi/s11n.hpp
@@ -265,23 +265,25 @@ void getRunArgByIdx (IIStream& is, cv::util::variant<Ts...> &v, uint32_t idx) {
 
 namespace detail
 {
-template<typename T> struct deserialize_arg;
+template<typename T> struct try_deserialize_comparg;
 
-template<> struct deserialize_arg<std::tuple<>> {
-static GCompileArg exec(cv::gapi::s11n::IIStream&, const std::string&) {
-        throw std::logic_error("Passed arg can't be deserialized!");
+template<> struct try_deserialize_comparg<std::tuple<>> {
+static cv::util::optional<GCompileArg> exec(const std::string&, cv::gapi::s11n::IIStream&) {
+        return { };
     }
 };
 
 template<typename T, typename... Types>
-struct deserialize_arg<std::tuple<T, Types...>> {
-static GCompileArg exec(cv::gapi::s11n::IIStream& is, const std::string& tag) {
+struct try_deserialize_comparg<std::tuple<T, Types...>> {
+static cv::util::optional<GCompileArg> exec(const std::string& tag, cv::gapi::s11n::IIStream& is) {
     if (tag == cv::detail::CompileArgTag<T>::tag()) {
-        return GCompileArg {
-            cv::gapi::s11n::detail::S11N<T>::deserialize(is)
-        };
+        static_assert(cv::gapi::s11n::detail::has_S11N_spec<T>::value,
+            "cv::gapi::deserialize<GCompileArgs, Types...> expects Types to have S11N "
+            "specializations with deserialization callbacks!");
+        return cv::util::optional<GCompileArg>(
+            GCompileArg { cv::gapi::s11n::detail::S11N<T>::deserialize(is) });
     }
-    return deserialize_arg<std::tuple<Types...>>::exec(is, tag);
+    return try_deserialize_comparg<std::tuple<Types...>>::exec(tag, is);
 }
 };
 
@@ -303,17 +305,35 @@ static GRunArg exec(cv::gapi::s11n::IIStream& is, uint32_t idx) {
 };
 
 template<typename... Types>
-cv::GCompileArgs getCompileArgs(const std::vector<char> &p) {
-    std::unique_ptr<cv::gapi::s11n::IIStream> pIs = cv::gapi::s11n::detail::getInStream(p);
-    cv::gapi::s11n::IIStream& is = *pIs;
+inline cv::util::optional<GCompileArg> tryDeserializeCompArg(const std::string& tag,
+                                                             const std::vector<char>& sArg) {
+    std::unique_ptr<cv::gapi::s11n::IIStream> pArgIs = cv::gapi::s11n::detail::getInStream(sArg);
+    return try_deserialize_comparg<std::tuple<Types...>>::exec(tag, *pArgIs);
+}
+
+template<typename... Types>
+cv::GCompileArgs getCompileArgs(const std::vector<char> &sArgs) {
     cv::GCompileArgs args;
 
+    std::unique_ptr<cv::gapi::s11n::IIStream> pIs = cv::gapi::s11n::detail::getInStream(sArgs);
+    cv::gapi::s11n::IIStream& is = *pIs;
+
     uint32_t sz = 0;
     is >> sz;
     for (uint32_t i = 0; i < sz; ++i) {
         std::string tag;
         is >> tag;
-        args.push_back(cv::gapi::detail::deserialize_arg<std::tuple<Types...>>::exec(is, tag));
+
+        std::vector<char> sArg;
+        is >> sArg;
+
+        cv::util::optional<GCompileArg> dArg =
+            cv::gapi::detail::tryDeserializeCompArg<Types...>(tag, sArg);
+
+        if (dArg.has_value())
+        {
+            args.push_back(dArg.value());
+        }
     }
 
     return args;
diff --git a/modules/gapi/include/opencv2/gapi/s11n/base.hpp b/modules/gapi/include/opencv2/gapi/s11n/base.hpp
index 6bf5d5fb0f..d9335ee9f7 100644
--- a/modules/gapi/include/opencv2/gapi/s11n/base.hpp
+++ b/modules/gapi/include/opencv2/gapi/s11n/base.hpp
@@ -8,6 +8,8 @@
 #define OPENCV_GAPI_S11N_BASE_HPP
 
 #include <opencv2/gapi/own/assert.hpp>
+#include <opencv2/gapi/own/exports.hpp>
+#include <type_traits> // std::is_base_of, used by has_S11N_spec below
 
 namespace cv {
 namespace gapi {
@@ -16,10 +17,14 @@ struct IOStream;
 struct IIStream;
 
 namespace detail {
-// Will be used along with default types if possible in specific cases (compile args, etc)
-// Note: actual implementation is defined by user
+
+struct NotImplemented {
+};
+
+// The default S11N for custom types is NotImplemented.
+// Don't subclass NotImplemented if you actually implement S11N.
 template<typename T>
-struct S11N {
+struct S11N: public NotImplemented {
     static void serialize(IOStream &, const T &) {
         GAPI_Assert(false && "No serialization routine is provided!");
     }
@@ -28,6 +33,11 @@ struct S11N {
     }
 };
 
+template<typename T> struct has_S11N_spec {
+    static constexpr bool value = !std::is_base_of<NotImplemented,
+                                        S11N<typename std::decay<T>::type>>::value;
+};
+
 } // namespace detail
 } // namespace s11n
 } // namespace gapi
diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp
index bb1864823f..592c03cfed 100644
--- a/modules/gapi/src/backends/common/serialization.cpp
+++ b/modules/gapi/src/backends/common/serialization.cpp
@@ -338,8 +338,13 @@ IIStream& operator>> (IIStream& is,       cv::gapi::wip::draw::Line &l) {
 
 IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg)
 {
+    ByteMemoryOutStream tmpS;
+    arg.serialize(tmpS);
+    std::vector<char> data = tmpS.data();
+
     os << arg.tag;
-    arg.serialize(os);
+    os << data;
+
     return os;
 }
 
diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
index 3fe632e449..2fc1e46253 100644
--- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp
+++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
@@ -4,32 +4,86 @@
 #include <opencv2/gapi/rmat.hpp>
 
 namespace {
-    struct MyCustomType {
-        int val;
-        std::string name;
-        std::vector<float> vec;
-        std::map<int, uint64_t> mmap;
-        bool operator==(const MyCustomType& other) const {
-            return val == other.val && name == other.name &&
-                   vec == other.vec && mmap == other.mmap;
-        }
-    };
-}
+struct EmptyCustomType { };
+
+struct SimpleCustomType {
+    bool val;
+    bool operator==(const SimpleCustomType& other) const {
+        return val == other.val;
+    }
+};
+
+struct SimpleCustomType2 {
+    int id;
+    bool operator==(const SimpleCustomType2& other) const {
+        return id == other.id;
+    }
+};
+
+struct MyCustomType {
+    int val;
+    std::string name;
+    std::vector<float> vec;
+    std::map<int, uint64_t> mmap;
+    bool operator==(const MyCustomType& other) const {
+        return val == other.val && name == other.name &&
+                vec == other.vec && mmap == other.mmap;
+    }
+};
+
+struct MyCustomTypeNoS11N {
+    char sym;
+    int id;
+    std::string name;
+
+    bool operator==(const MyCustomTypeNoS11N& other) const {
+        return sym == other.sym && id == other.id &&
+                name == other.name;
+    }
+};
+} // anonymous namespace
 
 namespace cv {
 namespace gapi {
 namespace s11n {
 namespace detail {
-    template<> struct S11N<MyCustomType> {
-        static void serialize(IOStream &os, const MyCustomType &p) {
-            os << p.val << p.name << p.vec << p.mmap;
-        }
-        static MyCustomType deserialize(IIStream &is) {
-            MyCustomType p;
-            is >> p.val >> p.name >> p.vec >> p.mmap;
-            return p;
-        }
-    };
+template<> struct S11N<EmptyCustomType> {
+    static void serialize(IOStream &, const EmptyCustomType &) { }
+    static EmptyCustomType deserialize(IIStream &) { return EmptyCustomType { }; }
+};
+
+template<> struct S11N<SimpleCustomType> {
+    static void serialize(IOStream &os, const SimpleCustomType &p) {
+        os << p.val;
+    }
+    static SimpleCustomType deserialize(IIStream &is) {
+        SimpleCustomType p;
+        is >> p.val;
+        return p;
+    }
+};
+
+template<> struct S11N<SimpleCustomType2> {
+    static void serialize(IOStream &os, const SimpleCustomType2 &p) {
+        os << p.id;
+    }
+    static SimpleCustomType2 deserialize(IIStream &is) {
+        SimpleCustomType2 p;
+        is >> p.id;
+        return p;
+    }
+};
+
+template<> struct S11N<MyCustomType> {
+    static void serialize(IOStream &os, const MyCustomType &p) {
+        os << p.val << p.name << p.vec << p.mmap;
+    }
+    static MyCustomType deserialize(IIStream &is) {
+        MyCustomType p;
+        is >> p.val >> p.name >> p.vec >> p.mmap;
+        return p;
+    }
+};
 } // namespace detail
 } // namespace s11n
 } // namespace gapi
@@ -38,9 +92,33 @@ namespace detail {
 
 namespace cv {
 namespace detail {
+template<> struct CompileArgTag<EmptyCustomType> {
+    static const char* tag() {
+        return "org.opencv.test.empty_custom_type";
+    }
+};
+
+template<> struct CompileArgTag<SimpleCustomType> {
+    static const char* tag() {
+        return "org.opencv.test.simple_custom_type";
+    }
+};
+
+template<> struct CompileArgTag<SimpleCustomType2> {
+    static const char* tag() {
+        return "org.opencv.test.simple_custom_type_2";
+    }
+};
+
 template<> struct CompileArgTag<MyCustomType> {
     static const char* tag() {
-        return "org.opencv.test.mycustomtype";
+        return "org.opencv.test.my_custom_type";
+    }
+};
+
+template<> struct CompileArgTag<MyCustomTypeNoS11N> {
+    static const char* tag() {
+        return "org.opencv.test.my_custom_type_no_s11n";
     }
 };
 } // namespace detail
@@ -586,7 +664,7 @@ TEST_F(S11N_Basic, Test_Custom_Type) {
     EXPECT_EQ(var, new_var);
 }
 
-TEST_F(S11N_Basic, Test_Custom_CompileArg) {
+TEST_F(S11N_Basic, Test_CompileArg) {
     MyCustomType customVar{1248, "World", {1280, 720, 640, 480}, {{5, 32434142342}, {7, 34242432}}};
 
     std::vector<char> sArgs = cv::gapi::serialize(cv::compile_args(customVar));
@@ -596,4 +674,151 @@ TEST_F(S11N_Basic, Test_Custom_CompileArg) {
     MyCustomType dCustomVar = cv::gapi::getCompileArg<MyCustomType>(dArgs).value();
     EXPECT_EQ(customVar, dCustomVar);
 }
+
+TEST_F(S11N_Basic, Test_CompileArg_Without_UserCallback) {
+    SimpleCustomType   customVar1 { false };
+    MyCustomTypeNoS11N customVar2 { 'z', 189, "Name" };
+    MyCustomType       customVar3 { 1248, "World", {1280, 720, 640, 480},
+                                    {{5, 32434142342}, {7, 34242432}} };
+
+    EXPECT_NO_THROW(cv::gapi::serialize(cv::compile_args(customVar1, customVar2, customVar3)));
+
+    std::vector<char> sArgs = cv::gapi::serialize(
+        cv::compile_args(customVar1, customVar2, customVar3));
+
+    GCompileArgs dArgs = cv::gapi::deserialize<GCompileArgs,
+                                               SimpleCustomType,
+                                               MyCustomType>(sArgs);
+
+    SimpleCustomType dCustomVar1 = cv::gapi::getCompileArg<SimpleCustomType>(dArgs).value();
+    MyCustomType     dCustomVar3 = cv::gapi::getCompileArg<MyCustomType>(dArgs).value();
+
+    EXPECT_EQ(customVar1, dCustomVar1);
+    EXPECT_EQ(customVar3, dCustomVar3);
+}
+
+TEST_F(S11N_Basic, Test_Deserialize_Only_Requested_CompileArgs) {
+    MyCustomType     myCustomVar { 1248, "World", {1280, 720, 640, 480},
+                                   {{5, 32434142342}, {7, 34242432}} };
+    SimpleCustomType simpleCustomVar { false };
+
+    std::vector<char> sArgs = cv::gapi::serialize(cv::compile_args(myCustomVar, simpleCustomVar));
+
+    GCompileArgs dArgs = cv::gapi::deserialize<GCompileArgs, MyCustomType>(sArgs);
+    EXPECT_EQ(1u, dArgs.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgs).value());
+
+    dArgs.clear();
+    dArgs = cv::gapi::deserialize<GCompileArgs, SimpleCustomType>(sArgs);
+    EXPECT_EQ(1u, dArgs.size());
+    EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg<SimpleCustomType>(dArgs).value());
+
+    dArgs.clear();
+    dArgs = cv::gapi::deserialize<GCompileArgs, SimpleCustomType2>(sArgs);
+    EXPECT_EQ(0u, dArgs.size());
+
+    dArgs.clear();
+    dArgs = cv::gapi::deserialize<GCompileArgs, MyCustomType, SimpleCustomType>(sArgs);
+    EXPECT_EQ(2u, dArgs.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgs).value());
+    EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg<SimpleCustomType>(dArgs).value());
+
+    SimpleCustomType2 simpleCustomVar2 { 5 };
+    std::vector<char> sArgs2 = cv::gapi::serialize(
+        cv::compile_args(myCustomVar, simpleCustomVar, simpleCustomVar2));
+    GCompileArgs dArgs2 = cv::gapi::deserialize<GCompileArgs,
+                                                MyCustomType,
+                                                SimpleCustomType2>(sArgs2);
+    EXPECT_EQ(2u, dArgs2.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgs2).value());
+    EXPECT_EQ(simpleCustomVar2, cv::gapi::getCompileArg<SimpleCustomType2>(dArgs2).value());
+}
+
+TEST_F(S11N_Basic, Test_Deserialize_CompileArgs_RandomOrder) {
+    SimpleCustomType  simpleCustomVar { false };
+    SimpleCustomType2 simpleCustomVar2 { 5 };
+
+    std::vector<char> sArgs = cv::gapi::serialize(
+        cv::compile_args(simpleCustomVar, simpleCustomVar2));
+    GCompileArgs dArgs = cv::gapi::deserialize<GCompileArgs,
+                                               // Here, the types of the arguments passed to
+                                               // serialize() are enumerated in reverse order
+                                               SimpleCustomType2,
+                                               SimpleCustomType>(sArgs);
+
+    EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg<SimpleCustomType>(dArgs).value());
+    EXPECT_EQ(simpleCustomVar2, cv::gapi::getCompileArg<SimpleCustomType2>(dArgs).value());
+}
+
+TEST_F(S11N_Basic, Test_CompileArgs_With_EmptyCompileArg) {
+    MyCustomType      myCustomVar { 1248, "World", {1280, 720, 640, 480},
+                                    {{5, 32434142342}, {7, 34242432}} };
+    SimpleCustomType  simpleCustomVar { false };
+    EmptyCustomType   emptyCustomVar {  };
+
+    //----{ emptyCustomVar, myCustomVar }----
+    std::vector<char> sArgs1 = cv::gapi::serialize(cv::compile_args(emptyCustomVar, myCustomVar));
+    GCompileArgs dArgsEmptyVar1 = cv::gapi::deserialize<GCompileArgs, EmptyCustomType>(sArgs1);
+    GCompileArgs dArgsMyVar1 = cv::gapi::deserialize<GCompileArgs, MyCustomType>(sArgs1);
+    GCompileArgs dArgsEmptyAndMyVars1 = cv::gapi::deserialize<GCompileArgs,
+                                                              EmptyCustomType,
+                                                              MyCustomType>(sArgs1);
+    EXPECT_EQ(1u, dArgsEmptyVar1.size());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgsEmptyVar1).has_value());
+    EXPECT_EQ(1u, dArgsMyVar1.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgsMyVar1).value());
+    EXPECT_EQ(2u, dArgsEmptyAndMyVars1.size());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgsEmptyAndMyVars1).has_value());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgsEmptyAndMyVars1).value());
+
+    //----{ myCustomVar, emptyCustomVar }----
+    std::vector<char> sArgs2 = cv::gapi::serialize(cv::compile_args(myCustomVar, emptyCustomVar));
+    GCompileArgs dArgsMyVar2 = cv::gapi::deserialize<GCompileArgs, MyCustomType>(sArgs2);
+    GCompileArgs dArgsEmptyVar2 = cv::gapi::deserialize<GCompileArgs, EmptyCustomType>(sArgs2);
+    GCompileArgs dArgsMyAndEmptyVars2 = cv::gapi::deserialize<GCompileArgs,
+                                                              MyCustomType,
+                                                              EmptyCustomType>(sArgs2);
+    EXPECT_EQ(1u, dArgsMyVar2.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgsMyVar2).value());
+    EXPECT_EQ(1u, dArgsEmptyVar2.size());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgsEmptyVar2).has_value());
+    EXPECT_EQ(2u, dArgsMyAndEmptyVars2.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgsMyAndEmptyVars2).value());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgsMyAndEmptyVars2).has_value());
+
+    //----{ myCustomVar, emptyCustomVar, simpleCustomVar }----
+    std::vector<char> sArgs3 = cv::gapi::serialize(
+        cv::compile_args(myCustomVar, emptyCustomVar, simpleCustomVar));
+    GCompileArgs dArgsMyVar3 = cv::gapi::deserialize<GCompileArgs, MyCustomType>(sArgs3);
+    GCompileArgs dArgsEmptyVar3 = cv::gapi::deserialize<GCompileArgs, EmptyCustomType>(sArgs3);
+    GCompileArgs dArgsSimpleVar3 = cv::gapi::deserialize<GCompileArgs, SimpleCustomType>(sArgs3);
+    GCompileArgs dArgsMyAndSimpleVars3 = cv::gapi::deserialize<GCompileArgs,
+                                                               MyCustomType,
+                                                               SimpleCustomType>(sArgs3);
+    GCompileArgs dArgs3 = cv::gapi::deserialize<GCompileArgs,
+                                                MyCustomType,
+                                                EmptyCustomType,
+                                                SimpleCustomType>(sArgs3);
+    EXPECT_EQ(1u, dArgsMyVar3.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgsMyVar3).value());
+    EXPECT_EQ(1u, dArgsEmptyVar3.size());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgsEmptyVar3).has_value());
+    EXPECT_EQ(1u, dArgsSimpleVar3.size());
+    EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg<SimpleCustomType>(dArgsSimpleVar3).value());
+    EXPECT_EQ(2u, dArgsMyAndSimpleVars3.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgsMyAndSimpleVars3).value());
+    EXPECT_EQ(simpleCustomVar,
+              cv::gapi::getCompileArg<SimpleCustomType>(dArgsMyAndSimpleVars3).value());
+    EXPECT_EQ(3u, dArgs3.size());
+    EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg<MyCustomType>(dArgs3).value());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgs3).has_value());
+    EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg<SimpleCustomType>(dArgs3).value());
+
+    //----{ emptyCustomVar }----
+    std::vector<char> sArgs4 = cv::gapi::serialize(cv::compile_args(emptyCustomVar));
+    GCompileArgs dArgsEmptyVar4 = cv::gapi::deserialize<GCompileArgs, EmptyCustomType>(sArgs4);
+    EXPECT_EQ(1u, dArgsEmptyVar4.size());
+    EXPECT_TRUE(cv::gapi::getCompileArg<EmptyCustomType>(dArgsEmptyVar4).has_value());
+}
+
 } // namespace opencv_test

From 099ad1a259f1f83af6da6826e78ebd645ee94004 Mon Sep 17 00:00:00 2001
From: Dmitry Matveev <dmitry.matveev@intel.com>
Date: Wed, 28 Oct 2020 16:35:38 +0300
Subject: [PATCH 061/152] G-API: Desync -- fix the queue saturation problem

Set the queue size to 1 for the Copy island right after the desync.
In this case, Copy won't read more data from a "last_written"
container than required while feeding the desynchronized path.

Sometimes Copy doesn't get fused into an island and behaves
on its own -- in this case, it reads more data in advance, so the
slow (desynchronized) part actually processes some data in-sync
(more than actually required).
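
For illustration, the affected topology looks like this (a sketch based on
the streaming tests; `Slow` stands for a hypothetical slow user kernel):

```
cv::GMat in;
cv::GMat blur   = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
// The desynchronized path starts with a copy -- the island which
// carries the DesyncIslEdge.
cv::GMat blur_d = cv::gapi::copy(cv::gapi::streaming::desync(blur));
cv::GMat out    = Slow::on(blur_d);
// With this change, the queues right after the copy island are capped
// at size 1, so the slow branch only consumes frames it can keep up with.
```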
---
 .../gapi/src/executor/gstreamingexecutor.cpp  | 44 ++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp
index 41cb83f710..653d20e712 100644
--- a/modules/gapi/src/executor/gstreamingexecutor.cpp
+++ b/modules/gapi/src/executor/gstreamingexecutor.cpp
@@ -13,6 +13,10 @@
 
 #include <opencv2/gapi/opencv_includes.hpp>
 
+#if !defined(GAPI_STANDALONE)
+#include <opencv2/gapi/core.hpp> // GCopy -- FIXME - to be removed!
+#endif // GAPI_STANDALONE
+
 #include "api/gproto_priv.hpp" // ptr(GRunArgP)
 #include "compiler/passes/passes.hpp"
 #include "backends/common/gbackend.hpp" // createMat
@@ -80,6 +84,10 @@ struct DataQueue {
     std::shared_ptr<cv::gimpl::stream::Q> q;
 };
 
+struct DesyncSpecialCase {
+    static const char *name() { return "DesyncSpecialCase"; }
+};
+
 std::vector<cv::gimpl::stream::Q*> reader_queues(      ade::Graph &g,
                                                  const ade::NodeHandle &obj)
 {
@@ -936,19 +944,53 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr<ade::Graph> &&
                                          , isl_exec
                                          });
                 // Initialize queues for every operation's input
-                ade::TypedGraph<DataQueue> qgr(*m_island_graph);
+                ade::TypedGraph<DataQueue, DesyncSpecialCase> qgr(*m_island_graph);
+                bool is_desync_start = false;
                 for (auto eh : nh->inEdges())
                 {
                     // ...only if the data is not compile-const
                     if (const_ins.count(eh->srcNode()) == 0) {
                         if (m_gim.metadata(eh).contains<DesyncIslEdge>()) {
                             qgr.metadata(eh).set(DataQueue(DataQueue::DESYNC));
+                            is_desync_start = true;
+                        } else if (qgr.metadata(eh).contains<DesyncSpecialCase>()) {
+                            // See comment below
+                            // Limit queue size to 1 in this case
+                            qgr.metadata(eh).set(DataQueue(1u));
                         } else {
                             qgr.metadata(eh).set(DataQueue(queue_capacity));
                         }
                         m_internal_queues.insert(qgr.metadata(eh).get<DataQueue>().q.get());
                     }
                 }
+                // WORKAROUND:
+                // Since we now know desync() is always followed by copy(),
+                // copy is always the island with the DesyncIslEdge.
+                // Mark the node's outputs in a special way so that the
+                // queue sizes following it are limited to 1 (to avoid copy
+                // reading more data in advance -- there's no other way for
+                // the underlying "slow" part to control it)
+                if (is_desync_start) {
+                    auto isl = m_gim.metadata(nh).get<FusedIsland>().object;
+                    // In the current implementation, such islands
+                    // _must_ start with copy
+                    GAPI_Assert(isl->in_ops().size() == 1u);
+#if !defined(GAPI_STANDALONE)
+                    GAPI_Assert(GModel::Graph(*m_orig_graph)
+                                .metadata(*isl->in_ops().begin())
+                                .get<cv::gimpl::Op>()
+                                .k.name == cv::gapi::core::GCopy::id());
+#endif // GAPI_STANDALONE
+                    for (auto out_nh : nh->outNodes()) {
+                        for (auto out_eh : out_nh->outEdges()) {
+                            qgr.metadata(out_eh).set(DesyncSpecialCase{});
+                        }
+                    }
+                }
+                // It is OK to do this here since the graph is visited in
+                // topological order and its consumers (those checking
+                // their input edges & initializing queues) are yet to be
+                // visited
             }
             break;
         case NodeKind::SLOT:

From 2a3cdba724aaf9871b988f8d7887c1899afb0f6d Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Tue, 3 Nov 2020 20:47:05 +0300
Subject: [PATCH 062/152] Merge pull request #18701 from
 TolyaTalamanov:at/introduce-config-for-ie-params

Expand ie::Params to support config

* Add config to IE params

* Add test

* Remove comments from tests

* Rename to pluginConfig

* Add one more overload for pluginConfig

* Add more tests
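
Usage sketch (following the new tests; `AgeGender`, the model paths, and the
device ID are placeholders):

```
auto pp = cv::gapi::ie::Params<AgeGender> {
    model_path, weights_path, device_id
}.cfgOutputLayers({"age_conv3", "prob"})
 .pluginConfig({{"ENFORCE_BF16", "NO"}}); // forwarded to the plugin's SetConfig()

// The same overloads are available for generic networks:
auto gen = cv::gapi::ie::Params<cv::gapi::Generic> {
    "age-gender-generic", model_path, weights_path, device_id
}.pluginConfig({{"ENFORCE_BF16", "NO"}});
```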
---
 .../gapi/include/opencv2/gapi/infer/ie.hpp    |  34 +++++-
 modules/gapi/src/backends/ie/giebackend.cpp   |   4 +-
 .../gapi/test/infer/gapi_infer_ie_test.cpp    | 102 ++++++++++++++++++
 3 files changed, 135 insertions(+), 5 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
index a8bc0bb05d..53e31fbb09 100644
--- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
@@ -11,6 +11,7 @@
 #include <string>
 #include <array>
 #include <tuple> // tuple, tuple_size
+#include <map>
 
 #include <opencv2/gapi/opencv_includes.hpp>
 #include <opencv2/gapi/util/any.hpp>
@@ -42,6 +43,8 @@ enum class TraitAs: int
     IMAGE   //!< G-API traits an associated cv::Mat as an image so creates an "image" blob (NCHW/NHWC, etc)
 };
 
+using IEConfig = std::map<std::string, std::string>;
+
 namespace detail {
     struct ParamDesc {
         std::string model_path;
@@ -63,6 +66,7 @@ namespace detail {
         enum class Kind { Load, Import };
         Kind kind;
         bool is_generic;
+        IEConfig config;
     };
 } // namespace detail
 
@@ -86,7 +90,8 @@ public:
               , std::tuple_size<typename Net::InArgs>::value  // num_in
               , std::tuple_size<typename Net::OutArgs>::value // num_out
               , detail::ParamDesc::Kind::Load
-              , false} {
+              , false
+              , {}} {
     };
 
     Params(const std::string &model,
@@ -95,7 +100,8 @@ public:
               , std::tuple_size<typename Net::InArgs>::value  // num_in
               , std::tuple_size<typename Net::OutArgs>::value // num_out
               , detail::ParamDesc::Kind::Import
-              , false} {
+              , false
+              , {}} {
     };
 
     Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
@@ -121,6 +127,16 @@ public:
         return *this;
     }
 
+    Params& pluginConfig(IEConfig&& cfg) {
+        desc.config = std::move(cfg);
+        return *this;
+    }
+
+    Params& pluginConfig(const IEConfig& cfg) {
+        desc.config = cfg;
+        return *this;
+    }
+
     // BEGIN(G-API's network parametrization API)
     GBackend      backend()    const { return cv::gapi::ie::backend();  }
     std::string   tag()        const { return Net::tag(); }
@@ -138,15 +154,25 @@ public:
            const std::string &model,
            const std::string &weights,
            const std::string &device)
-        : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true}, m_tag(tag) {
+        : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true, {}}, m_tag(tag) {
     };
 
     Params(const std::string &tag,
            const std::string &model,
            const std::string &device)
-        : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true}, m_tag(tag) {
+        : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true, {}}, m_tag(tag) {
     };
 
+    Params& pluginConfig(IEConfig&& cfg) {
+        desc.config = std::move(cfg);
+        return *this;
+    }
+
+    Params& pluginConfig(const IEConfig& cfg) {
+        desc.config = cfg;
+        return *this;
+    }
+
     // BEGIN(G-API's network parametrization API)
     GBackend      backend()    const { return cv::gapi::ie::backend();  }
     std::string   tag()        const { return m_tag; }
diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp
index c66fa44361..85c0236ff1 100644
--- a/modules/gapi/src/backends/ie/giebackend.cpp
+++ b/modules/gapi/src/backends/ie/giebackend.cpp
@@ -185,7 +185,8 @@ struct IEUnit {
             inputs  = net.getInputsInfo();
             outputs = net.getOutputsInfo();
         } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) {
-            this_plugin  = cv::gimpl::ie::wrap::getPlugin(params);
+            this_plugin = cv::gimpl::ie::wrap::getPlugin(params);
+            this_plugin.SetConfig(params.config);
             this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params);
             // FIXME: ICNNetwork returns InputsDataMap/OutputsDataMap,
             // but ExecutableNetwork returns ConstInputsDataMap/ConstOutputsDataMap
@@ -225,6 +226,7 @@ struct IEUnit {
             // FIXME: In case importNetwork for fill inputs/outputs need to obtain ExecutableNetwork, but
             // for loadNetwork they can be obtained by using readNetwork
             non_const_this->this_plugin  = cv::gimpl::ie::wrap::getPlugin(params);
+            non_const_this->this_plugin.SetConfig(params.config);
             non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin, net, params);
         }
 
diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
index 3125705365..547c7c7d33 100644
--- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp
+++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
@@ -403,6 +403,108 @@ TEST(TestAgeGenderIE, GenericInfer)
     normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
 }
 
+TEST(TestAgeGenderIE, InvalidConfigGeneric)
+{
+    initDLDTDataPath();
+
+    std::string model_path   = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    std::string device_id    = "CPU";
+
+    // Configure & run G-API
+    cv::GMat in;
+    GInferInputs inputs;
+    inputs["data"] = in;
+
+    auto outputs = cv::gapi::infer<cv::gapi::Generic>("age-gender-generic", inputs);
+    auto age     = outputs.at("age_conv3");
+    auto gender  = outputs.at("prob");
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<cv::gapi::Generic>{"age-gender-generic",
+                                                       model_path,
+                                                       weights_path,
+                                                       device_id}.pluginConfig({{"unsupported_config", "some_value"}});
+
+    EXPECT_ANY_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}},
+                     cv::compile_args(cv::gapi::networks(pp))));
+}
+
+TEST(TestAgeGenderIE, CPUConfigGeneric)
+{
+    initDLDTDataPath();
+
+    std::string model_path   = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    std::string device_id    = "CPU";
+
+    // Configure & run G-API
+    cv::GMat in;
+    GInferInputs inputs;
+    inputs["data"] = in;
+
+    auto outputs = cv::gapi::infer<cv::gapi::Generic>("age-gender-generic", inputs);
+    auto age     = outputs.at("age_conv3");
+    auto gender  = outputs.at("prob");
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<cv::gapi::Generic>{"age-gender-generic",
+                                                       model_path,
+                                                       weights_path,
+                                                       device_id}.pluginConfig({{"ENFORCE_BF16", "NO"}});
+
+    EXPECT_NO_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}},
+                    cv::compile_args(cv::gapi::networks(pp))));
+}
+
+TEST(TestAgeGenderIE, InvalidConfig)
+{
+    initDLDTDataPath();
+
+    std::string model_path   = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    std::string device_id    = "CPU";
+
+    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
+    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
+
+    cv::GMat in;
+    cv::GMat age, gender;
+    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<AgeGender> {
+        model_path, weights_path, device_id
+    }.cfgOutputLayers({ "age_conv3", "prob" }).pluginConfig({{"unsupported_config", "some_value"}});
+
+    EXPECT_ANY_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}},
+                     cv::compile_args(cv::gapi::networks(pp))));
+}
+
+TEST(TestAgeGenderIE, CPUConfig)
+{
+    initDLDTDataPath();
+
+    std::string model_path   = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    std::string device_id    = "CPU";
+
+    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
+    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
+
+    cv::GMat in;
+    cv::GMat age, gender;
+    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<AgeGender> {
+        model_path, weights_path, device_id
+    }.cfgOutputLayers({ "age_conv3", "prob" }).pluginConfig({{"ENFORCE_BF16", "NO"}});
+
+    EXPECT_NO_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}},
+                    cv::compile_args(cv::gapi::networks(pp))));
+}
+
 } // namespace opencv_test
 
 #endif //  HAVE_INF_ENGINE

From a110ede0a253a88c3b925a40b42d8c1013fbdfe4 Mon Sep 17 00:00:00 2001
From: Dmitry Matveev <dmitry.matveev@intel.com>
Date: Tue, 3 Nov 2020 21:39:16 +0300
Subject: [PATCH 063/152] Merge pull request #18716 from
 dmatveev:dm/upstream_onnx

* G-API: Introduce ONNX backend for Inference

- Basic operations are implemented (Infer, -ROI, -List, -List2);
- Implemented automatic preprocessing for ONNX models;
- Test suite is extended with `OPENCV_GAPI_ONNX_MODEL_PATH` env for test data
  (test data is an ONNX Model Zoo repo snapshot);
- Fixed kernel lookup logic in core G-API:
  - Lookup NN kernels not in the default package, but in the associated
    backend's aux package. Now two NN backends can work in the same graph.
- Added Infer SSD demo and a combined ONNX/IE demo;
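
A minimal configuration sketch (mirroring the new samples; `Emotions` and
the model path are user-side placeholders):

```
G_API_NET(Emotions, <cv::GMat(cv::GMat)>, "emotions-recognition");

auto emo_net = cv::gapi::onnx::Params<Emotions> {
    "emotion-ferplus.onnx"   // path to the ONNX model
}.cfgNormalize({false});     // model accepts the 0..255 range in FP32

auto networks = cv::gapi::networks(emo_net); // passed to compile_args()
```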

* G-API/ONNX: Fix some of CMake issues

Co-authored-by: Pashchenkov, Maxim <maxim.pashchenkov@intel.com>
---
 CMakeLists.txt                                |  17 +
 cmake/FindONNX.cmake                          |  36 +
 modules/gapi/CMakeLists.txt                   |  13 +
 .../gapi/include/opencv2/gapi/infer/onnx.hpp  | 138 +++
 modules/gapi/samples/infer_ie_onnx_hybrid.cpp | 195 ++++
 modules/gapi/samples/infer_ssd_onnx.cpp       | 213 ++++
 .../gapi/src/backends/onnx/gonnxbackend.cpp   | 955 ++++++++++++++++++
 .../gapi/src/backends/onnx/gonnxbackend.hpp   |  56 +
 modules/gapi/src/compiler/passes/kernels.cpp  |  25 +-
 .../gapi/test/infer/gapi_infer_onnx_test.cpp  | 278 +++++
 10 files changed, 1920 insertions(+), 6 deletions(-)
 create mode 100644 cmake/FindONNX.cmake
 create mode 100644 modules/gapi/include/opencv2/gapi/infer/onnx.hpp
 create mode 100644 modules/gapi/samples/infer_ie_onnx_hybrid.cpp
 create mode 100644 modules/gapi/samples/infer_ssd_onnx.cpp
 create mode 100644 modules/gapi/src/backends/onnx/gonnxbackend.cpp
 create mode 100644 modules/gapi/src/backends/onnx/gonnxbackend.hpp
 create mode 100644 modules/gapi/test/infer/gapi_infer_onnx_test.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4350b2fe2a..f3ca52fd4b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -439,6 +439,9 @@ OCV_OPTION(WITH_ANDROID_MEDIANDK "Use Android Media NDK for Video I/O (Android)"
 OCV_OPTION(WITH_TENGINE "Include Arm Inference Tengine support" OFF
   VISIBLE_IF (ARM OR AARCH64) AND (UNIX OR ANDROID) AND NOT IOS
   VERIFY HAVE_TENGINE)
+OCV_OPTION(WITH_ONNX "Include Microsoft ONNX Runtime support" OFF
+  VISIBLE_IF TRUE
+  VERIFY HAVE_ONNX)
 
 # OpenCV build components
 # ===================================================
@@ -775,6 +778,11 @@ if(WITH_QUIRC)
   add_subdirectory(3rdparty/quirc)
   set(HAVE_QUIRC TRUE)
 endif()
+
+if(WITH_ONNX)
+  include(cmake/FindONNX.cmake)
+endif()
+
 # ----------------------------------------------------------------------------
 # OpenCV HAL
 # ----------------------------------------------------------------------------
@@ -1556,6 +1564,15 @@ if(WITH_OPENCL OR HAVE_OPENCL)
   endif()
 endif()
 
+if(WITH_ONNX OR HAVE_ONNX)
+  status("")
+  status("  ONNX:"     HAVE_ONNX THEN "YES" ELSE "NO")
+  if(HAVE_ONNX)
+    status("    Include path:"  ONNX_INCLUDE_DIR THEN "${ONNX_INCLUDE_DIR}" ELSE "NO")
+    status("    Link libraries:" ONNX_LIBRARIES THEN "${ONNX_LIBRARIES}" ELSE "NO")
+  endif()
+endif()
+
 # ========================== python ==========================
 if(BUILD_opencv_python2)
   status("")
diff --git a/cmake/FindONNX.cmake b/cmake/FindONNX.cmake
new file mode 100644
index 0000000000..51aa77b460
--- /dev/null
+++ b/cmake/FindONNX.cmake
@@ -0,0 +1,36 @@
+ocv_clear_vars(HAVE_ONNX)
+
+set(ONNXRT_ROOT_DIR "" CACHE PATH "ONNX Runtime install directory")
+
+# For now, check the old name ORT_INSTALL_DIR
+if(ORT_INSTALL_DIR AND NOT ONNXRT_ROOT_DIR)
+  set(ONNXRT_ROOT_DIR ${ORT_INSTALL_DIR})
+endif()
+
+if(ONNXRT_ROOT_DIR)
+  find_library(ORT_LIB onnxruntime
+    ${ONNXRT_ROOT_DIR}/lib
+    CMAKE_FIND_ROOT_PATH_BOTH)
+  find_path(ORT_INCLUDE onnxruntime_cxx_api.h
+    ${ONNXRT_ROOT_DIR}/include/onnxruntime/core/session
+    CMAKE_FIND_ROOT_PATH_BOTH)
+endif()
+
+if(ORT_LIB AND ORT_INCLUDE)
+  set(HAVE_ONNX TRUE)
+  # For CMake output only
+  set(ONNX_LIBRARIES "${ORT_LIB}" CACHE STRING "ONNX Runtime libraries")
+  set(ONNX_INCLUDE_DIR "${ORT_INCLUDE}" CACHE STRING "ONNX Runtime include path")
+
+  # Link target with associated interface headers
+  set(ONNX_LIBRARY "onnxruntime" CACHE STRING "ONNX Link Target")
+  ocv_add_library(${ONNX_LIBRARY} SHARED IMPORTED)
+  set_target_properties(${ONNX_LIBRARY} PROPERTIES
+                        INTERFACE_INCLUDE_DIRECTORIES ${ORT_INCLUDE}
+                        IMPORTED_LOCATION ${ORT_LIB}
+                        IMPORTED_IMPLIB ${ORT_LIB})
+endif()
+
+if(NOT HAVE_ONNX)
+  ocv_clear_vars(HAVE_ONNX ORT_LIB ORT_INCLUDE)
+endif()
diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index acfbd1d70e..d95f255951 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -131,6 +131,9 @@ set(gapi_srcs
     src/backends/ie/giebackend.cpp
     src/backends/ie/giebackend/giewrapper.cpp
 
+    # ONNX Backend.
+    src/backends/onnx/gonnxbackend.cpp
+
     # Render Backend.
     src/backends/render/grenderocv.cpp
     src/backends/render/ft_render.cpp
@@ -205,10 +208,20 @@ if(HAVE_PLAIDML)
   ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${PLAIDML_INCLUDE_DIRS})
 endif()
 
+
 if(WIN32)
   # Required for htonl/ntohl on Windows
   ocv_target_link_libraries(${the_module} PRIVATE wsock32 ws2_32)
 endif()
 
+if(HAVE_ONNX)
+  ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY})
+  ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1)
+  if(TARGET opencv_test_gapi)
+    ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1)
+    ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY})
+  endif()
+endif()
+
 ocv_add_perf_tests()
 ocv_add_samples()
diff --git a/modules/gapi/include/opencv2/gapi/infer/onnx.hpp b/modules/gapi/include/opencv2/gapi/infer/onnx.hpp
new file mode 100644
index 0000000000..d61ceb3dca
--- /dev/null
+++ b/modules/gapi/include/opencv2/gapi/infer/onnx.hpp
@@ -0,0 +1,138 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#ifndef OPENCV_GAPI_INFER_ONNX_HPP
+#define OPENCV_GAPI_INFER_ONNX_HPP
+
+#include <unordered_map>
+#include <string>
+#include <array>
+#include <tuple> // tuple, tuple_size
+
+#include <opencv2/gapi/opencv_includes.hpp>
+#include <opencv2/gapi/util/any.hpp>
+
+#include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
+#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
+
+namespace cv {
+namespace gapi {
+namespace onnx {
+
+GAPI_EXPORTS cv::gapi::GBackend backend();
+
+enum class TraitAs: int {
+    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor
+            // and passes dimensions as-is
+    IMAGE   //!< G-API treats an associated cv::Mat as an image so
+            // it creates an "image" blob (NCHW/NHWC, etc)
+};
+
+using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
+                                          std::unordered_map<std::string, cv::Mat> &)>;
+
+
+namespace detail {
+struct ParamDesc {
+    std::string model_path;
+
+    // NB: num_* may differ from the topology's real input/output port numbers
+    // (e.g. in the case of the topology's partial execution)
+    std::size_t num_in;  // How many inputs are defined in the operation
+    std::size_t num_out; // How many outputs are defined in the operation
+
+    // NB: Here order follows the `Net` API
+    std::vector<std::string> input_names;
+    std::vector<std::string> output_names;
+
+    using ConstInput = std::pair<cv::Mat, TraitAs>;
+    std::unordered_map<std::string, ConstInput> const_inputs;
+
+    std::vector<cv::Scalar> mean;
+    std::vector<cv::Scalar> stdev;
+
+    std::vector<cv::GMatDesc> out_metas;
+    PostProc custom_post_proc;
+
+    std::vector<bool> normalize;
+};
+} // namespace detail
+
+template<typename Net>
+struct PortCfg {
+    using In = std::array
+        < std::string
+        , std::tuple_size<typename Net::InArgs>::value >;
+    using Out = std::array
+        < std::string
+        , std::tuple_size<typename Net::OutArgs>::value >;
+    using NormCoefs = std::array
+        < cv::Scalar
+        , std::tuple_size<typename Net::InArgs>::value >;
+    using Normalize = std::array
+        < bool
+        , std::tuple_size<typename Net::InArgs>::value >;
+};
+
+template<typename Net> class Params {
+public:
+    Params(const std::string &model) {
+        desc.model_path = model;
+        desc.num_in  = std::tuple_size<typename Net::InArgs>::value;
+        desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
+    };
+
+    // BEGIN(G-API's network parametrization API)
+    GBackend      backend() const { return cv::gapi::onnx::backend();  }
+    std::string   tag()     const { return Net::tag(); }
+    cv::util::any params()  const { return { desc }; }
+    // END(G-API's network parametrization API)
+
+    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
+        desc.input_names.assign(ll.begin(), ll.end());
+        return *this;
+    }
+
+    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) {
+        desc.output_names.assign(ll.begin(), ll.end());
+        return *this;
+    }
+
+    Params<Net>& constInput(const std::string &layer_name,
+                            const cv::Mat &data,
+                            TraitAs hint = TraitAs::TENSOR) {
+        desc.const_inputs[layer_name] = {data, hint};
+        return *this;
+    }
+
+    Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
+                            const typename PortCfg<Net>::NormCoefs &s) {
+        desc.mean.assign(m.begin(), m.end());
+        desc.stdev.assign(s.begin(), s.end());
+        return *this;
+    }
+
+    Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &outs,
+                             const PostProc &pp) {
+        desc.out_metas = outs;
+        desc.custom_post_proc = pp;
+        return *this;
+    }
+
+    Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &n) {
+        desc.normalize.assign(n.begin(), n.end());
+        return *this;
+    }
+
+protected:
+    detail::ParamDesc desc;
+};
+
+} // namespace onnx
+} // namespace gapi
+} // namespace cv
+
+#endif // OPENCV_GAPI_INFER_ONNX_HPP
diff --git a/modules/gapi/samples/infer_ie_onnx_hybrid.cpp b/modules/gapi/samples/infer_ie_onnx_hybrid.cpp
new file mode 100644
index 0000000000..b8612a25ca
--- /dev/null
+++ b/modules/gapi/samples/infer_ie_onnx_hybrid.cpp
@@ -0,0 +1,195 @@
+#include <chrono>
+#include <iomanip>
+
+#include "opencv2/imgproc.hpp"
+#include "opencv2/highgui.hpp"
+
+#include "opencv2/gapi.hpp"
+#include "opencv2/gapi/core.hpp"
+#include "opencv2/gapi/imgproc.hpp"
+#include "opencv2/gapi/infer.hpp"
+#include "opencv2/gapi/infer/ie.hpp"
+#include "opencv2/gapi/infer/onnx.hpp"
+#include "opencv2/gapi/cpu/gcpukernel.hpp"
+#include "opencv2/gapi/streaming/cap.hpp"
+
+namespace {
+const std::string keys =
+    "{ h help |   | print this help message }"
+    "{ input  |   | Path to an input video file }"
+    "{ fdm    |   | IE face detection model IR }"
+    "{ fdw    |   | IE face detection model weights }"
+    "{ fdd    |   | IE face detection device }"
+    "{ emom   |   | ONNX emotions recognition model }"
+    "{ output |   | (Optional) Path to an output video file }"
+    ;
+} // namespace
+
+namespace custom {
+G_API_NET(Faces, <cv::GMat(cv::GMat)>, "face-detector");
+G_API_NET(Emotions, <cv::GMat(cv::GMat)>, "emotions-recognition");
+
+G_API_OP(PostProc, <cv::GArray<cv::Rect>(cv::GMat, cv::GMat)>, "custom.fd_postproc") {
+    static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+GAPI_OCV_KERNEL(OCVPostProc, PostProc) {
+    static void run(const cv::Mat &in_ssd_result,
+                    const cv::Mat &in_frame,
+                    std::vector<cv::Rect> &out_faces) {
+        const int MAX_PROPOSALS = 200;
+        const int OBJECT_SIZE   =   7;
+        const cv::Size upscale = in_frame.size();
+        const cv::Rect surface({0,0}, upscale);
+
+        out_faces.clear();
+
+        const float *data = in_ssd_result.ptr<float>();
+        for (int i = 0; i < MAX_PROPOSALS; i++) {
+            const float image_id   = data[i * OBJECT_SIZE + 0]; // batch id
+            const float confidence = data[i * OBJECT_SIZE + 2];
+            const float rc_left    = data[i * OBJECT_SIZE + 3];
+            const float rc_top     = data[i * OBJECT_SIZE + 4];
+            const float rc_right   = data[i * OBJECT_SIZE + 5];
+            const float rc_bottom  = data[i * OBJECT_SIZE + 6];
+
+            if (image_id < 0.f) {  // indicates end of detections
+                break;
+            }
+            if (confidence < 0.5f) {
+                continue;
+            }
+
+            cv::Rect rc;
+            rc.x      = static_cast<int>(rc_left   * upscale.width);
+            rc.y      = static_cast<int>(rc_top    * upscale.height);
+            rc.width  = static_cast<int>(rc_right  * upscale.width)  - rc.x;
+            rc.height = static_cast<int>(rc_bottom * upscale.height) - rc.y;
+            out_faces.push_back(rc & surface);
+        }
+    }
+};
+//! [Postproc]
+
+} // namespace custom
+
+namespace labels {
+// Labels as defined in
+// https://github.com/onnx/models/tree/master/vision/body_analysis/emotion_ferplus
+//
+const std::string emotions[] = {
+    "neutral", "happiness", "surprise", "sadness", "anger", "disgust", "fear", "contempt"
+};
+namespace {
+template<typename Iter>
+std::vector<float> softmax(Iter begin, Iter end) {
+    std::vector<float> prob(end - begin, 0.f);
+    std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); });
+    float sum = std::accumulate(prob.begin(), prob.end(), 0.0f);
+    for (int i = 0; i < static_cast<int>(prob.size()); i++)
+        prob[i] /= sum;
+    return prob;
+}
+
+void DrawResults(cv::Mat &frame,
+                 const std::vector<cv::Rect> &faces,
+                 const std::vector<cv::Mat>  &out_emotions) {
+    CV_Assert(faces.size() == out_emotions.size());
+
+    for (auto it = faces.begin(); it != faces.end(); ++it) {
+        const auto idx = std::distance(faces.begin(), it);
+        const auto &rc = *it;
+
+        const float *emotions_data = out_emotions[idx].ptr<float>();
+        auto sm = softmax(emotions_data, emotions_data + 8);
+        const auto emo_id = std::max_element(sm.begin(), sm.end()) - sm.begin();
+
+        const int ATTRIB_OFFSET = 15;
+        cv::rectangle(frame, rc, {0, 255, 0},  4);
+        cv::putText(frame, emotions[emo_id],
+                    cv::Point(rc.x, rc.y - ATTRIB_OFFSET),
+                    cv::FONT_HERSHEY_COMPLEX_SMALL,
+                    1,
+                    cv::Scalar(0, 0, 255));
+
+        std::cout << emotions[emo_id] << " at " << rc << std::endl;
+    }
+}
+} // anonymous namespace
+} // namespace labels
+
+int main(int argc, char *argv[])
+{
+    cv::CommandLineParser cmd(argc, argv, keys);
+    if (cmd.has("help")) {
+        cmd.printMessage();
+        return 0;
+    }
+    const std::string input = cmd.get<std::string>("input");
+    const std::string output = cmd.get<std::string>("output");
+
+    // OpenVINO FD parameters here
+    auto det_net = cv::gapi::ie::Params<custom::Faces> {
+        cmd.get<std::string>("fdm"),   // read cmd args: path to topology IR
+        cmd.get<std::string>("fdw"),   // read cmd args: path to weights
+        cmd.get<std::string>("fdd"),   // read cmd args: device specifier
+    };
+
+    // ONNX Emotions parameters here
+    auto emo_net = cv::gapi::onnx::Params<custom::Emotions> {
+        cmd.get<std::string>("emom"),   // read cmd args: path to the ONNX model
+    }.cfgNormalize({false}); // model accepts 0..255 range in FP32
+
+    auto kernels = cv::gapi::kernels<custom::OCVPostProc>();
+    auto networks = cv::gapi::networks(det_net, emo_net);
+
+    cv::GMat in;
+    cv::GMat bgr = cv::gapi::copy(in);
+    cv::GMat frame = cv::gapi::streaming::desync(bgr);
+    cv::GMat detections = cv::gapi::infer<custom::Faces>(frame);
+    cv::GArray<cv::Rect> faces = custom::PostProc::on(detections, frame);
+    cv::GArray<cv::GMat> emotions = cv::gapi::infer<custom::Emotions>(faces, frame);
+    auto pipeline = cv::GComputation(cv::GIn(in), cv::GOut(bgr, faces, emotions))
+        .compileStreaming(cv::compile_args(kernels, networks));
+
+    auto in_src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input);
+    pipeline.setSource(cv::gin(in_src));
+    pipeline.start();
+
+    cv::util::optional<cv::Mat>               out_frame;
+    cv::util::optional<std::vector<cv::Rect>> out_faces;
+    cv::util::optional<std::vector<cv::Mat>>  out_emotions;
+
+    cv::Mat               last_mat;
+    std::vector<cv::Rect> last_faces;
+    std::vector<cv::Mat>  last_emotions;
+
+    cv::VideoWriter writer;
+
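+    // With desync, graph outputs may arrive at different rates; cache the
+    // last seen detections/emotions and redraw them on every fresh frame.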
+    while (pipeline.pull(cv::gout(out_frame, out_faces, out_emotions))) {
+        if (out_faces && out_emotions) {
+            last_faces = *out_faces;
+            last_emotions = *out_emotions;
+        }
+        if (out_frame) {
+            last_mat = *out_frame;
+            labels::DrawResults(last_mat, last_faces, last_emotions);
+
+            if (!output.empty()) {
+                if (!writer.isOpened()) {
+                    const auto sz = cv::Size{last_mat.cols, last_mat.rows};
+                    writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz);
+                    CV_Assert(writer.isOpened());
+                }
+                writer << last_mat;
+            }
+        }
+        if (!last_mat.empty()) {
+            cv::imshow("Out", last_mat);
+            cv::waitKey(1);
+        }
+    }
+    return 0;
+}
diff --git a/modules/gapi/samples/infer_ssd_onnx.cpp b/modules/gapi/samples/infer_ssd_onnx.cpp
new file mode 100644
index 0000000000..fc26ca1e36
--- /dev/null
+++ b/modules/gapi/samples/infer_ssd_onnx.cpp
@@ -0,0 +1,213 @@
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+
+#include <opencv2/gapi.hpp>
+#include <opencv2/gapi/core.hpp>
+#include <opencv2/gapi/imgproc.hpp>
+#include <opencv2/gapi/infer.hpp>
+#include <opencv2/gapi/render.hpp>
+#include <opencv2/gapi/infer/onnx.hpp>
+#include <opencv2/gapi/cpu/gcpukernel.hpp>
+#include <opencv2/gapi/streaming/cap.hpp>
+#include <opencv2/highgui.hpp>
+
+namespace custom {
+
+G_API_NET(ObjDetector,   <cv::GMat(cv::GMat)>, "object-detector");
+
+using GDetections = cv::GArray<cv::Rect>;
+using GSize       = cv::GOpaque<cv::Size>;
+using GPrims      = cv::GArray<cv::gapi::wip::draw::Prim>;
+
+G_API_OP(GetSize, <GSize(cv::GMat)>, "sample.custom.get-size") {
+    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
+        return cv::empty_gopaque_desc();
+    }
+};
+G_API_OP(ParseSSD, <GDetections(cv::GMat, GSize)>, "sample.custom.parse-ssd") {
+    static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+G_API_OP(BBoxes, <GPrims(GDetections)>, "sample.custom.b-boxes") {
+    static cv::GArrayDesc outMeta(const cv::GArrayDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+GAPI_OCV_KERNEL(OCVGetSize, GetSize) {
+    static void run(const cv::Mat &in, cv::Size &out) {
+        out = {in.cols, in.rows};
+    }
+};
+GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
+    static void run(const cv::Mat &in_ssd_result,
+                    const cv::Size &in_parent_size,
+                    std::vector<cv::Rect> &out_objects) {
+        const auto &in_ssd_dims = in_ssd_result.size;
+        CV_Assert(in_ssd_dims.dims() == 4u);
+
+        const int MAX_PROPOSALS = in_ssd_dims[2];
+        const int OBJECT_SIZE   = in_ssd_dims[3];
+
+        CV_Assert(OBJECT_SIZE  == 7); // fixed SSD object size
+
+        const cv::Rect surface({0,0}, in_parent_size);
+
+        out_objects.clear();
+
+        const float *data = in_ssd_result.ptr<float>();
+        for (int i = 0; i < MAX_PROPOSALS; i++) {
+            const float image_id   = data[i * OBJECT_SIZE + 0];
+            const float label      = data[i * OBJECT_SIZE + 1];
+            const float confidence = data[i * OBJECT_SIZE + 2];
+            const float rc_left    = data[i * OBJECT_SIZE + 3];
+            const float rc_top     = data[i * OBJECT_SIZE + 4];
+            const float rc_right   = data[i * OBJECT_SIZE + 5];
+            const float rc_bottom  = data[i * OBJECT_SIZE + 6];
+            (void) label; // unused
+
+            if (image_id < 0.f) {
+                break;    // marks end-of-detections
+            }
+            if (confidence < 0.5f) {
+                continue; // skip objects with low confidence
+            }
+
+            // map relative coordinates to the original image scale
+            cv::Rect rc;
+            rc.x      = static_cast<int>(rc_left   * in_parent_size.width);
+            rc.y      = static_cast<int>(rc_top    * in_parent_size.height);
+            rc.width  = static_cast<int>(rc_right  * in_parent_size.width)  - rc.x;
+            rc.height = static_cast<int>(rc_bottom * in_parent_size.height) - rc.y;
+            out_objects.emplace_back(rc & surface);
+        }
+    }
+};
+GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
+    // This kernel converts the rectangles into G-API's
+    // rendering primitives
+    static void run(const std::vector<cv::Rect> &in_obj_rcs,
+                          std::vector<cv::gapi::wip::draw::Prim> &out_prims) {
+        out_prims.clear();
+        const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) {
+            return cv::gapi::wip::draw::Rect(rc, clr, 2);
+        };
+        for (auto &&rc : in_obj_rcs) {
+            out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0)));   // green
+        }
+
+        std::cout << "Detections:";
+        for (auto &&rc : in_obj_rcs) std::cout << ' ' << rc;
+        std::cout << std::endl;
+    }
+};
+
+} // namespace custom
+
+namespace {
+void remap_ssd_ports(const std::unordered_map<std::string, cv::Mat> &onnx,
+                           std::unordered_map<std::string, cv::Mat> &gapi) {
+    // Assemble ONNX-processed outputs back to a single 1x1x200x7 blob
+    // to preserve compatibility with OpenVINO-based SSD pipeline
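+    // Each SSD record is 7 floats: [image_id, label, confidence, left,
+    // top, right, bottom], while this model's detection_boxes come packed
+    // as [y1, x1, y2, x2] -- hence the index reshuffle in the loop below.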
+    const cv::Mat &num_detections = onnx.at("num_detections:0");
+    const cv::Mat &detection_boxes = onnx.at("detection_boxes:0");
+    const cv::Mat &detection_scores = onnx.at("detection_scores:0");
+    const cv::Mat &detection_classes = onnx.at("detection_classes:0");
+
+    GAPI_Assert(num_detections.depth() == CV_32F);
+    GAPI_Assert(detection_boxes.depth() == CV_32F);
+    GAPI_Assert(detection_scores.depth() == CV_32F);
+    GAPI_Assert(detection_classes.depth() == CV_32F);
+
+    cv::Mat &ssd_output = gapi.at("detection_output");
+
+    const int num_objects = static_cast<int>(num_detections.ptr<float>()[0]);
+    const float *in_boxes = detection_boxes.ptr<float>();
+    const float *in_scores = detection_scores.ptr<float>();
+    const float *in_classes = detection_classes.ptr<float>();
+    float *ptr = ssd_output.ptr<float>();
+
+    for (int i = 0; i < num_objects; i++) {
+        ptr[0] = 0.f;               // "image_id"
+        ptr[1] = in_classes[i];     // "label"
+        ptr[2] = in_scores[i];      // "confidence"
+        ptr[3] = in_boxes[4*i + 1]; // left
+        ptr[4] = in_boxes[4*i + 0]; // top
+        ptr[5] = in_boxes[4*i + 3]; // right
+        ptr[6] = in_boxes[4*i + 2]; // bottom
+
+        ptr      += 7;
+        in_boxes += 4;
+    }
+    if (num_objects < ssd_output.size[2]-1) {
+        // put a -1 mark at the end of output blob if there is space left
+        ptr[0] = -1.f;
+    }
+}
+} // anonymous namespace
+
+
+const std::string keys =
+    "{ h help | | Print this help message }"
+    "{ input  | | Path to the input video file }"
+    "{ output | | (Optional) path to output video file }"
+    "{ detm   | | Path to an ONNX SSD object detection model (.onnx) }"
+    ;
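+// Example launch line (the binary and file names below are placeholders):
+//   ./example_gapi_infer_ssd_onnx --input=road.mp4 --detm=ssd_mobilenet.onnx --output=out.avi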
+
+int main(int argc, char *argv[])
+{
+    cv::CommandLineParser cmd(argc, argv, keys);
+    if (cmd.has("help")) {
+        cmd.printMessage();
+        return 0;
+    }
+
+    // Prepare parameters first
+    const std::string input = cmd.get<std::string>("input");
+    const std::string output = cmd.get<std::string>("output");
+    const auto obj_model_path = cmd.get<std::string>("detm");
+
+    auto obj_net = cv::gapi::onnx::Params<custom::ObjDetector>{obj_model_path}
+        .cfgOutputLayers({"detection_output"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1,1,200,7}}}, remap_ssd_ports);
+    auto kernels = cv::gapi::kernels< custom::OCVGetSize
+                                    , custom::OCVParseSSD
+                                    , custom::OCVBBoxes>();
+    auto networks = cv::gapi::networks(obj_net);
+
+    // Now build the graph
+    cv::GMat in;
+    auto blob = cv::gapi::infer<custom::ObjDetector>(in);
+    auto  rcs = custom::ParseSSD::on(blob, custom::GetSize::on(in));
+    auto  out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs));
+    cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
+        .compileStreaming(cv::compile_args(kernels, networks));
+
+    auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));
+
+    // The execution part
+    pipeline.setSource(std::move(inputs));
+    pipeline.start();
+
+    cv::VideoWriter writer;
+
+    cv::Mat outMat;
+    while (pipeline.pull(cv::gout(outMat))) {
+        cv::imshow("Out", outMat);
+        cv::waitKey(1);
+        if (!output.empty()) {
+            if (!writer.isOpened()) {
+                const auto sz = cv::Size{outMat.cols, outMat.rows};
+                writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz);
+                CV_Assert(writer.isOpened());
+            }
+            writer << outMat;
+        }
+    }
+    return 0;
+}
diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
new file mode 100644
index 0000000000..c81e032969
--- /dev/null
+++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
@@ -0,0 +1,955 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "precomp.hpp"
+#include "backends/onnx/gonnxbackend.hpp"
+
+#ifdef HAVE_ONNX
+
+#include <ade/util/algorithm.hpp> // any_of
+#include <ade/util/zip_range.hpp>
+#include <opencv2/gapi/infer.hpp>
+#include <opencv2/gapi/own/convert.hpp>
+
+#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
+
+namespace cv {
+namespace gimpl {
+namespace onnx {
+
+enum TensorPosition : int {
+    INPUT,
+    OUTPUT
+};
+
+struct TensorInfo {
+    TensorInfo() = default;
+    explicit TensorInfo(const Ort::TensorTypeAndShapeInfo& info)
+        : dims(info.GetShape())
+        , type(info.GetElementType())
+        , is_dynamic(std::find(dims.begin(), dims.end(), -1) != dims.end()) {
+        if (!is_dynamic) {
+            size = std::accumulate(dims.begin(),
+                                   dims.end(),
+                                   static_cast<int64_t>(1),
+                                   std::multiplies<int64_t>());
+        }
+        // Heuristic: check if the tensor is a grayscale input (1 x 1 x H x W)
+        if (dims.size() == 4u
+            && dims[0]  == 1
+            && dims[1]  == 1
+            && dims[2]   > 1
+            && dims[3]   > 1) {
+            is_grayscale = true;
+        }
+    }
+
+    std::string name;
+    std::vector<int64_t> dims;
+    ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED;
+    int64_t size = -1;
+
+    bool normalize = true;
+
+    bool is_dynamic = false;
+    bool is_grayscale = false;
+
+    struct MeanStdev {
+        cv::Scalar mean;
+        cv::Scalar stdev;
+    };
+    cv::util::optional<MeanStdev> mstd;
+};
+
+class ONNXCompiled {
+    // ONNX Resources
+    // NOTE: Env must outlive the session, otherwise it segfaults.
+    Ort::Env this_env{nullptr};
+    Ort::Session this_session{nullptr};
+    Ort::MemoryInfo this_memory_info{nullptr};
+
+    std::vector<TensorInfo> in_tensor_info;
+    std::vector<TensorInfo> out_tensor_info;
+    bool is_dynamic = false;
+
+    // G-API <Net> description
+    gapi::onnx::detail::ParamDesc params;
+
+    // Input/output tensor information
+    std::vector<TensorInfo> getTensorInfo(TensorPosition pos);
+
+    // Run-time data structures
+    std::vector<cv::Mat> in_data;
+    std::vector<cv::Mat> out_data;
+
+    void Run(const std::vector<cv::Mat>& ins,
+             const std::vector<cv::Mat>& outs);
+
+public:
+    explicit ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp);
+
+    // Extract the information about output layer #i
+    cv::GMatDesc outMeta(int i) const;
+
+    // Assign input/output info
+    std::size_t numInputs() const { return params.num_in; }
+    std::size_t numOutputs() const { return params.num_out; }
+    void setInput(int i, const cv::Mat &m);
+    void setOutput(int i, cv::Mat &m);
+    cv::Mat allocOutput(int i) const;
+
+    // Run with the assigned inputs/outputs
+    void run();
+};
+
+} // namespace onnx
+} // namespace gimpl
+} // namespace cv
+
+namespace {
+
+inline std::vector<const char*> getCharNames(const std::vector<std::string>& names) {
+    std::vector<const char*> out_vec;
+    for (const auto& el : names) {
+        out_vec.push_back(el.data());
+    }
+    return out_vec;
+}
+
+inline int getIdxByName(const std::vector<cv::gimpl::onnx::TensorInfo>& info, const std::string& name) {
+    // FIXME: Cache the ordering
+    const auto it = std::find_if(info.begin(), info.end(), [&](const cv::gimpl::onnx::TensorInfo &i) {
+            return i.name == name;
+        });
+    GAPI_Assert(it != info.end());
+    return std::distance(info.begin(), it);
+}
+
+inline int toCV(ONNXTensorElementDataType prec) {
+    switch (prec) {
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U;
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F;
+    default: GAPI_Assert(false && "Unsupported data type");
+    }
+    return -1;
+}
+
+inline std::vector<int> toCV(const std::vector<int64_t> &vsz) {
+    std::vector<int> result;
+    result.reserve(vsz.size());
+    for (auto sz : vsz) {
+        result.push_back(ade::util::checked_cast<int>(sz));
+    }
+    return result;
+}
+
+inline cv::Mat toCV(Ort::Value &v) {
+    auto info = v.GetTensorTypeAndShapeInfo();
+    return cv::Mat(toCV(info.GetShape()),
+                   toCV(info.GetElementType()),
+                   reinterpret_cast<void*>(v.GetTensorMutableData<uint8_t>()));
+}
+
+inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
+    return cv::to_own<int64_t>(sz);
+}
+
+inline void preprocess(const cv::Mat& src,
+                       const cv::gimpl::onnx::TensorInfo& ti,
+                             cv::Mat& dst) {
+    GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U);
+
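+    // Two paths below: 32F inputs are passed through as-is, while 8U
+    // images go through (optional) color conversion, resize, type
+    // conversion with optional mean/stdev normalization, and an
+    // HWC-to-CHW repack when the model expects a planar layout.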
+    if (src.depth() == CV_32F) {
+        // Just pass the tensor as-is.
+        // No layout or dimension transformations done here!
+        // TODO: This needs to be aligned across all NN backends.
+        GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data");
+        GAPI_Assert(toORT(src.size) == ti.dims && "32F tensor dimensions should match with NN input");
+        GAPI_Assert(!ti.is_dynamic && "Dynamic inputs are not supported for this case");
+        dst = src;
+    } else {
+        // 8U input: full preprocessing path
+        GAPI_Assert(src.depth()   == CV_8U && "Only 8U data type is supported for preproc");
+        GAPI_Assert(ti.dims.size() == 4u && "Only NCHW/NHWC layouts are supported for preproc");
+
+        const auto ddepth = toCV(ti.type);
+        GAPI_Assert((ddepth == CV_8U || ddepth == CV_32F)
+                    && "Only 8U and 32F model input is supported for 8U data");
+
+        // Assess the expected input layout
+        const bool is_hwc = [&](int ch) {
+            if (ti.is_grayscale)       return false; // 1,1,h,w
+            else if (ti.dims[3] == ch) return true;  // _,_,_,c
+            else if (ti.dims[1] == ch) return false; // _,c,_,_
+            else cv::util::throw_error(std::logic_error("Couldn't identify input tensor layout"));
+        } (src.channels());
+
+        int new_c = src.channels();
+        cv::Mat csc;
+        if (ti.is_grayscale && new_c == 3) {
+            cv::cvtColor(src, csc, cv::COLOR_BGR2GRAY);
+            new_c = 1;
+        } else {
+            csc = src;
+        }
+
+        // NHWC vs NCHW
+        int new_h = -1, new_w = -1;
+        if (ti.is_dynamic) {
+            // reuse h & w from the input image
+            new_h = src.rows;
+            new_w = src.cols;
+        } else {
+            // take h & w from the ONNX tensor info
+            new_h = ti.dims[is_hwc ? 1 : 2];
+            new_w = ti.dims[is_hwc ? 2 : 3];
+        }
+        GAPI_Assert(new_h != -1 && new_w != -1);
+
+        cv::Mat rsz, pp;
+        cv::resize(csc, rsz, cv::Size(new_w, new_h));
+        if (src.depth() == CV_8U && ddepth == CV_32F) {
+            rsz.convertTo(pp, ddepth, ti.normalize ? 1.f / 255 : 1.f);
+            if (ti.mstd.has_value()) {
+                pp -= ti.mstd->mean;
+                pp /= ti.mstd->stdev;
+            }
+        } else {
+            pp = rsz;
+        }
+
+        if (!is_hwc && new_c > 1) {
+            // Convert to CHW
+            dst.create(cv::Size(new_w, new_h * new_c), ddepth);
+            std::vector<cv::Mat> planes(new_c);
+            for (int ch = 0; ch < new_c; ++ch) {
+                planes[ch] = dst.rowRange(ch * new_h, (ch + 1) * new_h);
+            }
+            cv::split(pp, planes);
+        } else {
+            // Keep HWC
+            dst = pp;
+        }
+
+        // Ensure dst is a tensor shape (not a 2D image)
+        if (ti.is_dynamic) {
+            // Reshape to input dimensions
+            const std::vector<int> out_dims = is_hwc
+                ? std::vector<int>{1, new_h, new_w, new_c}
+                : std::vector<int>{1, new_c, new_h, new_w};
+            dst = dst.reshape(1, out_dims);
+        } else {
+            // Reshape to ONNX dimensions (no -1s there!)
+            dst = dst.reshape(1, toCV(ti.dims));
+        }
+    }
+}
+
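+// Wraps an existing cv::Mat buffer into an Ort::Value without copying
+// the data; the Mat must remain alive (and continuous) while the
+// session runs on this tensor.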
+template <typename T>
+inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
+                               const cv::gimpl::onnx::TensorInfo& tensor_params,
+                               const cv::Mat& data) {
+    (void) tensor_params;
+    auto ort_dims = toORT(data.size);
+    return Ort::Value::CreateTensor<T>(memory_info,
+                                       const_cast<T*>(data.ptr<T>()),
+                                       data.total(),
+                                       ort_dims.data(),
+                                       ort_dims.size());
+}
+
+inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
+                               const cv::gimpl::onnx::TensorInfo& tensor_params,
+                               const cv::Mat& data) {
+    GAPI_Assert(data.isContinuous());
+    switch (tensor_params.type) {
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
+        return createTensor<uint8_t>(memory_info, tensor_params, data);
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
+        return createTensor<float>(memory_info, tensor_params, data);
+    default:
+        GAPI_Assert(false && "Unsupported data type");
+    }
+    return Ort::Value{nullptr};
+}
+
+struct ONNXUnit {
+    static const char *name() { return "ONNXModelConfig"; }
+
+    std::shared_ptr<cv::gimpl::onnx::ONNXCompiled> oc;
+
+    explicit ONNXUnit(const cv::gapi::onnx::detail::ParamDesc &pp)
+        : oc(new cv::gimpl::onnx::ONNXCompiled(pp)) {
+    }
+};
+
+struct ONNXCallContext {
+    // Input parameters passed to an inference operation.
+    std::vector<cv::GArg> args;
+
+    //FIXME: avoid converting arguments from the internal representation to the OpenCV one on
+    //each OCV kernel call. (This can be achieved by two one-time conversions in
+    //GCPUExecutable::run: once on entry for input and output arguments, and once before return
+    //for output arguments only.)
+    //FIXME: check if the above applies to this backend (taken from CPU)
+    std::unordered_map<std::size_t, cv::GRunArgP> results;
+
+    // Generic accessor API
+    template<typename T>
+    const T& inArg(std::size_t input) { return args.at(input).get<T>(); }
+
+    // Syntax sugar
+    const cv::Mat&   inMat(std::size_t input) {
+        return inArg<cv::Mat>(input);
+    }
+    cv::Mat&         outMatR(std::size_t output) {
+        return *cv::util::get<cv::Mat*>(results.at(output));
+    }
+
+    template<typename T> std::vector<T>& outVecR(std::size_t output) { // FIXME: the same issue
+        return outVecRef(output).wref<T>();
+    }
+    cv::detail::VectorRef& outVecRef(std::size_t output) {
+        return cv::util::get<cv::detail::VectorRef>(results.at(output));
+    }
+};
+
+struct ONNXCallable {
+    static const char *name() { return "ONNXRequestCallable"; }
+    using Run = std::function<void(const ONNXUnit &, ONNXCallContext &)>;
+    Run run;
+};
+
+struct KImpl {
+    cv::gimpl::CustomMetaFunction::CM customMetaFunc;
+    ONNXCallable::Run run;
+};
+
+// FIXME: Is there a way to take a typed graph (our GModel),
+// and create a new typed graph _ATOP_ of that (by extending with a couple of
+// new types?).
+// Alternatively, is there a way to compose typed graphs?
+//
+// If not, we need to introduce that!
+using GONNXModel = ade::TypedGraph
+    < cv::gimpl::Protocol
+    , cv::gimpl::Op
+    , cv::gimpl::NetworkParams
+    , cv::gimpl::CustomMetaFunction
+    , ONNXUnit
+    , ONNXCallable
+    >;
+
+// FIXME: Same issue with Typed and ConstTyped
+using GConstGONNXModel = ade::ConstTypedGraph
+    < cv::gimpl::Protocol
+    , cv::gimpl::Op
+    , cv::gimpl::NetworkParams
+    , cv::gimpl::CustomMetaFunction
+    , ONNXUnit
+    , ONNXCallable
+    >;
+} // anonymous namespace
+
+// GONNXExecutable implementation /////////////////////////////////////////////
+cv::gimpl::onnx::GONNXExecutable::GONNXExecutable(const ade::Graph &g,
+                                                  const std::vector<ade::NodeHandle> &nodes)
+    : m_g(g), m_gm(m_g) {
+    // FIXME: Currently this backend is capable of running a single inference node only.
+    // Need to extend our island fusion with merge/not-to-merge decision-making parametrization
+    GConstGONNXModel iem(g);
+
+    for (auto &nh : nodes) {
+        switch (m_gm.metadata(nh).get<NodeType>().t) {
+        case NodeType::OP:
+            if (this_nh == nullptr) {
+                this_nh = nh;
+            }
+            else {
+                util::throw_error(std::logic_error("Multi-node inference is not supported!"));
+            }
+            break;
+
+        case NodeType::DATA: {
+            m_dataNodes.push_back(nh);
+            const auto &desc = m_gm.metadata(nh).get<Data>();
+            if (desc.storage == Data::Storage::CONST_VAL) {
+                util::throw_error(std::logic_error("No const data supported in backend!"));
+            }
+            if (desc.storage == Data::Storage::INTERNAL) {
+                util::throw_error(std::logic_error("No internal data supported in backend!"));
+            }
+            break;
+        }
+        default: util::throw_error(std::logic_error("Unsupported NodeType"));
+        }
+    }
+}
+
+// FIXME: Document what it does
+cv::GArg cv::gimpl::onnx::GONNXExecutable::packArg(const cv::GArg &arg) {
+    // No API placeholders allowed at this point
+    // FIXME: this check has to be done somewhere in compilation stage.
+    GAPI_Assert(   arg.kind != cv::detail::ArgKind::GMAT
+                && arg.kind != cv::detail::ArgKind::GSCALAR
+                && arg.kind != cv::detail::ArgKind::GARRAY
+                && arg.kind != cv::detail::ArgKind::GOPAQUE);
+
+    if (arg.kind != cv::detail::ArgKind::GOBJREF) {
+        util::throw_error(std::logic_error("Inference supports G-types ONLY!"));
+    }
+    GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);
+
+    // Wrap associated CPU object (either host or an internal one)
+    // FIXME: object can be moved out!!! GExecutor faced that.
+    const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
+    switch (ref.shape)
+    {
+    case GShape::GMAT:    return GArg(m_res.slot<cv::Mat>()[ref.id]);
+
+    // Note: .at() is intentional for GArray as object MUST be already there
+    //   (and constructed by either bindIn/Out or resetInternal)
+    case GShape::GARRAY:  return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));
+
+    // Note: .at() is intentional for GOpaque as object MUST be already there
+    //   (and constructed by either bindIn/Out or resetInternal)
+    case GShape::GOPAQUE:  return GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));
+
+    default:
+        util::throw_error(std::logic_error("Unsupported GShape type"));
+        break;
+    }
+}
+
+void cv::gimpl::onnx::GONNXExecutable::run(std::vector<InObj>  &&input_objs,
+                                           std::vector<OutObj> &&output_objs) {
+    // Update resources with run-time information - what this Island
+    // has received from user (or from another Island, or mix...)
+    // FIXME: Check input/output objects against GIsland protocol
+
+    for (auto& it : input_objs)   magazine::bindInArg (m_res, it.first, it.second);
+    for (auto& it : output_objs)  magazine::bindOutArg(m_res, it.first, it.second);
+
+    // FIXME: Running just a single node now.
+    // Not sure if we need to support more than one, though
+    // FIXME: Make this island-unmergeable?
+    const auto &op = m_gm.metadata(this_nh).get<Op>();
+
+    // Initialize kernel's execution context:
+    // - Input parameters
+    ONNXCallContext context;
+    context.args.reserve(op.args.size());
+    using namespace std::placeholders;
+    ade::util::transform(op.args,
+                          std::back_inserter(context.args),
+                          std::bind(&GONNXExecutable::packArg, this, _1));
+
+    // - Output parameters.
+    for (const auto &out_it : ade::util::indexed(op.outs)) {
+        // FIXME: Can the same GArg type resolution mechanism be reused here?
+        const auto out_port  = ade::util::index(out_it);
+        const auto out_desc  = ade::util::value(out_it);
+        context.results[out_port] = magazine::getObjPtr(m_res, out_desc);
+    }
+
+    // And now trigger the execution
+    GConstGONNXModel giem(m_g);
+    const auto &uu = giem.metadata(this_nh).get<ONNXUnit>();
+    const auto &kk = giem.metadata(this_nh).get<ONNXCallable>();
+    kk.run(uu, context);
+
+    for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
+}
+
+namespace cv {
+namespace gimpl {
+namespace onnx {
+
+ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp)
+    : params(pp) {
+
+    // Validate input parameters before allocating any resources
+    if (params.num_in > 1u && params.num_in != params.input_names.size()) {
+        cv::util::throw_error(std::logic_error("Please specify input layer names for "
+                                               + params.model_path));
+    }
+    if (params.num_out > 1u && params.num_out != params.output_names.size()) {
+        cv::util::throw_error(std::logic_error("Please specify output layer names for "
+                                               + params.model_path));
+    }
+
+    // Create and initialize the ONNX session
+    Ort::SessionOptions session_options;
+    this_env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "");
+    this_session = Ort::Session(this_env, params.model_path.data(), session_options);
+    this_memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+
+    in_tensor_info = getTensorInfo(INPUT);
+    out_tensor_info = getTensorInfo(OUTPUT);
+
+    const auto is_dyn = [](const TensorInfo &ti) {
+        return ti.is_dynamic;
+    };
+    is_dynamic = ade::util::any_of(in_tensor_info, is_dyn)
+              || ade::util::any_of(out_tensor_info, is_dyn);
+    if (is_dynamic && !params.custom_post_proc) {
+        util::throw_error(std::logic_error("This network has dynamic shapes. "
+                                           "Please provide a custom post-processing function "
+                                           "(.cfgPostProc) in network parameters"));
+    }
+
+    // Update parameters based on session information
+    if (params.num_in == 1u && params.input_names.empty()) {
+        params.input_names = { in_tensor_info.front().name };
+    }
+    if (params.num_out == 1u && params.output_names.empty()) {
+        params.output_names = { out_tensor_info.front().name };
+    }
+
+    // Validate what is supported currently
+    GAPI_Assert(params.const_inputs.empty()
+                && "Const inputs are not currently supported");
+    GAPI_Assert(std::all_of(in_tensor_info.begin(),
+                            in_tensor_info.end(),
+                            [](const cv::gimpl::onnx::TensorInfo &p) {
+                                return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
+                                    || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
+                            })
+                && "Only FP32 and U8 inputs for NN are supported");
+
+    // Put mean and std in appropriate tensor params
+    if (!params.mean.empty() || !params.stdev.empty()) {
+        GAPI_Assert(params.mean.size() == params.stdev.size() &&
+                    params.mean.size() == params.input_names.size());
+        for (auto idx : ade::util::iota(params.num_in)) {
+            const auto ort_idx = getIdxByName(in_tensor_info, params.input_names[idx]);
+            using M = TensorInfo::MeanStdev;
+            in_tensor_info[ort_idx].mstd = util::make_optional(M{ params.mean[idx]
+                                                                , params.stdev[idx] });
+        }
+    }
+
+    // Update normalize flags for input tensors
+    if (!params.normalize.empty()) {
+        for (auto idx : ade::util::iota(params.num_in)) {
+            const auto ort_idx = getIdxByName(in_tensor_info, params.input_names[idx]);
+            in_tensor_info[ort_idx].normalize = params.normalize[idx];
+        }
+    }
+
+    // Pre-allocate vectors (not buffers) for runtime info
+    in_data.resize(params.num_in);
+    out_data.resize(params.num_out);
+}
+
+std::vector<TensorInfo> ONNXCompiled::getTensorInfo(TensorPosition pos) {
+    GAPI_Assert(pos == INPUT || pos == OUTPUT);
+
+    const auto num_nodes = pos == INPUT
+        ? this_session.GetInputCount()
+        : this_session.GetOutputCount();
+
+    std::vector<TensorInfo> tensor_info;
+    tensor_info.reserve(num_nodes);
+
+    Ort::AllocatorWithDefaultOptions allocator;
+    for (auto i : ade::util::iota(num_nodes)) {
+        const auto info = pos == INPUT
+            ? this_session.GetInputTypeInfo(i)
+            : this_session.GetOutputTypeInfo(i);
+        tensor_info.emplace_back(info.GetTensorTypeAndShapeInfo());
+
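+        // GetInputName/GetOutputName allocate the returned string with
+        // the given allocator; copy it into std::string and free it
+        // right away to avoid a leak.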
+        char *name_p = pos == INPUT
+            ? this_session.GetInputName(i, allocator)
+            : this_session.GetOutputName(i, allocator);
+        tensor_info.back().name = name_p;
+        allocator.Free(name_p);
+    }
+
+    return tensor_info;
+}
+
+cv::GMatDesc ONNXCompiled::outMeta(int idx) const {
+    if (is_dynamic) {
+        GAPI_Assert(!params.out_metas.empty()
+                    && "Metadata must be specified if NN has dynamic inputs!");
+        return params.out_metas.at(idx);
+    }
+    const auto ort_idx = getIdxByName(out_tensor_info, params.output_names[idx]);
+    return cv::GMatDesc(toCV(out_tensor_info[ort_idx].type),
+                        toCV(out_tensor_info[ort_idx].dims));
+}
+
+void ONNXCompiled::setInput(int i, const cv::Mat &m) {
+    const auto in_idx  = i;
+    const auto in_name = params.input_names[in_idx];
+    const auto ort_idx = getIdxByName(in_tensor_info, in_name);
+    preprocess(m, in_tensor_info[ort_idx], in_data[in_idx]);
+}
+
+void ONNXCompiled::setOutput(int i, cv::Mat &m) {
+    // FIXME: No need in double-indexing?
+    out_data[i] = m;
+}
+
+cv::Mat ONNXCompiled::allocOutput(int i) const {
+    cv::Mat m;
+    m.create(toCV(out_tensor_info[i].dims),
+             toCV(out_tensor_info[i].type));
+    return m;
+}
+
+void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
+                       const std::vector<cv::Mat>& outs) {
+    std::vector<Ort::Value> in_tensors, out_tensors;
+
+    auto in_run_names  = getCharNames(params.input_names);
+
+    for (const auto it : ade::util::indexed(params.input_names)) {
+        auto i         = ade::util::index(it);
+        auto in_name   = ade::util::value(it);
+        const auto idx = getIdxByName(in_tensor_info, in_name);
+        in_tensors.emplace_back(createTensor(this_memory_info,
+                                             in_tensor_info[idx],
+                                             ins[i]));
+    }
+
+    if (!is_dynamic) {
+        // Easy path - just run the session which is bound to G-API's
+        // internal data
+        for (auto i : ade::util::iota(params.output_names.size())) {
+            out_tensors.emplace_back(createTensor(this_memory_info,
+                                                  out_tensor_info[i],
+                                                  outs[i]));
+        }
+        auto out_run_names = getCharNames(params.output_names);
+        this_session.Run(Ort::RunOptions{nullptr},
+                         in_run_names.data(),
+                         &in_tensors.front(),
+                         params.input_names.size(),
+                         out_run_names.data(),
+                         &out_tensors.front(),
+                         params.output_names.size());
+    } else {
+        // Hard path - run session & user-defined post-processing
+        // NOTE: use another list of output names here
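+        // (these are the session-reported tensor names, not
+        // params.output_names: with custom post-processing the
+        // user-facing output names may differ from the model's own)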
+        std::vector<const char*> out_names;
+        for (auto &&ti : out_tensor_info) {
+            out_names.push_back(ti.name.c_str());
+        }
+
+        auto outputs = this_session.Run(Ort::RunOptions{nullptr},
+                                        in_run_names.data(),
+                                        &in_tensors.front(),
+                                        params.input_names.size(),
+                                        out_names.data(),
+                                        out_names.size());
+        std::unordered_map<std::string, cv::Mat> onnx_outputs;
+        std::unordered_map<std::string, cv::Mat> gapi_outputs;
+
+        GAPI_Assert(outputs.size() == out_names.size());
+        // Fill in ONNX tensors
+        for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensor_info),
+                                          ade::util::toRange(outputs))) {
+            const auto &out_name   = std::get<0>(iter).name;
+                  auto &out_tensor = std::get<1>(iter);
+            onnx_outputs[out_name] = toCV(out_tensor);
+        }
+
+        // Fill in G-API outputs
+        for (auto &&it: ade::util::indexed(params.output_names)) {
+            gapi_outputs[ade::util::value(it)] = outs[ade::util::index(it)];
+        }
+        params.custom_post_proc(onnx_outputs, gapi_outputs);
+    }
+}
+
+void ONNXCompiled::run() {
+    Run(in_data, out_data);
+}
+
+struct Infer: public cv::detail::KernelTag {
+    using API = cv::GInferBase;
+    static cv::gapi::GBackend backend()  { return cv::gapi::onnx::backend(); }
+    static KImpl kernel()                { return KImpl{outMeta, run}; }
+
+    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
+                                 const ade::NodeHandle &nh,
+                                 const cv::GMetaArgs   &in_metas,
+                                 const cv::GArgs       &/*in_args*/) {
+        cv::GMetaArgs result;
+
+        GConstGONNXModel gm(gr);
+        const auto &uu = gm.metadata(nh).get<ONNXUnit>();
+
+        GAPI_Assert(uu.oc->numInputs() == in_metas.size()
+                    && "Known input layers count doesn't match input meta count");
+        for (auto &&mm : in_metas) {
+            GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
+                        && "Non-GMat inputs are not supported");
+        }
+        for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) {
+            result.emplace_back(uu.oc->outMeta(idx));
+        }
+        return result;
+    }
+
+    static void run(const ONNXUnit &uu, ONNXCallContext &ctx) {
+        for (auto &&idx : ade::util::iota(uu.oc->numInputs())) {
+            uu.oc->setInput(idx, ctx.inMat(idx));
+        }
+        for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) {
+            uu.oc->setOutput(idx, ctx.outMatR(idx));
+        }
+        uu.oc->run();
+    }
+};
+
+struct InferROI: public cv::detail::KernelTag {
+    using API = cv::GInferROIBase;
+    static cv::gapi::GBackend backend()  { return cv::gapi::onnx::backend(); }
+    static KImpl kernel()                { return KImpl{outMeta, run}; }
+
+    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
+                                 const ade::NodeHandle &nh,
+                                 const cv::GMetaArgs   &in_metas,
+                                 const cv::GArgs       &/*in_args*/) {
+        cv::GMetaArgs result;
+
+        GConstGONNXModel gm(gr);
+        const auto &uu = gm.metadata(nh).get<ONNXUnit>();
+        GAPI_Assert(1u == uu.oc->numInputs());
+        GAPI_Assert(2u == in_metas.size());
+
+        for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) {
+            result.emplace_back(uu.oc->outMeta(idx));
+        }
+        return result;
+    }
+
+    static void run(const ONNXUnit &uu, ONNXCallContext &ctx) {
+        // non-generic version for now, per the InferROI's definition
+        GAPI_Assert(uu.oc->numInputs() == 1u);
+        const auto& this_roi = ctx.inArg<cv::detail::OpaqueRef>(0).rref<cv::Rect>();
+        const auto  this_mat = ctx.inMat(1);
+
+        uu.oc->setInput(0, this_mat(this_roi));
+        for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) {
+            uu.oc->setOutput(idx, ctx.outMatR(idx));
+        }
+        uu.oc->run();
+    }
+};
+
+struct InferList: public cv::detail::KernelTag {
+    using API = cv::GInferListBase;
+    static cv::gapi::GBackend backend()  { return cv::gapi::onnx::backend(); }
+    static KImpl kernel()                { return KImpl{outMeta, run}; }
+
+    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
+                                 const ade::NodeHandle &nh,
+                                 const cv::GMetaArgs   &in_metas,
+                                 const cv::GArgs       &/*in_args*/) {
+        GConstGONNXModel gm(gr);
+        const auto &uu = gm.metadata(nh).get<ONNXUnit>();
+
+        // Note: our input layer list order matches the API order,
+        // and hence the meta order too.
+        GAPI_Assert(uu.oc->numInputs() == (in_metas.size() - 1u)
+                    && "Known input layers count doesn't match input meta count");
+
+        for (auto i : ade::util::iota(uu.oc->numInputs())) {
+            const auto & mm = in_metas[i + 1];
+
+            GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
+                        && "Non-GMat inputs are not supported");
+        }
+
+        // The roi-list version is much simpler at the moment:
+        // all our outputs are vectors which don't carry any
+        // metadata yet, so just create a vector of "empty"
+        // array metadata entries of the required size.
+        return cv::GMetaArgs(uu.oc->numOutputs(),
+                             cv::GMetaArg{cv::empty_array_desc()});
+    }
+
+    static void run(const ONNXUnit &uu, ONNXCallContext &ctx) {
+        // non-generic version for now:
+        // - assumes input 0 is always ROI list
+        // - assumes all inputs/outputs are always Mats
+        GAPI_Assert(uu.oc->numInputs() == 1); // roi list is not counted in net's inputs
+
+        const auto& in_roi_vec = ctx.inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>();
+        const cv::Mat this_mat = ctx.inMat(1u);
+
+        for (auto i : ade::util::iota(uu.oc->numOutputs())) {
+            ctx.outVecR<cv::Mat>(i).clear();
+        }
+        for (const auto &rc : in_roi_vec) {
+            uu.oc->setInput(0, this_mat(rc));
+            std::vector<cv::Mat> out_mats(uu.oc->numOutputs());
+            for (auto i : ade::util::iota(uu.oc->numOutputs())) {
+                out_mats[i] = uu.oc->allocOutput(i);
+                uu.oc->setOutput(i, out_mats[i]);
+            }
+            uu.oc->run();
+            for (auto i : ade::util::iota(uu.oc->numOutputs())) {
+                std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i);
+                out_vec.push_back(std::move(out_mats[i]));
+            }
+        }
+    }
+};
+
+struct InferList2: public cv::detail::KernelTag {
+    using API = cv::GInferList2Base;
+    static cv::gapi::GBackend backend()  { return cv::gapi::onnx::backend(); }
+    static KImpl kernel()                { return KImpl{outMeta, run}; }
+
+    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
+                                 const ade::NodeHandle &nh,
+                                 const cv::GMetaArgs   &in_metas,
+                                 const cv::GArgs       &/*in_args*/) {
+
+        GConstGONNXModel gm(gr);
+        const auto &uu = gm.metadata(nh).get<ONNXUnit>();
+
+        // Note: our input layer list order matches the API order,
+        // and hence the meta order too.
+        GAPI_Assert(uu.oc->numInputs() == (in_metas.size() - 1u)
+                    && "Known input layers count doesn't match input meta count");
+
+        // In contrast to InferList, InferList2 has only one
+        // "full-frame" image argument, and all the rest are arrays of
+        // either ROIs or blobs. So here we set the 0th arg image format
+        // to all inputs which are ROI-based (skipping the
+        // "blob"-based ones)
+        // FIXME: this filtering is actually not done! GArrayDesc
+        // carries no hint on the type!
+        const auto &mm_0   = in_metas[0u];
+        const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
+        GAPI_Assert(   !meta_0.isND()
+                    && !meta_0.planar
+                    && "Only images are supported as the 0th argument");
+        for (auto i : ade::util::iota(uu.oc->numInputs())) {
+            const auto &mm = in_metas[i + 1];
+            GAPI_Assert(util::holds_alternative<cv::GArrayDesc>(mm)
+                        && "Non-array inputs are not supported");
+        }
+
+        // The roi-list version is much simpler at the moment:
+        // all our outputs are vectors which don't carry any
+        // metadata yet, so just create a vector of "empty"
+        // array metadata entries of the required size.
+        return cv::GMetaArgs(uu.oc->numOutputs(),
+                             cv::GMetaArg{cv::empty_array_desc()});
+    }
+
+    static void run(const ONNXUnit &uu, ONNXCallContext &ctx) {
+        GAPI_Assert(ctx.args.size() > 1u
+                    && "This operation must have at least two arguments");
+
+        // Since we do a ROI-list inference, always assume our input buffer is an image
+        const cv::Mat mat_0  = ctx.inMat(0u);
+        // Take the next argument, which must be a vector (of any kind).
+        // Use it only to obtain the ROI list size (the sizes of all
+        // other vectors must be equal to this one)
+        const auto list_size = ctx.inArg<cv::detail::VectorRef>(1u).size();
+
+        for (auto i : ade::util::iota(uu.oc->numOutputs())) {
+            ctx.outVecR<cv::Mat>(i).clear();
+        }
+        // For every ROI in the list {{{
+        for (const auto &list_idx : ade::util::iota(list_size)) {
+            std::vector<Ort::Value> in_tensors, out_tensors;
+            std::vector<cv::Mat> in_mats(uu.oc->numInputs());
+            // For every input of the net {{{
+            for (auto in_idx : ade::util::iota(uu.oc->numInputs())) {
+                const auto &this_vec = ctx.inArg<cv::detail::VectorRef>(in_idx+1u);
+                GAPI_Assert(this_vec.size() == list_size);
+                // Prepare input {{{
+                //   FIXME: Terrible run-time logic based on RTTI!
+                //   FIXME: Will never work on non-RTTI systems!
+                //   FIXME: Need to replace with a static type tags
+                //   (like with serialization) instead!
+                if (this_vec.holds<cv::Rect>()) {
+                    // ROI case - create an ROI blob
+                    const auto &vec = this_vec.rref<cv::Rect>();
+                    uu.oc->setInput(in_idx, mat_0(vec[list_idx]));
+                } else if (this_vec.holds<cv::Mat>()) {
+                    // Mat case - create a regular blob
+                    // FIXME: NOW Assume Mats are always BLOBS (not
+                    // images)
+                    const auto &vec = this_vec.rref<cv::Mat>();
+                    uu.oc->setInput(in_idx, vec[list_idx]);
+                } else {
+                    GAPI_Assert(false && "Only Rect and Mat types are supported for infer list 2!");
+                }
+                // }}} (Prepare input)
+            } // }}} (For every input of the net)
+
+            std::vector<cv::Mat> out_mats(uu.oc->numOutputs());
+            for (auto i : ade::util::iota(uu.oc->numOutputs())) {
+                out_mats[i] = uu.oc->allocOutput(i);
+                uu.oc->setOutput(i, out_mats[i]);
+            }
+            uu.oc->run();
+
+            for (auto i : ade::util::iota(uu.oc->numOutputs())) {
+                std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i);
+                out_vec.push_back(std::move(out_mats[i]));
+            }
+        } // }}} (For every ROI in the list)
+    }
+};
+
+} // namespace onnx
+} // namespace gimpl
+} // namespace cv
+
+namespace {
+    class GONNXBackendImpl final: public cv::gapi::GBackend::Priv {
+        virtual void unpackKernel(ade::Graph            &gr,
+                                  const ade::NodeHandle &nh,
+                                  const cv::GKernelImpl &ii) override {
+            using namespace cv::gimpl;
+            // FIXME: Introduce a DNNBackend interface which'd specify
+            // the framework for this???
+            GONNXModel gm(gr);
+            const auto &np = gm.metadata(nh).get<NetworkParams>();
+            const auto &pp = cv::util::any_cast<cv::gapi::onnx::detail::ParamDesc>(np.opaque);
+            const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
+            gm.metadata(nh).set(ONNXUnit{pp});
+            gm.metadata(nh).set(ONNXCallable{ki.run});
+            gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
+        }
+
+        virtual EPtr compile(const ade::Graph &graph,
+                             const cv::GCompileArgs &,
+                             const std::vector<ade::NodeHandle> &nodes) const override {
+            return EPtr{new cv::gimpl::onnx::GONNXExecutable(graph, nodes)};
+        }
+
+        virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
+            return cv::gapi::kernels< cv::gimpl::onnx::Infer
+                                    , cv::gimpl::onnx::InferROI
+                                    , cv::gimpl::onnx::InferList
+                                    , cv::gimpl::onnx::InferList2
+                                    >();
+        }
+    };
+}
+
+cv::gapi::GBackend cv::gapi::onnx::backend() {
+    static cv::gapi::GBackend this_backend(std::make_shared<GONNXBackendImpl>());
+    return this_backend;
+}
+#else // HAVE_ONNX
+
+cv::gapi::GBackend cv::gapi::onnx::backend() {
+    // Still provide this symbol to avoid linking issues
+    util::throw_error(std::runtime_error("G-API has been compiled without ONNX support"));
+}
+#endif // HAVE_ONNX
diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.hpp b/modules/gapi/src/backends/onnx/gonnxbackend.hpp
new file mode 100644
index 0000000000..a3cc897030
--- /dev/null
+++ b/modules/gapi/src/backends/onnx/gonnxbackend.hpp
@@ -0,0 +1,56 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#ifndef OPENCV_GAPI_GONNXBACKEND_HPP
+#define OPENCV_GAPI_GONNXBACKEND_HPP
+
+#include "opencv2/gapi/infer/onnx.hpp"
+#ifdef HAVE_ONNX
+
+#include <onnxruntime_cxx_api.h>
+#include <ade/util/algorithm.hpp> // type_list_index
+
+#include "backends/common/gbackend.hpp"
+
+namespace cv {
+namespace gimpl {
+namespace onnx {
+
+class GONNXExecutable final: public GIslandExecutable
+{
+    const ade::Graph &m_g;
+    GModel::ConstGraph m_gm;
+
+    // The only executable stuff in this graph
+    // (assuming it is always single-op)
+    ade::NodeHandle this_nh;
+
+    // List of all resources in graph (both internal and external)
+    std::vector<ade::NodeHandle> m_dataNodes;
+
+    // Actual data of all resources in graph (both internal and external)
+    Mag m_res;
+
+    // Execution helpers
+    GArg packArg(const GArg &arg);
+
+public:
+    GONNXExecutable(const ade::Graph                   &graph,
+                    const std::vector<ade::NodeHandle> &nodes);
+
+    virtual inline bool canReshape() const override { return false; }
+    virtual inline void reshape(ade::Graph&, const GCompileArgs&) override {
+        GAPI_Assert(false); // Not implemented yet
+    }
+
+    virtual void run(std::vector<InObj>  &&input_objs,
+                     std::vector<OutObj> &&output_objs) override;
+};
+
+}}} // namespace cv::gimpl::onnx
+
+#endif // HAVE_ONNX
+#endif // OPENCV_GAPI_GONNXBACKEND_HPP
diff --git a/modules/gapi/src/compiler/passes/kernels.cpp b/modules/gapi/src/compiler/passes/kernels.cpp
index 100a32ec57..837e21f19a 100644
--- a/modules/gapi/src/compiler/passes/kernels.cpp
+++ b/modules/gapi/src/compiler/passes/kernels.cpp
@@ -141,6 +141,7 @@ void cv::gimpl::passes::bindNetParams(ade::passes::PassContext &ctx,
                 continue;
 
             pgr.metadata(nh).set(NetworkParams{it->params});
+            op.backend = it->backend;
         }
     }
 }
@@ -181,13 +182,25 @@ void cv::gimpl::passes::resolveKernels(ade::passes::PassContext   &ctx,
             // of the same kernel to be presented in the kernel
             // package (as it was designed originally).
 
-            cv::gapi::GBackend selected_backend;
-            cv::GKernelImpl    selected_impl;
-            std::tie(selected_backend, selected_impl) = kernels.lookup(op.k.name);
+            cv::GKernelImpl selected_impl;
 
-            selected_backend.priv().unpackKernel(ctx.graph, nh, selected_impl);
-            op.backend = selected_backend;
-            active_backends.insert(selected_backend);
+            if (op.backend == cv::gapi::GBackend()) {
+                std::tie(op.backend, selected_impl) = kernels.lookup(op.k.name);
+            } else {
+                // FIXME: This needs to be reworked properly
+                // Look up the implementation in the pre-assigned backend
+                cv::gapi::GBackend dummy;
+                std::tie(dummy, selected_impl) = op.backend.priv()
+                    .auxiliaryKernels().lookup(op.k.name);
+                // FIXME: A warning should be raised here!
+                // This situation may happen when an NN (infer) backend was assigned
+                // by tag in bindNetParams (see above), but at this stage the operation
+                // lookup resolved to another backend (which is perfectly valid when
+                // multiple NN backends are available).
+            }
+
+            op.backend.priv().unpackKernel(ctx.graph, nh, selected_impl);
+            active_backends.insert(op.backend);
 
             if (gr.metadata().contains<Deserialized>())
             {
diff --git a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
new file mode 100644
index 0000000000..ebb8020e9a
--- /dev/null
+++ b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
@@ -0,0 +1,278 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "../test_precomp.hpp"
+
+#ifdef HAVE_ONNX
+
+#include <stdexcept>
+#include <onnxruntime_cxx_api.h>
+#include <ade/util/iota_range.hpp>
+
+#include <opencv2/gapi/infer/onnx.hpp>
+
+namespace {
+
+struct ONNXInitPath {
+    ONNXInitPath() {
+        const char* env_path = getenv("OPENCV_GAPI_ONNX_MODEL_PATH");
+        if (env_path)
+            cvtest::addDataSearchPath(env_path);
+    }
+};
+static ONNXInitPath g_init_path;
+
+cv::Mat initMatrixRandU(int type, cv::Size sz_in)
+{
+    cv::Mat in_mat1 = cv::Mat(sz_in, type);
+
+    if (CV_MAT_DEPTH(type) < CV_32F)
+    {
+        cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
+    }
+    else
+    {
+        const int fscale = 256;  // avoid bits near ULP, generate stable test input
+        cv::Mat in_mat32s(in_mat1.size(), CV_MAKE_TYPE(CV_32S, CV_MAT_CN(type)));
+        cv::randu(in_mat32s, cv::Scalar::all(0), cv::Scalar::all(255 * fscale));
+        in_mat32s.convertTo(in_mat1, type, 1.0f / fscale, 0);
+    }
+    return in_mat1;
+}
+} // anonymous namespace
+namespace opencv_test
+{
+namespace {
+// FIXME: taken from the DNN module
+void normAssert(cv::InputArray ref, cv::InputArray test,
+                const char *comment /*= ""*/,
+                double l1 = 0.00001, double lInf = 0.0001)
+{
+    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
+    EXPECT_LE(normL1, l1) << comment;
+
+    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
+    EXPECT_LE(normInf, lInf) << comment;
+}
+
+std::string findModel(const std::string &model_name)
+{
+    return findDataFile("vision/classification/squeezenet/model/" + model_name + ".onnx", false);
+}
+
+inline void preprocess(const cv::Mat& src,
+                             cv::Mat& dst,
+                       const cv::Scalar& mean,
+                       const cv::Scalar& std) {
+    int new_h = 224;
+    int new_w = 224;
+    cv::Mat tmp, nmat, cvt;
+    cv::resize(src, dst, cv::Size(new_w, new_h));
+    dst.convertTo(cvt, CV_32F, 1.f / 255);
+    nmat = cvt - mean;
+    tmp = nmat / std;
+    dst.create(cv::Size(new_w, new_h * src.channels()), CV_32F);
+    std::vector<cv::Mat> planes;
+    for (int i = 0; i < src.channels(); ++i) {
+        planes.push_back(dst.rowRange(i * new_h, (i + 1) * new_h));
+    }
+    cv::split(tmp, planes);
+}
+
+void InferONNX(const std::string& model_path,
+               const cv::Mat& in,
+                     cv::Mat& out,
+               const cv::Scalar& mean,
+               const cv::Scalar& std)
+{
+    // FIXME: It must be a FIXTURE test!
+    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
+    Ort::SessionOptions session_options;
+    Ort::Session session(env, model_path.data(), session_options);
+    auto input_node_dims =  // index 0: the model has a single input
+        session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
+    auto output_node_dims = // index 0: the model has a single output
+        session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
+    Ort::AllocatorWithDefaultOptions allocator;
+    char* in_node_name_p = session.GetInputName(0, allocator);
+    char* out_node_name_p = session.GetOutputName(0, allocator);
+    std::string in_node_name(in_node_name_p);
+    std::string out_node_name(out_node_name_p);
+    allocator.Free(in_node_name_p);
+    allocator.Free(out_node_name_p);
+
+    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+    cv::Mat dst;
+    preprocess(in, dst, mean, std);
+
+    out.create(std::vector<int>(output_node_dims.begin(),
+                                output_node_dims.end()), CV_32F); // empty output Mat
+    auto in_tensor = Ort::Value::CreateTensor<float>(memory_info,
+                                                     dst.ptr<float>(),
+                                                     dst.total(),
+                                                     input_node_dims.data(),
+                                                     input_node_dims.size());
+    auto out_tensor = Ort::Value::CreateTensor<float>(memory_info,
+                                                      out.ptr<float>(),
+                                                      out.total(),
+                                                      output_node_dims.data(),
+                                                      output_node_dims.size());
+    std::vector<const char *> in_names = {in_node_name.data()};
+    std::vector<const char *> out_names = {out_node_name.data()};
+    session.Run(Ort::RunOptions{nullptr},
+                in_names.data(),
+                &in_tensor,
+                session.GetInputCount(),
+                out_names.data(),
+                &out_tensor,
+                session.GetOutputCount());
+}
+
+} // anonymous namespace
+
+TEST(ONNX, Infer)
+{
+    cv::Mat in_mat1, out_gapi, out_onnx;
+    std::string model_path = findModel("squeezenet1.0-9");
+    // NOTE: All tests check a "random" image;
+    // ideally it should be a real image
+    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+
+    cv::Scalar mean = { 0.485, 0.456, 0.406 };
+    cv::Scalar std  = { 0.229, 0.224, 0.225 };
+
+    // ONNX_API code
+    InferONNX(model_path, in_mat1, out_onnx, mean, std);
+
+    // G_API code
+    G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<SqueezNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    comp.apply(cv::gin(in_mat1),
+               cv::gout(out_gapi),
+               cv::compile_args(cv::gapi::networks(net)));
+
+    // Validate
+    ASSERT_EQ(1000u, out_onnx.total());
+    ASSERT_EQ(1000u, out_gapi.total());
+    normAssert(out_onnx, out_gapi, "Test classification output");
+}
+
+TEST(ONNX, InferROI)
+{
+    cv::Mat in_mat1, out_gapi, out_onnx;
+    std::string model_path = findModel("squeezenet1.0-9");
+    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+
+    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeezenet mean
+    cv::Scalar std  = { 0.229, 0.224, 0.225 }; // squeezenet std
+
+    cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250});
+    // ONNX_API code
+    InferONNX(model_path, in_mat1(ROI), out_onnx, mean, std);
+
+    // G_API code
+    G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
+    cv::GMat in;
+    cv::GOpaque<cv::Rect> rect;
+    cv::GMat out = cv::gapi::infer<SqueezNet>(rect, in);
+    cv::GComputation comp(cv::GIn(in, rect), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    comp.apply(cv::gin(in_mat1, ROI),
+               cv::gout(out_gapi),
+               cv::compile_args(cv::gapi::networks(net)));
+
+    // Validate
+    ASSERT_EQ(1000u, out_onnx.total());
+    ASSERT_EQ(1000u, out_gapi.total());
+    normAssert(out_onnx, out_gapi, "Test classification output");
+}
+
+TEST(ONNX, InferROIList)
+{
+    cv::Mat in_mat1;
+    std::string model_path = findModel("squeezenet1.0-9");
+    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+
+    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeezenet mean
+    cv::Scalar std  = { 0.229, 0.224, 0.225 }; // squeezenet std
+
+    std::vector<cv::Rect> rois = {
+        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
+        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
+    };
+    std::vector<cv::Mat> out_gapi;
+    std::vector<cv::Mat> out_onnx(rois.size());
+    // ONNX_API code
+    for (size_t i = 0; i < rois.size(); ++i) {
+        InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std);
+    }
+
+    // G_API code
+    G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
+    cv::GMat in;
+    cv::GArray<cv::Rect> rr;
+    cv::GArray<cv::GMat> out = cv::gapi::infer<SqueezNet>(rr, in);
+    cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    comp.apply(cv::gin(in_mat1, rois),
+               cv::gout(out_gapi),
+               cv::compile_args(cv::gapi::networks(net)));
+
+    // Validate
+    for (size_t i = 0; i < rois.size(); ++i) {
+        ASSERT_EQ(1000u, out_onnx[i].total());
+        ASSERT_EQ(1000u, out_gapi[i].total());
+        normAssert(out_onnx[i], out_gapi[i], "Test classification output");
+    }
+}
+
+TEST(ONNX, Infer2ROIList)
+{
+    cv::Mat in_mat1;
+    std::string model_path = findModel("squeezenet1.0-9");
+    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+
+    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeezenet mean
+    cv::Scalar std  = { 0.229, 0.224, 0.225 }; // squeezenet std
+
+    std::vector<cv::Rect> rois = {
+        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
+        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
+    };
+    std::vector<cv::Mat> out_gapi;
+    std::vector<cv::Mat> out_onnx(rois.size());
+    // ONNX_API code
+    for (size_t i = 0; i < rois.size(); ++i) {
+        InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std);
+    }
+
+    // G_API code
+    G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
+    cv::GMat in;
+    cv::GArray<cv::Rect> rr;
+    cv::GArray<cv::GMat> out = cv::gapi::infer2<SqueezNet>(in, rr);
+    cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    comp.apply(cv::gin(in_mat1, rois),
+               cv::gout(out_gapi),
+               cv::compile_args(cv::gapi::networks(net)));
+
+    // Validate
+    for (size_t i = 0; i < rois.size(); ++i) {
+        ASSERT_EQ(1000u, out_onnx[i].total());
+        ASSERT_EQ(1000u, out_gapi[i].total());
+        normAssert(out_onnx[i], out_gapi[i], "Test classification output");
+    }
+}
+
+} // namespace opencv_test
+
+#endif //  HAVE_ONNX

From 039795b4051456ee4f84f4b2359bdbcf491fba3f Mon Sep 17 00:00:00 2001
From: Mark Shachkov <markshachkov@gmail.com>
Date: Tue, 3 Nov 2020 21:54:56 +0300
Subject: [PATCH 064/152] Change naming of keypoints comparator

---
 modules/features2d/src/keypoint.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/features2d/src/keypoint.cpp b/modules/features2d/src/keypoint.cpp
index 219634e5b4..bab1e22b45 100644
--- a/modules/features2d/src/keypoint.cpp
+++ b/modules/features2d/src/keypoint.cpp
@@ -44,9 +44,9 @@
 namespace cv
 {
 
-struct KeypointResponseGreaterThanThreshold
+struct KeypointResponseGreaterThanOrEqualToThreshold
 {
-    KeypointResponseGreaterThanThreshold(float _value) :
+    KeypointResponseGreaterThanOrEqualToThreshold(float _value) :
     value(_value)
     {
     }
@@ -83,7 +83,7 @@ void KeyPointsFilter::retainBest(std::vector<KeyPoint>& keypoints, int n_points)
         //use std::partition to grab all of the keypoints with the boundary response.
         std::vector<KeyPoint>::const_iterator new_end =
         std::partition(keypoints.begin() + n_points, keypoints.end(),
-                       KeypointResponseGreaterThanThreshold(ambiguous_response));
+                       KeypointResponseGreaterThanOrEqualToThreshold(ambiguous_response));
         //resize the keypoints, given this new end point. nth_element and partition reordered the points inplace
         keypoints.resize(new_end - keypoints.begin());
     }
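
The rename reflects what retainBest() actually needs here: std::partition keeps
every keypoint whose response ties the boundary value, i.e. a
greater-than-or-equal test, not a strict one. A standalone sketch of the
keep-ties idiom on plain floats (a hypothetical helper, not the OpenCV
implementation):

```
#include <algorithm>
#include <functional>
#include <vector>

// Keep the n largest values, plus everything tied with the n-th largest --
// the same keep-ties semantics as KeyPointsFilter::retainBest().
std::vector<float> retainBest(std::vector<float> v, size_t n)
{
    if (n == 0 || n >= v.size())
        return v;
    std::nth_element(v.begin(), v.begin() + n - 1, v.end(), std::greater<float>());
    const float boundary = v[n - 1];
    // Grab the ties sitting past position n; a strict '>' would drop them.
    auto new_end = std::partition(v.begin() + n, v.end(),
                                  [boundary](float x) { return x >= boundary; });
    v.erase(new_end, v.end());
    return v;
}
```

With v = {5, 4, 4, 1} and n = 2 this keeps {5, 4, 4}: the tied 4 past
position n survives only because the predicate is >= rather than >.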

From 21a8d9569d8de225509225193e9c02e622ff2702 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin <maksim.shabunin@gmail.com>
Date: Thu, 5 Nov 2020 13:11:31 +0300
Subject: [PATCH 065/152] videoio: added frameSize to MFX capture

---
 modules/videoio/src/cap_mfx_reader.cpp | 20 +++++++++++++++++---
 modules/videoio/src/cap_mfx_reader.hpp |  1 +
 modules/videoio/test/test_mfx.cpp      |  2 ++
 3 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/modules/videoio/src/cap_mfx_reader.cpp b/modules/videoio/src/cap_mfx_reader.cpp
index 7df2cf56af..2f85bbe02a 100644
--- a/modules/videoio/src/cap_mfx_reader.cpp
+++ b/modules/videoio/src/cap_mfx_reader.cpp
@@ -111,6 +111,7 @@ VideoCapture_IntelMFX::VideoCapture_IntelMFX(const cv::String &filename)
         return;
     }
 
+    frameSize = Size(params.mfx.FrameInfo.CropW, params.mfx.FrameInfo.CropH);
     good = true;
 }
 
@@ -126,10 +127,23 @@ VideoCapture_IntelMFX::~VideoCapture_IntelMFX()
     cleanup(deviceHandler);
 }
 
-double VideoCapture_IntelMFX::getProperty(int) const
+double VideoCapture_IntelMFX::getProperty(int prop) const
 {
-    MSG(cerr << "MFX: getProperty() is not implemented" << endl);
-    return 0;
+    if (!good)
+    {
+        MSG(cerr << "MFX: can not call getProperty(), backend has not been initialized" << endl);
+        return 0;
+    }
+    switch (prop)
+    {
+        case CAP_PROP_FRAME_WIDTH:
+            return frameSize.width;
+        case CAP_PROP_FRAME_HEIGHT:
+            return frameSize.height;
+        default:
+            MSG(cerr << "MFX: unsupported property" << endl);
+            return 0;
+    }
 }
 
 bool VideoCapture_IntelMFX::setProperty(int, double)
diff --git a/modules/videoio/src/cap_mfx_reader.hpp b/modules/videoio/src/cap_mfx_reader.hpp
index cad5297b8a..bd3673864c 100644
--- a/modules/videoio/src/cap_mfx_reader.hpp
+++ b/modules/videoio/src/cap_mfx_reader.hpp
@@ -34,6 +34,7 @@ private:
     MFXVideoDECODE *decoder;
     SurfacePool *pool;
     void *outSurface;
+    cv::Size frameSize;
     bool good;
 };
 
diff --git a/modules/videoio/test/test_mfx.cpp b/modules/videoio/test/test_mfx.cpp
index f739cbda17..6613383fde 100644
--- a/modules/videoio/test/test_mfx.cpp
+++ b/modules/videoio/test/test_mfx.cpp
@@ -111,6 +111,8 @@ TEST_P(Videoio_MFX, read_write_raw)
     VideoCapture cap;
     cap.open(filename, CAP_INTEL_MFX);
     ASSERT_TRUE(cap.isOpened());
+    EXPECT_EQ(FRAME_SIZE.width, cap.get(CAP_PROP_FRAME_WIDTH));
+    EXPECT_EQ(FRAME_SIZE.height, cap.get(CAP_PROP_FRAME_HEIGHT));
     for (int i = 0; i < FRAME_COUNT; ++i)
     {
         ASSERT_TRUE(cap.read(frame));

From a6e15b2f577b5d00bce2d8db624d95838e19609d Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Thu, 29 Oct 2020 23:38:30 +0000
Subject: [PATCH 066/152] cmake: prefer using Eigen configuration files

- for better compatibility with Ceres 2.0.0 CMake scripts
---
 cmake/OpenCVFindLibsPerf.cmake | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/cmake/OpenCVFindLibsPerf.cmake b/cmake/OpenCVFindLibsPerf.cmake
index b9b1a95799..7b3874ff0e 100644
--- a/cmake/OpenCVFindLibsPerf.cmake
+++ b/cmake/OpenCVFindLibsPerf.cmake
@@ -51,7 +51,12 @@ endif(WITH_CUDA)
 
 # --- Eigen ---
 if(WITH_EIGEN AND NOT HAVE_EIGEN)
-  find_package(Eigen3 QUIET)
+  if(NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG)
+    find_package(Eigen3 CONFIG QUIET)  # Ceres 2.0.0 CMake scripts don't work with CMake's FindEigen3.cmake module (due to missing EIGEN3_VERSION_STRING)
+  endif()
+  if(NOT Eigen3_FOUND)
+    find_package(Eigen3 QUIET)
+  endif()
 
   if(Eigen3_FOUND)
     if(TARGET Eigen3::Eigen)

From d9877efe1d9d8c12d2cc29aacd4b511af4345fa1 Mon Sep 17 00:00:00 2001
From: Ruslan Garnov <ruslan.garnov@intel.com>
Date: Fri, 6 Nov 2020 01:59:09 +0300
Subject: [PATCH 067/152] Added rmat.cpp and media.cpp to files being built in
 standalone

---
 modules/gapi/cmake/standalone.cmake | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/gapi/cmake/standalone.cmake b/modules/gapi/cmake/standalone.cmake
index ca54697524..5cc57d8269 100644
--- a/modules/gapi/cmake/standalone.cmake
+++ b/modules/gapi/cmake/standalone.cmake
@@ -15,6 +15,8 @@ file(GLOB FLUID_includes "${FLUID_ROOT}/include/opencv2/*.hpp"
                          "${FLUID_ROOT}/include/opencv2/gapi/own/*.hpp"
                          "${FLUID_ROOT}/include/opencv2/gapi/fluid/*.hpp")
 file(GLOB FLUID_sources  "${FLUID_ROOT}/src/api/g*.cpp"
+                         "${FLUID_ROOT}/src/api/rmat.cpp"
+                         "${FLUID_ROOT}/src/api/media.cpp"
                          "${FLUID_ROOT}/src/compiler/*.cpp"
                          "${FLUID_ROOT}/src/compiler/passes/*.cpp"
                          "${FLUID_ROOT}/src/executor/*.cpp"

From eb24575e2ce6ae56613fe4b9709ea55b4d8a228e Mon Sep 17 00:00:00 2001
From: Roman Kazantsev <roman.kazantsev@intel.com>
Date: Fri, 6 Nov 2020 09:51:40 +0300
Subject: [PATCH 068/152] Use explicit opset of Unsqueeze from nGraph

The change is needed because the default opset namespace for Unsqueeze is being
removed as part of this refactoring activity:
https://github.com/openvinotoolkit/openvino/pull/2767

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
---
 modules/dnn/src/layers/prior_box_layer.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp
index dc1364a06b..f7340b1e67 100644
--- a/modules/dnn/src/layers/prior_box_layer.cpp
+++ b/modules/dnn/src/layers/prior_box_layer.cpp
@@ -607,7 +607,7 @@ public:
 
             auto priorBox = std::make_shared<ngraph::op::PriorBoxClustered>(slice_layer, slice_image, attrs);
             auto axis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
-            auto unsqueeze = std::make_shared<ngraph::op::Unsqueeze>(priorBox, axis);
+            auto unsqueeze = std::make_shared<ngraph::op::v0::Unsqueeze>(priorBox, axis);
             return Ptr<BackendNode>(new InfEngineNgraphNode(unsqueeze));
         }
         else
@@ -628,7 +628,7 @@ public:
 
             auto priorBox = std::make_shared<ngraph::op::PriorBox>(slice_layer, slice_image, attrs);
             auto axis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
-            auto unsqueeze = std::make_shared<ngraph::op::Unsqueeze>(priorBox, axis);
+            auto unsqueeze = std::make_shared<ngraph::op::v0::Unsqueeze>(priorBox, axis);
             return Ptr<BackendNode>(new InfEngineNgraphNode(unsqueeze));
         }
     }

From ad71a1633cff03f72cfcaa85b7c0ad35a8356d48 Mon Sep 17 00:00:00 2001
From: junxnone <junchen0102@gmail.com>
Date: Thu, 5 Nov 2020 12:25:38 +0800
Subject: [PATCH 069/152] fix truncate threshold example display issue in
 py_tutorials

Signed-off-by: junxnone <junchen0102@gmail.com>
---
 .../py_thresholding/images/threshold.jpg      | Bin 15543 -> 18675 bytes
 .../py_thresholding/py_thresholding.markdown  |   2 +-
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg b/doc/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg
index e203927791e209945680447d515812c543ca76e9..c7053cc76d59c9545598274275b62f3f0a68a6ba 100644
GIT binary patch
literal 18675
(base85-encoded image data omitted)

literal 15543
(base85-encoded image data omitted)

diff --git a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown
index 285124d17c..0540098850 100644
--- a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown
+++ b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown
@@ -48,7 +48,7 @@ titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
 images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
 
 for i in xrange(6):
-    plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
+    plt.subplot(2,3,i+1),plt.imshow(images[i],'gray',vmin=0,vmax=255)
     plt.title(titles[i])
     plt.xticks([]),plt.yticks([])
 

From ec5065eb53405c490f17c39f6507fde9e5f70a88 Mon Sep 17 00:00:00 2001
From: Anastasiya Pronina <anastasiya.pronina@intel.com>
Date: Fri, 6 Nov 2020 14:54:23 +0300
Subject: [PATCH 070/152] Fixed Inputs/OutputsDataMap conversion

---
 modules/gapi/src/backends/ie/giebackend/giewrapper.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp
index 8f5a7eca11..ba0632d4f0 100644
--- a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp
+++ b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp
@@ -25,7 +25,7 @@ using GIEParam = cv::gapi::ie::detail::ParamDesc;
 IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs) {
     IE::InputsDataMap transformed;
     auto convert = [](const std::pair<std::string, IE::InputInfo::CPtr>& p) {
-        return std::make_pair(p.first, std::make_shared<IE::InputInfo>(*p.second));
+        return std::make_pair(p.first, std::const_pointer_cast<IE::InputInfo>(p.second));
     };
     std::transform(inputs.begin(), inputs.end(), std::inserter(transformed, transformed.end()), convert);
     return transformed;
@@ -34,7 +34,7 @@ IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs
 IE::OutputsDataMap giewrap::toOutputsDataMap (const IE::ConstOutputsDataMap& outputs) {
     IE::OutputsDataMap transformed;
     auto convert = [](const std::pair<std::string, IE::CDataPtr>& p) {
-        return std::make_pair(p.first, std::make_shared<IE::Data>(*p.second));
+        return std::make_pair(p.first, std::const_pointer_cast<IE::Data>(p.second));
     };
     std::transform(outputs.begin(), outputs.end(), std::inserter(transformed, transformed.end()), convert);
     return transformed;
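
The previous conversion deep-copied each `InputInfo`/`Data` object via
`make_shared`, producing detached copies; `std::const_pointer_cast` instead
returns a non-const pointer to the *same* object, so the transformed map now
aliases the original data. A minimal standalone sketch of the difference
(illustrative only, unrelated to the IE types):

```
#include <cassert>
#include <memory>

int main()
{
    std::shared_ptr<const int> original = std::make_shared<int>(42);

    // Old approach: make_shared copies the pointee -- an independent object.
    std::shared_ptr<int> copy = std::make_shared<int>(*original);
    *copy = 7;
    assert(*original == 42);  // the original is untouched

    // New approach: const_pointer_cast aliases the same object.
    std::shared_ptr<int> alias = std::const_pointer_cast<int>(original);
    *alias = 7;
    assert(*original == 7);   // the change is visible through the const handle
    return 0;
}
```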

From bb5b628cce18c3b947c12aef6df5063244b8d1ea Mon Sep 17 00:00:00 2001
From: Roman Kazantsev <roman.kazantsev@intel.com>
Date: Fri, 6 Nov 2020 09:51:40 +0300
Subject: [PATCH 071/152] Use explicit opset of Unsqueeze from nGraph

backporting commit eb24575e2ce6ae56613fe4b9709ea55b4d8a228e
---
 modules/dnn/src/layers/prior_box_layer.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp
index 7385afd3b0..dd39ce4417 100644
--- a/modules/dnn/src/layers/prior_box_layer.cpp
+++ b/modules/dnn/src/layers/prior_box_layer.cpp
@@ -595,7 +595,7 @@ public:
 
             auto priorBox = std::make_shared<ngraph::op::PriorBoxClustered>(slice_layer, slice_image, attrs);
             auto axis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
-            auto unsqueeze = std::make_shared<ngraph::op::Unsqueeze>(priorBox, axis);
+            auto unsqueeze = std::make_shared<ngraph::op::v0::Unsqueeze>(priorBox, axis);
             return Ptr<BackendNode>(new InfEngineNgraphNode(unsqueeze));
         }
         else
@@ -616,7 +616,7 @@ public:
 
             auto priorBox = std::make_shared<ngraph::op::PriorBox>(slice_layer, slice_image, attrs);
             auto axis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
-            auto unsqueeze = std::make_shared<ngraph::op::Unsqueeze>(priorBox, axis);
+            auto unsqueeze = std::make_shared<ngraph::op::v0::Unsqueeze>(priorBox, axis);
             return Ptr<BackendNode>(new InfEngineNgraphNode(unsqueeze));
         }
     }

From bed5debca639f35931b778cf9e7727e4f27c7659 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 7 Nov 2020 17:27:33 +0000
Subject: [PATCH 072/152] dnn: use OpenVINO 2021.1 defines

---
 cmake/OpenCVDetectInferenceEngine.cmake | 4 ++--
 modules/dnn/src/op_inf_engine.hpp       | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake
index 3eaf890f32..ceb7b7989c 100644
--- a/cmake/OpenCVDetectInferenceEngine.cmake
+++ b/cmake/OpenCVDetectInferenceEngine.cmake
@@ -135,9 +135,9 @@ endif()
 
 if(INF_ENGINE_TARGET)
   if(NOT INF_ENGINE_RELEASE)
-    message(WARNING "InferenceEngine version has not been set, 2020.4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
+    message(WARNING "InferenceEngine version has not been set, 2021.1 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
   endif()
-  set(INF_ENGINE_RELEASE "2020040000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
+  set(INF_ENGINE_RELEASE "2021010000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
   set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
     INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
   )
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index e8fdada99a..bb9563f4ac 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -27,10 +27,11 @@
 #define INF_ENGINE_RELEASE_2020_2 2020020000
 #define INF_ENGINE_RELEASE_2020_3 2020030000
 #define INF_ENGINE_RELEASE_2020_4 2020040000
+#define INF_ENGINE_RELEASE_2021_1 2021010000
 
 #ifndef INF_ENGINE_RELEASE
-#warning("IE version have not been provided via command-line. Using 2020.4 by default")
-#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_4
+#warning("IE version has not been provided via command-line. Using 2021.1 by default")
+#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_1
 #endif
 
 #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))

From df7bf9a048468e424831d521721ca8e028ae0621 Mon Sep 17 00:00:00 2001
From: catree <catree.catreus@outlook.com>
Date: Sun, 8 Nov 2020 14:42:47 +0100
Subject: [PATCH 073/152] Fix typo in OpenCVFindOpenBLAS.cmake file.

---
 cmake/OpenCVFindOpenBLAS.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/OpenCVFindOpenBLAS.cmake b/cmake/OpenCVFindOpenBLAS.cmake
index 6cb486d95d..d1db034908 100644
--- a/cmake/OpenCVFindOpenBLAS.cmake
+++ b/cmake/OpenCVFindOpenBLAS.cmake
@@ -57,7 +57,7 @@ SET(Open_BLAS_INCLUDE_SEARCH_PATHS
 )
 
 SET(Open_BLAS_LIB_SEARCH_PATHS
-        $ENV{OpenBLAS}cd
+        $ENV{OpenBLAS}
         $ENV{OpenBLAS}/lib
         $ENV{OpenBLAS_HOME}
         $ENV{OpenBLAS_HOME}/lib

From a104e7c59368f6500c5a9083e1d577d48f9c54ee Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 9 Nov 2020 19:12:09 +0000
Subject: [PATCH 074/152] doxygen: adjust IMAGE_PATH, allow custom
 OPENCV_DOCS_EXTRA_IMAGE_PATH

- add opencv/modules
- add opencv_contrib/modules
---
 doc/CMakeLists.txt | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 107c01a144..83859314b3 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -130,9 +130,23 @@ if(DOXYGEN_FOUND)
   set(tutorial_js_path "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials")
   set(example_path "${CMAKE_SOURCE_DIR}/samples")
 
+  set(doxygen_image_path
+      ${CMAKE_CURRENT_SOURCE_DIR}/images
+      ${paths_doc}
+      ${tutorial_path}
+      ${tutorial_py_path}
+      ${tutorial_js_path}
+      ${paths_tutorial}
+      #${OpenCV_SOURCE_DIR}/samples/data         # TODO: need to resolve ambiguous conflicts first
+      ${OpenCV_SOURCE_DIR}
+      ${OpenCV_SOURCE_DIR}/modules               # <opencv>/modules
+      ${OPENCV_EXTRA_MODULES_PATH}               # <opencv_contrib>/modules
+      ${OPENCV_DOCS_EXTRA_IMAGE_PATH}            # custom variable for user modules
+  )
+
   # set export variables
   string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
-  string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/images ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial}")
+  string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${doxygen_image_path}")
   string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXCLUDE_LIST "${CMAKE_DOXYGEN_EXCLUDE_LIST}")
   string(REPLACE ";" " " CMAKE_DOXYGEN_ENABLED_SECTIONS "${CMAKE_DOXYGEN_ENABLED_SECTIONS}")
   # TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders

From 08271e5591770e0c1efaaff839db6da03401593b Mon Sep 17 00:00:00 2001
From: Igor Murzov <igor.murzov@xperience.ai>
Date: Tue, 10 Nov 2020 15:36:13 +0300
Subject: [PATCH 075/152] Fix code snippets inclusion into video tutorials

Code snippets need a preceding section heading marked with ### to render properly
---
 .../background_subtraction.markdown                 |  3 +--
 .../video_input_psnr_ssim.markdown                  | 13 ++++++-------
 .../videoio/video-write/video_write.markdown        |  2 +-
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/doc/tutorials/video/background_subtraction/background_subtraction.markdown b/doc/tutorials/video/background_subtraction/background_subtraction.markdown
index 91dbd02d9b..267acc6f60 100644
--- a/doc/tutorials/video/background_subtraction/background_subtraction.markdown
+++ b/doc/tutorials/video/background_subtraction/background_subtraction.markdown
@@ -32,8 +32,7 @@ In this tutorial you will learn how to:
 -#  Create and update the background model by using @ref cv::BackgroundSubtractor class;
 -#  Get and show the foreground mask by using @ref cv::imshow ;
 
-Code
-----
+### Code
 
 In the following you can find the source code. We will let the user choose to process either a video
 file or a sequence of images.
diff --git a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
index 2cd038255a..08cc596964 100644
--- a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
+++ b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
@@ -126,8 +126,7 @@ captRefrnc.set(CAP_PROP_POS_FRAMES, 10); // go to the 10th frame of the video
 For properties you can read and change look into the documentation of the @ref cv::VideoCapture::get and
 @ref cv::VideoCapture::set functions.
 
-Image similarity - PSNR and SSIM
---------------------------------
+### Image similarity - PSNR and SSIM
 
 We want to check just how imperceptible our video converting operation went, therefore we need a
 system to check frame by frame the similarity or differences. The most common algorithm used for
@@ -145,15 +144,15 @@ Here the \f$MAX_I\f$ is the maximum valid value for a pixel. In case of the simp
 per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in
 an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as
 we'll need to handle this case separately. The transition to a logarithmic scale is made because the
-pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks
+pixel values have a very wide dynamic range. Translated to OpenCV, the PSNR computation looks
 like:
 
 @add_toggle_cpp
-@include cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-psnr
+@snippet cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-psnr
 @end_toggle
 
 @add_toggle_python
-@include samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-psnr
+@snippet samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-psnr
 @end_toggle
 
 Typically result values are anywhere between 30 and 50 for video compression, where higher is
@@ -172,11 +171,11 @@ implementation below.
     Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." article.
 
 @add_toggle_cpp
-@include cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-mssim
+@snippet samples/cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-mssim
 @end_toggle
 
 @add_toggle_python
-@include samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-mssim
+@snippet samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-mssim
 @end_toggle
 
 This will return a similarity index for each channel of the image. This value is between zero and
diff --git a/doc/tutorials/videoio/video-write/video_write.markdown b/doc/tutorials/videoio/video-write/video_write.markdown
index feafc4408d..b81107559e 100644
--- a/doc/tutorials/videoio/video-write/video_write.markdown
+++ b/doc/tutorials/videoio/video-write/video_write.markdown
@@ -63,7 +63,7 @@ specialized video writing libraries such as *FFMpeg* or codecs as *HuffYUV*, *Co
 an alternative, create the video track with OpenCV and expand it with sound tracks or convert it to
 other formats by using video manipulation programs such as *VirtualDub* or *AviSynth*.
 
-The *VideoWriter* class
+The VideoWriter class
 -----------------------
 
 The content written here builds on the assumption you

From 5f1ca33c6f06727665692ea43988caf5f8caa02b Mon Sep 17 00:00:00 2001
From: Orest Chura <orest.chura@intel.com>
Date: Tue, 10 Nov 2020 21:57:52 +0300
Subject: [PATCH 076/152] Merge pull request #18652 from
 OrestChura:oc/morphologyEx

[G-API]: morphologyEx() Standard Kernel Implementation

* cv::gapi::morphologyEx() kernel
 - implemented (without separate 3x3 version)
 - tests added: check only different operations, not kernels/borders

* Address comments: add `const` where needed

* Replaced fundamental types -> enums where needed
 - added operator<< overload for cv::MorphTypes for tests output
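
A minimal usage sketch of the new kernel (illustration only, not part of the patch;
the input size and values are arbitrary):

```
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    // Graph with a single morphologyEx node: opening with a 3x3 rectangular kernel
    cv::GMat in;
    const cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::GMat out = cv::gapi::morphologyEx(in, cv::MORPH_OPEN, kernel);
    cv::GComputation c(in, out);

    // Compile and run the graph on a real image
    cv::Mat src(480, 640, CV_8UC1, cv::Scalar::all(0));
    cv::Mat dst;
    c.apply(src, dst);
    return 0;
}
```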
---
 modules/gapi/include/opencv2/gapi/imgproc.hpp | 41 ++++++++++++++++++-
 modules/gapi/src/api/kernels_imgproc.cpp      |  7 ++++
 modules/gapi/src/backends/cpu/gcpuimgproc.cpp | 11 +++++
 .../gapi/test/common/gapi_imgproc_tests.hpp   |  2 +
 .../test/common/gapi_imgproc_tests_inl.hpp    | 23 +++++++++++
 .../gapi/test/common/gapi_tests_common.hpp    | 19 +++++++++
 .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp  | 24 +++++++++++
 7 files changed, 126 insertions(+), 1 deletion(-)

diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index 294b3b7842..cc091dfa8e 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -78,6 +78,14 @@ namespace imgproc {
         }
     };
 
+    G_TYPED_KERNEL(GMorphologyEx, <GMat(GMat,MorphTypes,Mat,Point,int,BorderTypes,Scalar)>,
+                   "org.opencv.imgproc.filters.morphologyEx") {
+        static GMatDesc outMeta(const GMatDesc &in, MorphTypes, Mat, Point, int,
+                                BorderTypes, Scalar) {
+            return in;
+        }
+    };
+
     G_TYPED_KERNEL(GSobel, <GMat(GMat,int,int,int,int,double,double,int,Scalar)>, "org.opencv.imgproc.filters.sobel") {
         static GMatDesc outMeta(GMatDesc in, int ddepth, int, int, int, double, double, int, Scalar) {
             return in.withDepth(ddepth);
@@ -521,7 +529,7 @@ anchor is at the element center.
 @param iterations number of times erosion is applied.
 @param borderType pixel extrapolation method, see cv::BorderTypes
 @param borderValue border value in case of a constant border
-@sa  dilate
+@sa  dilate, morphologyEx
  */
 GAPI_EXPORTS GMat erode(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1,
                         int borderType = BORDER_CONSTANT,
@@ -596,6 +604,37 @@ GAPI_EXPORTS GMat dilate3x3(const GMat& src, int iterations = 1,
                             int borderType = BORDER_CONSTANT,
                             const  Scalar& borderValue = morphologyDefaultBorderValue());
 
+/** @brief Performs advanced morphological transformations.
+
+The function can perform advanced morphological transformations using an erosion and dilation as
+basic operations.
+
+Any of the operations can be done in-place. In case of multi-channel images, each channel is
+processed independently.
+
+@note Function textual ID is "org.opencv.imgproc.filters.morphologyEx"
+
+@param src Input image.
+@param op Type of a morphological operation, see #MorphTypes
+@param kernel Structuring element. It can be created using #getStructuringElement.
+@param anchor Anchor position within the element. Both negative values mean that the anchor is at
+the kernel center.
+@param iterations Number of times erosion and dilation are applied.
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
+@param borderValue Border value in case of a constant border. The default value has a special
+meaning.
+@sa  dilate, erode, getStructuringElement
+@note The number of iterations is the number of times erosion or dilation will be
+applied. For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to
+applying successively: erode -> erode -> dilate -> dilate
+(and not erode -> dilate -> erode -> dilate).
+ */
+GAPI_EXPORTS GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel,
+                               const Point       &anchor      = Point(-1,-1),
+                               const int          iterations  = 1,
+                               const BorderTypes  borderType  = BORDER_CONSTANT,
+                               const Scalar      &borderValue = morphologyDefaultBorderValue());
+
 /** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
 
 In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to
diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp
index 652f83935f..9a5b07c14a 100644
--- a/modules/gapi/src/api/kernels_imgproc.cpp
+++ b/modules/gapi/src/api/kernels_imgproc.cpp
@@ -73,6 +73,13 @@ GMat dilate3x3(const GMat& src, int iterations,
     return dilate(src, cv::Mat(), cv::Point(-1,-1), iterations, borderType, borderValue);
 }
 
+GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel, const Point &anchor,
+                  const int iterations, const BorderTypes borderType, const Scalar &borderValue)
+{
+    return imgproc::GMorphologyEx::on(src, op, kernel, anchor, iterations,
+                                      borderType, borderValue);
+}
+
 GMat Sobel(const GMat& src, int ddepth, int dx, int dy, int ksize,
            double scale, double delta,
            int borderType, const Scalar& bordVal)
diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
index c07ed6785c..a3c4e1b60f 100644
--- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
+++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
@@ -145,6 +145,16 @@ GAPI_OCV_KERNEL(GCPUDilate, cv::gapi::imgproc::GDilate)
     }
 };
 
+GAPI_OCV_KERNEL(GCPUMorphologyEx, cv::gapi::imgproc::GMorphologyEx)
+{
+    static void run(const cv::Mat &in, const cv::MorphTypes op, const cv::Mat &kernel,
+                    const cv::Point &anchor, const int iterations,
+                    const cv::BorderTypes borderType, const cv::Scalar &borderValue, cv::Mat &out)
+    {
+        cv::morphologyEx(in, out, op, kernel, anchor, iterations, borderType, borderValue);
+    }
+};
+
 GAPI_OCV_KERNEL(GCPUSobel, cv::gapi::imgproc::GSobel)
 {
     static void run(const cv::Mat& in, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType,
@@ -478,6 +488,7 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
         , GCPUMedianBlur
         , GCPUErode
         , GCPUDilate
+        , GCPUMorphologyEx
         , GCPUSobel
         , GCPUSobelXY
         , GCPULaplacian
diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp
index 38a02985e7..d562b306c2 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp
@@ -46,6 +46,8 @@ GAPI_TEST_FIXTURE(Erode3x3Test, initMatrixRandN, FIXTURE_API(CompareMats,int), 2
 GAPI_TEST_FIXTURE(DilateTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int), 3,
     cmpF, kernSize, kernType)
 GAPI_TEST_FIXTURE(Dilate3x3Test, initMatrixRandN, FIXTURE_API(CompareMats,int), 2, cmpF, numIters)
+GAPI_TEST_FIXTURE(MorphologyExTest, initMatrixRandN, FIXTURE_API(CompareMats,MorphTypes),
+                  2, cmpF, op)
 GAPI_TEST_FIXTURE(SobelTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int,int), 4,
     cmpF, kernSize, dx, dy)
 GAPI_TEST_FIXTURE(SobelXYTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int,int,int), 5,
diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
index 95728e87b7..c087733fa8 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
@@ -290,6 +290,29 @@ TEST_P(Dilate3x3Test, AccuracyTest)
     }
 }
 
+TEST_P(MorphologyExTest, AccuracyTest)
+{
+    MorphShapes defShape = cv::MORPH_RECT;
+    int defKernSize = 3;
+    cv::Mat kernel = cv::getStructuringElement(defShape, cv::Size(defKernSize, defKernSize));
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::morphologyEx(in, op, kernel);
+
+    cv::GComputation c(in, out);
+    c.apply(in_mat1, out_mat_gapi, getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::morphologyEx(in_mat1, out_mat_ocv, op, kernel);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
+        EXPECT_EQ(out_mat_gapi.size(), sz);
+    }
+}
+
 TEST_P(SobelTest, AccuracyTest)
 {
     // G-API code //////////////////////////////////////////////////////////////
diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp
index 113f3c73c0..bb045b83d1 100644
--- a/modules/gapi/test/common/gapi_tests_common.hpp
+++ b/modules/gapi/test/common/gapi_tests_common.hpp
@@ -848,6 +848,25 @@ inline std::ostream& operator<<(std::ostream& os, NormTypes op)
 #undef CASE
     return os;
 }
+
+inline std::ostream& operator<<(std::ostream& os, MorphTypes op)
+{
+#define CASE(v) case MorphTypes::v: os << #v; break
+    switch (op)
+    {
+        CASE(MORPH_ERODE);
+        CASE(MORPH_DILATE);
+        CASE(MORPH_OPEN);
+        CASE(MORPH_CLOSE);
+        CASE(MORPH_GRADIENT);
+        CASE(MORPH_TOPHAT);
+        CASE(MORPH_BLACKHAT);
+        CASE(MORPH_HITMISS);
+        default: GAPI_Assert(false && "unknown MorphTypes value");
+    }
+#undef CASE
+    return os;
+}
 }  // namespace cv
 
 #endif //OPENCV_GAPI_TESTS_COMMON_HPP
diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
index e7f9667096..7cba6b05db 100644
--- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
+++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
@@ -130,6 +130,30 @@ INSTANTIATE_TEST_CASE_P(Dilate3x3TestCPU, Dilate3x3Test,
                                 Values(AbsExact().to_compare_obj()),
                                 Values(1,2,4)));
 
+INSTANTIATE_TEST_CASE_P(MorphologyExTestCPU, MorphologyExTest,
+                        Combine(Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj()),
+                                Values(cv::MorphTypes::MORPH_ERODE,
+                                       cv::MorphTypes::MORPH_DILATE,
+                                       cv::MorphTypes::MORPH_OPEN,
+                                       cv::MorphTypes::MORPH_CLOSE,
+                                       cv::MorphTypes::MORPH_GRADIENT,
+                                       cv::MorphTypes::MORPH_TOPHAT,
+                                       cv::MorphTypes::MORPH_BLACKHAT)));
+
+INSTANTIATE_TEST_CASE_P(MorphologyExHitMissTestCPU, MorphologyExTest,
+                        Combine(Values(CV_8UC1),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(AbsExact().to_compare_obj()),
+                                Values(cv::MorphTypes::MORPH_HITMISS)));
+
 INSTANTIATE_TEST_CASE_P(SobelTestCPU, SobelTest,
                         Combine(Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1),
                                 Values(cv::Size(1280, 720),

From 5dae27865244c0ff1ade47fcdd579457a394d6fc Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 7 Nov 2020 18:25:48 +0000
Subject: [PATCH 077/152] bindings: "inline namespace"
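
The change makes the parser treat `inline namespace` blocks as transparent, the same
way it already handles anonymous namespaces, so declarations inside them are attributed
to the enclosing namespace. A sketch of the C++ construct this covers (names are
hypothetical):

```
namespace cv {
// Members of an inline namespace are injected into the enclosing scope,
// so for binding purposes the block can be treated as if it were absent.
inline namespace v_abi {          // e.g. an ABI-versioning namespace
    int meanValue(int a, int b);  // callable as cv::meanValue()
}
}
```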

---
 modules/python/src2/hdr_parser.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py
index eba7000d47..d8b04b43ce 100755
--- a/modules/python/src2/hdr_parser.py
+++ b/modules/python/src2/hdr_parser.py
@@ -658,6 +658,10 @@ class CppHeaderParser(object):
         stack_top = self.block_stack[-1]
         context = stack_top[self.BLOCK_TYPE]
 
+        if stmt.startswith('inline namespace'):
+            # emulate anonymous namespace
+            return "namespace", "", True, None
+
         stmt_type = ""
         if end_token == "{":
             stmt_type = "block"

From 1b0dca9c2c7c2921303451be7676961447516edf Mon Sep 17 00:00:00 2001
From: Maksim Shabunin <maksim.shabunin@gmail.com>
Date: Wed, 11 Nov 2020 01:54:01 +0300
Subject: [PATCH 078/152] Fix issues found by static analysis
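
Several of these fixes remove reliance on uninitialized or unchecked state. A simplified
sketch of the `Mosaic` change (assuming the surrounding fields; comments abridged):

```
#include <opencv2/core.hpp>

struct Mosaic
{
    cv::Rect mos;     // coordinates of the mosaic
    int      cellSz;  // cell size
    int      decim;   // decimation

    // `Mosaic() = default;` left cellSz and decim uninitialized, which static
    // analysis flags; explicit zero-initialization resolves the warning.
    Mosaic() : cellSz(0), decim(0) {}
};
```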

---
 modules/calib3d/src/usac/ransac_solvers.cpp               | 4 ++--
 modules/core/src/dxt.cpp                                  | 4 ++--
 modules/gapi/include/opencv2/gapi/render/render_types.hpp | 2 +-
 modules/objdetect/src/qrcode.cpp                          | 3 ++-
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/modules/calib3d/src/usac/ransac_solvers.cpp b/modules/calib3d/src/usac/ransac_solvers.cpp
index 65fa2d3b9f..0c7637d582 100644
--- a/modules/calib3d/src/usac/ransac_solvers.cpp
+++ b/modules/calib3d/src/usac/ransac_solvers.cpp
@@ -286,7 +286,7 @@ public:
                             current_score = quality->getScore(models[i]);
                         } else {
                             if (is_magsac && iters % repeat_magsac == 0) {
-                                if (!local_optimization->refineModel
+                                if (local_optimization && !local_optimization->refineModel
                                         (models[i], best_score_thread, models[i], current_score))
                                     continue;
                             } else if (model_verifier->isModelGood(models[i])) {
@@ -1028,4 +1028,4 @@ bool run (const Ptr<const Model> &params, InputArray points1, InputArray points2
     }
     return false;
 }
-}}
\ No newline at end of file
+}}
diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp
index b307703a32..fcdb2a202f 100644
--- a/modules/core/src/dxt.cpp
+++ b/modules/core/src/dxt.cpp
@@ -531,14 +531,14 @@ template<typename T> struct DFT_R5
 template<typename T> struct DFT_VecR2
 {
     void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
-        return DFT_R2<T>()(dst, c_n, n, dw0, wave);
+        DFT_R2<T>()(dst, c_n, n, dw0, wave);
     }
 };
 
 template<typename T> struct DFT_VecR3
 {
     void operator()(Complex<T>* dst, const int c_n, const int n, const int dw0, const Complex<T>* wave) const {
-        return DFT_R3<T>()(dst, c_n, n, dw0, wave);
+        DFT_R3<T>()(dst, c_n, n, dw0, wave);
     }
 };
 
diff --git a/modules/gapi/include/opencv2/gapi/render/render_types.hpp b/modules/gapi/include/opencv2/gapi/render/render_types.hpp
index 08b14d1ddd..ca403be361 100644
--- a/modules/gapi/include/opencv2/gapi/render/render_types.hpp
+++ b/modules/gapi/include/opencv2/gapi/render/render_types.hpp
@@ -252,7 +252,7 @@ struct Mosaic
     {
     }
 
-    Mosaic() = default;
+    Mosaic() : cellSz(0), decim(0) {}
 
     /*@{*/
     cv::Rect   mos;    //!< Coordinates of the mosaic
diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp
index c42bb8a309..d47f1d3a20 100644
--- a/modules/objdetect/src/qrcode.cpp
+++ b/modules/objdetect/src/qrcode.cpp
@@ -1122,7 +1122,7 @@ bool QRDecode::computeClosestPoints(const vector<Point> &result_integer_hull)
 {
     CV_TRACE_FUNCTION();
     double min_norm, max_norm = 0.0;
-    size_t idx_min;
+    size_t idx_min = (size_t)-1;
     for (size_t i = 0; i < original_points.size(); i++)
     {
         min_norm = std::numeric_limits<double>::max();
@@ -1144,6 +1144,7 @@ bool QRDecode::computeClosestPoints(const vector<Point> &result_integer_hull)
             max_norm = min_norm;
             unstable_pair = std::pair<size_t,Point>(i, closest_pnt);
         }
+        CV_Assert(idx_min != (size_t)-1);
         closest_points.push_back(std::pair<size_t,Point>(idx_min, closest_pnt));
     }
 

From 3fc1c73064e4112a1e24e9d2d1fd41e3eabe132e Mon Sep 17 00:00:00 2001
From: Orest Chura <orest.chura@intel.com>
Date: Wed, 11 Nov 2020 15:13:10 +0300
Subject: [PATCH 079/152] Merge pull request #18510 from
 OrestChura:oc/boundingRect

[G-API]: findContours() and boundingRect() Standard Kernels Implementation

* Add findContours() standard kernel
 - API and documentation provided:
   - as OpenCV provides two overloads (with and without hierarchy calculation) that differ, from the G-API point of view, only in their outputs, two different G-API functions and kernels are implemented
   - G-API Imgproc documentation divided into more parts according to imgproc module parts
   - some typos connected with division into parts corrected
 - `GArray<GArray<U>>` overload for `get_out` function provided to convert correctly into `vector<vector<U>>`
 - OCV backend supported
 - accuracy tests provided

* Add boundingRect() standard kernel
     - API and documentation provided:
       - GOpaque<Rect> used as an output
       - as OpenCV accepts either a gray-scale image or a set of 2D points (`Point2i` or `Point2f` supported), three different overloads of a single G-API function and three kernels are implemented
          - for a gray-scale image the overload via `GMat`
          - for a set of `Point2i` - the one via GArray<`Point2i`>
          - set of `Point2f` -> GArray<`Point2f`>
     - OCV backend supported
     - accuracy tests provided
       - comparison function for Rects provided
     - some typos in `gapi_tests_common` corrected

* Fix precommit windows warnings

* Addressing comments:
 - split tests
 - Fix Windows warnings

* Static_cast for warnings

* Remove randomness
 - Fix unnecessary precision losses

* Forgot reference for RNG

* addressing comments

* equalizeHist -> no group

* `const` added in new functions

* Address suggestions:
 - Hierarchical -> H
 - added cv::GMatDesc::isPointsVector()
 - added support for passing a set of points to boundingRect()

* Addressing comments
 - IoU comparison function added for Rects
 - isPointsVector moved from a GMatDesc method to a separate function in imgproc.hpp
 - enums instead of int
 - typos corrected

* Addressing comments
 - findContours: Point offset -> GOpaque<Point>
 - removed "straight" comparison for Rects, IoU available only
 - changed vectors initialization -> fix Debug test run
 - Some typos

* added comment for later upgrades

* Fix FIXME comments so they do not corrupt the docs

* Addressing comments
 - overload without offset added (as a temporary workaround)
 - checkMetaForFindingContours -> validateFindingContoursMeta
 - added ostream overload for enums used in tests
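
A usage sketch combining the new kernels in one graph (illustration only; the input
image and sizes are hypothetical):

```
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
    // Two of the new kernels in one graph: contours of a binary image and the
    // bounding rectangle of its non-zero pixels.
    cv::GMat in;
    cv::GArray<cv::GArray<cv::Point>> conts =
        cv::gapi::findContours(in, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::GOpaque<cv::Rect> rect = cv::gapi::boundingRect(in);
    cv::GComputation c(cv::GIn(in), cv::GOut(conts, rect));

    // Run on a binary test image with a single filled blob
    cv::Mat bin = cv::Mat::zeros(480, 640, CV_8UC1);
    cv::circle(bin, cv::Point(320, 240), 100, cv::Scalar::all(255), cv::FILLED);
    std::vector<std::vector<cv::Point>> out_conts;
    cv::Rect out_rect;
    c.apply(cv::gin(bin), cv::gout(out_conts, out_rect));
    return 0;
}
```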
---
 .../include/opencv2/gapi/cpu/gcpukernel.hpp   |   5 +
 modules/gapi/include/opencv2/gapi/imgproc.hpp | 240 ++++++++++++++-
 modules/gapi/src/api/kernels_imgproc.cpp      |  42 +++
 modules/gapi/src/backends/cpu/gcpuimgproc.cpp |  71 +++++
 .../gapi/test/common/gapi_imgproc_tests.hpp   |  15 +
 .../test/common/gapi_imgproc_tests_inl.hpp    | 282 ++++++++++++++++++
 .../gapi/test/common/gapi_tests_common.hpp    | 108 ++++++-
 .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp  |  72 +++++
 8 files changed, 821 insertions(+), 14 deletions(-)

diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
index 741fbe18f0..5dd70bd2e8 100644
--- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
+++ b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
@@ -271,6 +271,11 @@ template<> struct get_out<cv::GArray<cv::GMat> >: public get_out<cv::GArray<cv::
 {
 };
 
+// FIXME(dm): GArray<vector<U>>/GArray<GArray<U>> conversion should be done more gracefully in the system
+template<typename U> struct get_out<cv::GArray<cv::GArray<U>> >: public get_out<cv::GArray<std::vector<U>> >
+{
+};
+
 template<typename U> struct get_out<cv::GOpaque<U>>
 {
     static U& get(GCPUContext &ctx, int idx)
diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index cc091dfa8e..0e4254cb87 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -21,14 +21,45 @@
 @{
     @defgroup gapi_filters Graph API: Image filters
     @defgroup gapi_colorconvert Graph API: Converting image from one color space to another
+    @defgroup gapi_feature Graph API: Image Feature Detection
+    @defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors
 @}
  */
 
+namespace {
+void validateFindingContoursMeta(const int depth, const int chan, const int mode)
+{
+    GAPI_Assert(chan == 1);
+    switch (mode)
+    {
+    case cv::RETR_CCOMP:
+        GAPI_Assert(depth == CV_8U || depth == CV_32S);
+        break;
+    case cv::RETR_FLOODFILL:
+        GAPI_Assert(depth == CV_32S);
+        break;
+    default:
+        GAPI_Assert(depth == CV_8U);
+        break;
+    }
+}
+
+// Checks if the passed mat is a set of n-dimensional points of the given depth
+bool isPointsVector(const int chan, const cv::Size &size, const int depth,
+                    const int n, const int ddepth)
+{
+    return (ddepth == depth || ddepth < 0) &&
+           ((chan == n && (size.height == 1 || size.width == 1)) ||
+            (chan == 1 && size.width == n));
+}
+} // anonymous namespace
+
 namespace cv { namespace gapi {
 
 namespace imgproc {
     using GMat2 = std::tuple<GMat,GMat>;
     using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
+    using GFindContoursOutput = std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>;
 
     G_TYPED_KERNEL(GFilter2D, <GMat(GMat,int,Mat,Point,Scalar,int,Scalar)>,"org.opencv.imgproc.filters.filter2D") {
         static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) {
@@ -118,7 +149,7 @@ namespace imgproc {
         }
     };
 
-    G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.canny"){
+    G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.feature.canny"){
         static GMatDesc outMeta(GMatDesc in, double, double, int, bool) {
             return in.withType(CV_8U, 1);
         }
@@ -126,12 +157,83 @@ namespace imgproc {
 
     G_TYPED_KERNEL(GGoodFeatures,
                    <cv::GArray<cv::Point2f>(GMat,int,double,double,Mat,int,bool,double)>,
-                   "org.opencv.imgproc.goodFeaturesToTrack") {
+                   "org.opencv.imgproc.feature.goodFeaturesToTrack") {
         static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) {
             return empty_array_desc();
         }
     };
 
+    using RetrMode = RetrievalModes;
+    using ContMethod = ContourApproximationModes;
+    G_TYPED_KERNEL(GFindContours, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
+                   "org.opencv.imgproc.shape.findContours")
+    {
+        static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
+        {
+            validateFindingContoursMeta(in.depth, in.chan, mode);
+            return empty_array_desc();
+        }
+    };
+
+    // FIXME oc: make default value offset = Point()
+    G_TYPED_KERNEL(GFindContoursNoOffset, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod)>,
+                   "org.opencv.imgproc.shape.findContoursNoOffset")
+    {
+        static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod)
+        {
+            validateFindingContoursMeta(in.depth, in.chan, mode);
+            return empty_array_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFindContoursH,<GFindContoursOutput(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
+                   "org.opencv.imgproc.shape.findContoursH")
+    {
+        static std::tuple<GArrayDesc,GArrayDesc>
+        outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
+        {
+            validateFindingContoursMeta(in.depth, in.chan, mode);
+            return std::make_tuple(empty_array_desc(), empty_array_desc());
+        }
+    };
+
+    // FIXME oc: make default value offset = Point()
+    G_TYPED_KERNEL(GFindContoursHNoOffset,<GFindContoursOutput(GMat,RetrMode,ContMethod)>,
+                   "org.opencv.imgproc.shape.findContoursHNoOffset")
+    {
+        static std::tuple<GArrayDesc,GArrayDesc>
+        outMeta(GMatDesc in, RetrMode mode, ContMethod)
+        {
+            validateFindingContoursMeta(in.depth, in.chan, mode);
+            return std::make_tuple(empty_array_desc(), empty_array_desc());
+        }
+    };
+
+    G_TYPED_KERNEL(GBoundingRectMat, <GOpaque<Rect>(GMat)>,
+                   "org.opencv.imgproc.shape.boundingRectMat") {
+        static GOpaqueDesc outMeta(GMatDesc in) {
+            GAPI_Assert((in.depth == CV_8U && in.chan == 1) ||
+                        (isPointsVector(in.chan, in.size, in.depth, 2, CV_32S) ||
+                         isPointsVector(in.chan, in.size, in.depth, 2, CV_32F)));
+
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GBoundingRectVector32S, <GOpaque<Rect>(GArray<Point2i>)>,
+                   "org.opencv.imgproc.shape.boundingRectVector32S") {
+        static GOpaqueDesc outMeta(GArrayDesc) {
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GBoundingRectVector32F, <GOpaque<Rect>(GArray<Point2f>)>,
+                   "org.opencv.imgproc.shape.boundingRectVector32F") {
+        static GOpaqueDesc outMeta(GArrayDesc) {
+            return empty_gopaque_desc();
+        }
+    };
+
     G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
         static GMatDesc outMeta(GMatDesc in) {
             return in; // type still remains CV_8UC3;
@@ -280,7 +382,7 @@ namespace imgproc {
         }
     };
 
-    G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12torgbp") {
+    G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12torgbp") {
         static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
             GAPI_Assert(inY.depth == CV_8U);
             GAPI_Assert(inUV.depth == CV_8U);
@@ -294,7 +396,7 @@ namespace imgproc {
         }
     };
 
-    G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12togray") {
+    G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12togray") {
         static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
             GAPI_Assert(inY.depth   == CV_8U);
             GAPI_Assert(inUV.depth  == CV_8U);
@@ -309,7 +411,7 @@ namespace imgproc {
         }
     };
 
-    G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12tobgrp") {
+    G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12tobgrp") {
         static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
             GAPI_Assert(inY.depth == CV_8U);
             GAPI_Assert(inUV.depth == CV_8U);
@@ -800,6 +902,10 @@ proportional to sigmaSpace.
 GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace,
                                   int borderType = BORDER_DEFAULT);
 
+//! @} gapi_filters
+
+//! @addtogroup gapi_feature
+//! @{
 /** @brief Finds edges in an image using the Canny algorithm.
 
 The function finds edges in the input image and marks them in the output map edges using the
@@ -807,7 +913,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo
 largest value is used to find initial segments of strong edges. See
 <http://en.wikipedia.org/wiki/Canny_edge_detector>
 
-@note Function textual ID is "org.opencv.imgproc.filters.canny"
+@note Function textual ID is "org.opencv.imgproc.feature.canny"
 
 @param image 8-bit input image.
 @param threshold1 first threshold for the hysteresis procedure.
@@ -842,7 +948,7 @@ The function can be used to initialize a point-based tracker of an object.
 A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
 with qualityLevel=B .
 
-@note Function textual ID is "org.opencv.imgproc.goodFeaturesToTrack"
+@note Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack"
 
 @param image Input 8-bit or floating-point 32-bit, single-channel image.
 @param maxCorners Maximum number of corners to return. If there are more corners than are found,
@@ -876,6 +982,8 @@ GAPI_EXPORTS GArray<Point2f> goodFeaturesToTrack(const GMat  &image,
 
 /** @brief Equalizes the histogram of a grayscale image.
 
+//! @} gapi_feature
+
 The function equalizes the histogram of the input image using the following algorithm:
 
 - Calculate the histogram \f$H\f$ for src .
@@ -893,6 +1001,120 @@ The algorithm normalizes the brightness and increases the contrast of the image.
  */
 GAPI_EXPORTS GMat equalizeHist(const GMat& src);
 
+//! @addtogroup gapi_shape
+//! @{
+/** @brief Finds contours in a binary image.
+
+The function retrieves contours from the binary image using the algorithm @cite Suzuki85 .
+The contours are a useful tool for shape analysis and object detection and recognition.
+See squares.cpp in the OpenCV sample directory.
+
+@note Function textual ID is "org.opencv.imgproc.shape.findContours"
+
+@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
+pixels remain 0's, so the image is treated as binary. You can use #compare, #inRange, #threshold,
+#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
+If mode equals #RETR_CCOMP, the input can also be a 32-bit integer
+image of labels ( @ref CV_32SC1 ). If mode is #RETR_FLOODFILL, only @ref CV_32SC1 is supported.
+@param mode Contour retrieval mode, see #RetrievalModes
+@param method Contour approximation method, see #ContourApproximationModes
+@param offset Optional offset by which every contour point is shifted. This is useful if the
+contours are extracted from the image ROI and then they should be analyzed in the whole image
+context.
+
+@return GArray of detected contours. Each contour is stored as a GArray of points.
+ */
+GAPI_EXPORTS GArray<GArray<Point>>
+findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
+             const GOpaque<Point> &offset);
+
+// FIXME oc: make default value offset = Point()
+/** @overload
+@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset"
+ */
+GAPI_EXPORTS GArray<GArray<Point>>
+findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
+
+/** @brief Finds contours and their hierarchy in a binary image.
+
+The function retrieves contours from the binary image using the algorithm @cite Suzuki85
+and calculates their hierarchy.
+The contours are a useful tool for shape analysis and object detection and recognition.
+See squares.cpp in the OpenCV sample directory.
+
+@note Function textual ID is "org.opencv.imgproc.shape.findContoursH"
+
+@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
+pixels remain 0's, so the image is treated as binary. You can use #compare, #inRange, #threshold,
+#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
+If mode equals #RETR_CCOMP, the input can also be a 32-bit integer
+image of labels ( @ref CV_32SC1 ). If mode is #RETR_FLOODFILL, only @ref CV_32SC1 is supported.
+@param mode Contour retrieval mode, see #RetrievalModes
+@param method Contour approximation method, see #ContourApproximationModes
+@param offset Optional offset by which every contour point is shifted. This is useful if the
+contours are extracted from the image ROI and then they should be analyzed in the whole image
+context.
+
+@return GArray of detected contours. Each contour is stored as a GArray of points.
+@return Optional output GArray of cv::Vec4i, containing information about the image topology.
+It has as many elements as the number of contours. For each i-th contour contours[i], the elements
+hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based
+indices in contours of the next and previous contours at the same hierarchical level, the first
+child contour and the parent contour, respectively. If for the contour i there are no next,
+previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
+ */
+GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
+findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
+              const GOpaque<Point> &offset);
+
+// FIXME oc: make default value offset = Point()
+/** @overload
+@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset"
+ */
+GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
+findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
+
+/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels
+of a gray-scale image.
+
+The function calculates and returns the minimal up-right bounding rectangle for the specified
+point set or non-zero pixels of a gray-scale image.
+
+@note Function textual ID is "org.opencv.imgproc.shape.boundingRectMat"
+
+@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
+2D points stored in Mat.
+
+@note In case a set of 2D points is given, the Mat should be 2-dimensional: a single row or column
+if there are 2 channels, or 2 columns if there is a single channel. The Mat should have either
+@ref CV_32S or @ref CV_32F depth
+ */
+GAPI_EXPORTS GOpaque<Rect> boundingRect(const GMat& src);
+
+/** @overload
+
+Calculates the up-right bounding rectangle of a point set.
+
+@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S"
+
+@param src Input 2D point set, stored in std::vector<cv::Point2i>.
+ */
+GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2i>& src);
+
+/** @overload
+
+Calculates the up-right bounding rectangle of a point set.
+
+@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F"
+
+@param src Input 2D point set, stored in std::vector<cv::Point2f>.
+ */
+GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2f>& src);
+
+//! @} gapi_shape
+
+//! @addtogroup gapi_colorconvert
+//! @{
 /** @brief Converts an image from BGR color space to RGB color space.
 
 The function converts an input image from BGR color space to RGB.
@@ -907,10 +1129,6 @@ Output image is 8-bit unsigned 3-channel image @ref CV_8UC3.
 */
 GAPI_EXPORTS GMat BGR2RGB(const GMat& src);
 
-//! @} gapi_filters
-
-//! @addtogroup gapi_colorconvert
-//! @{
 /** @brief Converts an image from RGB color space to gray-scaled.
 The conventional ranges for R, G, and B channel values are 0 to 255.
 Resulting gray color value computed as
diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp
index 9a5b07c14a..faf8de54c7 100644
--- a/modules/gapi/src/api/kernels_imgproc.cpp
+++ b/modules/gapi/src/api/kernels_imgproc.cpp
@@ -122,6 +122,48 @@ cv::GArray<cv::Point2f> goodFeaturesToTrack(const GMat& image, int maxCorners, d
                                       useHarrisDetector, k);
 }
 
+GArray<GArray<Point>>
+findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
+             const GOpaque<Point> &offset)
+{
+    return imgproc::GFindContours::on(src, mode, method, offset);
+}
+
+GArray<GArray<Point>>
+findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method)
+{
+    return imgproc::GFindContoursNoOffset::on(src, mode, method);
+}
+
+
+std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
+findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
+              const GOpaque<Point> &offset)
+{
+    return imgproc::GFindContoursH::on(src, mode, method, offset);
+}
+
+std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
+findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method)
+{
+    return imgproc::GFindContoursHNoOffset::on(src, mode, method);
+}
+
+GOpaque<Rect> boundingRect(const GMat& src)
+{
+    return imgproc::GBoundingRectMat::on(src);
+}
+
+GOpaque<Rect> boundingRect(const GArray<Point2i>& src)
+{
+    return imgproc::GBoundingRectVector32S::on(src);
+}
+
+GOpaque<Rect> boundingRect(const GArray<Point2f>& src)
+{
+    return imgproc::GBoundingRectVector32F::on(src);
+}
+
 GMat BGR2RGB(const GMat& src)
 {
     return imgproc::GBGR2RGB::on(src);
diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
index a3c4e1b60f..9eca0f12f0 100644
--- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
+++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
@@ -221,6 +221,70 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures)
     }
 };
 
+GAPI_OCV_KERNEL(GCPUFindContours, cv::gapi::imgproc::GFindContours)
+{
+    static void run(const cv::Mat& image, const cv::RetrievalModes mode,
+                    const cv::ContourApproximationModes method, const cv::Point& offset,
+                    std::vector<std::vector<cv::Point>> &outConts)
+    {
+        cv::findContours(image, outConts, mode, method, offset);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFindContoursNoOffset, cv::gapi::imgproc::GFindContoursNoOffset)
+{
+    static void run(const cv::Mat& image, const cv::RetrievalModes mode,
+                    const cv::ContourApproximationModes method,
+                    std::vector<std::vector<cv::Point>> &outConts)
+    {
+        cv::findContours(image, outConts, mode, method);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFindContoursH, cv::gapi::imgproc::GFindContoursH)
+{
+    static void run(const cv::Mat& image, const cv::RetrievalModes mode,
+                    const cv::ContourApproximationModes method, const cv::Point& offset,
+                    std::vector<std::vector<cv::Point>> &outConts, std::vector<cv::Vec4i> &outHier)
+    {
+        cv::findContours(image, outConts, outHier, mode, method, offset);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFindContoursHNoOffset, cv::gapi::imgproc::GFindContoursHNoOffset)
+{
+    static void run(const cv::Mat& image, const cv::RetrievalModes mode,
+                    const cv::ContourApproximationModes method,
+                    std::vector<std::vector<cv::Point>> &outConts, std::vector<cv::Vec4i> &outHier)
+    {
+        cv::findContours(image, outConts, outHier, mode, method);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUBoundingRectMat, cv::gapi::imgproc::GBoundingRectMat)
+{
+    static void run(const cv::Mat& in, cv::Rect& out)
+    {
+        out = cv::boundingRect(in);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUBoundingRectVector32S, cv::gapi::imgproc::GBoundingRectVector32S)
+{
+    static void run(const std::vector<cv::Point2i>& in, cv::Rect& out)
+    {
+        out = cv::boundingRect(in);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVector32F)
+{
+    static void run(const std::vector<cv::Point2f>& in, cv::Rect& out)
+    {
+        out = cv::boundingRect(in);
+    }
+};
+
 GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB)
 {
     static void run(const cv::Mat& in, cv::Mat &out)
@@ -496,8 +560,15 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
         , GCPUCanny
         , GCPUGoodFeatures
         , GCPUEqualizeHist
+        , GCPUFindContours
+        , GCPUFindContoursNoOffset
+        , GCPUFindContoursH
+        , GCPUFindContoursHNoOffset
         , GCPUBGR2RGB
         , GCPURGB2YUV
+        , GCPUBoundingRectMat
+        , GCPUBoundingRectVector32S
+        , GCPUBoundingRectVector32F
         , GCPUYUV2RGB
         , GCPUBGR2I420
         , GCPURGB2I420
diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp
index d562b306c2..b27da28c87 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp
@@ -66,6 +66,21 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest,
                                           double,int,bool),
                               8, cmpF, fileName, type, maxCorners, qualityLevel, minDistance,
                               blockSize, useHarrisDetector)
+GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursNoOffsetTest,
+                              FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes,
+                                          cv::ContourApproximationModes),
+                              4, sz, type, mode, method)
+GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursOffsetTest, <>, 0)
+GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHNoOffsetTest,
+                              FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes,
+                                          cv::ContourApproximationModes),
+                              4, sz, type, mode, method)
+GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHOffsetTest, <>, 0)
+GAPI_TEST_FIXTURE(BoundingRectMatTest, initMatrixRandU, FIXTURE_API(CompareRects), 1, cmpF)
+GAPI_TEST_FIXTURE(BoundingRectMatVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
+GAPI_TEST_FIXTURE(BoundingRectMatVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
+GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
+GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
 GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
index c087733fa8..91e676c5e7 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
@@ -50,6 +50,27 @@ namespace
             rgb2yuyv(in_line_p, out_line_p, in.cols);
         }
     }
+
+    // Draw random ellipses on given mat of given size and type
+    void initMatForFindingContours(cv::Mat& mat, const cv::Size& sz, const int type)
+    {
+        cv::RNG& rng = theRNG();
+        mat = cv::Mat(sz, type, cv::Scalar::all(0));
+        size_t numEllipses = rng.uniform(1, 10);
+
+        for( size_t i = 0; i < numEllipses; i++ )
+        {
+            cv::Point center;
+            cv::Size  axes;
+            center.x     = rng.uniform(0, sz.width);
+            center.y     = rng.uniform(0, sz.height);
+            axes.width   = rng.uniform(2, sz.width);
+            axes.height  = rng.uniform(2, sz.height);
+            int color    = rng.uniform(1, 256);
+            double angle = rng.uniform(0., 180.);
+            cv::ellipse(mat, center, axes, angle, 0., 360., color, 1, FILLED);
+        }
+    }
 }
 
 TEST_P(Filter2DTest, AccuracyTest)
@@ -470,6 +491,267 @@ TEST_P(GoodFeaturesTest, AccuracyTest)
     }
 }
 
+TEST_P(FindContoursNoOffsetTest, AccuracyTest)
+{
+    std::vector<std::vector<cv::Point>> outCtsOCV,  outCtsGAPI;
+
+    initMatForFindingContours(in_mat1, sz, type);
+    out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
+    out_mat_ocv  = cv::Mat(sz, type, cv::Scalar::all(0));
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::findContours(in_mat1, outCtsOCV, mode, method);
+    }
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    cv::GArray<cv::GArray<cv::Point>> outCts;
+    outCts = cv::gapi::findContours(in, mode, method);
+    cv::GComputation c(GIn(in), GOut(outCts));
+    c.apply(gin(in_mat1), gout(outCtsGAPI), getCompileArgs());
+
+    // Comparison //////////////////////////////////////////////////////////////
+    EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
+    cv::fillPoly(out_mat_ocv,  outCtsOCV,  cv::Scalar::all(1));
+    cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
+    EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
+}
+
+TEST_P(FindContoursOffsetTest, AccuracyTest)
+{
+    const cv::Size sz(1280, 720);
+    const MatType2 type = CV_8UC1;
+    const cv::RetrievalModes mode = cv::RETR_EXTERNAL;
+    const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE;
+    const cv::Point offset(15, 15);
+    std::vector<std::vector<cv::Point>> outCtsOCV,  outCtsGAPI;
+
+    initMatForFindingContours(in_mat1, sz, type);
+    out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
+    out_mat_ocv  = cv::Mat(sz, type, cv::Scalar::all(0));
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::findContours(in_mat1, outCtsOCV, mode, method, offset);
+    }
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    GOpaque<Point> gOffset;
+    cv::GArray<cv::GArray<cv::Point>> outCts;
+    outCts = cv::gapi::findContours(in, mode, method, gOffset);
+    cv::GComputation c(GIn(in, gOffset), GOut(outCts));
+    c.apply(gin(in_mat1, offset), gout(outCtsGAPI), getCompileArgs());
+
+    // Comparison //////////////////////////////////////////////////////////////
+    EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
+    cv::fillPoly(out_mat_ocv,  outCtsOCV,  cv::Scalar::all(1));
+    cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
+    EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
+}
+
+TEST_P(FindContoursHNoOffsetTest, AccuracyTest)
+{
+    std::vector<std::vector<cv::Point>> outCtsOCV,  outCtsGAPI;
+    std::vector<cv::Vec4i>              outHierOCV, outHierGAPI;
+
+    initMatForFindingContours(in_mat1, sz, type);
+    out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
+    out_mat_ocv  = cv::Mat(sz, type, cv::Scalar::all(0));
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method);
+    }
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    cv::GArray<cv::GArray<cv::Point>> outCts;
+    cv::GArray<cv::Vec4i> outHier;
+    std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method);
+    cv::GComputation c(GIn(in), GOut(outCts, outHier));
+    c.apply(gin(in_mat1), gout(outCtsGAPI, outHierGAPI), getCompileArgs());
+
+    // Comparison //////////////////////////////////////////////////////////////
+    EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
+    cv::fillPoly(out_mat_ocv,  outCtsOCV,  cv::Scalar::all(1));
+    cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
+    EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
+
+    EXPECT_TRUE(outHierGAPI.size() == outHierOCV.size());
+    EXPECT_TRUE(AbsExactVector<cv::Vec4i>().to_compare_f()(outHierOCV, outHierGAPI));
+}
+
+TEST_P(FindContoursHOffsetTest, AccuracyTest)
+{
+    const cv::Size sz(1280, 720);
+    const MatType2 type = CV_8UC1;
+    const cv::RetrievalModes mode = cv::RETR_EXTERNAL;
+    const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE;
+    const cv::Point offset(15, 15);
+    std::vector<std::vector<cv::Point>> outCtsOCV,  outCtsGAPI;
+    std::vector<cv::Vec4i>              outHierOCV, outHierGAPI;
+
+    initMatForFindingContours(in_mat1, sz, type);
+    out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
+    out_mat_ocv  = cv::Mat(sz, type, cv::Scalar::all(0));
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method, offset);
+    }
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    GOpaque<Point> gOffset;
+    cv::GArray<cv::GArray<cv::Point>> outCts;
+    cv::GArray<cv::Vec4i> outHier;
+    std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method, gOffset);
+    cv::GComputation c(GIn(in, gOffset), GOut(outCts, outHier));
+    c.apply(gin(in_mat1, offset), gout(outCtsGAPI, outHierGAPI), getCompileArgs());
+
+    // Comparison //////////////////////////////////////////////////////////////
+    EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
+    cv::fillPoly(out_mat_ocv,  outCtsOCV,  cv::Scalar::all(1));
+    cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
+    EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
+
+    EXPECT_TRUE(outHierGAPI.size() == outHierOCV.size());
+    EXPECT_TRUE(AbsExactVector<cv::Vec4i>().to_compare_f()(outHierOCV, outHierGAPI));
+}
+
+TEST_P(BoundingRectMatTest, AccuracyTest)
+{
+    cv::Rect out_rect_gapi, out_rect_ocv;
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::boundingRect(in);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        out_rect_ocv = cv::boundingRect(in_mat1);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
+    }
+}
+
+TEST_P(BoundingRectMatVector32STest, AccuracyTest)
+{
+    cv::Rect out_rect_gapi, out_rect_ocv;
+
+    std::vector<cv::Point2i> in_vectorS(sz.width);
+    cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255));
+    in_mat1 = cv::Mat(in_vectorS);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::boundingRect(in);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        out_rect_ocv = cv::boundingRect(in_mat1);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
+    }
+}
+
+TEST_P(BoundingRectMatVector32FTest, AccuracyTest)
+{
+    cv::RNG& rng = theRNG();
+    cv::Rect out_rect_gapi, out_rect_ocv;
+
+    std::vector<cv::Point2f> in_vectorF(sz.width);
+    const int fscale = 256;  // avoid bits near ULP, generate stable test input
+    for (int i = 0; i < sz.width; i++)
+    {
+        cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast<float>(fscale),
+                       rng.uniform(0, 255 * fscale) / static_cast<float>(fscale));
+        in_vectorF[i] = pt;  // assign into the pre-sized vector (push_back would double its size)
+    }
+    in_mat1 = cv::Mat(in_vectorF);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::boundingRect(in);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        out_rect_ocv = cv::boundingRect(in_mat1);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
+    }
+}
+
+
+TEST_P(BoundingRectVector32STest, AccuracyTest)
+{
+    cv::Rect out_rect_gapi, out_rect_ocv;
+
+    std::vector<cv::Point2i> in_vectorS(sz.width);
+    cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255));
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point2i> in;
+    auto out = cv::gapi::boundingRect(in);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vectorS), cv::gout(out_rect_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        out_rect_ocv = cv::boundingRect(in_vectorS);
+    }
+
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
+    }
+}
+
+TEST_P(BoundingRectVector32FTest, AccuracyTest)
+{
+    cv::RNG& rng = theRNG();
+    cv::Rect out_rect_gapi, out_rect_ocv;
+
+    std::vector<cv::Point2f> in_vectorF(sz.width);
+    const int fscale = 256;  // avoid bits near ULP, generate stable test input
+    for (int i = 0; i < sz.width; i++)
+    {
+        cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast<float>(fscale),
+                       rng.uniform(0, 255 * fscale) / static_cast<float>(fscale));
+        in_vectorF[i] = pt;  // assign into the pre-sized vector (push_back would double its size)
+    }
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point2f> in;
+    auto out = cv::gapi::boundingRect(in);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vectorF), cv::gout(out_rect_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        out_rect_ocv = cv::boundingRect(in_vectorF);
+    }
+
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
+    }
+}
+
 TEST_P(BGR2RGBTest, AccuracyTest)
 {
     // G-API code //////////////////////////////////////////////////////////////
diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp
index bb045b83d1..948476fa10 100644
--- a/modules/gapi/test/common/gapi_tests_common.hpp
+++ b/modules/gapi/test/common/gapi_tests_common.hpp
@@ -463,6 +463,7 @@ struct TestWithParamsSpecific : public TestWithParamsBase<ParamsSpecific<Specifi
 
 using compare_f = std::function<bool(const cv::Mat &a, const cv::Mat &b)>;
 using compare_scalar_f = std::function<bool(const cv::Scalar &a, const cv::Scalar &b)>;
+using compare_rect_f = std::function<bool(const cv::Rect &a, const cv::Rect &b)>;
 
 template<typename Elem>
 using compare_vector_f = std::function<bool(const std::vector<Elem> &a,
@@ -489,6 +490,7 @@ private:
 
 using CompareMats = CompareF<cv::Mat, cv::Mat>;
 using CompareScalars = CompareF<cv::Scalar, cv::Scalar>;
+using CompareRects = CompareF<cv::Rect, cv::Rect>;
 
 template<typename Elem>
 using CompareVectors = CompareF<std::vector<Elem>, std::vector<Elem>>;
@@ -535,6 +537,27 @@ struct WrappableScalar
     }
 };
 
+template<typename T>
+struct WrappableRect
+{
+    compare_rect_f to_compare_f()
+    {
+        T t = *static_cast<T*const>(this);
+        return [t](const cv::Rect &a, const cv::Rect &b)
+        {
+            return t(a, b);
+        };
+    }
+
+    CompareRects to_compare_obj()
+    {
+        T t = *static_cast<T*const>(this);
+        std::stringstream ss;
+        ss << t;
+        return CompareRects(to_compare_f(), ss.str());
+    }
+};
+
 template<typename T, typename Elem>
 struct WrappableVector
 {
@@ -719,13 +742,15 @@ public:
             double err_Inf = cv::norm(in1, in2, NORM_INF);
             if (err_Inf > _inf_tol)
             {
-                std::cout << "ToleranceColor error: err_Inf=" << err_Inf << "  tolerance=" << _inf_tol << std::endl;;
+                std::cout << "ToleranceColor error: err_Inf=" << err_Inf
+                          << "  tolerance=" << _inf_tol << std::endl;
                 return false;
             }
             double err = cv::norm(in1, in2, NORM_L1 | NORM_RELATIVE);
             if (err > _tol)
             {
-                std::cout << "ToleranceColor error: err=" << err << "  tolerance=" << _tol << std::endl;;
+                std::cout << "ToleranceColor error: err=" << err
+                          << "  tolerance=" << _tol << std::endl;
                 return false;
             }
         }
@@ -749,7 +774,8 @@ public:
         double abs_err = std::abs(in1[0] - in2[0]) / std::max(1.0, std::abs(in2[0]));
         if (abs_err > _tol)
         {
-            std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << "  tolerance=" << _tol << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl;;
+            std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << "  tolerance=" << _tol
+                      << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl;
             return false;
         }
         else
@@ -765,6 +791,46 @@ private:
     double _tol;
 };
 
+class IoUToleranceRect : public WrappableRect<IoUToleranceRect>
+{
+public:
+    IoUToleranceRect(double tol) : _tol(tol) {}
+    bool operator() (const cv::Rect& in1, const cv::Rect& in2) const
+    {
+        // determine the (x, y)-coordinates of the intersection rectangle
+        int xA = max(in1.x, in2.x);
+        int yA = max(in1.y, in2.y);
+        int xB = min(in1.br().x, in2.br().x);
+        int yB = min(in1.br().y, in2.br().y);
+        // compute the area of intersection rectangle
+        int interArea = max(0, xB - xA) * max(0, yB - yA);
+        // compute the area of the union
+        int unionArea = in1.area() + in2.area() - interArea;
+
+        double iou = static_cast<double>(interArea) / unionArea;
+        double err = 1 - iou;
+        if (err > _tol)
+        {
+            std::cout << "IoUToleranceRect error: err=" << err << "  tolerance=" << _tol
+                      << " in1.x="      << in1.x      << " in2.x="      << in2.x
+                      << " in1.y="      << in1.y      << " in2.y="      << in2.y
+                      << " in1.width="  << in1.width  << " in2.width="  << in2.width
+                      << " in1.height=" << in1.height << " in2.height=" << in2.height << std::endl;
+            return false;
+        }
+        else
+        {
+            return true;
+        }
+    }
+    friend std::ostream& operator<<(std::ostream& os, const IoUToleranceRect& obj)
+    {
+        return os << "IoUToleranceRect(" << std::to_string(obj._tol) << ")";
+    }
+private:
+    double _tol;
+};
+
 template<typename Elem>
 class AbsExactVector : public WrappableVector<AbsExactVector<Elem>, Elem>
 {
@@ -803,6 +869,11 @@ inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_sca
     return os << "compare_scalar_f";
 }
 
+inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_rect_f&)
+{
+    return os << "compare_rect_f";
+}
+
 template<typename Elem>
 inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_vector_f<Elem>&)
 {
@@ -849,6 +920,37 @@ inline std::ostream& operator<<(std::ostream& os, NormTypes op)
     return os;
 }
 
+inline std::ostream& operator<<(std::ostream& os, RetrievalModes op)
+{
+#define CASE(v) case RetrievalModes::v: os << #v; break
+    switch (op)
+    {
+        CASE(RETR_EXTERNAL);
+        CASE(RETR_LIST);
+        CASE(RETR_CCOMP);
+        CASE(RETR_TREE);
+        CASE(RETR_FLOODFILL);
+        default: GAPI_Assert(false && "unknown RetrievalModes value");
+    }
+#undef CASE
+    return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, ContourApproximationModes op)
+{
+#define CASE(v) case ContourApproximationModes::v: os << #v; break
+    switch (op)
+    {
+        CASE(CHAIN_APPROX_NONE);
+        CASE(CHAIN_APPROX_SIMPLE);
+        CASE(CHAIN_APPROX_TC89_L1);
+        CASE(CHAIN_APPROX_TC89_KCOS);
+        default: GAPI_Assert(false && "unknown ContourApproximationModes value");
+    }
+#undef CASE
+    return os;
+}
+
 inline std::ostream& operator<<(std::ostream& os, MorphTypes op)
 {
 #define CASE(v) case MorphTypes::v: os << #v; break
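The IoUToleranceRect comparator above is plain intersection-over-union arithmetic; a worked example (standalone, outside the patch) shows the numbers and why the cast to double is required:

```cpp
#include <opencv2/core.hpp>
#include <algorithm>
#include <iostream>

int main()
{
    cv::Rect a(0, 0, 10, 10), b(2, 0, 10, 10);                // overlap is 8x10
    int xA = std::max(a.x, b.x), yA = std::max(a.y, b.y);
    int xB = std::min(a.br().x, b.br().x), yB = std::min(a.br().y, b.br().y);
    int inter = std::max(0, xB - xA) * std::max(0, yB - yA);  // 8*10 = 80
    int uni   = a.area() + b.area() - inter;                  // 100+100-80 = 120
    // Integer division would truncate 80/120 to 0; cast first.
    double iou = static_cast<double>(inter) / uni;            // ~0.667
    std::cout << "IoU = " << iou << ", err = " << 1 - iou << "\n";
}
```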
diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
index 7cba6b05db..cea0e0da32 100644
--- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
+++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
@@ -265,6 +265,78 @@ INSTANTIATE_TEST_CASE_P(GoodFeaturesInternalTestCPU, GoodFeaturesTest,
                                 Values(3),
                                 Values(true)));
 
+INSTANTIATE_TEST_CASE_P(FindContoursNoOffsetTestCPU, FindContoursNoOffsetTest,
+                        Combine(Values(IMGPROC_CPU),
+                                Values(cv::Size(1280, 720)),
+                                Values(CV_8UC1),
+                                Values(RETR_EXTERNAL),
+                                Values(CHAIN_APPROX_NONE)));
+
+INSTANTIATE_TEST_CASE_P(FindContoursOffsetTestCPU, FindContoursOffsetTest,
+                        Values(IMGPROC_CPU));
+
+INSTANTIATE_TEST_CASE_P(FindContoursHNoOffsetTestCPU, FindContoursHNoOffsetTest,
+                        Combine(Values(IMGPROC_CPU),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_8UC1),
+                                Values(RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE),
+                                Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE,
+                                       CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS)));
+
+INSTANTIATE_TEST_CASE_P(FindContoursHNoOffset32STestCPU, FindContoursHNoOffsetTest,
+                        Combine(Values(IMGPROC_CPU),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480)),
+                                Values(CV_32SC1),
+                                Values(RETR_CCOMP, RETR_FLOODFILL),
+                                Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE,
+                                       CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS)));
+
+INSTANTIATE_TEST_CASE_P(FindContoursHOffsetTestCPU, FindContoursHOffsetTest,
+                        Values(IMGPROC_CPU));
+
+INSTANTIATE_TEST_CASE_P(BoundingRectMatTestCPU, BoundingRectMatTest,
+                        Combine(Values(CV_8UC1),
+                                Values(cv::Size(1280, 720),
+                                       cv::Size(640, 480),
+                                       cv::Size(128, 128)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(IoUToleranceRect(0).to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32STestCPU, BoundingRectMatVector32STest,
+                        Combine(Values(-1),
+                                Values(cv::Size(1280, 1),
+                                       cv::Size(128, 1)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(IoUToleranceRect(0).to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32FTestCPU, BoundingRectMatVector32FTest,
+                        Combine(Values(-1),
+                                Values(cv::Size(1280, 1),
+                                       cv::Size(128, 1)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(IoUToleranceRect(1e-5).to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(BoundingRectVector32STestCPU, BoundingRectVector32STest,
+                        Combine(Values(-1),
+                                Values(cv::Size(1280, 1),
+                                       cv::Size(128, 1)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(IoUToleranceRect(0).to_compare_obj())));
+
+INSTANTIATE_TEST_CASE_P(BoundingRectVector32FTestCPU, BoundingRectVector32FTest,
+                        Combine(Values(-1),
+                                Values(cv::Size(1280, 1),
+                                       cv::Size(128, 1)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(IoUToleranceRect(1e-5).to_compare_obj())));
+
 INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest,
                         Combine(Values(CV_8UC3),
                                 Values(cv::Size(1280, 720),

From 724001aa0f646aa58913c5e46917d104334275ed Mon Sep 17 00:00:00 2001
From: Ruslan Garnov <ruslan.garnov@intel.com>
Date: Tue, 3 Nov 2020 18:50:49 +0300
Subject: [PATCH 080/152] Added multidimensional RMat::View steps

---
 modules/gapi/include/opencv2/gapi/rmat.hpp    |  27 ++--
 modules/gapi/src/api/rmat.cpp                 |  70 ++++++++--
 modules/gapi/src/backends/common/gbackend.hpp |  16 ++-
 modules/gapi/test/rmat/rmat_test_common.hpp   |  16 ++-
 modules/gapi/test/rmat/rmat_view_tests.cpp    | 130 ++++++++++++++++--
 modules/gapi/test/s11n/gapi_s11n_tests.cpp    |   9 +-
 6 files changed, 230 insertions(+), 38 deletions(-)
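For context: the multidimensional steps introduced here follow the usual row-major stride rule. A hedged standalone sketch of the computation, mirroring the defaultSteps() helper added below:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// steps[last] = element size; steps[i] = steps[i+1] * dims[i+1].
std::vector<size_t> rowMajorSteps(const std::vector<int>& dims, size_t elemSize)
{
    std::vector<size_t> steps(dims.size(), 0u);
    steps.back() = elemSize;
    for (int i = static_cast<int>(dims.size()) - 2; i >= 0; i--)
        steps[i] = steps[i + 1] * dims[i + 1];
    return steps;
}

int main()
{
    for (size_t s : rowMajorSteps({2, 3, 4}, 1))  // a 2x3x4 CV_8U volume
        std::cout << s << " ";                    // prints: 12 4 1
}
```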

diff --git a/modules/gapi/include/opencv2/gapi/rmat.hpp b/modules/gapi/include/opencv2/gapi/rmat.hpp
index ff834b46b1..f50bd08b65 100644
--- a/modules/gapi/include/opencv2/gapi/rmat.hpp
+++ b/modules/gapi/include/opencv2/gapi/rmat.hpp
@@ -54,11 +54,11 @@ public:
     {
     public:
         using DestroyCallback = std::function<void()>;
+        using stepsT = std::vector<size_t>;
 
         View() = default;
-        View(const GMatDesc& desc, uchar* data, size_t step = 0u, DestroyCallback&& cb = nullptr)
-            : m_desc(desc), m_data(data), m_step(step == 0u ? elemSize()*cols() : step), m_cb(std::move(cb))
-        {}
+        View(const GMatDesc& desc, uchar* data, const stepsT& steps = {}, DestroyCallback&& cb = nullptr);
+        View(const GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb = nullptr);
 
         View(const View&) = delete;
         View& operator=(const View&) = delete;
@@ -70,23 +70,30 @@ public:
         const std::vector<int>& dims() const { return m_desc.dims; }
         int cols() const { return m_desc.size.width; }
         int rows() const { return m_desc.size.height; }
-        int type() const { return CV_MAKE_TYPE(depth(), chan()); }
+        int type() const;
         int depth() const { return m_desc.depth; }
         int chan() const { return m_desc.chan; }
         size_t elemSize() const { return CV_ELEM_SIZE(type()); }
 
-        template<typename T = uchar> T* ptr(int y = 0, int x = 0) {
-            return reinterpret_cast<T*>(m_data + m_step*y + x*CV_ELEM_SIZE(type()));
+        template<typename T = uchar> T* ptr(int y = 0) {
+            return reinterpret_cast<T*>(m_data + step()*y);
         }
-        template<typename T = uchar> const T* ptr(int y = 0, int x = 0) const {
-            return reinterpret_cast<const T*>(m_data + m_step*y + x*CV_ELEM_SIZE(type()));
+        template<typename T = uchar> const T* ptr(int y = 0) const {
+            return reinterpret_cast<const T*>(m_data + step()*y);
         }
-        size_t step() const { return m_step; }
+        template<typename T = uchar> T* ptr(int y, int x) {
+            return reinterpret_cast<T*>(m_data + step()*y + step(1)*x);
+        }
+        template<typename T = uchar> const T* ptr(int y, int x) const {
+            return reinterpret_cast<const T*>(m_data + step()*y + step(1)*x);
+        }
+        size_t step(size_t i = 0) const { GAPI_DbgAssert(i<m_steps.size()); return m_steps[i]; }
+        const stepsT& steps() const { return m_steps; }
 
     private:
         GMatDesc m_desc;
         uchar* m_data = nullptr;
-        size_t m_step = 0u;
+        stepsT m_steps = {0u};
         DestroyCallback m_cb = nullptr;
     };
 
diff --git a/modules/gapi/src/api/rmat.cpp b/modules/gapi/src/api/rmat.cpp
index 9c2da2ebc7..12ba4e5e0e 100644
--- a/modules/gapi/src/api/rmat.cpp
+++ b/modules/gapi/src/api/rmat.cpp
@@ -8,16 +8,68 @@
 
 using View = cv::RMat::View;
 
+namespace {
+cv::GMatDesc checkDesc(const cv::GMatDesc& desc) {
+    if (!desc.dims.empty() && desc.chan != -1) {
+        cv::util::throw_error(
+            std::logic_error("Multidimesional RMat::Views with chan different from -1 are not supported!"));
+    }
+    return desc;
+}
+
+int typeFromDesc(const cv::GMatDesc& desc) {
+    // In multidimensional case GMatDesc::chan is -1,
+    // change it to 1 when calling CV_MAKE_TYPE
+    return CV_MAKE_TYPE(desc.depth, desc.chan == -1 ? 1 : desc.chan);
+}
+
+View::stepsT defaultSteps(const cv::GMatDesc& desc) {
+    const auto& dims = desc.dims.empty()
+                       ? std::vector<int>{desc.size.height, desc.size.width}
+                       : desc.dims;
+    View::stepsT steps(dims.size(), 0u);
+    auto type = typeFromDesc(desc);
+    steps.back() = CV_ELEM_SIZE(type);
+    for (int i = static_cast<int>(dims.size())-2; i >= 0; i--) {
+        steps[i] = steps[i+1]*dims[i+1];
+    }
+    return steps;
+}
+} // anonymous namespace
+
+View::View(const cv::GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb)
+    : m_desc(checkDesc(desc))
+    , m_data(data)
+    , m_steps([this, step](){
+        GAPI_Assert(m_desc.dims.empty());
+        auto steps = defaultSteps(m_desc);
+        if (step != 0u) {
+            steps[0] = step;
+        }
+        return steps;
+    }())
+    , m_cb(std::move(cb)) {
+}
+
+View::View(const cv::GMatDesc& desc, uchar* data, const stepsT &steps, DestroyCallback&& cb)
+    : m_desc(checkDesc(desc))
+    , m_data(data)
+    , m_steps(steps == stepsT{} ? defaultSteps(m_desc) : steps)
+    , m_cb(std::move(cb)) {
+}
+
+int View::type() const { return typeFromDesc(m_desc); }
+
 // There is an issue with default generated operator=(View&&) on Mac:
-// it doesn't nullify m_cb of a moved object
+// it doesn't nullify m_cb of the moved object
 View& View::operator=(View&& v) {
-    m_desc = v.m_desc;
-    m_data = v.m_data;
-    m_step = v.m_step;
-    m_cb   = v.m_cb;
-    v.m_desc = {};
-    v.m_data = nullptr;
-    v.m_step = 0u;
-    v.m_cb   = nullptr;
+    m_desc  = v.m_desc;
+    m_data  = v.m_data;
+    m_steps = v.m_steps;
+    m_cb    = v.m_cb;
+    v.m_desc  = {};
+    v.m_data  = nullptr;
+    v.m_steps = {0u};
+    v.m_cb    = nullptr;
     return *this;
 }
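A hedged usage sketch for the two constructors defined above (assuming the G-API headers are available; the N-D branch copies the cv::Mat steps exactly as cv::gimpl::asView() does in the next file):

```cpp
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/rmat.hpp>

int main()
{
    // 2D case: a single row step is enough.
    cv::Mat m2d(8, 8, CV_8UC1);
    cv::RMat::View v2d(cv::descr_of(m2d), m2d.data, m2d.step);

    // N-D case: pass the full steps vector.
    std::vector<int> dims = {2, 3, 4};
    cv::Mat mnd(dims, CV_8U);
    cv::RMat::View::stepsT steps(mnd.dims);
    for (int i = 0; i < mnd.dims; i++) steps[i] = mnd.step[i];
    cv::RMat::View vnd(cv::descr_of(mnd), mnd.data, steps);
}
```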
diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp
index 8c1749377e..4914715fa7 100644
--- a/modules/gapi/src/backends/common/gbackend.hpp
+++ b/modules/gapi/src/backends/common/gbackend.hpp
@@ -23,12 +23,26 @@ namespace cv {
 namespace gimpl {
 
     inline cv::Mat asMat(RMat::View& v) {
+#if !defined(GAPI_STANDALONE)
+        return v.dims().empty() ? cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step())
+                                : cv::Mat(v.dims(), v.type(), v.ptr(), v.steps().data());
+#else
+        // FIXME: add a check that steps are default
         return v.dims().empty() ? cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step())
                                 : cv::Mat(v.dims(), v.type(), v.ptr());
+#endif
     }
     inline RMat::View asView(const Mat& m, RMat::View::DestroyCallback&& cb = nullptr) {
-        // FIXME: View doesn't support multidimensional cv::Mat's
+#if !defined(GAPI_STANDALONE)
+        RMat::View::stepsT steps(m.dims);
+        for (int i = 0; i < m.dims; i++) {
+            steps[i] = m.step[i];
+        }
+        return RMat::View(cv::descr_of(m), m.data, steps, std::move(cb));
+#else
         return RMat::View(cv::descr_of(m), m.data, m.step, std::move(cb));
+#endif
     }
 
     class RMatAdapter : public RMat::Adapter {
diff --git a/modules/gapi/test/rmat/rmat_test_common.hpp b/modules/gapi/test/rmat/rmat_test_common.hpp
index 47a744499e..5685d06253 100644
--- a/modules/gapi/test/rmat/rmat_test_common.hpp
+++ b/modules/gapi/test/rmat/rmat_test_common.hpp
@@ -19,14 +19,18 @@ public:
         : m_mat(m), m_callbackCalled(callbackCalled)
     {}
     virtual RMat::View access(RMat::Access access) override {
+        RMat::View::stepsT steps(m_mat.dims);
+        for (int i = 0; i < m_mat.dims; i++) {
+            steps[i] = m_mat.step[i];
+        }
         if (access == RMat::Access::W) {
-            return RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step,
+            return RMat::View(cv::descr_of(m_mat), m_mat.data, steps,
                               [this](){
                                   EXPECT_FALSE(m_callbackCalled);
                                   m_callbackCalled = true;
                               });
         } else {
-            return RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step);
+            return RMat::View(cv::descr_of(m_mat), m_mat.data, steps);
         }
     }
     virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); }
@@ -42,8 +46,12 @@ public:
         : m_deviceMat(m), m_hostMat(m.clone()), m_callbackCalled(callbackCalled)
     {}
     virtual RMat::View access(RMat::Access access) override {
+        RMat::View::stepsT steps(m_hostMat.dims);
+        for (int i = 0; i < m_hostMat.dims; i++) {
+            steps[i] = m_hostMat.step[i];
+        }
         if (access == RMat::Access::W) {
-            return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, m_hostMat.step,
+            return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, steps,
                               [this](){
                                   EXPECT_FALSE(m_callbackCalled);
                                   m_callbackCalled = true;
@@ -51,7 +59,7 @@ public:
                               });
         } else {
             m_deviceMat.copyTo(m_hostMat);
-            return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, m_hostMat.step);
+            return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, steps);
         }
     }
     virtual cv::GMatDesc desc() const override { return cv::descr_of(m_hostMat); }
diff --git a/modules/gapi/test/rmat/rmat_view_tests.cpp b/modules/gapi/test/rmat/rmat_view_tests.cpp
index abc251660b..14025231a7 100644
--- a/modules/gapi/test/rmat/rmat_view_tests.cpp
+++ b/modules/gapi/test/rmat/rmat_view_tests.cpp
@@ -15,6 +15,8 @@ namespace opencv_test
 using cv::GMatDesc;
 using View = cv::RMat::View;
 using cv::Mat;
+using cv::gimpl::asMat;
+using cv::gimpl::asView;
 using namespace ::testing;
 
 static void expect_eq_desc(const GMatDesc& desc, const View& view) {
@@ -22,7 +24,8 @@ static void expect_eq_desc(const GMatDesc& desc, const View& view) {
     EXPECT_EQ(desc.dims, view.dims());
     EXPECT_EQ(desc.size.width, view.cols());
     EXPECT_EQ(desc.size.height, view.rows());
-    EXPECT_EQ(CV_MAKE_TYPE(desc.depth,desc.chan), view.type());
     EXPECT_EQ(desc.depth, view.depth());
     EXPECT_EQ(desc.chan, view.chan());
 }
@@ -40,10 +43,10 @@ TEST_P(RMatViewTest, ConstructionFromMat) {
     auto type = GetParam();
     Mat mat(8,8,type);
     const auto desc = cv::descr_of(mat);
-    View view(cv::descr_of(mat), mat.ptr(), mat.step1());
+    View view = asView(mat);
     expect_eq_desc(desc, view);
     EXPECT_EQ(mat.ptr(), view.ptr());
-    EXPECT_EQ(mat.step1(), view.step());
+    EXPECT_EQ(mat.step, view.step());
 }
 
 TEST(RMatView, TestConstructionFromMatND) {
@@ -66,16 +69,98 @@ TEST_P(RMatViewTest, DefaultStep) {
     EXPECT_EQ(static_cast<size_t>(desc.size.width)*CV_ELEM_SIZE(type), view.step());
 }
 
-static Mat asMat(View& view) {
-    return Mat(view.size(), view.type(), view.ptr(), view.step());
+struct RMatViewNDTest : public TestWithParam<
+    std::tuple<int /*depth*/, int /*ndims*/>>{};
+TEST_P(RMatViewNDTest, DefaultStep) {
+    int depth = 0, ndims = 0;
+    std::tie(depth, ndims) = GetParam();
+    std::vector<int> dims(ndims, 12);
+    GMatDesc desc;
+    desc.dims = dims;
+    desc.depth = depth;
+    GAPI_Assert(desc.chan == -1);
+    auto elemSize = CV_ELEM_SIZE(depth);
+    auto total = std::accumulate(dims.begin(), dims.end(), elemSize, std::multiplies<int>());
+    std::vector<unsigned char> data(total);
+    View view(desc, data.data());
+    auto step = static_cast<size_t>(total/dims[0]);
+    EXPECT_EQ(step, view.step(0));
+    for (int i = 1; i < ndims; i++) {
+        step /= dims[i];
+        EXPECT_EQ(step, view.step(i));
+    }
+}
+
+TEST_P(RMatViewNDTest, StepFromMat) {
+    int depth = 0, ndims = 0;
+    std::tie(depth, ndims) = GetParam();
+    std::vector<int> dims(ndims, 12);
+    cv::Mat mat(dims, depth);
+    auto view = asView(mat);
+    EXPECT_EQ(mat.ptr(), view.ptr());
+    for (int i = 0; i < ndims; i++) {
+        EXPECT_EQ(mat.step[i], view.step(i));
+    }
+}
+
+TEST_P(RMatViewNDTest, StepFromView) {
+    int depth = 0, ndims = 0;
+    std::tie(depth, ndims) = GetParam();
+    std::vector<int> dims(ndims, 12);
+    std::vector<int> aligned(ndims, 16);
+    GMatDesc desc;
+    desc.dims = dims;
+    desc.depth = depth;
+    GAPI_Assert(desc.chan == -1);
+    auto elemSize = CV_ELEM_SIZE(depth);
+    auto total = std::accumulate(aligned.begin(), aligned.end(), elemSize, std::multiplies<int>());
+    std::vector<unsigned char> data(total);
+    View::stepsT steps(ndims);
+    auto step = static_cast<size_t>(total/aligned[0]);
+    steps[0] = step;
+    for (int i = 1; i < ndims; i++) {
+        step /= aligned[i];
+        steps[i] = step;
+    }
+    View view(desc, data.data(), steps);
+    auto mat = asMat(view);
+    EXPECT_EQ(mat.ptr(), view.ptr());
+    for (int i = 0; i < ndims; i++) {
+        EXPECT_EQ(mat.step[i], view.step(i));
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(Test, RMatViewNDTest,
+                        Combine(Values(CV_8U, CV_32F), // depth
+                                Values(1,2,3,4,7)));   // ndims
+
+struct RMatViewNDTestNegative : public TestWithParam<
+    std::tuple<int /*depth*/, int /*chan*/, int /*ndims*/>>{};
+TEST_P(RMatViewNDTestNegative, DefaultStep) {
+    int depth = 0, chan = 0, ndims = 0;
+    std::tie(depth, chan, ndims) = GetParam();
+    std::vector<int> dims(ndims, 12);
+    GMatDesc desc;
+    desc.dims = dims;
+    desc.depth = depth;
+    desc.chan = chan;
+    auto elemSize = CV_ELEM_SIZE(depth);
+    auto total = std::accumulate(dims.begin(), dims.end(), elemSize, std::multiplies<int>());
+    std::vector<unsigned char> data(total);
+    EXPECT_ANY_THROW(View view(desc, data.data()));
 }
 
+INSTANTIATE_TEST_CASE_P(Test, RMatViewNDTestNegative,
+                        Combine(Values(CV_8U, CV_32F), // depth
+                                Values(1,2,3,4),       // chan
+                                Values(2,4,7)));       // ndims
+
 TEST_P(RMatViewTest, NonDefaultStepInput) {
     auto type = GetParam();
     Mat bigMat(16,16,type);
     cv::randn(bigMat, cv::Scalar::all(127), cv::Scalar::all(40));
     Mat mat = bigMat(cv::Rect{4,4,8,8});
-    View view(cv::descr_of(mat), mat.data, mat.step);
+    View view = asView(mat);
     const auto viewMat = asMat(view);
     Mat ref, out;
     cv::Size ksize{1,1};
@@ -90,7 +175,36 @@ TEST_P(RMatViewTest, NonDefaultStepOutput) {
     cv::randn(mat, cv::Scalar::all(127), cv::Scalar::all(40));
     Mat bigMat = Mat::zeros(16,16,type);
     Mat out = bigMat(cv::Rect{4,4,8,8});
-    View view(cv::descr_of(out), out.ptr(), out.step);
+    View view = asView(out);
+    auto viewMat = asMat(view);
+    Mat ref;
+    cv::Size ksize{1,1};
+    cv::blur(mat, viewMat, ksize);
+    cv::blur(mat, ref,     ksize);
+    EXPECT_EQ(0, cvtest::norm(ref, out, NORM_INF));
+}
+
+TEST_P(RMatViewTest, NonDefaultStep2DInput) {
+    auto type = GetParam();
+    Mat bigMat(16,16,type);
+    cv::randn(bigMat, cv::Scalar::all(127), cv::Scalar::all(40));
+    Mat mat = bigMat(cv::Rect{4,4,8,8});
+    View view(cv::descr_of(mat), mat.data, mat.step);
+    const auto viewMat = asMat(view);
+    Mat ref, out;
+    cv::Size ksize{1,1};
+    cv::blur(viewMat, out, ksize);
+    cv::blur(    mat, ref, ksize);
+    EXPECT_EQ(0, cvtest::norm(ref, out, NORM_INF));
+}
+
+TEST_P(RMatViewTest, NonDefaultStep2DOutput) {
+    auto type = GetParam();
+    Mat mat(8,8,type);
+    cv::randn(mat, cv::Scalar::all(127), cv::Scalar::all(40));
+    Mat bigMat = Mat::zeros(16,16,type);
+    Mat out = bigMat(cv::Rect{4,4,8,8});
+    View view(cv::descr_of(out), out.data, out.step);
     auto viewMat = asMat(view);
     Mat ref;
     cv::Size ksize{1,1};
@@ -107,7 +221,7 @@ struct RMatViewCallbackTest : public ::testing::Test {
         : mat(8,8,CV_8UC1) {
         cv::randn(mat, cv::Scalar::all(127), cv::Scalar::all(40));
     }
-    View getView() { return {cv::descr_of(mat), mat.ptr(), mat.step1(), [this](){ callbackCalls++; }}; }
+    View getView() { return asView(mat, [this](){ callbackCalls++; }); }
     int callbackCalls = 0;
     Mat mat;
 };
diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
index 2fc1e46253..74aac19306 100644
--- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp
+++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
@@ -2,6 +2,7 @@
 
 #include "backends/common/serialization.hpp"
 #include <opencv2/gapi/rmat.hpp>
+#include "../src/backends/common/gbackend.hpp" // asView
 
 namespace {
 struct EmptyCustomType { };
@@ -134,12 +135,8 @@ public:
     MyRMatAdapter(cv::Mat m, int value, const std::string& str)
         : m_mat(m), m_value(value), m_str(str)
     {}
-    virtual cv::RMat::View access(cv::RMat::Access access) override {
-        if (access == cv::RMat::Access::W) {
-            return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step);
-        } else {
-            return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step);
-        }
+    virtual cv::RMat::View access(cv::RMat::Access) override {
+        return cv::gimpl::asView(m_mat);
     }
     virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); }
     virtual void serialize(cv::gapi::s11n::IOStream& os) override {

From d986cc4861b978415fc20c3a0dc6f16ff9d0bcdf Mon Sep 17 00:00:00 2001
From: Maksim Shabunin <maksim.shabunin@gmail.com>
Date: Thu, 12 Nov 2020 13:38:26 +0300
Subject: [PATCH 081/152] calib3d: uninitialized fields in usac

---
 modules/calib3d/src/usac.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/calib3d/src/usac.hpp b/modules/calib3d/src/usac.hpp
index c18de92479..06a0ff2056 100644
--- a/modules/calib3d/src/usac.hpp
+++ b/modules/calib3d/src/usac.hpp
@@ -421,7 +421,7 @@ struct SPRT_history {
     double epsilon, delta, A;
     // number of samples processed by test
     int tested_samples; // k
-    SPRT_history () {
+    SPRT_history () : epsilon(0), delta(0), A(0) {
         tested_samples = 0;
     }
 };
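The fix applies the standard rule of giving every scalar member a deterministic value before first use; a minimal illustration of the resulting pattern (tested_samples could equally join the initializer list):

```cpp
// All POD members are initialized in the constructor's initializer list,
// so no field is ever read uninitialized.
struct SPRT_history_example {
    double epsilon, delta, A;
    int tested_samples; // k
    SPRT_history_example() : epsilon(0), delta(0), A(0), tested_samples(0) {}
};
```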

From d9c5b85671471a3b3ecdde09fb33170803f7767d Mon Sep 17 00:00:00 2001
From: Aitik Gupta <aitikgupta@gmail.com>
Date: Fri, 13 Nov 2020 09:00:54 +0530
Subject: [PATCH 082/152] Use in-place sort

---
 samples/python/stitching_detailed.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/python/stitching_detailed.py b/samples/python/stitching_detailed.py
index b0cf78a759..cd3f063e35 100644
--- a/samples/python/stitching_detailed.py
+++ b/samples/python/stitching_detailed.py
@@ -387,7 +387,7 @@ def main():
     focals = []
     for cam in cameras:
         focals.append(cam.focal)
-    sorted(focals)
+    focals.sort()
     if len(focals) % 2 == 1:
         warped_image_scale = focals[len(focals) // 2]
     else:

From 011d8e80d8a69d736a59619b7dcae768023bd045 Mon Sep 17 00:00:00 2001
From: Igor Murzov <igor.murzov@xperience.ai>
Date: Mon, 9 Nov 2020 19:03:46 +0300
Subject: [PATCH 083/152] videoio: Support Orbbec Astra 3D cameras using
 OpenNI2 API

Only the depth sensor is supported. The color sensor is accessible as a
regular UVC camera.
---
 modules/videoio/include/opencv2/videoio.hpp |  3 ++-
 modules/videoio/src/cap_openni2.cpp         | 11 +++++++++--
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp
index eb5645ab77..c429699d3f 100644
--- a/modules/videoio/include/opencv2/videoio.hpp
+++ b/modules/videoio/include/opencv2/videoio.hpp
@@ -112,6 +112,7 @@ enum VideoCaptureAPIs {
        CAP_REALSENSE    = 1500,         //!< Synonym for CAP_INTELPERC
        CAP_OPENNI2      = 1600,         //!< OpenNI2 (for Kinect)
        CAP_OPENNI2_ASUS = 1610,         //!< OpenNI2 (for Asus Xtion and Occipital Structure sensors)
+       CAP_OPENNI2_ASTRA= 1620,         //!< OpenNI2 (for Orbbec Astra)
        CAP_GPHOTO2      = 1700,         //!< gPhoto2 connection
        CAP_GSTREAMER    = 1800,         //!< GStreamer
        CAP_FFMPEG       = 1900,         //!< Open and record video file or stream using the FFMPEG library
@@ -825,7 +826,7 @@ public:
     @throws Exception %Exception on stream errors (check .isOpened() to filter out malformed streams) or VideoCapture type is not supported
 
     The primary use of the function is in multi-camera environments.
-    The method fills the ready state vector, grabbs video frame, if camera is ready.
+    The method fills the ready state vector and grabs a video frame if the camera is ready.
 
     After this call use VideoCapture::retrieve() to decode and fetch frame data.
     */
diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp
index adec7359fb..1d455442fa 100644
--- a/modules/videoio/src/cap_openni2.cpp
+++ b/modules/videoio/src/cap_openni2.cpp
@@ -103,7 +103,7 @@ private:
 class CvCapture_OpenNI2 : public CvCapture
 {
 public:
-    enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 };
+    enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_ORBBEC_ASTRA=2, DEVICE_MAX=2 };
 
     static const int INVALID_PIXEL_VAL = 0;
     static const int INVALID_COORDINATE_VAL = 0;
@@ -116,6 +116,7 @@ public:
     CvCapture_OpenNI2(const char * filename);
     virtual ~CvCapture_OpenNI2();
 
+    virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI2; }
     virtual double getProperty(int propIdx) const CV_OVERRIDE;
     virtual bool setProperty(int probIdx, double propVal) CV_OVERRIDE;
     virtual bool grabFrame() CV_OVERRIDE;
@@ -261,7 +262,8 @@ CvCapture_OpenNI2::CvCapture_OpenNI2(int index, const char * filename) :
             index %= 10;
         }
         // Asus XTION and Occipital Structure Sensor do not have an image generator
-        needColor = (deviceType != DEVICE_ASUS_XTION);
+        // Orbbec Astra cameras don't provide an OpenNI interface for reading the color stream
+        needColor = (deviceType != DEVICE_ASUS_XTION) && (deviceType != DEVICE_ORBBEC_ASTRA);
 
         // find appropriate device URI
         openni::Array<openni::DeviceInfo> ldevs;
@@ -300,6 +302,11 @@ CvCapture_OpenNI2::CvCapture_OpenNI2(int index, const char * filename) :
     setProperty(CV_CAP_PROP_OPENNI2_MIRROR, 0.0);
 
     isContextOpened = true;
+
+    CV_LOG_INFO(NULL, cv::format("Opened OpenNI camera: %s %s (%04x:%04x)",
+                      device.getDeviceInfo().getVendor(), device.getDeviceInfo().getName(),
+                      device.getDeviceInfo().getUsbVendorId(), device.getDeviceInfo().getUsbProductId())
+    );
 }
 
 CvCapture_OpenNI2::~CvCapture_OpenNI2()
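A hedged usage sketch for the new capture constant (CAP_OPENNI_DEPTH_MAP is the standard OpenNI retrieve flag; for OpenNI backends the depth map arrives as CV_16UC1 in millimeters):

```cpp
#include <opencv2/videoio.hpp>

int main()
{
    // Device 0 via the OpenNI2/Astra backend (legacy index + API-id form).
    cv::VideoCapture cap(cv::CAP_OPENNI2_ASTRA);
    if (!cap.isOpened()) return 1;

    cv::Mat depth;
    if (cap.grab() && cap.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP))
    {
        // use the depth map; the color sensor is a separate UVC device
    }
    return 0;
}
```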

From 0e4b5b88dcc379259c5e6e530c25181916abbda9 Mon Sep 17 00:00:00 2001
From: Ruslan Garnov <ruslan.garnov@intel.com>
Date: Thu, 5 Nov 2020 02:27:32 +0300
Subject: [PATCH 084/152] Added support of 1x1x1xN input for parseYolo

---
 modules/gapi/src/backends/cpu/gnnparsers.cpp  | 34 +++++++++++++++----
 modules/gapi/test/common/gapi_core_tests.hpp  |  2 +-
 .../gapi/test/common/gapi_core_tests_inl.hpp  |  2 +-
 .../test/common/gapi_parsers_tests_common.hpp | 19 +++++++++--
 modules/gapi/test/cpu/gapi_core_tests_cpu.cpp |  7 +++-
 5 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/modules/gapi/src/backends/cpu/gnnparsers.cpp b/modules/gapi/src/backends/cpu/gnnparsers.cpp
index 234382d530..a5e4bf5f85 100644
--- a/modules/gapi/src/backends/cpu/gnnparsers.cpp
+++ b/modules/gapi/src/backends/cpu/gnnparsers.cpp
@@ -246,6 +246,28 @@ void parseSSD(const cv::Mat&  in_ssd_result,
     }
 }
 
+static void checkYoloDims(const MatSize& dims) {
+    const auto d = dims.dims();
+    // Accept 1x13x13xN and 13x13xN
+    GAPI_Assert(d >= 2);
+    if (d >= 3) {
+        if (dims[d-2] == 13) {
+            GAPI_Assert(dims[d-1]%5 == 0);
+            GAPI_Assert(dims[d-3] == 13);
+            for (int i = 0; i < d-3; i++) {
+                GAPI_Assert(dims[i] == 1);
+            }
+            return;
+        }
+    }
+    // Accept 1x1x1xN, 1x1xN, 1xN
+    GAPI_Assert(dims[d-1]%(5*13*13) == 0);
+    for (int i = 0; i < d-1; i++) {
+        GAPI_Assert(dims[i] == 1);
+    }
+}
+
 void parseYolo(const cv::Mat&  in_yolo_result,
                const cv::Size& in_size,
                const float     confidence_threshold,
@@ -255,12 +277,12 @@ void parseYolo(const cv::Mat&  in_yolo_result,
                std::vector<int>&      out_labels)
 {
     const auto& dims = in_yolo_result.size;
-    GAPI_Assert(dims.dims() == 4);
-    GAPI_Assert(dims[0] == 1);
-    GAPI_Assert(dims[1] == 13);
-    GAPI_Assert(dims[2] == 13);
-    GAPI_Assert(dims[3] % 5 == 0); // 5 boxes
-    const auto num_classes = dims[3] / 5 - 5;
+    checkYoloDims(dims);
+    int acc = 1;
+    for (int i = 0; i < dims.dims(); i++) {
+        acc *= dims[i];
+    }
+    const auto num_classes = acc/(5*13*13)-5;
     GAPI_Assert(num_classes > 0);
     GAPI_Assert(0 < nms_threshold && nms_threshold <= 1);
     out_boxes.clear();
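A worked instance of the shape arithmetic above (standalone, assuming the classic 13x13, 5-anchor YOLO head): with 80 classes each cell carries (80+5)*5 = 425 values, so a flattened 1x1x1xN tensor has N = 13*13*425 = 71825 elements and the parser recovers 71825/(5*13*13) - 5 = 80 classes:

```cpp
#include <iostream>

int main()
{
    const int num_classes = 80;
    const int per_cell = (num_classes + 5) * 5;      // 425
    const int total    = 13 * 13 * per_cell;         // 71825
    std::cout << total / (5 * 13 * 13) - 5 << "\n";  // prints 80
}
```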
diff --git a/modules/gapi/test/common/gapi_core_tests.hpp b/modules/gapi/test/common/gapi_core_tests.hpp
index 4a0a7641f9..889e32f1c1 100644
--- a/modules/gapi/test/common/gapi_core_tests.hpp
+++ b/modules/gapi/test/common/gapi_core_tests.hpp
@@ -157,7 +157,7 @@ GAPI_TEST_EXT_BASE_FIXTURE(ParseSSDBLTest, ParserSSDTest, initNothing,
 GAPI_TEST_EXT_BASE_FIXTURE(ParseSSDTest, ParserSSDTest, initNothing,
     FIXTURE_API(float, bool, bool), 3, confidence_threshold, alignment_to_square, filter_out_of_bounds)
 GAPI_TEST_EXT_BASE_FIXTURE(ParseYoloTest, ParserYoloTest, initNothing,
-    FIXTURE_API(float, float, int), 3, confidence_threshold, nms_threshold, num_classes)
+    FIXTURE_API(float, float, int, std::pair<bool,int>), 4, confidence_threshold, nms_threshold, num_classes, dims_config)
 GAPI_TEST_FIXTURE(SizeTest, initMatrixRandU, <>, 0)
 GAPI_TEST_FIXTURE(SizeRTest, initNothing, <>, 0)
 } // opencv_test
diff --git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp
index 1a167ad5ea..045b556369 100644
--- a/modules/gapi/test/common/gapi_core_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp
@@ -1666,7 +1666,7 @@ TEST_P(ParseSSDTest, ParseTest)
 
 TEST_P(ParseYoloTest, ParseTest)
 {
-    cv::Mat in_mat = generateYoloOutput(num_classes);
+    cv::Mat in_mat = generateYoloOutput(num_classes, dims_config);
     auto anchors = cv::gapi::nn::parsers::GParseYolo::defaultAnchors();
     std::vector<cv::Rect> boxes_gapi, boxes_ref;
     std::vector<int> labels_gapi, labels_ref;
diff --git a/modules/gapi/test/common/gapi_parsers_tests_common.hpp b/modules/gapi/test/common/gapi_parsers_tests_common.hpp
index 127a1c5a5e..91dcca7b3e 100644
--- a/modules/gapi/test/common/gapi_parsers_tests_common.hpp
+++ b/modules/gapi/test/common/gapi_parsers_tests_common.hpp
@@ -225,13 +225,26 @@ private:
 class ParserYoloTest
 {
 public:
-    cv::Mat generateYoloOutput(const int num_classes)
+    cv::Mat generateYoloOutput(const int num_classes, std::pair<bool,int> dims_config = {false, 4})
     {
-        std::vector<int> dims = { 1, 13, 13, (num_classes + 5) * 5 };
+        bool one_dim = false;
+        int num_dims = 0;
+        std::tie(one_dim, num_dims) = dims_config;
+        GAPI_Assert(num_dims <= 4);
+        GAPI_Assert((!one_dim && num_dims >= 3) ||
+                    ( one_dim && num_dims >= 1));
+        std::vector<int> dims(num_dims, 1);
+        if (one_dim) {
+            dims.back() = (num_classes+5)*5*13*13;
+        } else {
+            dims.back() = (num_classes+5)*5;
+            dims[num_dims-2] = 13;
+            dims[num_dims-3] = 13;
+        }
         cv::Mat mat(dims, CV_32FC1);
         auto data = mat.ptr<float>();
 
-        const size_t range = dims[0] * dims[1] * dims[2] * dims[3];
+        const size_t range = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
         for (size_t i = 0; i < range; ++i)
         {
             data[i] = static_cast<float>(std::rand()) / RAND_MAX;
diff --git a/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp
index 53faa28178..595b63dd1f 100644
--- a/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp
+++ b/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp
@@ -531,7 +531,12 @@ INSTANTIATE_TEST_CASE_P(ParseTestCPU, ParseYoloTest,
                                 Values(CORE_CPU),
                                 Values(0.3f, 0.5f, 0.7f),
                                 Values(0.5f, 1.0f),
-                                Values(80, 7)));
+                                Values(80, 7),
+                                Values(std::make_pair(false, 3),
+                                       std::make_pair(false, 4),
+                                       std::make_pair(true,  2),
+                                       std::make_pair(true,  3),
+                                       std::make_pair(true,  4))));
 
 INSTANTIATE_TEST_CASE_P(SizeTestCPU, SizeTest,
                         Combine(Values(CV_8UC1, CV_8UC3, CV_32FC1),

From 41c2669476ba81d684bfd81f83d83c6cb96db027 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Thu, 12 Nov 2020 19:47:54 +0000
Subject: [PATCH 085/152] java: robust code generation

- generate the same code from Python 2 and Python 3
- avoid randomized output caused by unpredictable dict/set ordering
---
 modules/java/generator/gen_java.py | 45 ++++++++++++++++--------------
 1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py
index 03075c5ae7..8e5c69e788 100755
--- a/modules/java/generator/gen_java.py
+++ b/modules/java/generator/gen_java.py
@@ -105,7 +105,7 @@ T_CPP_MODULE = Template(read_contents(os.path.join(SCRIPT_DIR, 'templates/cpp_mo
 
 class GeneralInfo():
     def __init__(self, type, decl, namespaces):
-        self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
+        self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
 
         # parse doxygen comments
         self.params={}
@@ -141,13 +141,13 @@ class GeneralInfo():
                 break
         pieces = localName.split(".")
         if len(pieces) > 2: # <class>.<class>.<class>.<name>
-            return spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
+            return name, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
         elif len(pieces) == 2: # <class>.<name>
-            return spaceName, pieces[0], pieces[0], pieces[1]
+            return name, spaceName, pieces[0], pieces[0], pieces[1]
         elif len(pieces) == 1: # <name>
-            return spaceName, "", "", pieces[0]
+            return name, spaceName, "", "", pieces[0]
         else:
-            return spaceName, "", "" # error?!
+            return name, spaceName, "", "" # error?!
 
     def fullName(self, isCPP=False):
         result = ".".join([self.fullClass(), self.name])
@@ -249,8 +249,8 @@ class ClassInfo(GeneralInfo):
 
     def getAllMethods(self):
         result = []
-        result.extend([fi for fi in sorted(self.methods) if fi.isconstructor])
-        result.extend([fi for fi in sorted(self.methods) if not fi.isconstructor])
+        result += [fi for fi in self.methods if fi.isconstructor]
+        result += [fi for fi in self.methods if not fi.isconstructor]
         return result
 
     def addMethod(self, fi):
@@ -369,7 +369,7 @@ class JavaWrapperGenerator(object):
         self.clear()
 
     def clear(self):
-        self.namespaces = set(["cv"])
+        self.namespaces = ["cv"]
         self.classes = { "Mat" : ClassInfo([ 'class Mat', '', [], [] ], self.namespaces) }
         self.module = ""
         self.Module = ""
@@ -512,9 +512,9 @@ class JavaWrapperGenerator(object):
             includes.append('#include "' + hdr + '"')
         for hdr in srcfiles:
             decls = parser.parse(hdr)
-            self.namespaces = parser.namespaces
+            self.namespaces = sorted(parser.namespaces)
             logging.info("\n\n===== Header: %s =====", hdr)
-            logging.info("Namespaces: %s", parser.namespaces)
+            logging.info("Namespaces: %s", sorted(parser.namespaces))
             if decls:
                 includes.append('#include "' + hdr + '"')
             else:
@@ -536,7 +536,7 @@ class JavaWrapperGenerator(object):
         moduleCppCode = StringIO()
         package_path = os.path.join(output_java_path, module)
         mkdir_p(package_path)
-        for ci in self.classes.values():
+        for ci in sorted(self.classes.values(), key=lambda x: x.symbol_id):
             if ci.name == "Mat":
                 continue
             ci.initCodeStreams(self.Module)
@@ -560,7 +560,7 @@ class JavaWrapperGenerator(object):
         report.write("\n".join(self.ported_func_list))
         report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % (len(self.skipped_func_list), total_count))
         report.write("".join(self.skipped_func_list))
-        for i in self.def_args_hist.keys():
+        for i in sorted(self.def_args_hist.keys()):
             report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i]))
         return report.getvalue()
 
@@ -1028,10 +1028,11 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
         if ci.consts:
             enumTypes = set(map(lambda c: c.enumType, ci.consts))
             grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes}
-            for typeName, consts in grouped_consts.items():
+            for typeName in sorted(grouped_consts.keys(), key=lambda x: str(x) if x is not None else ""):
+                consts = grouped_consts[typeName]
                 logging.info("%s", consts)
                 if typeName:
-                    typeName = typeName.rsplit(".", 1)[-1]
+                    typeNameShort = typeName.rsplit(".", 1)[-1]
 ###################### Utilize Java enums ######################
 #                    ci.j_code.write("""
 #    public enum {1} {{
@@ -1045,9 +1046,9 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
 #                    )
 ################################################################
                     ci.j_code.write("""
-    // C++: enum {1}
+    // C++: enum {1} ({2})
     public static final int
-            {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in consts]), typeName)
+            {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in consts]), typeNameShort, typeName)
                     )
                 else:
                     ci.j_code.write("""
@@ -1072,10 +1073,12 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
 
         # manual ports
         if ci.name in ManualFuncs:
-            for func in ManualFuncs[ci.name].keys():
-                ci.j_code.write ( "\n".join(ManualFuncs[ci.name][func]["j_code"]) )
-                ci.jn_code.write( "\n".join(ManualFuncs[ci.name][func]["jn_code"]) )
-                ci.cpp_code.write( "\n".join(ManualFuncs[ci.name][func]["cpp_code"]) )
+            for func in sorted(ManualFuncs[ci.name].keys()):
+                logging.info("manual function: %s", func)
+                fn = ManualFuncs[ci.name][func]
+                ci.j_code.write("\n".join(fn["j_code"]))
+                ci.jn_code.write("\n".join(fn["jn_code"]))
+                ci.cpp_code.write("\n".join(fn["cpp_code"]))
 
         if ci.name != self.Module or ci.base:
             # finalize()
@@ -1303,7 +1306,7 @@ if __name__ == "__main__":
     # initialize logger
     logging.basicConfig(filename='gen_java.log', format=None, filemode='w', level=logging.INFO)
     handler = logging.StreamHandler()
-    handler.setLevel(logging.WARNING)
+    handler.setLevel(os.environ.get('LOG_LEVEL', logging.WARNING))
     logging.getLogger().addHandler(handler)
 
     # parse command line parameters

From 4d00ed8df730f3829a9a8bc7b223246c249777c7 Mon Sep 17 00:00:00 2001
From: Chris Ballinger <cballinger@rightpoint.com>
Date: Fri, 13 Nov 2020 07:30:53 -0800
Subject: [PATCH 086/152] Merge pull request #18771 from
 chrisballinger:xcode-12-fixes

Xcode 12 and Python 2/3 fixes

* Fix compilation issues using Xcode 12 on macOS Catalina

* Fix macOS scripts to work on Python 2 or 3

* Fix additional issues with Python 3

* Fix additional Python 2/3 issue

* Fix another Python 2/3 issue

* Remove dependency on builtins module
---
 modules/objc/generator/gen_objc.py | 83 ++++++++++++++----------------
 platforms/ios/build_framework.py   |  6 +--
 2 files changed, 42 insertions(+), 47 deletions(-)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index 87e42e821d..e6637a7c4c 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -1,23 +1,20 @@
 #!/usr/bin/env python
 
+from __future__ import print_function, unicode_literals
 import sys, re, os.path, errno, fnmatch
 import json
 import logging
 import codecs
+import io
 from shutil import copyfile
 from pprint import pformat
 from string import Template
 from distutils.dir_util import copy_tree
 
-if sys.version_info[0] >= 3:
-    from io import StringIO
-else:
-    import io
-    class StringIO(io.StringIO):
-        def write(self, s):
-            if isinstance(s, str):
-                s = unicode(s)  # noqa: F821
-            return super(StringIO, self).write(s)
+try:
+    from io import StringIO # Python 3
+except ImportError: # Python 2
+    from io import BytesIO as StringIO
 
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
 
@@ -267,15 +264,15 @@ class ClassInfo(GeneralInfo):
         return Template("CLASS $namespace::$classpath.$name : $base").substitute(**self.__dict__)
 
     def getImports(self, module):
-        return ["#import \"%s.h\"" % c for c in sorted(filter(lambda m: m != self.name, map(lambda m: type_dict[m]["import_module"] if m in type_dict and "import_module" in type_dict[m] else m, self.imports)))]
+        return ["#import \"%s.h\"" % c for c in sorted([m for m in [type_dict[m]["import_module"] if m in type_dict and "import_module" in type_dict[m] else m for m in self.imports] if m != self.name])]
 
     def isEnum(self, c):
         return c in type_dict and type_dict[c].get("is_enum", False)
 
     def getForwardDeclarations(self, module):
-        enum_decl = filter(lambda x:self.isEnum(x) and type_dict[x]["import_module"] != module, self.imports)
-        enum_imports = list(set(map(lambda m: type_dict[m]["import_module"], enum_decl)))
-        class_decl = filter(lambda x: not self.isEnum(x), self.imports)
+        enum_decl = [x for x in self.imports if self.isEnum(x) and type_dict[x]["import_module"] != module]
+        enum_imports = list(set([type_dict[m]["import_module"] for m in enum_decl]))
+        class_decl = [x for x in self.imports if not self.isEnum(x)]
         return ["#import \"%s.h\"" % c for c in enum_imports] + [""] + ["@class %s;" % c for c in sorted(class_decl)]
 
     def addImports(self, ctype, is_out_type):
@@ -350,7 +347,7 @@ class ClassInfo(GeneralInfo):
                             module = M,
                             additionalImports = self.additionalImports.getvalue(),
                             importBaseClass = '#import "' + self.base + '.h"' if not self.is_base_class else "",
-                            forwardDeclarations = "\n".join(filter(None, self.getForwardDeclarations(objcM))),
+                            forwardDeclarations = "\n".join([_f for _f in self.getForwardDeclarations(objcM) if _f]),
                             enumDeclarations = self.enum_declarations.getvalue(),
                             nativePointerHandling = Template(
 """
@@ -656,7 +653,7 @@ def build_swift_logues(args):
 
 def add_method_to_dict(class_name, fi):
     static = fi.static if fi.classname else True
-    if not method_dict.has_key((class_name, fi.objc_name)):
+    if (class_name, fi.objc_name) not in method_dict:
         objc_method_name = ("+" if static else "-") + fi.objc_name + ":" + build_objc_method_name(fi.args)
         method_dict[(class_name, fi.objc_name)] = objc_method_name
 
@@ -664,7 +661,7 @@ def see_lookup(objc_class, see):
     semi_colon = see.find("::")
     see_class = see[:semi_colon] if semi_colon > 0 else objc_class
     see_method = see[(semi_colon + 2):] if semi_colon != -1 else see
-    if method_dict.has_key((see_class, see_method)):
+    if (see_class, see_method) in method_dict:
         method = method_dict[(see_class, see_method)]
         if see_class == objc_class:
             return method
@@ -741,7 +738,7 @@ class ObjectiveCWrapperGenerator(object):
             logging.info('ignored: %s', constinfo)
         else:
             objc_type = enumType.rsplit(".", 1)[-1] if enumType else ""
-            if const_fix.has_key(constinfo.classname) and const_fix[constinfo.classname].has_key(objc_type) and const_fix[constinfo.classname][objc_type].has_key(constinfo.name):
+            if constinfo.classname in const_fix and objc_type in const_fix[constinfo.classname] and constinfo.name in const_fix[constinfo.classname][objc_type]:
                 fixed_const = const_fix[constinfo.classname][objc_type][constinfo.name]
                 constinfo.name = fixed_const
                 constinfo.cname = fixed_const
@@ -772,7 +769,7 @@ class ObjectiveCWrapperGenerator(object):
             objc_type = enumType.rsplit(".", 1)[-1]
             if objc_type in enum_ignore_list:
                 return
-            if enum_fix.has_key(constinfo.classname):
+            if constinfo.classname in enum_fix:
                 objc_type = enum_fix[constinfo.classname].get(objc_type, objc_type)
             import_module = constinfo.classname if constinfo.classname and constinfo.classname != objc_type else self.Module
             type_dict[ctype] = { "cast_from" : "int",
@@ -800,7 +797,7 @@ class ObjectiveCWrapperGenerator(object):
             logging.info('ignored: %s', fi)
         elif classname in ManualFuncs and fi.objc_name in ManualFuncs[classname]:
             logging.info('manual: %s', fi)
-            if ManualFuncs[classname][fi.objc_name].has_key("objc_method_name"):
+            if "objc_method_name" in ManualFuncs[classname][fi.objc_name]:
                 method_dict[(classname, fi.objc_name)] = ManualFuncs[classname][fi.objc_name]["objc_method_name"]
         elif not self.isWrapped(classname):
             logging.warning('not found: %s', fi)
@@ -827,7 +824,7 @@ class ObjectiveCWrapperGenerator(object):
         updated_files += 1
 
     def get_namespace_prefix(self, cname):
-        namespace = self.classes[cname].namespace if self.classes.has_key(cname) else "cv"
+        namespace = self.classes[cname].namespace if cname in self.classes else "cv"
         return namespace.replace(".", "::") + "::"
 
     def gen(self, srcfiles, module, output_path, output_objc_path, common_headers, manual_classes):
@@ -875,7 +872,7 @@ class ObjectiveCWrapperGenerator(object):
         mkdir_p(package_path)
         extension_file = "%s/%s/%sExt.swift" % (output_objc_path, module, self.Module)
 
-        for ci in self.classes.values():
+        for ci in list(self.classes.values()):
             if ci.name == "Mat":
                 continue
             ci.initCodeStreams(self.Module)
@@ -901,13 +898,13 @@ class ObjectiveCWrapperGenerator(object):
         report.write("\n".join(self.ported_func_list))
         report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % (len(self.skipped_func_list), total_count))
         report.write("".join(self.skipped_func_list))
-        for i in self.def_args_hist.keys():
+        for i in list(self.def_args_hist.keys()):
             report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i]))
         return report.getvalue()
 
     def fullTypeName(self, t):
-        if not type_dict[t].get("is_primitive", False) or type_dict[t].has_key("cast_to"):
-            if type_dict[t].has_key("cast_to"):
+        if not type_dict[t].get("is_primitive", False) or "cast_to" in type_dict[t]:
+            if "cast_to" in type_dict[t]:
                 return type_dict[t]["cast_to"]
             else:
                 namespace_prefix = self.get_namespace_prefix(t)
@@ -916,7 +913,7 @@ class ObjectiveCWrapperGenerator(object):
             return t
 
     def build_objc2cv_prologue(self, prologue, vector_type, vector_full_type, objc_type, vector_name, array_name):
-        if not (type_dict.has_key(vector_type) and type_dict[vector_type].has_key("to_cpp") and type_dict[vector_type]["to_cpp"] != "%(n)s.nativeRef"):
+        if not (vector_type in type_dict and "to_cpp" in type_dict[vector_type] and type_dict[vector_type]["to_cpp"] != "%(n)s.nativeRef"):
             prologue.append("OBJC2CV(" + vector_full_type + ", " + objc_type[:-1] + ", " + vector_name + ", " + array_name + ");")
         else:
             conv_macro = "CONV_" + array_name
@@ -925,7 +922,7 @@ class ObjectiveCWrapperGenerator(object):
             prologue.append("#undef " + conv_macro)
 
     def build_cv2objc_epilogue(self, epilogue, vector_type, vector_full_type, objc_type, vector_name, array_name):
-        if not (type_dict.has_key(vector_type) and type_dict[vector_type].has_key("from_cpp") and type_dict[vector_type]["from_cpp"] != ("[" + objc_type[:-1] + " fromNative:%(n)s]")):
+        if not (vector_type in type_dict and "from_cpp" in type_dict[vector_type] and type_dict[vector_type]["from_cpp"] != ("[" + objc_type[:-1] + " fromNative:%(n)s]")):
             epilogue.append("CV2OBJC(" + vector_full_type + ", " + objc_type[:-1] + ", " + vector_name + ", " + array_name + ");")
         else:
             unconv_macro = "UNCONV_" + array_name
@@ -1106,7 +1103,7 @@ class ObjectiveCWrapperGenerator(object):
                 ret_val = "cv::Ptr<" + namespace_prefix + ret_type + "> retVal = new " + namespace_prefix + ret_type + "("
                 tail = ")"
                 ret_type_dict = type_dict[ret_type]
-                from_cpp = ret_type_dict["from_cpp_ptr"] if ret_type_dict.has_key("from_cpp_ptr") else ret_type_dict["from_cpp"]
+                from_cpp = ret_type_dict["from_cpp_ptr"] if "from_cpp_ptr" in ret_type_dict else ret_type_dict["from_cpp"]
                 ret = "return " + (from_cpp % { "n" : "retVal" }) + ";"
             elif "from_cpp" in type_dict[ret_type]:
                 ret = "return " + (type_dict[ret_type]["from_cpp"] % { "n" : "retVal" }) + ";"
@@ -1212,13 +1209,13 @@ $unrefined_call$epilogue$ret
                 return const_value(target.value)
             return v
         if ci.consts:
-            enumTypes = set(map(lambda c: c.enumType, ci.consts))
+            enumTypes = set([c.enumType for c in ci.consts])
             grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes}
-            for typeName, consts in grouped_consts.items():
+            for typeName, consts in list(grouped_consts.items()):
                 logging.info("%s", consts)
                 if typeName:
                     typeName = typeName.rsplit(".", 1)[-1]
-                    if enum_fix.has_key(ci.cname):
+                    if ci.cname in enum_fix:
                         typeName = enum_fix[ci.cname].get(typeName, typeName)
 
                     ci.enum_declarations.write("""
@@ -1257,7 +1254,7 @@ typedef NS_ENUM(int, {2}) {{
             ci.addImports(pi.ctype, False)
             ci.method_declarations.write("@property " + ("(readonly) " if not pi.rw else "") + objc_type + " " + pi.name + ";\n")
             ptr_ref = "self." + ci.native_ptr_name + "->" if not ci.is_base_class else "self.nativePtr->"
-            if type_data.has_key("v_type"):
+            if "v_type" in type_data:
                 vector_cpp_type = type_data["v_type"]
                 has_namespace = vector_cpp_type.find("::") != -1
                 vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type
@@ -1269,7 +1266,7 @@ typedef NS_ENUM(int, {2}) {{
                 self.build_cv2objc_epilogue(epilogue, vector_cpp_type, vector_full_cpp_type, objc_type, "retValVector", "retVal")
                 ci.method_implementations.write("\t" + ("\n\t".join(epilogue)) + "\n")
                 ci.method_implementations.write("\treturn retVal;\n}\n\n")
-            elif type_data.has_key("v_v_type"):
+            elif "v_v_type" in type_data:
                 vector_cpp_type = type_data["v_v_type"]
                 has_namespace = vector_cpp_type.find("::") != -1
                 vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type
@@ -1283,14 +1280,14 @@ typedef NS_ENUM(int, {2}) {{
                 namespace_prefix = self.get_namespace_prefix(pi.ctype)
                 ci.method_implementations.write("-(" + objc_type + ")" + pi.name + " {\n")
                 ci.method_implementations.write("\tcv::Ptr<" + namespace_prefix + pi.ctype + "> retVal = new " + namespace_prefix + pi.ctype + "(" + ptr_ref + pi.name + ");\n")
-                from_cpp = type_data["from_cpp_ptr"] if type_data.has_key("from_cpp_ptr") else type_data["from_cpp"]
+                from_cpp = type_data["from_cpp_ptr"] if "from_cpp_ptr" in type_data else type_data["from_cpp"]
                 ci.method_implementations.write("\treturn " + (from_cpp % {"n": "retVal"}) + ";\n}\n\n")
             else:
                 from_cpp = type_data.get("from_cpp", "%(n)s")
                 retVal = from_cpp % {"n": (ptr_ref + pi.name)}
                 ci.method_implementations.write("-(" + objc_type + ")" + pi.name + " {\n\treturn " + retVal + ";\n}\n\n")
             if pi.rw:
-                if type_data.has_key("v_type"):
+                if "v_type" in type_data:
                     vector_cpp_type = type_data["v_type"]
                     has_namespace = vector_cpp_type.find("::") != -1
                     vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type
@@ -1300,13 +1297,13 @@ typedef NS_ENUM(int, {2}) {{
                     ci.method_implementations.write("\t" + ("\n\t".join(prologue)) + "\n")
                     ci.method_implementations.write("\t" + ptr_ref + pi.name + " = valVector;\n}\n\n")
                 else:
-                    to_cpp = type_data.get("to_cpp", ("(" + type_data.get("cast_to") + ")%(n)s") if type_data.has_key("cast_to") else "%(n)s")
+                    to_cpp = type_data.get("to_cpp", ("(" + type_data.get("cast_to") + ")%(n)s") if "cast_to" in type_data else "%(n)s")
                     val = to_cpp % {"n": pi.name}
                     ci.method_implementations.write("-(void)set" + pi.name[0].upper() + pi.name[1:] + ":(" + objc_type + ")" + pi.name + " {\n\t" + ptr_ref + pi.name + " = " + val + ";\n}\n\n")
 
         # manual ports
         if ci.name in ManualFuncs:
-            for func in ManualFuncs[ci.name].keys():
+            for func in list(ManualFuncs[ci.name].keys()):
                 ci.method_declarations.write( "\n".join(ManualFuncs[ci.name][func]["declaration"]) )
                 ci.method_implementations.write( "\n".join(ManualFuncs[ci.name][func]["implementation"]) )
 
@@ -1373,11 +1370,11 @@ typedef NS_ENUM(int, {2}) {{
             for dirname, dirs, files in os.walk(os.path.join(testdir, "test")):
                 for filename in files:
                     filepath = os.path.join(dirname, filename)
-                    with open(filepath) as file:
+                    with io.open(filepath, encoding="utf-8", errors="ignore") as file:
                         body = file.read()
                     body = body.replace("import OpenCV", "import " + framework_name)
                     body = body.replace("#import <OpenCV/OpenCV.h>", "#import <" + framework_name + "/" + framework_name + ".h>")
-                    with open(filepath, "w") as file:
+                    with codecs.open(filepath, "w", "utf-8") as file:
                         file.write(body)
 
 
@@ -1477,9 +1474,9 @@ def sanitize_documentation_string(doc, type):
             in_code = True
             lines[i] = line.replace("<code>", "")
 
-    lines = list(map(lambda x: x[x.find('*'):].strip() if x.lstrip().startswith("*") else x, lines))
-    lines = list(map(lambda x: "* " + x[1:].strip() if x.startswith("*") and x != "*" else x, lines))
-    lines = list(map(lambda x: x if x.startswith("*") else "* " + x if x and x != "*" else "*", lines))
+    lines = list([x[x.find('*'):].strip() if x.lstrip().startswith("*") else x for x in lines])
+    lines = list(["* " + x[1:].strip() if x.startswith("*") and x != "*" else x for x in lines])
+    lines = list([x if x.startswith("*") else "* " + x if x and x != "*" else "*" for x in lines])
 
     hasValues = False
     for line in lines:
@@ -1605,9 +1602,7 @@ if __name__ == "__main__":
             if os.path.exists(objc_test_resources_dir):
                 copy_tree(objc_test_resources_dir, os.path.join(objc_test_base_path, 'test', 'resources'))
 
-        manual_classes = filter(lambda x:type_dict.has_key(x),
-                                map(lambda x: x[x.rfind('/')+1:-2],
-                                    filter(lambda x: x.endswith('.h'), copied_files)))
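+        # i.e. take the copied '*.h' headers, strip the path and the '.h' suffix,
+        # and keep only the names that are present in type_dict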
+        manual_classes = [x for x in [x[x.rfind('/')+1:-2] for x in [x for x in copied_files if x.endswith('.h')]] if x in type_dict]
 
         if len(srcfiles) > 0:
             generator.gen(srcfiles, module, dstdir, objc_base_path, common_headers, manual_classes)
diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py
index e89cf3c666..e759072825 100755
--- a/platforms/ios/build_framework.py
+++ b/platforms/ios/build_framework.py
@@ -31,12 +31,12 @@ However, {framework_name}.framework directory is erased and recreated on each ru
 Adding --dynamic parameter will build {framework_name}.framework as App Store dynamic framework. Only iOS 8+ versions are supported.
 """
 
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
 import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
 from subprocess import check_call, check_output, CalledProcessError
 from distutils.dir_util import copy_tree
 
-IPHONEOS_DEPLOYMENT_TARGET='8.0'  # default, can be changed via command line options or environment variable
+IPHONEOS_DEPLOYMENT_TARGET='9.0'  # default, can be changed via command line options or environment variable
 
 def execute(cmd, cwd = None):
     print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)
@@ -46,7 +46,7 @@ def execute(cmd, cwd = None):
         raise Exception("Child returned:", retcode)
 
 def getXCodeMajor():
-    ret = check_output(["xcodebuild", "-version"])
+    ret = check_output(["xcodebuild", "-version"]).decode('utf-8')
     m = re.match(r'Xcode\s+(\d+)\..*', ret, flags=re.IGNORECASE)
     if m:
         return int(m.group(1))

From a3f3fbe05de648e125a9aa4d64f92cbcf39007fd Mon Sep 17 00:00:00 2001
From: Andreas Franek <itfranek@gmail.com>
Date: Sun, 18 Oct 2020 11:48:41 +0200
Subject: [PATCH 087/152] add rudimentary support for uEye cameras

uEye cameras are made by IDS, cf. https://en.ids-imaging.com/
Currently supports driver version 4.94 and up, since the event system was overhauled in that release.
Supports setting/getting the properties: fps, width, height
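
For illustration, a minimal usage sketch (editor's example, not part of the patch;
it assumes OpenCV was configured with WITH_UEYE=ON and the IDS driver is installed):

```
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(0, cv::CAP_UEYE);    // 0 opens the first available uEye camera
    if (!cap.isOpened())
        return 1;
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280);  // selects the closest matching freerun format
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
    cap.set(cv::CAP_PROP_FPS, 30);
    cv::Mat frame;
    cap >> frame;                             // grab (lock newest buffer) + retrieve (copy, unlock)
    return frame.empty() ? 1 : 0;
}
```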
---
 CMakeLists.txt                              |   7 +
 modules/videoio/CMakeLists.txt              |   9 +
 modules/videoio/cmake/detect_ueye.cmake     |  25 +
 modules/videoio/cmake/init.cmake            |   1 +
 modules/videoio/include/opencv2/videoio.hpp |   1 +
 modules/videoio/src/cap_interface.hpp       |   2 +
 modules/videoio/src/cap_ueye.cpp            | 499 ++++++++++++++++++++
 modules/videoio/src/videoio_registry.cpp    |   8 +-
 8 files changed, 551 insertions(+), 1 deletion(-)
 create mode 100644 modules/videoio/cmake/detect_ueye.cmake
 create mode 100644 modules/videoio/src/cap_ueye.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4350b2fe2a..02c0009e9c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -367,6 +367,9 @@ OCV_OPTION(WITH_MSMF_DXVA "Enable hardware acceleration in Media Foundation back
 OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF
   VISIBLE_IF NOT ANDROID AND NOT WINRT
   VERIFY HAVE_XIMEA)
+OCV_OPTION(WITH_UEYE "Include UEYE camera support" OFF
+  VISIBLE_IF NOT ANDROID AND NOT APPLE AND NOT WINRT
+  VERIFY HAVE_UEYE)
 OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF
   VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID
   VERIFY HAVE_XINE)
@@ -1372,6 +1375,10 @@ if(WITH_XIMEA OR HAVE_XIMEA)
   status("    XIMEA:" HAVE_XIMEA THEN YES ELSE NO)
 endif()
 
+if(WITH_UEYE OR HAVE_UEYE)
+  status("    uEye:" HAVE_UEYE THEN YES ELSE NO)
+endif()
+
 if(WITH_XINE OR HAVE_XINE)
   status("    Xine:"           HAVE_XINE           THEN "YES (ver ${XINE_VERSION})"     ELSE NO)
 endif()
diff --git a/modules/videoio/CMakeLists.txt b/modules/videoio/CMakeLists.txt
index 12ff992294..a31d969ab8 100644
--- a/modules/videoio/CMakeLists.txt
+++ b/modules/videoio/CMakeLists.txt
@@ -138,6 +138,15 @@ if(TARGET ocv.3rdparty.ximea)
   list(APPEND tgts ocv.3rdparty.ximea)
 endif()
 
+if(TARGET ocv.3rdparty.ueye)
+  if("ueye" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
+    ocv_create_builtin_videoio_plugin("opencv_videoio_ueye" ocv.3rdparty.ueye "cap_ueye.cpp")
+  else()
+    list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ueye.cpp)
+    list(APPEND tgts ocv.3rdparty.ueye)
+  endif()
+endif()
+
 if(TARGET ocv.3rdparty.ffmpeg)
   if(HAVE_FFMPEG_WRAPPER)
     list(APPEND tgts ocv.3rdparty.ffmpeg)
diff --git a/modules/videoio/cmake/detect_ueye.cmake b/modules/videoio/cmake/detect_ueye.cmake
new file mode 100644
index 0000000000..495e9c2450
--- /dev/null
+++ b/modules/videoio/cmake/detect_ueye.cmake
@@ -0,0 +1,25 @@
+if(NOT HAVE_UEYE)
+  if(WIN32)
+    if(X86_64)
+      set(_WIN_LIB_SUFFIX "_64")
+    endif()
+  endif()
+  find_path(UEYE_INCLUDE "ueye.h"
+    PATHS "${UEYE_ROOT}" ENV UEYE_ROOT "/usr" "C:/Program Files/IDS/uEye/Develop"
+    HINTS "${regpath}"
+    PATH_SUFFIXES "include")
+  find_library(UEYE_LIBRARY ueye_api${_WIN_LIB_SUFFIX}
+    PATHS "${UEYE_ROOT}" ENV UEYE_ROOT "/usr" "C:/Program Files/IDS/uEye/Develop"
+    HINTS "${regpath}"
+    PATH_SUFFIXES "lib")
+  if(UEYE_INCLUDE AND UEYE_LIBRARY)
+    set(HAVE_UEYE TRUE)
+  endif()
+endif()
+unset(_WIN_LIB_SUFFIX)
+
+if(HAVE_UEYE)
+  ocv_add_external_target(ueye "${UEYE_INCLUDE}" "${UEYE_LIBRARY}" "HAVE_UEYE")
+endif()
+
+set(HAVE_UEYE ${HAVE_UEYE} PARENT_SCOPE)
diff --git a/modules/videoio/cmake/init.cmake b/modules/videoio/cmake/init.cmake
index 1efef12c5e..42c3f9c27f 100644
--- a/modules/videoio/cmake/init.cmake
+++ b/modules/videoio/cmake/init.cmake
@@ -30,6 +30,7 @@ add_backend("msdk" WITH_MFX)
 add_backend("openni2" WITH_OPENNI2)
 add_backend("pvapi" WITH_PVAPI)
 add_backend("realsense" WITH_LIBREALSENSE)
+add_backend("ueye" WITH_UEYE)
 add_backend("ximea" WITH_XIMEA)
 add_backend("xine" WITH_XINE)
 
diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp
index eb5645ab77..40586a35fb 100644
--- a/modules/videoio/include/opencv2/videoio.hpp
+++ b/modules/videoio/include/opencv2/videoio.hpp
@@ -120,6 +120,7 @@ enum VideoCaptureAPIs {
        CAP_OPENCV_MJPEG = 2200,         //!< Built-in OpenCV MotionJPEG codec
        CAP_INTEL_MFX    = 2300,         //!< Intel MediaSDK
        CAP_XINE         = 2400,         //!< XINE engine (Linux)
+       CAP_UEYE         = 2500,         //!< uEye Camera API
      };
 
 /** @brief %VideoCapture generic properties identifier.
diff --git a/modules/videoio/src/cap_interface.hpp b/modules/videoio/src/cap_interface.hpp
index 34bef9b9e1..5112fffe6f 100644
--- a/modules/videoio/src/cap_interface.hpp
+++ b/modules/videoio/src/cap_interface.hpp
@@ -301,6 +301,8 @@ Ptr<IVideoCapture> create_PvAPI_capture( int index );
 Ptr<IVideoCapture> create_XIMEA_capture_cam( int index );
 Ptr<IVideoCapture> create_XIMEA_capture_file( const std::string &serialNumber );
 
+Ptr<IVideoCapture> create_ueye_camera(int camera);
+
 Ptr<IVideoCapture> create_Aravis_capture( int index );
 
 Ptr<IVideoCapture> createMotionJpegCapture(const std::string& filename);
diff --git a/modules/videoio/src/cap_ueye.cpp b/modules/videoio/src/cap_ueye.cpp
new file mode 100644
index 0000000000..3912da52bc
--- /dev/null
+++ b/modules/videoio/src/cap_ueye.cpp
@@ -0,0 +1,499 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+/*
+This file adds support for uEye cameras in OpenCV.
+
+Cameras can be opened by ID. If 0 is passed as ID, the first available camera
+will be used. For any other number, the camera associated with that ID will be
+opened (cf. IDS documentation for is_InitCamera).
+
+Images are double-buffered in a ring buffer of size 2 (called an 'image memory
+sequence' in the uEye SDK, cf. is_AddToSequence). The memory is locked on a
+'grab' call, then copied and unlocked during 'retrieve'. The image queue provided
+by the uEye SDK is not used since it automatically locks the buffers when a new
+image arrives, which means the queue can fill up when frames are retrieved too
+slowly.
+*/
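+
+// Editor's note (illustration, not from the original patch): through the generic
+// VideoCapture API the scheme above maps to
+//     cv::VideoCapture cap(0, cv::CAP_UEYE);
+//     cap.grab();          // waits for the frame event, then locks the newest buffer
+//     cap.retrieve(frame); // copies the data into 'frame' and unlocks the buffer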
+
+#include "precomp.hpp"
+
+#include <ueye.h>
+
+#include <array>
+#include <chrono>
+#include <cstdlib>
+#include <memory>
+#include <thread>
+
+namespace cv
+{
+namespace
+{
+struct image_buffer
+{
+    char* data;
+    INT id;
+};
+}
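+// ASSERT_UEYE() turns any non-IS_SUCCESS return code into a thrown cv::Exception with
+// expression/file/line context; PRINT_ON_UEYE_ERROR() only logs the failure, for
+// teardown paths that must not throw.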
+#define ASSERT_UEYE(expr) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_Error_(Error::StsAssert, ("%s %s %d: failed with code %u", #expr, __FILE__, __LINE__, expr_result)); }
+#define PRINT_ON_UEYE_ERROR( expr ) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << #expr << " " << __FILE__ << " " << __LINE__ << ": failed with code " << expr_result); }
+
+struct VideoCapture_uEye CV_FINAL: public IVideoCapture
+{
+    int getCaptureDomain() CV_OVERRIDE
+    {
+        return cv::CAP_UEYE;
+    }
+
+    VideoCapture_uEye(int camera);
+
+    bool isOpened() const CV_OVERRIDE
+    {
+        return 255 != cam_id;
+    }
+
+    ~VideoCapture_uEye() CV_OVERRIDE
+    {
+        close();
+    }
+
+    double getProperty(int property_id) const CV_OVERRIDE;
+    bool setProperty(int property_id, double value) CV_OVERRIDE;
+    bool grabFrame() CV_OVERRIDE;
+    bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
+
+    void close();
+    void start_camera();
+    void stop_camera();
+
+    void unlock_image_buffer();
+
+    HIDS cam_id = 255;
+    SENSORINFO sensor_info;
+    double fps;
+    int width;
+    int height;
+    int pitch;
+    std::array<image_buffer, 2> ring_buffer = {{{nullptr, 0}, {nullptr, 0}}};
+    char* locked_image = nullptr;
+};
+
+Ptr<IVideoCapture> create_ueye_camera(int camera)
+{
+    return cv::makePtr<VideoCapture_uEye>(camera);
+}
+
+namespace
+{
+std::vector<IMAGE_FORMAT_INFO> get_freerun_formats(HIDS cam_id)
+{
+    UINT count;
+    ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count)));
+    UINT sizeof_list = sizeof(IMAGE_FORMAT_LIST) + (count - 1) * sizeof(IMAGE_FORMAT_INFO);
+    // the list storage comes from std::malloc, so it must be released with std::free (not delete)
+    std::unique_ptr<IMAGE_FORMAT_LIST, decltype(&std::free)> list(new (std::malloc(sizeof_list)) IMAGE_FORMAT_LIST, &std::free);
+
+    list->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
+    list->nNumListElements = count;
+    ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_LIST, list.get(), sizeof_list));
+
+    // copy to vector and filter out non-live modes
+    std::vector<IMAGE_FORMAT_INFO> formats;
+    formats.reserve(count + 1);
+    std::copy_if(list->FormatInfo, list->FormatInfo+count, std::back_inserter(formats), [](const IMAGE_FORMAT_INFO& format)
+    {
+        return (format.nSupportedCaptureModes & CAPTMODE_FREERUN);
+    });
+
+    return formats;
+}
+
+void set_matching_format(HIDS cam_id, const SENSORINFO& sensor_info, int width, int height)
+{
+    // uEye camera formats sometimes do not include the native resolution (without binning, subsampling or AOI)
+    if(width == int(sensor_info.nMaxWidth) && height == int(sensor_info.nMaxHeight))
+    {
+        ASSERT_UEYE(is_SetBinning(cam_id, IS_BINNING_DISABLE));
+        ASSERT_UEYE(is_SetSubSampling(cam_id, IS_SUBSAMPLING_DISABLE));
+        IS_RECT rectAOI = {0, 0, width, height};
+        ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_SET_AOI, &rectAOI, sizeof(rectAOI)));
+        return;
+    }
+    auto formats = get_freerun_formats(cam_id);
+    CV_Assert(formats.size() > 0);
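+    // cost heuristic: difference in size, plus how far the format's AOI origin
+    // lies from a centered crop of the full sensor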
+    auto calc_err = [=](const IMAGE_FORMAT_INFO& format)
+    {
+        return format.nWidth - width + format.nHeight - height + (sensor_info.nMaxWidth - width)/2 - format.nX0 + (sensor_info.nMaxHeight - height)/2 - format.nY0;
+    };
+
+    std::sort(formats.begin(), formats.end(), [=](const IMAGE_FORMAT_INFO& f0, const IMAGE_FORMAT_INFO& f1)
+    {
+        return calc_err(f0) < calc_err(f1);
+    });
+
+    ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_SET_FORMAT, &formats.front().nFormatID, sizeof(UINT)));
+}
+}
+
+
+VideoCapture_uEye::VideoCapture_uEye(int camera)
+{
+    CV_Assert(camera >= 0);
+    CV_Assert(camera < 255); // max camera id is 254
+    cam_id = static_cast<HIDS>(camera);
+    CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): opening...");
+    ASSERT_UEYE(is_InitCamera(&cam_id, nullptr));
+
+    IS_INIT_EVENT init_event = {IS_SET_EVENT_FRAME, FALSE, FALSE};
+    ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_INIT, &init_event, sizeof(init_event)));
+    UINT frame_event = IS_SET_EVENT_FRAME;
+    ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_ENABLE, &frame_event, sizeof(frame_event)));
+
+    ASSERT_UEYE(is_ResetToDefault(cam_id));
+
+    ASSERT_UEYE(is_SetFrameRate(cam_id, IS_GET_FRAMERATE, &fps));
+
+    start_camera();
+}
+
+double VideoCapture_uEye::getProperty(int property_id) const
+{
+    auto value = 0.;
+    switch (property_id)
+    {
+    case CAP_PROP_FRAME_WIDTH:
+        value = width;
+        break;
+    case CAP_PROP_FRAME_HEIGHT:
+        value = height;
+        break;
+    case CAP_PROP_FPS:
+        value = fps;
+        break;
+    }
+    return value;
+}
+
+bool VideoCapture_uEye::setProperty(int property_id, double value)
+{
+    if(!isOpened())
+        return false;
+    try
+    {
+        bool set_format = false;
+        switch (property_id)
+        {
+        case CAP_PROP_FRAME_WIDTH:
+            if(width == value)
+                break;
+            width = static_cast<int>(value);
+            set_format = true;
+            break;
+        case CAP_PROP_FRAME_HEIGHT:
+            if(height == value)
+                break;
+            height = static_cast<int>(value);
+            set_format = true;
+            break;
+        case CAP_PROP_FPS:
+            if(fps == value)
+                break;
+            ASSERT_UEYE(is_SetFrameRate(cam_id, value, &fps));
+            break;
+        }
+        if(set_format)
+        {
+            set_matching_format(cam_id, sensor_info, width, height);
+            start_camera();
+        }
+    }
+    catch(const cv::Exception& e)
+    {
+        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " <<  e.what());
+        return false;
+    }
+
+    return true;
+}
+
+bool VideoCapture_uEye::grabFrame()
+{
+    if (!isOpened())
+        return false;
+
+    try
+    {
+        IS_WAIT_EVENT wait_event{IS_SET_EVENT_FRAME, static_cast<UINT>(3*1000/fps), 0, 0}; // wait for the time it should take to get 3 frames
+        ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_WAIT, &wait_event, sizeof(wait_event)));
+        INT current_buffer_id;
+        char* current_buffer;
+        char* last;
+        ASSERT_UEYE(is_GetActSeqBuf(cam_id, &current_buffer_id, &current_buffer, &last));
+
+        const int lock_tries = 4;
+        std::chrono::milliseconds lock_time_out(static_cast<int>(1000/(fps*4))); // wait for a quarter of a frame if not lockable, should not occur in event mode
+        UINT ret;
+        for(int i = 0; i < lock_tries; i++) // try locking the buffer
+        {
+            ret = is_LockSeqBuf(cam_id, IS_IGNORE_PARAMETER, last);
+            if(IS_SEQ_BUFFER_IS_LOCKED == ret)
+                std::this_thread::sleep_for(lock_time_out);
+            else
+                break;
+        }
+        ASSERT_UEYE(ret);
+        locked_image = last;
+    }
+    catch(const cv::Exception& e)
+    {
+        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " <<  e.what());
+        close();
+        return false;
+    }
+    return true;
+}
+
+bool VideoCapture_uEye::retrieveFrame(int /*outputType*/, OutputArray frame)
+{
+    if(!locked_image)
+        return false;
+    Mat(height, width, CV_8UC3, locked_image, pitch).copyTo(frame);
+    try
+    {
+        unlock_image_buffer();
+    }
+    catch(const cv::Exception& e)
+    {
+        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " <<  e.what());
+        return false;
+    }
+
+    return true;
+}
+
+void VideoCapture_uEye::start_camera()
+{
+    stop_camera();
+
+    IS_RECT aoi;
+    ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_AOI, &aoi, sizeof(aoi)));
+
+    UINT x_is_abs_pos;
+    UINT y_is_abs_pos;
+
+    ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_X_ABS, &x_is_abs_pos, sizeof(x_is_abs_pos)));
+    ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_Y_ABS, &y_is_abs_pos, sizeof(y_is_abs_pos)));
+
+    ASSERT_UEYE(is_GetSensorInfo(cam_id, &sensor_info));
+    width  = x_is_abs_pos? sensor_info.nMaxWidth: aoi.s32Width;
+    height = y_is_abs_pos? sensor_info.nMaxHeight: aoi.s32Height;
+
+    // allocate ring_buffer
+    int bpp = 24;
+    for(auto& image_memory: ring_buffer)
+    {
+        ASSERT_UEYE(is_AllocImageMem(cam_id, width, height, bpp, &image_memory.data, &image_memory.id));
+        ASSERT_UEYE(is_AddToSequence(cam_id, image_memory.data, image_memory.id));
+    }
+
+    // TODO: this could be set according to sensor_info.nColorMode and CAP_PROP_FOURCC
+    ASSERT_UEYE(is_SetColorMode(cam_id, IS_CM_BGR8_PACKED));
+    ASSERT_UEYE(is_GetImageMemPitch (cam_id, &pitch));
+
+    ASSERT_UEYE(is_CaptureVideo(cam_id, IS_DONT_WAIT));
+}
+
+void VideoCapture_uEye::stop_camera()
+{
+    if(is_CaptureVideo(cam_id, IS_GET_LIVE))
+        ASSERT_UEYE(is_StopLiveVideo(cam_id, IS_FORCE_VIDEO_STOP));
+
+    if(locked_image)
+        unlock_image_buffer();
+    ASSERT_UEYE(is_ClearSequence(cam_id));
+    for(auto buffer: ring_buffer)
+    {
+        if(buffer.data)
+        {
+            ASSERT_UEYE(is_FreeImageMem(cam_id, buffer.data, buffer.id));
+            buffer.data = nullptr;
+        }
+    }
+}
+
+void VideoCapture_uEye::close()
+{
+    if(!isOpened())
+        return;
+    CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): closing...");
+    // During closing we do not care as much about correct error handling:
+    // either something has already gone wrong or we were called from the
+    // destructor. Just make sure that all the calls are issued.
+    try
+    {
+        stop_camera();
+    }
+    catch(const cv::Exception& e)
+    {
+        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " <<  e.what());
+    }
+    UINT frame_event = IS_SET_EVENT_FRAME;
+    PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_DISABLE, &frame_event, sizeof(frame_event)));
+    PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_EXIT, &frame_event, sizeof(frame_event)));
+    PRINT_ON_UEYE_ERROR(is_ExitCamera(cam_id));
+    cam_id = 255;
+}
+
+void VideoCapture_uEye::unlock_image_buffer()
+{
+    char* tmp_buffer = nullptr;
+    std::swap(locked_image, tmp_buffer);
+    ASSERT_UEYE(is_UnlockSeqBuf(cam_id, IS_IGNORE_PARAMETER, tmp_buffer));
+}
+} // namespace cv
+
+// plugin glue
+#if defined(BUILD_PLUGIN)
+
+#include "plugin_api.hpp"
+
+namespace cv
+{
+
+namespace
+{
+#define CV_PLUGIN_NULL_FAIL(ptr) if(!ptr) return CV_ERROR_FAIL;
+#define CV_PLUGIN_CALL_BEGIN CV_PLUGIN_NULL_FAIL(handle) try {
+#define CV_PLUGIN_CALL_END } catch (...) { return CV_ERROR_FAIL; }
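+// Every plugin entry point below is wrapped in these guards so that exceptions
+// never propagate across the C plugin ABI.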
+
+CvResult CV_API_CALL cv_capture_open(const char*, int cam_id, CV_OUT CvPluginCapture* handle)
+{
+    CV_PLUGIN_CALL_BEGIN
+
+    *handle = NULL;
+    std::unique_ptr<VideoCapture_uEye> cap(new VideoCapture_uEye(cam_id));
+    if (cap->isOpened())
+    {
+        *handle = (CvPluginCapture)cap.release();
+        return CV_ERROR_OK;
+    }
+    return CV_ERROR_FAIL;
+
+    CV_PLUGIN_CALL_END
+}
+
+CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
+{
+    CV_PLUGIN_NULL_FAIL(handle)
+
+    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
+    delete instance;
+    return CV_ERROR_OK;
+}
+
+
+CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
+{
+    CV_PLUGIN_NULL_FAIL(val)
+    CV_PLUGIN_CALL_BEGIN
+
+    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
+    *val = instance->getProperty(prop);
+    return CV_ERROR_OK;
+
+    CV_PLUGIN_CALL_END
+}
+
+CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
+{
+    CV_PLUGIN_CALL_BEGIN
+
+    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
+    return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;
+
+    CV_PLUGIN_CALL_END
+}
+
+CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
+{
+    CV_PLUGIN_CALL_BEGIN
+
+    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
+    return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;
+
+    CV_PLUGIN_CALL_END
+}
+
+CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
+{
+    CV_PLUGIN_CALL_BEGIN
+
+    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
+    Mat img;
+    if (instance->retrieveFrame(stream_idx, img))
+        return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
+    return CV_ERROR_FAIL;
+
+    CV_PLUGIN_CALL_END
+}
+
+CvResult CV_API_CALL cv_writer_open(const char* /*filename*/, int /*fourcc*/, double /*fps*/, int /*width*/, int /*height*/, int /*isColor*/,
+                                    CV_OUT CvPluginWriter* /*handle*/)
+{
+    return CV_ERROR_FAIL;
+}
+
+CvResult CV_API_CALL cv_writer_release(CvPluginWriter /*handle*/)
+{
+    return CV_ERROR_FAIL;
+}
+
+CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter /*handle*/, int /*prop*/, CV_OUT double* /*val*/)
+{
+    return CV_ERROR_FAIL;
+}
+
+CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
+{
+    return CV_ERROR_FAIL;
+}
+
+CvResult CV_API_CALL cv_writer_write(CvPluginWriter /*handle*/, const unsigned char* /*data*/, int /*step*/, int /*width*/, int /*height*/, int /*cn*/)
+{
+    return CV_ERROR_FAIL;
+}
+
+const OpenCV_VideoIO_Plugin_API_preview plugin_api_v0 =
+{
+    {
+        sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
+        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
+        "uEye OpenCV Video I/O plugin"
+    },
+    /*  1*/CAP_UEYE,
+    /*  2*/cv_capture_open,
+    /*  3*/cv_capture_release,
+    /*  4*/cv_capture_get_prop,
+    /*  5*/cv_capture_set_prop,
+    /*  6*/cv_capture_grab,
+    /*  7*/cv_capture_retrieve,
+    /*  8*/cv_writer_open,
+    /*  9*/cv_writer_release,
+    /* 10*/cv_writer_get_prop,
+    /* 11*/cv_writer_set_prop,
+    /* 12*/cv_writer_write
+};
+} // namespace
+} // namespace cv
+
+const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
+{
+    if (requested_abi_version != 0)
+        return NULL;
+    if (requested_api_version != 0)
+        return NULL;
+    return &cv::plugin_api_v0;
+}
+
+#endif // BUILD_PLUGIN
diff --git a/modules/videoio/src/videoio_registry.cpp b/modules/videoio/src/videoio_registry.cpp
index b5798db80e..3ee1bab822 100644
--- a/modules/videoio/src/videoio_registry.cpp
+++ b/modules/videoio/src/videoio_registry.cpp
@@ -51,7 +51,7 @@ namespace {
 - platform specific universal SDK: WINRT, AVFOUNDATION, MSMF/DSHOW, V4L/V4L2
 - RGB-D: OpenNI/OpenNI2, REALSENSE
 - special OpenCV (file-based): "images", "mjpeg"
-- special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE)
+- special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE)/uEye
 - other: XINE, gphoto2, etc
 */
 static const struct VideoBackendInfo builtin_backends[] =
@@ -130,6 +130,12 @@ static const struct VideoBackendInfo builtin_backends[] =
     DECLARE_STATIC_BACKEND(CAP_ARAVIS, "ARAVIS", MODE_CAPTURE_BY_INDEX, 0, create_Aravis_capture, 0),
 #endif
 
+#ifdef HAVE_UEYE // uEye
+    DECLARE_STATIC_BACKEND(CAP_UEYE, "UEYE", MODE_CAPTURE_BY_INDEX, 0, create_ueye_camera, 0),
+#elif defined(ENABLE_PLUGINS)
+    DECLARE_DYNAMIC_BACKEND(CAP_UEYE, "UEYE", MODE_CAPTURE_BY_INDEX),
+#endif
+
 #ifdef HAVE_GPHOTO2
     DECLARE_STATIC_BACKEND(CAP_GPHOTO2, "GPHOTO2", MODE_CAPTURE_ALL, createGPhoto2Capture, createGPhoto2Capture, 0),
 #endif

From 61144f935efaae03d506ab2b54ee02b3bc1a4452 Mon Sep 17 00:00:00 2001
From: Sergei Slashchinin <62052793+sl-sergei@users.noreply.github.com>
Date: Sat, 14 Nov 2020 01:22:10 +0300
Subject: [PATCH 088/152] Merge pull request #18783 from sl-sergei:fix_conv1d

Add support for Conv1D on OpenCV backend

* Add support for Conv1D on OpenCV backend

* disable tests on other targets/backends

* Fix formatting

* Restore comment

* Remove unnecessary flag and fix test logic

* Fix perf test

* fix braces

* Fix indentation, assert check and remove unnecessary condition

* Remove unnecessary changes

* Add test cases for variable weights and bias

* dnn(conv): fallback on OpenCV+CPU instead of failures

* coding style
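
For illustration, a minimal sketch of driving the new path through the public dnn
API (editor's example mirroring the perf test added below; a 3-dimensional input
blob is what selects the Conv1D code path):

```
#include <opencv2/dnn.hpp>
using namespace cv;
using namespace cv::dnn;

int main()
{
    LayerParams lp;
    int ksz = 3, stride = 1, dilation = 1, pad[2] = {0, 0};
    lp.set("kernel_size", DictValue::arrayInt(&ksz, 1));
    lp.set("stride",      DictValue::arrayInt(&stride, 1));
    lp.set("dilation",    DictValue::arrayInt(&dilation, 1));
    lp.set("pad",         DictValue::arrayInt(&pad[0], 2));
    lp.set("num_output", 6);
    lp.set("bias_term", false);
    lp.type = "Convolution";
    lp.name = "conv1d";

    int wsz[] = {6, 6, 3};                    // OCN x ICN x K
    Mat weights(3, wsz, CV_32F);
    randu(weights, -1.0f, 1.0f);
    lp.blobs.push_back(weights);

    Net net;
    net.addLayerToPrev(lp.name, lp.type, lp);

    int isz[] = {1, 6, 10};                   // N x C x W
    Mat input(3, isz, CV_32F);
    randu(input, -1.0f, 1.0f);
    net.setInput(input);
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);

    Mat out = net.forward();                  // shape: 1 x 6 x 8
    return out.empty() ? 1 : 0;
}
```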
---
 modules/dnn/perf/perf_convolution.cpp        |   4 +-
 modules/dnn/perf/perf_convolution1d.cpp      | 163 +++++++++++++
 modules/dnn/perf/perf_convolution3d.cpp      |   4 +-
 modules/dnn/src/layers/convolution_layer.cpp | 232 ++++++++++++++-----
 modules/dnn/src/onnx/onnx_importer.cpp       |   8 +-
 modules/dnn/test/test_onnx_importer.cpp      |  61 ++++-
 modules/dnn/test/test_tf_importer.cpp        |   2 -
 7 files changed, 404 insertions(+), 70 deletions(-)
 create mode 100644 modules/dnn/perf/perf_convolution1d.cpp

diff --git a/modules/dnn/perf/perf_convolution.cpp b/modules/dnn/perf/perf_convolution.cpp
index 7d51cd300f..c2a3a66ab9 100644
--- a/modules/dnn/perf/perf_convolution.cpp
+++ b/modules/dnn/perf/perf_convolution.cpp
@@ -533,7 +533,7 @@ struct ConvParamID
         CONV_100 = 100,
         CONV_LAST = sizeof(testConvolutionConfigs) / sizeof(testConvolutionConfigs[0])
     };
-    int val_;                                                                  \
+    int val_;
     ConvParamID(int val = 0) : val_(val) {}
     operator int() const { return val_; }
     static ::testing::internal::ParamGenerator<ConvParamID> all()
@@ -546,7 +546,7 @@ struct ConvParamID
         ConvParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = ConvParamID(i); } // reduce generated code size
         return ::testing::ValuesIn(v_, v_ + NUM);
     }
-};                                                                                  \
+};
 static inline void PrintTo(const ConvParamID& v, std::ostream* os)
 {
     CV_Assert((int)v >= 0); CV_Assert((int)v < ConvParamID::CONV_LAST);
diff --git a/modules/dnn/perf/perf_convolution1d.cpp b/modules/dnn/perf/perf_convolution1d.cpp
new file mode 100644
index 0000000000..c35cbd503f
--- /dev/null
+++ b/modules/dnn/perf/perf_convolution1d.cpp
@@ -0,0 +1,163 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "perf_precomp.hpp"
+#include <opencv2/dnn/shape_utils.hpp>
+
+namespace opencv_test {
+
+struct Conv1DParam_t {
+    int kernel;
+    struct BlobShape { int dims[3]; } shapeIn;
+    int outCN;
+    int groups;
+    int stride;
+    int dilation;
+    int pad[2];
+    const char* padMode;
+    bool hasBias;
+    double declared_flops;
+};
+// Details: #12142
+static const Conv1DParam_t testConvolution1DConfigs[] = {
+        {3, {{1, 6, 10}}, 6, 1, 1, 1, {0, 0}, "VALID", true, 1776.},
+        {3, {{1, 2, 19}}, 2, 2, 2, 1, {1, 1}, "", true, 260.},
+        {3, {{1, 2, 25}}, 2, 2, 1, 1, {2, 2}, "SAME", false, 650.},
+};
+
+struct Conv1DParamID
+{
+    enum {
+        CONV_0 = 0,
+        CONV_LAST = sizeof(testConvolution1DConfigs) / sizeof(testConvolution1DConfigs[0])
+    };
+    int val_;
+    Conv1DParamID(int val = 0) : val_(val) {}
+    operator int() const { return val_; }
+    static ::testing::internal::ParamGenerator<Conv1DParamID> all()
+    {
+        enum { NUM = (int)CONV_LAST };
+        Conv1DParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = Conv1DParamID(i); } // reduce generated code size
+        return ::testing::ValuesIn(v_, v_ + NUM);
+    }
+};
+static inline void PrintTo(const Conv1DParamID& v, std::ostream* os)
+{
+    CV_Assert((int)v >= 0); CV_Assert((int)v < Conv1DParamID::CONV_LAST);
+    const Conv1DParam_t& p = testConvolution1DConfigs[(int)v];
+
+    *os << "GFLOPS=" << cv::format("%.3f", p.declared_flops * 1e-9)
+        << ", K=[" << p.kernel << "]"
+        << ", IN={" << p.shapeIn.dims[0] << ", " << p.shapeIn.dims[1] << ", " << p.shapeIn.dims[2] << "}"
+        << ", OCN=" << p.outCN;
+    if (p.groups > 1)
+        *os << ", G=" << p.groups;
+    if (p.stride != 1)
+        *os << ", S=" << p.stride;
+    if (p.dilation != 1)
+        *os << ", D="  << p.dilation;
+    if (p.pad[0] != 0 && p.pad[1] != 0 )
+        *os << ", P=(" << p.pad[0] << ", " << p.pad[1] << ")";
+    if (!((std::string)p.padMode).empty())
+        *os << ", PM=" << ((std::string)p.padMode);
+    if (p.hasBias)
+        *os << ", BIAS";
+}
+
+
+typedef tuple<Conv1DParamID, tuple<Backend, Target> > Conv1DTestParam_t;
+typedef TestBaseWithParam<Conv1DTestParam_t> Conv1D;
+
+PERF_TEST_P_(Conv1D, conv1d)
+{
+    int test_id = (int)get<0>(GetParam());
+    ASSERT_GE(test_id, 0); ASSERT_LT(test_id, Conv1DParamID::CONV_LAST);
+    const Conv1DParam_t& params = testConvolution1DConfigs[test_id];
+    double declared_flops = params.declared_flops;
+
+    DictValue kernel   = DictValue::arrayInt(&params.kernel, 1);
+    DictValue stride   = DictValue::arrayInt(&params.stride, 1);
+    DictValue pad      = DictValue::arrayInt(&params.pad[0], 2);
+    DictValue dilation = DictValue::arrayInt(&params.dilation, 1);
+
+    MatShape inputShape = MatShape(params.shapeIn.dims, params.shapeIn.dims + 3);
+    int outChannels = params.outCN;
+    int groups = params.groups;
+    std::string padMode(params.padMode);
+
+    bool hasBias = params.hasBias;
+    Backend backendId = get<0>(get<1>(GetParam()));
+    Target targetId = get<1>(get<1>(GetParam()));
+
+    if (targetId != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
+
+    int inChannels = inputShape[1];
+
+    int sz[] = {outChannels, inChannels / groups, params.kernel};
+    Mat weights(3, &sz[0], CV_32F);
+    randu(weights, -1.0f, 1.0f);
+
+    LayerParams lp;
+    lp.set("kernel_size", kernel);
+    lp.set("pad", pad);
+    if (!padMode.empty())
+        lp.set("pad_mode", padMode);
+
+    lp.set("stride", stride);
+    lp.set("dilation", dilation);
+    lp.set("num_output", outChannels);
+    lp.set("group", groups);
+    lp.set("bias_term", hasBias);
+    lp.type = "Convolution";
+    lp.name = "testLayer";
+    lp.blobs.push_back(weights);
+
+    if (hasBias)
+    {
+        Mat bias(1, outChannels, CV_32F);
+        randu(bias, -1.0f, 1.0f);
+        lp.blobs.push_back(bias);
+    }
+
+    int inpSz[] = {1, inChannels, inputShape[2]};
+    Mat input(3, &inpSz[0], CV_32F);
+    randu(input, -1.0f, 1.0f);
+
+    Net net;
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backendId);
+    net.setPreferableTarget(targetId);
+
+    // warmup
+    Mat output = net.forward();
+
+    MatShape netInputShape = shape(input);
+    size_t weightsMemory = 0, blobsMemory = 0;
+    net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory);
+    int64 flops = net.getFLOPS(netInputShape);
+    CV_Assert(flops > 0);
+
+    std::cout
+    << "IN=" << divUp(input.total() * input.elemSize(), 1u<<10) << " Kb " << netInputShape
+    << "    OUT=" << divUp(output.total() * output.elemSize(), 1u<<10) << " Kb " << shape(output)
+    << "    Weights(parameters): " << divUp(weightsMemory, 1u<<10) << " Kb"
+    << "    MFLOPS=" << flops * 1e-6 << std::endl;
+
+    TEST_CYCLE()
+    {
+        Mat res = net.forward();
+    }
+    EXPECT_NEAR(flops, declared_flops, declared_flops * 1e-6);
+    SANITY_CHECK_NOTHING();
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Conv1D, Combine(
+        Conv1DParamID::all(),
+        dnnBackendsAndTargets(false, false)  // defined in ../test/test_common.hpp
+));
+
+} // namespace
diff --git a/modules/dnn/perf/perf_convolution3d.cpp b/modules/dnn/perf/perf_convolution3d.cpp
index 1f512b2a15..0cf4ce26a3 100644
--- a/modules/dnn/perf/perf_convolution3d.cpp
+++ b/modules/dnn/perf/perf_convolution3d.cpp
@@ -46,7 +46,7 @@ struct Conv3DParamID
         CONV_100 = 16,
         CONV_LAST = sizeof(testConvolution3DConfigs) / sizeof(testConvolution3DConfigs[0])
     };
-    int val_;                                                                  \
+    int val_;
     Conv3DParamID(int val = 0) : val_(val) {}
     operator int() const { return val_; }
     static ::testing::internal::ParamGenerator<Conv3DParamID> all()
@@ -59,7 +59,7 @@ struct Conv3DParamID
         Conv3DParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = Conv3DParamID(i); } // reduce generated code size
         return ::testing::ValuesIn(v_, v_ + NUM);
     }
-};                                                                                  \
+};
 static inline void PrintTo(const Conv3DParamID& v, std::ostream* os)
 {
     CV_Assert((int)v >= 0); CV_Assert((int)v < Conv3DParamID::CONV_LAST);
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 473c07b755..c8245c487d 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -113,17 +113,22 @@ public:
         MatSize weightShape = blobs.empty() ? inputs[1].size : blobs[0].size;
 
         CV_Assert(inputs[0].dims == outputs[0].dims);
+        if (weightShape.dims() == 3)
+        {
+            kernel_size.assign(1, kernel_size[0]);
+            strides.assign(1, strides[0]);
+        }
         CV_Assert(weightShape.dims() == kernel_size.size() + 2);
         for (int i = 0; i < kernel_size.size(); i++) {
             CV_Assert(weightShape[i + 2] == kernel_size[i]);
         }
 
         const Mat &input = inputs[0];
-        CV_Assert((input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
+        CV_Assert(((input.dims == 3 && kernel_size.size() == 1) || input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
         for (size_t i = 0; i < outputs.size(); i++)
         {
             CV_Assert(inputs[i].type() == input.type());
-            CV_Assert((inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
+            CV_Assert(((input.dims == 3 && kernel_size.size() == 1) || inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
             for (int j = 0; j < inputs[i].dims; j++) {
                 CV_Assert(inputs[i].size[j] == input.size[j]);
             }
@@ -261,19 +266,26 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+        size_t ksize = kernel_size.size();
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-            if (kernel_size.size() == 3)
+            if (ksize == 1)
+                return false;
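+            // (Conv1D reports unsupported here so dnn falls back to the OpenCV/CPU path)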
+            if (ksize == 3)
                 return preferableTarget == DNN_TARGET_CPU;
             if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
                 return false;
             return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
         }
-        else
 #endif
-            return (kernel_size.size() == 3 && preferableTarget == DNN_TARGET_CPU && backendId == DNN_BACKEND_OPENCV) ||
-                   (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || (backendId == DNN_BACKEND_HALIDE && !blobs.empty())));
+        if (backendId == DNN_BACKEND_OPENCV)
+            return ksize >= 1 && ksize <= 3;
+#ifdef HAVE_HALIDE
+        if (backendId == DNN_BACKEND_HALIDE)
+            return ksize == 2 && !blobs.empty();
+#endif
+        return false;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -325,18 +337,27 @@ public:
         inputs_arr.getMatVector(inputs);
         // prepare weightsMat where each row is aligned and has enough zero padding on the right to
         // use vectorized (i.e. with intrinsics) loops without tail processing
-        Mat wm = blobs.empty() ? inputs[1].reshape(1, numOutput) : blobs[0].reshape(1, numOutput);
-        if( wm.step1() % VEC_ALIGN != 0 )
-        {
-            int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
-            Mat wm_buffer = Mat(numOutput, newcols, wm.type());
-            Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
-            wm_padding.setTo(Scalar::all(0.));
-            Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
-            wm.copyTo(wm_aligned);
-            wm = wm_aligned;
-        }
-        weightsMat = wm;
+        if (!blobs.empty())
+        {
+            Mat wm = blobs[0].reshape(1, numOutput);
+            if( wm.step1() % VEC_ALIGN != 0 )
+            {
+                int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
+                Mat wm_buffer = Mat(numOutput, newcols, wm.type());
+                Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
+                wm_padding.setTo(Scalar::all(0.));
+                Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
+                wm.copyTo(wm_aligned);
+                wm = wm_aligned;
+            }
+            weightsMat = wm;
+        }
+        else
+        {
+            // initialized in .forward()
+            weightsMat.release();
+        }
+
         weightsMultipliers.assign(numOutput, 1.0);
 
         Mat biasMat = hasBias() ? blobs[1].reshape(1, numOutput) : Mat();
@@ -678,8 +699,11 @@ public:
         {
             size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
                                            1, std::multiplies<size_t>());
-            CV_Assert_N(
-                       (input.dims == 4 || input.dims == 5) && (input.dims == output.dims),
+            bool isConv1D = input.dims == 3;
+            bool isConv2D = input.dims == 4;
+            bool isConv3D = input.dims == 5;
+            CV_CheckEQ(static_cast<int>(kernel_size.size()), input.dims - 2, "");
+            CV_Assert_N(input.dims == output.dims,
                        input.size[0] == output.size[0],
                        weights.rows == output.size[1],
                        weights.cols == (input.size[1]/ngroups)*karea,
@@ -689,12 +713,15 @@ public:
                        input.isContinuous(),
                        output.isContinuous(),
                        biasvec.size() == (size_t)output.size[1]+2);
+            CV_Check(weights.step1(), weights.step1() % VEC_ALIGN == 0, "");
+            CV_CheckType(weights.type(), CV_32FC1, "");
             ParallelConv p;
 
             p.input_ = &input;
             p.weights_ = &weights;
             p.output_ = &output;
-            for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i];
+            int max_ind = isConv1D? 3: 4;
+            for( int i = 0; i < max_ind; i++ ) p.outShape[i] = output.size[i];
             p.outShape[1] /= ngroups;
 
             p.kernel_size = kernel_size; p.strides = strides; p.dilations = dilations;
@@ -706,20 +733,19 @@ public:
             int inpCnAll = input.size[1];
             int depth = (input.dims == 5) ? input.size[2] : 1;
             int width = input.size[input.dims - 1];
-            int height = input.size[input.dims - 2];
+            int height = isConv1D? 1 : input.size[input.dims - 2];
             int inpCn = inpCnAll / ngroups;
 
-            bool isConv2D = kernel_size.size() == 2;
-
-            p.is1x1_ = isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 &&
-                       pads_begin[0] == 0  && pads_begin[1] == 0;
+            p.is1x1_ = (isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 &&
+                       pads_begin[0] == 0  && pads_begin[1] == 0) ||
+                       (isConv1D && pads_begin[0] == 0 && kernel_size[0] == 1);
 
             p.useAVX    = checkHardwareSupport(CPU_AVX)  && isConv2D;
             p.useAVX2   = checkHardwareSupport(CPU_AVX2) && isConv2D;
             p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX  && isConv2D;
 
-            int kernel_d = !isConv2D? kernel_size[0] : 1;
-            int kernel_h = kernel_size[kernel_size.size() - 2];
+            int kernel_d = isConv3D? kernel_size[0] : 1;
+            int kernel_h = isConv1D? 1 : kernel_size[kernel_size.size() - 2];
             int kernel_w = kernel_size.back();
 
             int blk_size_cn0 = cvCeil(800./(kernel_w*kernel_h));
@@ -729,14 +755,20 @@ public:
             ncn = std::min(ncn, inpCn);
             p.blk_size_cn = ncn;
 
-            int dil_d = !isConv2D? dilations[0] : 1;
-            int dil_h = dilations[dilations.size() - 2];
+            int dil_d = isConv3D? dilations[0] : 1;
+            int dil_h = isConv1D? 1 : dilations[dilations.size() - 2];
             int dil_w = dilations.back();
 
             p.ofstab_.resize(karea * ncn);
             int* ofstab = &p.ofstab_[0];
 
-            if (isConv2D)
+            if (isConv1D)
+            {
+                for( int k = 0; k < ncn; k++ )
+                    for( int k_c = 0; k_c < kernel_w; k_c++ )
+                        ofstab[k*kernel_w + k_c] = k*width + k_c*dil_w;
+            }
+            else if (isConv2D)
             {
                 for( int k = 0; k < ncn; k++ )
                     for( int k_r = 0; k_r < kernel_h; k_r++ )
@@ -765,34 +797,36 @@ public:
         {
             const int valign = ConvolutionLayerImpl::VEC_ALIGN;
             int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
+            bool isConv1D = input_->dims == 3;
             bool isConv2D = input_->dims == 4;
+            bool isConv3D = input_->dims == 5;
 
             int outW = output_->size[output_->dims - 1];
-            int outH = output_->size[output_->dims - 2];
+            int outH = isConv1D? 1 : output_->size[output_->dims - 2];
             int outCn = output_->size[1]/ngroups;
 
-            int depth = !isConv2D? input_->size[2] : 1;
-            int height = input_->size[input_->dims - 2];
+            int depth = isConv3D? input_->size[2] : 1;
+            int height = isConv1D? 1 : input_->size[input_->dims - 2];
             int width = input_->size[input_->dims - 1];
             int inpCn = input_->size[1]/ngroups;
 
             const int nstripes = nstripes_;
 
-            int kernel_d = !isConv2D? kernel_size[0] : 1;
-            int kernel_h = kernel_size[kernel_size.size() - 2];
+            int kernel_d = isConv3D? kernel_size[0] : 1;
+            int kernel_h = isConv1D? 1 : kernel_size[kernel_size.size() - 2];
             int kernel_w = kernel_size.back();
             int karea = kernel_w*kernel_h*kernel_d;
 
-            int pad_d = !isConv2D? pads_begin[0] : 0;
-            int pad_t = pads_begin[pads_begin.size() - 2];
+            int pad_d = isConv3D? pads_begin[0] : 0;
+            int pad_t = isConv1D? 0 : pads_begin[pads_begin.size() - 2];
             int pad_l = pads_begin.back();
 
-            int stride_d = !isConv2D? strides[0] : 0;
-            int stride_h = strides[strides.size() - 2];
+            int stride_d = isConv3D? strides[0] : 0;
+            int stride_h = isConv1D? 0 : strides[strides.size() - 2];
             int stride_w = strides.back();
 
-            int dilation_d = !isConv2D? dilations[0] : 1;
-            int dilation_h = dilations[dilations.size() - 2];
+            int dilation_d = isConv3D? dilations[0] : 1;
+            int dilation_h = isConv1D? 1 : dilations[dilations.size() - 2];
             int dilation_w = dilations.back();
 
             int i, j, k, d;
@@ -1032,7 +1066,71 @@ public:
                         // do im2row for a part of input tensor
                         float* rowbuf = rowbuf0;
 
-                        if (isConv2D)
+                        if (isConv1D)
+                        {
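+                            // 1D im2row: each output x-position becomes one row of
+                            // ncn*kernel_w input samples (offsets precomputed in ofstab)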
+                            for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
+                            {
+                                int delta = std::min(ofs1 - ofs, outW - out_j);
+                                int out_j1 = out_j + delta;
+
+                                int in_j = out_j * stride_w - pad_l;
+                                const float* imgptr = data_inp0 + cn0*width + in_j;
+                                ofs += delta;
+
+                                // do im2row for a part of input tensor
+                                if( is1x1 )
+                                {
+                                    for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
+                                    {
+                                        for( k = 0; k < vsz; k++ )
+                                            rowbuf[k] = imgptr[k*inpPlaneSize];
+                                    }
+                                }
+                                else
+                                {
+                                    for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
+                                    {
+                                        // this condition should be true for most of the tensor elements, i.e.
+                                        // most of the time the kernel aperture is inside the tensor along X.
+                                        if( out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
+                                        {
+                                            for( k = 0; k < vsz; k++ )
+                                            {
+                                                int k1 = ofstab[k];
+                                                float v0 = imgptr[k1];
+                                                float v1 = imgptr[k1 + stride_w];
+                                                rowbuf[k] = v0;
+                                                rowbuf[k+vsz_a] = v1;
+                                            }
+                                            out_j++;
+                                            rowbuf += vsz_a;
+                                            imgptr += stride_w;
+                                            in_j += stride_w;
+                                        }
+                                        else
+                                        {
+                                            int i0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
+                                            int i1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
+
+                                            // here some non-continuous sub-row of the row will not be
+                                            // filled from the tensor; we need to make sure that the uncovered
+                                            // elements are explicitly set to 0's. the easiest way is to
+                                            // set all the elements to 0's before the loop.
+                                            memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
+                                            for( k = 0; k < ncn; k++ )
+                                            {
+                                                for( i = i0; i < i1; i++ )
+                                                {
+                                                    int imgofs = k*width + i*dilation_w;
+                                                    rowbuf[k*kernel_w + i] = imgptr[imgofs];
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        else if (isConv2D)
                         {
                             if( is1x1 && stride_w == 1 && stride_h == 1 )
                             {
@@ -1265,9 +1363,12 @@ public:
                                             vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
                                 for( k = 0; k < vsz; k += 4, rptr += 4 )
                                 {
-                                    v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k);
-                                    v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a),
-                                                r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3);
+                                    v_float32x4 w0 = v_load_aligned(wptr0 + k);
+                                    v_float32x4 w1 = v_load_aligned(wptr1 + k);
+                                    v_float32x4 r0 = v_load_aligned(rptr);
+                                    v_float32x4 r1 = v_load_aligned(rptr + vsz_a);
+                                    v_float32x4 r2 = v_load_aligned(rptr + vsz_a*2);
+                                    v_float32x4 r3 = v_load_aligned(rptr + vsz_a*3);
 
                                     vs00 += w0*r0;
                                     vs01 += w0*r1;
@@ -1337,6 +1438,12 @@ public:
 #ifdef HAVE_OPENCL
     bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
     {
+        if (kernel_size.size() != 2)
+        {
+            // no OpenCL optimizations, see supportBackend()
+            return false;
+        }
+
         std::vector<UMat> inputs;
         std::vector<UMat> outputs;
 
@@ -1520,26 +1627,35 @@ public:
         if (blobs.empty())
         {
             Mat wm = inputs[1].reshape(1, outCn);
-            if( wm.step1() % VEC_ALIGN != 0 )
+            if (wm.data != weightsMat.data)
             {
-                wm.copyTo(weightsMat);
+                int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
+                Mat wm_buffer = Mat(numOutput, newcols, wm.type());
+                Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
+                wm_padding.setTo(Scalar::all(0.));
+                weightsMat = wm_buffer.colRange(0, wm.cols);
+
+                wm.copyTo((const Mat&)weightsMat);
                 if (inputs.size() > 2)
                 {
                     Mat biasMat = inputs[2].reshape(1, outCn);
                     biasMat.col(0).copyTo(biasvec);
-                    biasvec.resize(outCn + 2);
-                }
-                else
-                {
-                    biasvec.resize(outCn + 2, 0);
                 }
+                biasvec.resize(outCn + 2, 0);
             }
         }
-
-        /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
-               name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
-               kernel.width, kernel.height, pad.width, pad.height,
-               stride.width, stride.height, dilation.width, dilation.height);*/
+        /*if (inputs[0].dims > 3) {
+            printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
+                   name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
+                   kernel.width, kernel.height, pad.width, pad.height,
+                   stride.width, stride.height, dilation.width, dilation.height);
+        }
+        else {
+            printf("conv %s: input (%d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
+                   name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2],
+                   kernel.width, kernel.height, pad.width, pad.height,
+                   stride.width, stride.height, dilation.width, dilation.height);
+        }*/
         int inpGroupCn = blobs.empty() ? inputs[1].size[1] : blobs[0].size[1];
         CV_Assert_N(inputs.size() >= (size_t)1, inputs[0].size[1] % inpGroupCn == 0,
                     outputs.size() == 1, inputs[0].data != outputs[0].data);
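
The alignment step above pads each row of the weights matrix out to a multiple of VEC_ALIGN and zeroes the tail, so the vectorized kernels can keep issuing aligned loads past the logical end of a row. A minimal numpy sketch of that layout, with an illustrative alignment value:

```python
import numpy as np

VEC_ALIGN = 32  # illustrative; the layer defines its own constant

def align_size(n, align):
    # round n up to the next multiple of align (mirrors cv::alignSize)
    return (n + align - 1) // align * align

wm = np.random.rand(8, 50).astype(np.float32)      # 8 output channels, 50 weights each
newcols = align_size(wm.shape[1], VEC_ALIGN)       # 50 -> 64
wm_buffer = np.zeros((wm.shape[0], newcols), np.float32)
wm_buffer[:, :wm.shape[1]] = wm                    # padding columns stay 0
weightsMat = wm_buffer[:, :wm.shape[1]]            # logical view; rows keep the aligned stride
```
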
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 56683f4c14..9443336305 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -200,12 +200,12 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot
 
         if(attribute_name == "kernel_shape")
         {
-            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
+            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
             lp.set("kernel_size", parse(attribute_proto.ints()));
         }
         else if(attribute_name == "strides")
         {
-            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
+            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
             lp.set("stride", parse(attribute_proto.ints()));
         }
         else if(attribute_name == "pads")
@@ -229,7 +229,7 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot
             else
             {
                 // Convolution or pooling.
-                CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
+                CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
                 lp.set("pad", parse(attribute_proto.ints()));
             }
         }
@@ -244,7 +244,7 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot
         }
         else if(attribute_name == "dilations")
         {
-            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
+            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
             lp.set("dilation", parse(attribute_proto.ints()));
         }
         else if (attribute_proto.has_i())
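
Taken together, the relaxed checks admit 1-D Conv/Pooling attributes: kernel_shape, strides, and dilations may now hold a single int, and pads a begin/end pair. A minimal sketch that exercises this path, assuming the onnx Python package and an OpenCV build carrying this patch:

```python
import numpy as np
import onnx
from onnx import helper, TensorProto

w = np.ones((2, 3, 3), np.float32)  # (out_channels, in_channels, kernel_w)
node = helper.make_node('Conv', ['x', 'w'], ['y'],
                        kernel_shape=[3],  # one int is accepted now
                        strides=[1], dilations=[1],
                        pads=[1, 1])       # begin/end pair for the 1-D case
graph = helper.make_graph(
    [node], 'conv1d',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 10])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10])],
    [helper.make_tensor('w', TensorProto.FLOAT, w.shape, w.flatten().tolist())])
onnx.save(helper.make_model(graph), 'conv1d.onnx')

import cv2
net = cv2.dnn.readNetFromONNX('conv1d.onnx')
net.setInput(np.random.rand(1, 3, 10).astype(np.float32))
print(net.forward().shape)  # (1, 2, 10)
```
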
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 993ba56be4..5c6de55da5 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -183,9 +183,14 @@ TEST_P(Test_ONNX_layers, Convolution3D)
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
     testONNXModels("conv3d");
+}
+
+TEST_P(Test_ONNX_layers, Convolution3D_bias)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     testONNXModels("conv3d_bias");
 }
 
@@ -648,6 +653,58 @@ TEST_P(Test_ONNX_layers, ResizeOpset11_Torch1_6)
     testONNXModels("resize_opset11_torch1.6");
 }
 
+TEST_P(Test_ONNX_layers, Conv1d)
+{
+    testONNXModels("conv1d");
+}
+
+TEST_P(Test_ONNX_layers, Conv1d_bias)
+{
+    testONNXModels("conv1d_bias");
+}
+
+TEST_P(Test_ONNX_layers, Conv1d_variable_weight)
+{
+    String basename = "conv1d_variable_w";
+    Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
+    ASSERT_FALSE(net.empty());
+
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat input = blobFromNPY(_tf("data/input_" + basename + "_0.npy"));
+    Mat weights = blobFromNPY(_tf("data/input_" + basename + "_1.npy"));
+    Mat ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
+
+    net.setInput(input, "0");
+    net.setInput(weights, "1");
+
+    Mat out = net.forward();
+    normAssert(ref, out, "", default_l1, default_lInf);
+}
+
+TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
+{
+    String basename = "conv1d_variable_wb";
+    Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
+    ASSERT_FALSE(net.empty());
+
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat input = blobFromNPY(_tf("data/input_" + basename + "_0.npy"));
+    Mat weights = blobFromNPY(_tf("data/input_" + basename + "_1.npy"));
+    Mat bias = blobFromNPY(_tf("data/input_" + basename + "_2.npy"));
+    Mat ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
+
+    net.setInput(input, "0");
+    net.setInput(weights, "1");
+    net.setInput(bias, "bias");
+
+    Mat out = net.forward();
+    normAssert(ref, out, "", default_l1, default_lInf);
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
 
 class Test_ONNX_nets : public Test_ONNX_layers
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index 68b720a375..e9c1562b4c 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -173,8 +173,6 @@ TEST_P(Test_TensorFlow_layers, Convolution3D)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
     runTensorFlowNet("conv3d");
 }
 

From 3826158547d4af8250100164377ec2271a49e4c9 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 13 Nov 2020 06:27:18 +0000
Subject: [PATCH 089/152] java: 'namespace_' class prefix, support inner
 classes

- support multi-level namespaces
- support inner classes (Params)
- reduce scope of 'using namespace' in JNI C++ code
---
 modules/java/generator/gen_java.py | 137 ++++++++++++++++++++---------
 1 file changed, 94 insertions(+), 43 deletions(-)

diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py
index 279a2e140b..6c604ed04b 100755
--- a/modules/java/generator/gen_java.py
+++ b/modules/java/generator/gen_java.py
@@ -123,7 +123,8 @@ T_CPP_MODULE = Template(read_contents(os.path.join(SCRIPT_DIR, 'templates/cpp_mo
 
 class GeneralInfo():
     def __init__(self, type, decl, namespaces):
-        self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
+        self.symbol_id, self.parent_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
+        self.cname = get_cname(self.symbol_id)
 
         # parse doxygen comments
         self.params={}
@@ -150,6 +151,9 @@ class GeneralInfo():
         returns: (namespace, classpath, classname, name)
         '''
         name = name[name.find(" ")+1:].strip() # remove struct/class/const prefix
+        parent = name[:name.rfind('.')].strip()
+        if len(parent) == 0:
+            parent = None
         spaceName = ""
         localName = name # <classes>.<name>
         for namespace in sorted(namespaces, key=len, reverse=True):
@@ -159,31 +163,44 @@ class GeneralInfo():
                 break
         pieces = localName.split(".")
         if len(pieces) > 2: # <class>.<class>.<class>.<name>
-            return name, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
+            return name, parent, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
         elif len(pieces) == 2: # <class>.<name>
-            return name, spaceName, pieces[0], pieces[0], pieces[1]
+            return name, parent, spaceName, pieces[0], pieces[0], pieces[1]
         elif len(pieces) == 1: # <name>
-            return name, spaceName, "", "", pieces[0]
+            return name, parent, spaceName, "", "", pieces[0]
         else:
-            return name, spaceName, "", "" # error?!
+            return name, parent, spaceName, "", "", "" # error?!
 
-    def fullName(self, isCPP=False):
-        result = ".".join([self.fullClass(), self.name])
-        return result if not isCPP else get_cname(result)
+    def fullNameOrigin(self):
+        result = self.symbol_id
+        return result
+
+    def fullNameJAVA(self):
+        result = '.'.join([self.fullParentNameJAVA(), self.jname])
+        return result
 
-    def fullClass(self, isCPP=False):
+    def fullNameCPP(self):
+        result = self.cname
+        return result
+
+    def fullParentNameJAVA(self):
         result = ".".join([f for f in [self.namespace] + self.classpath.split(".") if len(f)>0])
-        return result if not isCPP else get_cname(result)
+        return result
+
+    def fullParentNameCPP(self):
+        result = get_cname(self.parent_id)
+        return result
 
 class ConstInfo(GeneralInfo):
     def __init__(self, decl, addedManually=False, namespaces=[], enumType=None):
         GeneralInfo.__init__(self, "const", decl, namespaces)
-        self.cname = get_cname(self.name)
         self.value = decl[1]
         self.enumType = enumType
         self.addedManually = addedManually
         if self.namespace in namespaces_dict:
-            self.name = '%s_%s' % (namespaces_dict[self.namespace], self.name)
+            prefix = namespaces_dict[self.namespace]
+            if prefix:
+                self.name = '%s_%s' % (prefix, self.name)
 
     def __repr__(self):
         return Template("CONST $name=$value$manual").substitute(name=self.name,
@@ -227,7 +244,6 @@ class ClassPropInfo():
 class ClassInfo(GeneralInfo):
     def __init__(self, decl, namespaces=[]): # [ 'class/struct cname', ': base', [modlist] ]
         GeneralInfo.__init__(self, "class", decl, namespaces)
-        self.cname = get_cname(self.name)
         self.methods = []
         self.methods_suffixes = {}
         self.consts = [] # using a list to save the occurrence order
@@ -242,6 +258,18 @@ class ClassInfo(GeneralInfo):
         for m in decl[2]:
             if m.startswith("="):
                 self.jname = m[1:]
+
+        if self.classpath:
+            prefix = self.classpath.replace('.', '_')
+            self.name = '%s_%s' % (prefix, self.name)
+            self.jname = '%s_%s' % (prefix, self.jname)
+
+        if self.namespace in namespaces_dict:
+            prefix = namespaces_dict[self.namespace]
+            if prefix:
+                self.name = '%s_%s' % (prefix, self.name)
+                self.jname = '%s_%s' % (prefix, self.jname)
+
         self.base = ''
         if decl[1]:
             #self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
@@ -358,11 +386,26 @@ class FuncInfo(GeneralInfo):
         self.isconstructor = self.name == self.classname
         if "[" in self.name:
             self.jname = "getelem"
-        if self.namespace in namespaces_dict:
-            self.jname = '%s_%s' % (namespaces_dict[self.namespace], self.jname)
         for m in decl[2]:
-            if m.startswith("="):
+            if m.startswith("="):  # alias from WRAP_AS
                 self.jname = m[1:]
+        if self.classpath and self.classname != self.classpath:
+            prefix = self.classpath.replace('.', '_')
+            self.classname = prefix #'%s_%s' % (prefix, self.classname)
+            if self.isconstructor:
+                self.name = prefix #'%s_%s' % (prefix, self.name)
+                self.jname = prefix #'%s_%s' % (prefix, self.jname)
+
+        if self.namespace in namespaces_dict:
+            prefix = namespaces_dict[self.namespace]
+            if prefix:
+                if self.classname:
+                    self.classname = '%s_%s' % (prefix, self.classname)
+                    if self.isconstructor:
+                        self.jname = '%s_%s' % (prefix, self.jname)
+                else:
+                    self.jname = '%s_%s' % (prefix, self.jname)
+
         self.static = ["","static"][ "/S" in decl[2] ]
         self.ctype = re.sub(r"^CvTermCriteria", "TermCriteria", decl[1] or "")
         self.args = []
@@ -374,6 +417,12 @@ class FuncInfo(GeneralInfo):
             arg[3] = arg_fix_map.get('attrib', arg[3]) #fixing arg attrib
             self.args.append(ArgInfo(arg))
 
+    def fullClassJAVA(self):
+        return self.fullParentNameJAVA()
+
+    def fullClassCPP(self):
+        return self.fullParentNameCPP()
+
     def __repr__(self):
         return Template("FUNC <$ctype $namespace.$classpath.$name $args>").substitute(**self.__dict__)
 
@@ -388,7 +437,8 @@ class JavaWrapperGenerator(object):
 
     def clear(self):
         self.namespaces = ["cv"]
-        self.classes = { "Mat" : ClassInfo([ 'class Mat', '', [], [] ], self.namespaces) }
+        classinfo_Mat = ClassInfo([ 'class cv.Mat', '', [], [] ], self.namespaces)
+        self.classes = { "Mat" : classinfo_Mat }
         self.module = ""
         self.Module = ""
         self.ported_func_list = []
@@ -411,7 +461,7 @@ class JavaWrapperGenerator(object):
         type_dict.setdefault(name, {}).update(
             { "j_type" : classinfo.jname,
               "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
-              "jni_name" : "(*("+classinfo.fullName(isCPP=True)+"*)%(n)s_nativeObj)", "jni_type" : "jlong",
+              "jni_name" : "(*("+classinfo.fullNameCPP()+"*)%(n)s_nativeObj)", "jni_type" : "jlong",
               "suffix" : "J",
               "j_import" : "org.opencv.%s.%s" % (self.module, classinfo.jname)
             }
@@ -419,7 +469,7 @@ class JavaWrapperGenerator(object):
         type_dict.setdefault(name+'*', {}).update(
             { "j_type" : classinfo.jname,
               "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
-              "jni_name" : "("+classinfo.fullName(isCPP=True)+"*)%(n)s_nativeObj", "jni_type" : "jlong",
+              "jni_name" : "("+classinfo.fullNameCPP()+"*)%(n)s_nativeObj", "jni_type" : "jlong",
               "suffix" : "J",
               "j_import" : "org.opencv.%s.%s" % (self.module, classinfo.jname)
             }
@@ -446,7 +496,7 @@ class JavaWrapperGenerator(object):
         type_dict.setdefault("Ptr_"+name, {}).update(
             { "j_type" : classinfo.jname,
               "jn_type" : "long", "jn_args" : (("__int64", ".getNativeObjAddr()"),),
-              "jni_name" : "*((Ptr<"+classinfo.fullName(isCPP=True)+">*)%(n)s_nativeObj)", "jni_type" : "jlong",
+              "jni_name" : "*((Ptr<"+classinfo.fullNameCPP()+">*)%(n)s_nativeObj)", "jni_type" : "jlong",
               "suffix" : "J",
               "j_import" : "org.opencv.%s.%s" % (self.module, classinfo.jname)
             }
@@ -489,14 +539,15 @@ class JavaWrapperGenerator(object):
     def add_func(self, decl):
         fi = FuncInfo(decl, namespaces=self.namespaces)
         classname = fi.classname or self.Module
+        class_symbol_id = classname if self.isWrapped(classname) else fi.classpath.replace('.', '_') #('.'.join([fi.namespace, fi.classpath])[3:])
         if classname in class_ignore_list:
             logging.info('ignored: %s', fi)
         elif classname in ManualFuncs and fi.jname in ManualFuncs[classname]:
             logging.info('manual: %s', fi)
-        elif not self.isWrapped(classname):
+        elif not self.isWrapped(class_symbol_id):
             logging.warning('not found: %s', fi)
         else:
-            self.getClass(classname).addMethod(fi)
+            self.getClass(class_symbol_id).addMethod(fi)
             logging.info('ok: %s', fi)
             # calc args with def val
             cnt = len([a for a in fi.args if a.defval])
@@ -521,7 +572,7 @@ class JavaWrapperGenerator(object):
         # TODO: support UMat versions of declarations (implement UMat-wrapper for Java)
         parser = hdr_parser.CppHeaderParser(generate_umat_decls=False)
 
-        self.add_class( ['class ' + self.Module, '', [], []] ) # [ 'class/struct cname', ':bases', [modlist] [props] ]
+        self.add_class( ['class cv.' + self.Module, '', [], []] ) # [ 'class/struct cname', ':bases', [modlist] [props] ]
 
         # scan the headers and build more descriptive maps of classes, consts, functions
         includes = []
@@ -582,9 +633,9 @@ class JavaWrapperGenerator(object):
             report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i]))
         return report.getvalue()
 
-    def fullTypeName(self, t):
+    def fullTypeNameCPP(self, t):
         if self.isWrapped(t):
-            return self.getClass(t).fullName(isCPP=True)
+            return self.getClass(t).fullNameCPP()
         else:
             return cast_from(t)
 
@@ -897,7 +948,7 @@ class JavaWrapperGenerator(object):
                 default = ""
             elif not fi.ctype: # c-tor
                 if self.isSmartClass(ci):
-                    ret = "return (jlong)(new Ptr<%(ctype)s>(_retval_));" % { 'ctype': fi.fullClass(isCPP=True) }
+                    ret = "return (jlong)(new Ptr<%(ctype)s>(_retval_));" % { 'ctype': fi.fullClassCPP() }
                 else:
                     ret = "return (jlong) _retval_;"
             elif "v_type" in type_dict[fi.ctype]: # c-tor
@@ -907,9 +958,9 @@ class JavaWrapperGenerator(object):
                 ret = "return env->NewStringUTF(_retval_.c_str());"
                 default = 'return env->NewStringUTF("");'
             elif self.isWrapped(fi.ctype): # wrapped class:
-                ret = "return (jlong) new %s(_retval_);" % self.fullTypeName(fi.ctype)
+                ret = "return (jlong) new %s(_retval_);" % self.fullTypeNameCPP(fi.ctype)
             elif fi.ctype.startswith('Ptr_'):
-                c_prologue.append("typedef Ptr<%s> %s;" % (self.fullTypeName(fi.ctype[4:]), fi.ctype))
+                c_prologue.append("typedef Ptr<%s> %s;" % (self.fullTypeNameCPP(fi.ctype[4:]), fi.ctype))
                 ret = "return (jlong)(new %(ctype)s(_retval_));" % { 'ctype':fi.ctype }
             elif self.isWrapped(ret_type): # pointer to wrapped class:
                 ret = "return (jlong) _retval_;"
@@ -924,12 +975,12 @@ class JavaWrapperGenerator(object):
                 else:
                     name = prop_name + ";//"
 
-            cvname = fi.fullName(isCPP=True)
-            retval = self.fullTypeName(fi.ctype) + " _retval_ = " if ret else "return "
+            cvname = fi.fullNameCPP()
+            retval = self.fullTypeNameCPP(fi.ctype) + " _retval_ = " if ret else "return "
             if fi.ctype == "void":
                 retval = ""
             elif fi.ctype == "String":
-                retval = "cv::" + self.fullTypeName(fi.ctype) + " _retval_ = "
+                retval = "cv::" + self.fullTypeNameCPP(fi.ctype) + " _retval_ = "
             elif fi.ctype == "string":
                 retval = "std::string _retval_ = "
             elif "v_type" in type_dict[fi.ctype]: # vector is returned
@@ -945,18 +996,18 @@ class JavaWrapperGenerator(object):
             if fi.classname:
                 if not fi.ctype: # c-tor
                     if self.isSmartClass(ci):
-                        retval = self.smartWrap(ci, fi.fullClass(isCPP=True)) + " _retval_ = "
-                        cvname = "makePtr<" + fi.fullClass(isCPP=True) +">"
+                        retval = self.smartWrap(ci, fi.fullClassCPP()) + " _retval_ = "
+                        cvname = "makePtr<" + fi.fullClassCPP() +">"
                     else:
-                        retval = fi.fullClass(isCPP=True) + "* _retval_ = "
-                        cvname = "new " + fi.fullClass(isCPP=True)
+                        retval = fi.fullClassCPP() + "* _retval_ = "
+                        cvname = "new " + fi.fullClassCPP()
                 elif fi.static:
-                    cvname = fi.fullName(isCPP=True)
+                    cvname = fi.fullNameCPP()
                 else:
                     cvname = ("me->" if  not self.isSmartClass(ci) else "(*me)->") + name
                     c_prologue.append(
                         "%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL"
-                            % { "cls" : self.smartWrap(ci, fi.fullClass(isCPP=True))}
+                            % { "cls" : self.smartWrap(ci, fi.fullClassCPP())}
                     )
             cvargs = []
             for a in args:
@@ -981,13 +1032,12 @@ class JavaWrapperGenerator(object):
             clazz = ci.jname
             cpp_code.write ( Template(
 """
-${namespace}
-
 JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname ($argst);
 
 JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
   ($args)
 {
+    ${namespace}
     static const char method_name[] = "$module::$fname()";
     try {
         LOGD("%s", method_name);$prologue
@@ -1014,7 +1064,7 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
         cvargs = " " + ", ".join(cvargs) + " " if cvargs else "",
         default = "\n    " + default if default else "",
         retval = retval,
-        namespace = ('using namespace ' + ci.namespace.replace('.', '::') + ';') if ci.namespace else ''
+        namespace = ('using namespace ' + ci.namespace.replace('.', '::') + ';') if ci.namespace and ci.namespace != 'cv' else ''
     ) )
 
             # adding method signature to dictionary
@@ -1081,13 +1131,14 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
             self.gen_func(ci, fi)
         # props
         for pi in ci.props:
+            basename = ci.fullNameOrigin()
             # getter
-            getter_name = ci.fullName() + ".get_" + pi.name
+            getter_name = basename + ".get_" + pi.name
             fi = FuncInfo( [getter_name, pi.ctype, [], []], self.namespaces ) # [ funcname, return_ctype, [modifiers], [args] ]
             self.gen_func(ci, fi, pi.name)
             if pi.rw:
                 #setter
-                setter_name = ci.fullName() + ".set_" + pi.name
+                setter_name = basename + ".set_" + pi.name
                 fi = FuncInfo( [ setter_name, "void", [], [ [pi.ctype, pi.name, "", [], ""] ] ], self.namespaces)
                 self.gen_func(ci, fi, pi.name)
 
@@ -1131,7 +1182,7 @@ JNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete
     delete (%(cls)s*) self;
 }
 
-""" % {"module" : module.replace('_', '_1'), "cls" : self.smartWrap(ci, ci.fullName(isCPP=True)), "j_cls" : ci.jname.replace('_', '_1')}
+""" % {"module" : module.replace('_', '_1'), "cls" : self.smartWrap(ci, ci.fullNameCPP()), "j_cls" : ci.jname.replace('_', '_1')}
             )
 
     def getClass(self, classname):
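
The combined effect of the classpath and namespace prefixing is easiest to see on concrete symbols. A condensed sketch of the naming rules introduced above (the namespaces_dict entry is hypothetical):

```python
namespaces_dict = {'cv.ml': 'ml'}  # hypothetical prefix mapping

def flatten(namespace, classpath, name):
    # inner classes get their enclosing classpath as a prefix,
    # then a non-empty namespace prefix is applied on top
    if classpath:
        name = '%s_%s' % (classpath.replace('.', '_'), name)
    prefix = namespaces_dict.get(namespace)
    if prefix:
        name = '%s_%s' % (prefix, name)
    return name

print(flatten('cv', 'SimpleBlobDetector', 'Params'))  # SimpleBlobDetector_Params
print(flatten('cv.ml', '', 'SVM'))                    # ml_SVM
```
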

From 05c011e842195272f6147e17e5eb734d7d034ba4 Mon Sep 17 00:00:00 2001
From: Aitik Gupta <aitikgupta@gmail.com>
Date: Sat, 14 Nov 2020 08:17:44 +0530
Subject: [PATCH 090/152] Small typo-fix

---
 .../video-input-psnr-ssim/video_input_psnr_ssim.markdown        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
index 08cc596964..ffd4d0213e 100644
--- a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
+++ b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
@@ -131,7 +131,7 @@ For properties you can read and change look into the documentation of the @ref c
 We want to check just how imperceptible our video converting operation went, therefore we need a
 system to check frame by frame the similarity or differences. The most common algorithm used for
 this is the PSNR (aka **Peak signal-to-noise ratio**). The simplest definition of this starts out
-from the *mean squad error*. Let there be two images: I1 and I2; with a two dimensional size i and
+from the *mean squared error*. Let there be two images: I1 and I2; with a two dimensional size i and
 j, composed of c number of channels.
 
 \f[MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}\f]
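
For reference, the PSNR the tutorial builds on top of this is 10*log10(MAX_I^2/MSE), with MAX_I = 255 for 8-bit images. A small numpy sketch of both quantities:

```python
import numpy as np

def mse(i1, i2):
    # mean squared error over all c*i*j values, per the formula above
    d = i1.astype(np.float64) - i2.astype(np.float64)
    return (d ** 2).mean()

def psnr(i1, i2, max_i=255.0):
    m = mse(i1, i2)
    # identical images give MSE == 0, where PSNR is undefined (infinite)
    return float('inf') if m == 0 else 10.0 * np.log10(max_i ** 2 / m)
```
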

From 79a82013ad2af04269891ce15281c9fdc31d3f66 Mon Sep 17 00:00:00 2001
From: Jose Quaresma <quaresma.jose@gmail.com>
Date: Sun, 1 Nov 2020 12:29:56 +0000
Subject: [PATCH 091/152] samples: cmake: digits needs opencv_dnn module to
 build

Introduced in commit 397ba2d9aafb5312e777ce2f886d7b568109e931:
add OpenCV sample for digit and text recognition, and provide multiple OCR models.
https://github.com/opencv/opencv/commit/397ba2d9aafb5312e777ce2f886d7b568109e931

Signed-off-by: Jose Quaresma <quaresma.jose@gmail.com>
---
 samples/cpp/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt
index 617629df2e..14ab6141df 100644
--- a/samples/cpp/CMakeLists.txt
+++ b/samples/cpp/CMakeLists.txt
@@ -14,6 +14,7 @@ set(OPENCV_CPP_SAMPLES_REQUIRED_DEPS
   opencv_features2d
   opencv_calib3d
   opencv_stitching
+  opencv_dnn
   ${OPENCV_MODULES_PUBLIC}
   ${OpenCV_LIB_COMPONENTS})
 ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})

From 2e7ef6f4e80f11975086cdf57240ae00b1581a3d Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 14 Nov 2020 19:10:43 +0000
Subject: [PATCH 092/152] objc: fix std::string handling

- arg types may be passed as string instead of std::string
---
 modules/core/misc/objc/gen_dict.json | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/modules/core/misc/objc/gen_dict.json b/modules/core/misc/objc/gen_dict.json
index 4cb8133dc4..05082ce1ca 100644
--- a/modules/core/misc/objc/gen_dict.json
+++ b/modules/core/misc/objc/gen_dict.json
@@ -215,6 +215,13 @@
             "from_cpp": "[NSString stringWithUTF8String:%(n)s.c_str()]",
             "swift_type": "String"
         },
+        "string": {
+            "cast_to": "std::string",
+            "objc_type": "NSString*",
+            "to_cpp": "std::string(%(n)s.UTF8String)",
+            "from_cpp": "[NSString stringWithUTF8String:%(n)s.c_str()]",
+            "swift_type": "String"
+        },
         "TermCriteria": {
             "objc_type": "TermCriteria*",
             "to_cpp": "%(n)s.nativeRef",

From 9e84b860f233f3acad6de325f214b6bd62fc5a3f Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 14 Nov 2020 07:16:13 +0000
Subject: [PATCH 093/152] cmake: update objc generator scripts

- allow running the generator without a hard requirement to build the 'objc' module
---
 modules/objc/CMakeLists.txt                   | 15 ++-
 modules/objc/generator/CMakeLists.txt         | 95 +++++++++++++------
 modules/objc/generator/gen_objc.py            | 19 ++--
 .../generator/templates/cmakelists.template   |  2 +-
 platforms/ios/build_framework.py              | 10 +-
 platforms/osx/build_framework.py              |  4 +
 6 files changed, 106 insertions(+), 39 deletions(-)

diff --git a/modules/objc/CMakeLists.txt b/modules/objc/CMakeLists.txt
index d4ea6e3563..8cf24de56e 100644
--- a/modules/objc/CMakeLists.txt
+++ b/modules/objc/CMakeLists.txt
@@ -1,6 +1,19 @@
-if(OPENCV_INITIAL_PASS AND APPLE_FRAMEWORK AND NOT (BUILD_opencv_objc STREQUAL "OFF"))
+if(OPENCV_INITIAL_PASS)
   # generator for Objective-C source code and documentation signatures
   add_subdirectory(generator)
 endif()
 
+if(NOT APPLE_FRAMEWORK)
+  return()
+endif()
+
+set(the_description "The Objective-C bindings")
+ocv_add_module(objc BINDINGS opencv_core opencv_imgproc PRIVATE_REQUIRED opencv_objc_bindings_generator)
+
+add_custom_target(${the_module}
+    ALL
+    COMMENT "Objective-C framework"
+)
+add_dependencies(${the_module} gen_opencv_objc_source)
+
 #include(${CMAKE_CURRENT_SOURCE_DIR}/common.cmake)
diff --git a/modules/objc/generator/CMakeLists.txt b/modules/objc/generator/CMakeLists.txt
index dd6f58db6d..b3cbbd3f5f 100644
--- a/modules/objc/generator/CMakeLists.txt
+++ b/modules/objc/generator/CMakeLists.txt
@@ -1,16 +1,18 @@
-set(MODULE_NAME "objc")
+set(MODULE_NAME "objc_bindings_generator")
 set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
 ocv_add_module(${MODULE_NAME} INTERNAL opencv_core opencv_imgproc)
 
-set(OPENCV_OBJC_SIGNATURES_FILE "${CMAKE_CURRENT_BINARY_DIR}/opencv_objc_signatures.json" CACHE INTERNAL "")
+#set(OPENCV_OBJC_SIGNATURES_FILE "${CMAKE_CURRENT_BINARY_DIR}/opencv_objc_signatures.json" CACHE INTERNAL "")
 set(OPENCV_OBJC_BINDINGS_DIR "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
 
-file(REMOVE_RECURSE "${OPENCV_OBJC_BINDINGS_DIR}/gen")
-file(REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source")  # force re-run after CMake
+file(REMOVE_RECURSE "${OPENCV_OBJC_BINDINGS_DIR}/osx")
+file(REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source_osx")  # force re-run after CMake
+file(REMOVE_RECURSE "${OPENCV_OBJC_BINDINGS_DIR}/ios")
+file(REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source_ios")  # force re-run after CMake
 
 # This file is included from a subdirectory
 set(OBJC_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/..")
-include(${OBJC_SOURCE_DIR}/common.cmake)
+include(${OBJC_SOURCE_DIR}/common.cmake)  # fill OPENCV_OBJC_MODULES
 
 # common files
 file(GLOB_RECURSE deps "${CMAKE_CURRENT_SOURCE_DIR}/templates/*")
@@ -30,15 +32,21 @@ foreach(m ${OPENCV_OBJC_MODULES})
   set(__modules_config "${__modules_config}    { \"name\": \"${m_}\", \"location\": \"${rel_path}\" }")
 endforeach(m)
 
+if(HAVE_opencv_objc)
+  set(__objc_build_dir "\"objc_build_dir\": \"${CMAKE_CURRENT_BINARY_DIR}/../objc\",")
+endif()
+
 set(CONFIG_FILE "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json")
 set(__config_str
 "{
   \"rootdir\": \"${OpenCV_SOURCE_DIR}\",
+  ${__objc_build_dir}
   \"modules\": [
 ${__modules_config}
   ]
 }
 ")
+#TODO: ocv_update_file("${CONFIG_FILE}" "${__config_str}" ON_CHANGE_REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source")
 if(EXISTS "${CONFIG_FILE}")
   file(READ "${CONFIG_FILE}" __content)
 else()
@@ -52,33 +60,66 @@ unset(__config_str)
 
 set(objc_generated_files
     # "${OPENCV_OBJC_SIGNATURES_FILE}"
-    "${OPENCV_DEPHELPER}/gen_opencv_objc_source"
 )
 
 string(REPLACE "opencv_" "" MODULES "${OPENCV_OBJC_MODULES}")
 
-if(IOS)
-  set(TARGET "ios")
-else()
-  set(TARGET "osx")
+if(NOT DEFINED OPENCV_OBJC_TARGET AND APPLE_FRAMEWORK)
+  if(IOS)
+    set(OPENCV_OBJC_TARGET "ios")
+  else()
+    set(OPENCV_OBJC_TARGET "osx")
+  endif()
 endif()
 
-add_custom_command(
-    OUTPUT ${objc_generated_files}
-    COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${OBJC_SOURCE_DIR}/generator/gen_objc.py" -p "${OBJC_SOURCE_DIR}/../python/src2/gen2.py" -c "${CONFIG_FILE}" -t "${TARGET}" -f "${FRAMEWORK_NAME}"
-    COMMAND ${CMAKE_COMMAND} -E touch "${OPENCV_DEPHELPER}/gen_opencv_objc_source"
-    WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
-    DEPENDS "${OBJC_SOURCE_DIR}/generator/gen_objc.py"
-            "${OBJC_SOURCE_DIR}/../python/src2/gen2.py"
-            "${OBJC_SOURCE_DIR}/../python/src2/hdr_parser.py"
-            # don't, result of file(WRITE): "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json"
-            ${deps}
-            # not allowed (file(WRITE) result): "${CONFIG_FILE}"
-    COMMENT "Generate files for Objective-C bindings"
-)
+if(NOT DEFINED OPENCV_OBJC_FRAMEWORK_NAME)
+  if(DEFINED FRAMEWORK_NAME)
+    set(OPENCV_OBJC_FRAMEWORK_NAME "${FRAMEWORK_NAME}")
+  else()
+    set(OPENCV_OBJC_FRAMEWORK_NAME "opencv2")
+  endif()
+endif()
+
+set(objc_generated_targets "")
+
+macro(ocv_add_objc_generated_target TARGET)
+  set(objc_${TARGET}_generated_output_dependecy "${OPENCV_DEPHELPER}/gen_opencv_objc_source_${TARGET}")
+  file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}")
+  add_custom_command(
+      OUTPUT ${objc_generated_files} "${objc_${TARGET}_generated_output_dependecy}"
+      COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${OBJC_SOURCE_DIR}/generator/gen_objc.py"
+              -p "${OBJC_SOURCE_DIR}/../python/src2/gen2.py"
+              -c "${CONFIG_FILE}"
+              -t "${TARGET}"
+              -f "${OPENCV_OBJC_FRAMEWORK_NAME}"
+      COMMAND ${CMAKE_COMMAND} -E touch "${objc_${TARGET}_generated_output_dependecy}"
+      WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}"
+      DEPENDS "${OpenCV_SOURCE_DIR}/modules/objc/generator/gen_objc.py"
+              "${OpenCV_SOURCE_DIR}/modules/python/src2/gen2.py"
+              "${OpenCV_SOURCE_DIR}/modules/python/src2/hdr_parser.py"
+              # don't, result of file(WRITE): "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json"
+              ${deps}
+              # not allowed (file(WRITE) result): "${CONFIG_FILE}"
+      COMMENT "Generate files for Objective-C bindings (${TARGET})"
+  )
+  add_custom_target(gen_opencv_objc_source_${TARGET}
+      # excluded from all: ALL
+      DEPENDS ${objc_generated_files} ${objc_${TARGET}_generated_output_dependecy}
+      SOURCES "${OBJC_SOURCE_DIR}/generator/gen_objc.py"
+              "${OBJC_SOURCE_DIR}/generator/templates/cmakelists.template"
+              "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json"
+  )
+  list(APPEND objc_generated_targets gen_opencv_objc_source_${TARGET})
+endmacro()
+
+if(OPENCV_OBJC_TARGET)
+  ocv_add_objc_generated_target(${OPENCV_OBJC_TARGET})
+else()
+  ocv_add_objc_generated_target(osx)
+  ocv_add_objc_generated_target(ios)
+endif()
 
-add_custom_target(gen_opencv_objc_source ALL DEPENDS ${objc_generated_files}
-    SOURCES "${OBJC_SOURCE_DIR}/generator/gen_objc.py"
-            "${OBJC_SOURCE_DIR}/generator/templates/cmakelists.template"
-            "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json"
+add_custom_target(gen_opencv_objc_source
+    # excluded from all: ALL
+    DEPENDS ${objc_generated_targets}
 )
diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index e6637a7c4c..c20251d261 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -1342,7 +1342,7 @@ typedef NS_ENUM(int, {2}) {{
             return "Ptr<" + fullname + ">"
         return fullname
 
-    def finalize(self, output_objc_path):
+    def finalize(self, objc_target, output_objc_path, output_objc_build_path):
         opencv_header_file = os.path.join(output_objc_path, framework_name + ".h")
         opencv_header = "#import <Foundation/Foundation.h>\n\n"
         opencv_header += "// ! Project version number\nFOUNDATION_EXPORT double " + framework_name + "VersionNumber;\n\n"
@@ -1356,15 +1356,15 @@ typedef NS_ENUM(int, {2}) {{
         opencv_modulemap += "\n  export *\n  module * {export *}\n}\n"
         self.save(opencv_modulemap_file, opencv_modulemap)
         cmakelist_template = read_contents(os.path.join(SCRIPT_DIR, 'templates/cmakelists.template'))
-        cmakelist = Template(cmakelist_template).substitute(modules = ";".join(modules), framework = framework_name)
+        cmakelist = Template(cmakelist_template).substitute(modules = ";".join(modules), framework = framework_name, objc_target=objc_target)
         self.save(os.path.join(dstdir, "CMakeLists.txt"), cmakelist)
-        mkdir_p("./framework_build")
-        mkdir_p("./test_build")
-        mkdir_p("./doc_build")
+        mkdir_p(os.path.join(output_objc_build_path, "framework_build"))
+        mkdir_p(os.path.join(output_objc_build_path, "test_build"))
+        mkdir_p(os.path.join(output_objc_build_path, "doc_build"))
         with open(os.path.join(SCRIPT_DIR, '../doc/README.md')) as readme_in:
             readme_body = readme_in.read()
         readme_body += "\n\n\n##Modules\n\n" + ", ".join(["`" + m.capitalize() + "`" for m in modules])
-        with open("./doc_build/README.md", "w") as readme_out:
+        with open(os.path.join(output_objc_build_path, "doc_build/README.md"), "w") as readme_out:
             readme_out.write(readme_body)
         if framework_name != "OpenCV":
             for dirname, dirs, files in os.walk(os.path.join(testdir, "test")):
@@ -1513,6 +1513,11 @@ if __name__ == "__main__":
         config = json.load(f)
 
     ROOT_DIR = config['rootdir']; assert os.path.exists(ROOT_DIR)
+    if 'objc_build_dir' in config:
+        objc_build_dir = config['objc_build_dir']
+        assert os.path.exists(objc_build_dir), objc_build_dir
+    else:
+        objc_build_dir = os.getcwd()
 
     dstdir = "./gen"
     testdir = "./test"
@@ -1608,6 +1613,6 @@ if __name__ == "__main__":
             generator.gen(srcfiles, module, dstdir, objc_base_path, common_headers, manual_classes)
         else:
             logging.info("No generated code for module: %s", module)
-    generator.finalize(objc_base_path)
+    generator.finalize(args.target, objc_base_path, objc_build_dir)
 
     print('Generated files: %d (updated %d)' % (total_files, updated_files))
diff --git a/modules/objc/generator/templates/cmakelists.template b/modules/objc/generator/templates/cmakelists.template
index 2cfc2474cd..67cacbbfa4 100644
--- a/modules/objc/generator/templates/cmakelists.template
+++ b/modules/objc/generator/templates/cmakelists.template
@@ -24,7 +24,7 @@ target_include_directories($framework PRIVATE "$${BUILD_ROOT}")
 target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include")
 target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include/opencv2")
 foreach(m $${MODULES})
-  target_include_directories($framework PRIVATE "$${BUILD_ROOT}/modules/objc/gen/objc/$${m}")
+  target_include_directories($framework PRIVATE "$${BUILD_ROOT}/modules/objc_bindings_generator/$objc_target/gen/objc/$${m}")
 endforeach()
 
 install(TARGETS $framework LIBRARY DESTINATION lib)
diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py
index e759072825..5965cd0a96 100755
--- a/platforms/ios/build_framework.py
+++ b/platforms/ios/build_framework.py
@@ -128,10 +128,10 @@ class Builder:
         self.makeFramework(outdir, dirs)
         if self.build_objc_wrapper:
             if self.run_tests:
-                check_call([sys.argv[0].replace("build_framework", "run_tests"), "--framework_dir=" + outdir, "--framework_name=" + self.framework_name, dirs[0] +  "/modules/objc/test"])
+                check_call([sys.argv[0].replace("build_framework", "run_tests"), "--framework_dir=" + outdir, "--framework_name=" + self.framework_name, dirs[0] +  "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget())])
             else:
                 print("To run tests call:")
-                print(sys.argv[0].replace("build_framework", "run_tests") + " --framework_dir=" + outdir + " --framework_name=" + self.framework_name + " " + dirs[0] +  "/modules/objc/test")
+                print(sys.argv[0].replace("build_framework", "run_tests") + " --framework_dir=" + outdir + " --framework_name=" + self.framework_name + " " + dirs[0] +  "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget()))
             if self.build_docs:
                 check_call([sys.argv[0].replace("build_framework", "build_docs"), dirs[0] + "/modules/objc/framework_build"])
                 doc_path = os.path.join(dirs[0], "modules", "objc", "doc_build", "docs")
@@ -216,6 +216,10 @@ class Builder:
     def getInfoPlist(self, builddirs):
         return os.path.join(builddirs[0], "ios", "Info.plist")
 
+    def getObjcTarget(self):
+        # Obj-C generation target
+        return 'ios'
+
     def makeCMakeCmd(self, arch, target, dir, cmakeargs = []):
         toolchain = self.getToolchain(arch, target)
         cmakecmd = self.getCMakeArgs(arch, target) + \
@@ -255,7 +259,7 @@ class Builder:
         execute(buildcmd + ["-target", "ALL_BUILD", "build"], cwd = builddir)
         execute(["cmake", "-DBUILD_TYPE=%s" % self.getConfiguration(), "-P", "cmake_install.cmake"], cwd = builddir)
         if self.build_objc_wrapper:
-            cmakecmd = self.makeCMakeCmd(arch, target, builddir + "/modules/objc/gen", cmakeargs)
+            cmakecmd = self.makeCMakeCmd(arch, target, builddir + "/modules/objc_bindings_generator/{}/gen".format(self.getObjcTarget()), cmakeargs)
             cmakecmd.append("-DBUILD_ROOT=%s" % builddir)
             cmakecmd.append("-DCMAKE_INSTALL_NAME_TOOL=install_name_tool")
             cmakecmd.append("--no-warn-unused-cli")
diff --git a/platforms/osx/build_framework.py b/platforms/osx/build_framework.py
index ccca582615..de13e665fa 100755
--- a/platforms/osx/build_framework.py
+++ b/platforms/osx/build_framework.py
@@ -14,6 +14,10 @@ MACOSX_DEPLOYMENT_TARGET='10.12'  # default, can be changed via command line opt
 
 class OSXBuilder(Builder):
 
+    def getObjcTarget(self):
+        # Obj-C generation target
+        return 'osx'
+
     def getToolchain(self, arch, target):
         return None
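
Both builders now derive the per-target bindings directory the same way. A tiny sketch of the path scheme used above (the build directory is illustrative):

```python
def bindings_dir(builddir, objc_target):
    # objc_target comes from Builder.getObjcTarget(): 'ios' or 'osx'
    return "{}/modules/objc_bindings_generator/{}/gen".format(builddir, objc_target)

print(bindings_dir("/tmp/ios_build", "ios"))
# /tmp/ios_build/modules/objc_bindings_generator/ios/gen
```
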
 

From f58f36dc88166060a1b1a65f5f4debeb379061f9 Mon Sep 17 00:00:00 2001
From: Anton Veselskyi <anton.veselskyi@gmail.com>
Date: Sat, 14 Nov 2020 23:29:51 +0200
Subject: [PATCH 094/152] Changed sample code, fixes #18807

---
 samples/cpp/stitching.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/cpp/stitching.cpp b/samples/cpp/stitching.cpp
index 5bf34f45b1..7de0536452 100644
--- a/samples/cpp/stitching.cpp
+++ b/samples/cpp/stitching.cpp
@@ -52,7 +52,7 @@ void printUsage(char** argv)
          "      for stitching materials under affine transformation, such as scans.\n"
          "  --output <result_img>\n"
          "      The default is 'result.jpg'.\n\n"
-         "Example usage :\n" << argv[0] << " --d3 --try_use_gpu yes --mode scans img1.jpg img2.jpg\n";
+         "Example usage :\n" << argv[0] << " --d3 --mode scans img1.jpg img2.jpg\n";
 }
 
 

From 90bea15e353db50a6e183c2b9b347b2158cc2041 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 14 Nov 2020 19:05:10 +0000
Subject: [PATCH 095/152] objc: robust code generation

- produce the same generated code from Python 2 and Python 3
- avoid randomized output due to unpredictable dict/set order
---
 modules/objc/generator/gen_objc.py | 49 ++++++++++++++++--------------
 1 file changed, 26 insertions(+), 23 deletions(-)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index e6637a7c4c..1352562c3a 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -111,7 +111,7 @@ T_OBJC_MODULE_BODY = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_modu
 
 class GeneralInfo():
     def __init__(self, type, decl, namespaces):
-        self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
+        self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
 
         # parse doxygen comments
         self.params={}
@@ -149,13 +149,13 @@ class GeneralInfo():
                 break
         pieces = localName.split(".")
         if len(pieces) > 2: # <class>.<class>.<class>.<name>
-            return spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
+            return name, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
         elif len(pieces) == 2: # <class>.<name>
-            return spaceName, pieces[0], pieces[0], pieces[1]
+            return name, spaceName, pieces[0], pieces[0], pieces[1]
         elif len(pieces) == 1: # <name>
-            return spaceName, "", "", pieces[0]
+            return name, spaceName, "", "", pieces[0]
         else:
-            return spaceName, "", "" # error?!
+            return name, spaceName, "", "", "" # error?!
 
     def fullName(self, isCPP=False):
         result = ".".join([self.fullClass(), self.name])
@@ -271,7 +271,7 @@ class ClassInfo(GeneralInfo):
 
     def getForwardDeclarations(self, module):
         enum_decl = [x for x in self.imports if self.isEnum(x) and type_dict[x]["import_module"] != module]
-        enum_imports = list(set([type_dict[m]["import_module"] for m in enum_decl]))
+        enum_imports = sorted(list(set([type_dict[m]["import_module"] for m in enum_decl])))
         class_decl = [x for x in self.imports if not self.isEnum(x)]
         return ["#import \"%s.h\"" % c for c in enum_imports] + [""] + ["@class %s;" % c for c in sorted(class_decl)]
 
@@ -293,8 +293,8 @@ class ClassInfo(GeneralInfo):
 
     def getAllMethods(self):
         result = []
-        result.extend([fi for fi in sorted(self.methods) if fi.isconstructor])
-        result.extend([fi for fi in sorted(self.methods) if not fi.isconstructor])
+        result += [fi for fi in self.methods if fi.isconstructor]
+        result += [fi for fi in self.methods if not fi.isconstructor]
         return result
 
     def addMethod(self, fi):
@@ -677,7 +677,7 @@ class ObjectiveCWrapperGenerator(object):
         self.clear()
 
     def clear(self):
-        self.namespaces = set(["cv"])
+        self.namespaces = ["cv"]
         mat_class_info = ClassInfo([ 'class Mat', '', [], [] ], self.namespaces)
         mat_class_info.namespace = "cv"
         self.classes = { "Mat" : mat_class_info }
@@ -846,9 +846,9 @@ class ObjectiveCWrapperGenerator(object):
             includes.append('#include "' + hdr + '"')
         for hdr in srcfiles:
             decls = parser.parse(hdr)
-            self.namespaces = parser.namespaces
+            self.namespaces = sorted(parser.namespaces)
             logging.info("\n\n===== Header: %s =====", hdr)
-            logging.info("Namespaces: %s", parser.namespaces)
+            logging.info("Namespaces: %s", sorted(parser.namespaces))
             if decls:
                 includes.append('#include "' + hdr + '"')
             else:
@@ -872,7 +872,7 @@ class ObjectiveCWrapperGenerator(object):
         mkdir_p(package_path)
         extension_file = "%s/%s/%sExt.swift" % (output_objc_path, module, self.Module)
 
-        for ci in list(self.classes.values()):
+        for ci in sorted(self.classes.values(), key=lambda x: x.symbol_id):
             if ci.name == "Mat":
                 continue
             ci.initCodeStreams(self.Module)
@@ -898,7 +898,7 @@ class ObjectiveCWrapperGenerator(object):
         report.write("\n".join(self.ported_func_list))
         report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % (len(self.skipped_func_list), total_count))
         report.write("".join(self.skipped_func_list))
-        for i in list(self.def_args_hist.keys()):
+        for i in sorted(self.def_args_hist.keys()):
             report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i]))
         return report.getvalue()
 
@@ -1211,17 +1211,18 @@ $unrefined_call$epilogue$ret
         if ci.consts:
             enumTypes = set([c.enumType for c in ci.consts])
             grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes}
-            for typeName, consts in list(grouped_consts.items()):
+            for typeName in sorted(grouped_consts.keys(), key=lambda x: str(x) if x is not None else ""):
+                consts = grouped_consts[typeName]
                 logging.info("%s", consts)
                 if typeName:
-                    typeName = typeName.rsplit(".", 1)[-1]
+                    typeNameShort = typeName.rsplit(".", 1)[-1]
                     if ci.cname in enum_fix:
-                        typeName = enum_fix[ci.cname].get(typeName, typeName)
+                        typeNameShort = enum_fix[ci.cname].get(typeNameShort, typeNameShort)
 
                     ci.enum_declarations.write("""
-// C++: enum {1}
-typedef NS_ENUM(int, {2}) {{
-    {0}\n}};\n\n""".format(",\n    ".join(["%s = %s" % (c.name, c.value) for c in consts]), typeName, typeName)
+// C++: enum {1} ({2})
+typedef NS_ENUM(int, {1}) {{
+    {0}\n}};\n\n""".format(",\n    ".join(["%s = %s" % (c.name, c.value) for c in consts]), typeNameShort, typeName)
                     )
                 else:
                     if not wrote_consts_pragma:
@@ -1303,9 +1304,11 @@ typedef NS_ENUM(int, {2}) {{
 
         # manual ports
         if ci.name in ManualFuncs:
-            for func in list(ManualFuncs[ci.name].keys()):
-                ci.method_declarations.write( "\n".join(ManualFuncs[ci.name][func]["declaration"]) )
-                ci.method_implementations.write( "\n".join(ManualFuncs[ci.name][func]["implementation"]) )
+            for func in sorted(ManualFuncs[ci.name].keys()):
+                logging.info("manual function: %s", func)
+                fn = ManualFuncs[ci.name][func]
+                ci.method_declarations.write( "\n".join(fn["declaration"]) )
+                ci.method_implementations.write( "\n".join(fn["implementation"]) )
 
     def getClass(self, classname):
         return self.classes[classname or self.Module]
@@ -1489,7 +1492,7 @@ if __name__ == "__main__":
     # initialize logger
     logging.basicConfig(filename='gen_objc.log', format=None, filemode='w', level=logging.INFO)
     handler = logging.StreamHandler()
-    handler.setLevel(logging.WARNING)
+    handler.setLevel(os.environ.get('LOG_LEVEL', logging.WARNING))
     logging.getLogger().addHandler(handler)
 
     # parse command line parameters
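
The sorted() calls above matter because dict/set iteration order is not stable across interpreter runs once string hash randomization is active (the default since Python 3.3), so two otherwise identical builds could emit bindings in different orders. A minimal demonstration:

```python
# Run with different hash seeds to see the unsorted order change:
#   PYTHONHASHSEED=1 python demo.py
#   PYTHONHASHSEED=2 python demo.py
names = {'osx', 'ios', 'gen'}
print(list(names))    # order may differ between runs
print(sorted(names))  # always ['gen', 'ios', 'osx']
```
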

From cfbdbffcad2b599b402828d0b1ba93cff9a4790c Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 14 Nov 2020 22:04:57 +0000
Subject: [PATCH 096/152] objc: rework headers import

- 'AdditionalImports' can be removed from misc/objc/gen_dict.json
- avoid using the legacy 'opencv.hpp'
---
 modules/core/misc/objc/common/Converters.h    |  2 +-
 modules/core/misc/objc/common/CvType.h        |  2 +-
 modules/core/misc/objc/common/DMatch.h        |  2 +-
 modules/core/misc/objc/common/Double2.h       |  2 +-
 modules/core/misc/objc/common/Double3.h       |  2 +-
 modules/core/misc/objc/common/Float4.h        |  2 +-
 modules/core/misc/objc/common/Float6.h        |  2 +-
 modules/core/misc/objc/common/Int4.h          |  2 +-
 modules/core/misc/objc/common/KeyPoint.h      |  2 +-
 modules/core/misc/objc/common/Mat.h           |  2 +-
 .../core/misc/objc/common/MinMaxLocResult.h   |  2 +-
 modules/core/misc/objc/common/Point2d.h       |  2 +-
 modules/core/misc/objc/common/Point2f.h       |  2 +-
 modules/core/misc/objc/common/Point2i.h       |  2 +-
 modules/core/misc/objc/common/Point3d.h       |  2 +-
 modules/core/misc/objc/common/Point3f.h       |  2 +-
 modules/core/misc/objc/common/Point3i.h       |  2 +-
 modules/core/misc/objc/common/Range.h         |  2 +-
 modules/core/misc/objc/common/Rect2d.h        |  2 +-
 modules/core/misc/objc/common/Rect2f.h        |  2 +-
 modules/core/misc/objc/common/Rect2i.h        |  2 +-
 modules/core/misc/objc/common/RotatedRect.h   |  2 +-
 modules/core/misc/objc/common/Scalar.h        |  2 +-
 modules/core/misc/objc/common/Size2d.h        |  2 +-
 modules/core/misc/objc/common/Size2f.h        |  2 +-
 modules/core/misc/objc/common/Size2i.h        |  2 +-
 modules/core/misc/objc/common/TermCriteria.h  |  2 +-
 .../imgcodecs/misc/objc/ios/Mat+Converters.h  |  4 +-
 .../misc/objc/macosx/Mat+Converters.h         |  4 +-
 modules/imgproc/misc/objc/common/Moments.h    |  2 +-
 modules/objc/generator/gen_objc.py            | 46 +++++++++++++++----
 .../templates/objc_class_header.template      |  2 +-
 .../templates/objc_module_header.template     |  2 +-
 33 files changed, 70 insertions(+), 44 deletions(-)

diff --git a/modules/core/misc/objc/common/Converters.h b/modules/core/misc/objc/common/Converters.h
index 9a238deb82..29d1b91eb5 100755
--- a/modules/core/misc/objc/common/Converters.h
+++ b/modules/core/misc/objc/common/Converters.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import <opencv2/opencv.hpp>
+#import <opencv2/core.hpp>
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/CvType.h b/modules/core/misc/objc/common/CvType.h
index fb6f86aa48..b1fd71d487 100644
--- a/modules/core/misc/objc/common/CvType.h
+++ b/modules/core/misc/objc/common/CvType.h
@@ -5,7 +5,7 @@
 //
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/DMatch.h b/modules/core/misc/objc/common/DMatch.h
index 51bed493b8..91c2c59bfa 100644
--- a/modules/core/misc/objc/common/DMatch.h
+++ b/modules/core/misc/objc/common/DMatch.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Double2.h b/modules/core/misc/objc/common/Double2.h
index 2162acb6d0..8e46c883d0 100644
--- a/modules/core/misc/objc/common/Double2.h
+++ b/modules/core/misc/objc/common/Double2.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Double3.h b/modules/core/misc/objc/common/Double3.h
index 2aaba9af80..5c741648f7 100644
--- a/modules/core/misc/objc/common/Double3.h
+++ b/modules/core/misc/objc/common/Double3.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Float4.h b/modules/core/misc/objc/common/Float4.h
index 2a89278040..c78e88b72e 100644
--- a/modules/core/misc/objc/common/Float4.h
+++ b/modules/core/misc/objc/common/Float4.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Float6.h b/modules/core/misc/objc/common/Float6.h
index d2ec19a60e..7e09772c5c 100644
--- a/modules/core/misc/objc/common/Float6.h
+++ b/modules/core/misc/objc/common/Float6.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Int4.h b/modules/core/misc/objc/common/Int4.h
index 1a17266572..11cc12db14 100644
--- a/modules/core/misc/objc/common/Int4.h
+++ b/modules/core/misc/objc/common/Int4.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/KeyPoint.h b/modules/core/misc/objc/common/KeyPoint.h
index 547960dc9d..096a1089c9 100644
--- a/modules/core/misc/objc/common/KeyPoint.h
+++ b/modules/core/misc/objc/common/KeyPoint.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Mat.h b/modules/core/misc/objc/common/Mat.h
index 229337f524..fd1dce27ba 100644
--- a/modules/core/misc/objc/common/Mat.h
+++ b/modules/core/misc/objc/common/Mat.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/MinMaxLocResult.h b/modules/core/misc/objc/common/MinMaxLocResult.h
index e8daed4cc3..5ec6029e31 100644
--- a/modules/core/misc/objc/common/MinMaxLocResult.h
+++ b/modules/core/misc/objc/common/MinMaxLocResult.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Point2d.h b/modules/core/misc/objc/common/Point2d.h
index dbb8d55efa..0426b11d9a 100644
--- a/modules/core/misc/objc/common/Point2d.h
+++ b/modules/core/misc/objc/common/Point2d.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Point2f.h b/modules/core/misc/objc/common/Point2f.h
index 0da4fba5d8..6d13c774d8 100644
--- a/modules/core/misc/objc/common/Point2f.h
+++ b/modules/core/misc/objc/common/Point2f.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Point2i.h b/modules/core/misc/objc/common/Point2i.h
index 9e5d74624a..e43ee3a8ec 100644
--- a/modules/core/misc/objc/common/Point2i.h
+++ b/modules/core/misc/objc/common/Point2i.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Point3d.h b/modules/core/misc/objc/common/Point3d.h
index 72b0d39ea8..618ded35fa 100644
--- a/modules/core/misc/objc/common/Point3d.h
+++ b/modules/core/misc/objc/common/Point3d.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Point3f.h b/modules/core/misc/objc/common/Point3f.h
index 2370fffeaa..c98add1cec 100644
--- a/modules/core/misc/objc/common/Point3f.h
+++ b/modules/core/misc/objc/common/Point3f.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Point3i.h b/modules/core/misc/objc/common/Point3i.h
index b0edeaa470..9eab2ee0ea 100644
--- a/modules/core/misc/objc/common/Point3i.h
+++ b/modules/core/misc/objc/common/Point3i.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Range.h b/modules/core/misc/objc/common/Range.h
index dd84edf6aa..df0c01398f 100644
--- a/modules/core/misc/objc/common/Range.h
+++ b/modules/core/misc/objc/common/Range.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Rect2d.h b/modules/core/misc/objc/common/Rect2d.h
index ba91509b77..0ffcae9ab6 100644
--- a/modules/core/misc/objc/common/Rect2d.h
+++ b/modules/core/misc/objc/common/Rect2d.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Rect2f.h b/modules/core/misc/objc/common/Rect2f.h
index 6a8863800f..1f44f56263 100644
--- a/modules/core/misc/objc/common/Rect2f.h
+++ b/modules/core/misc/objc/common/Rect2f.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Rect2i.h b/modules/core/misc/objc/common/Rect2i.h
index 2e4e55cf30..6ed86d50bd 100644
--- a/modules/core/misc/objc/common/Rect2i.h
+++ b/modules/core/misc/objc/common/Rect2i.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/RotatedRect.h b/modules/core/misc/objc/common/RotatedRect.h
index c94053b6c1..a2049e6bf0 100644
--- a/modules/core/misc/objc/common/RotatedRect.h
+++ b/modules/core/misc/objc/common/RotatedRect.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Scalar.h b/modules/core/misc/objc/common/Scalar.h
index 63c3d1de58..d565155010 100644
--- a/modules/core/misc/objc/common/Scalar.h
+++ b/modules/core/misc/objc/common/Scalar.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Size2d.h b/modules/core/misc/objc/common/Size2d.h
index 11c6c50a02..cd2e4e4bc0 100644
--- a/modules/core/misc/objc/common/Size2d.h
+++ b/modules/core/misc/objc/common/Size2d.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Size2f.h b/modules/core/misc/objc/common/Size2f.h
index 2d1f2865c3..73ae9a2da0 100644
--- a/modules/core/misc/objc/common/Size2f.h
+++ b/modules/core/misc/objc/common/Size2f.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/Size2i.h b/modules/core/misc/objc/common/Size2i.h
index 61aa8da885..cd74e2c84a 100644
--- a/modules/core/misc/objc/common/Size2i.h
+++ b/modules/core/misc/objc/common/Size2i.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/core/misc/objc/common/TermCriteria.h b/modules/core/misc/objc/common/TermCriteria.h
index c7396582b2..ff6bfd565c 100644
--- a/modules/core/misc/objc/common/TermCriteria.h
+++ b/modules/core/misc/objc/common/TermCriteria.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/imgcodecs/misc/objc/ios/Mat+Converters.h b/modules/imgcodecs/misc/objc/ios/Mat+Converters.h
index d33abbf4f9..8c185f884a 100644
--- a/modules/imgcodecs/misc/objc/ios/Mat+Converters.h
+++ b/modules/imgcodecs/misc/objc/ios/Mat+Converters.h
@@ -7,14 +7,14 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
 
+#import "Mat.h"
 #import <Foundation/Foundation.h>
 #import <UIKit/UIKit.h>
-#import "Mat.h"
 
 NS_ASSUME_NONNULL_BEGIN
 
diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
index 4abf806d1e..d87887372d 100644
--- a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
+++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
@@ -7,14 +7,14 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
 
+#import "Mat.h"
 #import <Foundation/Foundation.h>
 #import <AppKit/AppKit.h>
-#import "Mat.h"
 
 NS_ASSUME_NONNULL_BEGIN
 
diff --git a/modules/imgproc/misc/objc/common/Moments.h b/modules/imgproc/misc/objc/common/Moments.h
index dfa5653bac..8ce3f75ea8 100644
--- a/modules/imgproc/misc/objc/common/Moments.h
+++ b/modules/imgproc/misc/objc/common/Moments.h
@@ -7,7 +7,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+#import "opencv2/core.hpp"
 #else
 #define CV_EXPORTS
 #endif
diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index e6637a7c4c..a5ffa7e874 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -104,6 +104,15 @@ def mkdir_p(path):
         else:
             raise
 
+def header_import(hdr):
+    """ converts absolute header path to import parameter """
+    pos = hdr.find('/include/')
+    hdr = hdr[pos+9 if pos >= 0 else 0:]
+    #pos = hdr.find('opencv2/')
+    #hdr = hdr[pos+8 if pos >= 0 else 0:]
+    return hdr
+
+
 T_OBJC_CLASS_HEADER = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_class_header.template'))
 T_OBJC_CLASS_BODY = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_class_body.template'))
 T_OBJC_MODULE_HEADER = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_module_header.template'))
@@ -693,17 +702,17 @@ class ObjectiveCWrapperGenerator(object):
         classinfo = ClassInfo(decl, namespaces=self.namespaces)
         if classinfo.name in class_ignore_list:
             logging.info('ignored: %s', classinfo)
-            return
+            return None
         if classinfo.name != self.Module:
             self.classes[self.Module].member_classes.append(classinfo.objc_name)
         name = classinfo.cname
         if self.isWrapped(name) and not classinfo.base:
             logging.warning('duplicated: %s', classinfo)
-            return
+            return None
         self.classes[name] = classinfo
         if name in type_dict and not classinfo.base:
             logging.warning('duplicated: %s', classinfo)
-            return
+            return None
         if name != self.Module:
             type_dict.setdefault(name, {}).update(
                 { "objc_type" : classinfo.objc_name + "*",
@@ -731,6 +740,7 @@ class ObjectiveCWrapperGenerator(object):
             )
 
         logging.info('ok: class %s, name: %s, base: %s', classinfo, name, classinfo.base)
+        return classinfo
 
     def add_const(self, decl, scope=None, enumType=None): # [ "const cname", val, [], [] ]
         constinfo = ConstInfo(decl, namespaces=self.namespaces, enumType=enumType)
@@ -837,27 +847,30 @@ class ObjectiveCWrapperGenerator(object):
         # TODO: support UMat versions of declarations (implement UMat-wrapper for Java)
         parser = hdr_parser.CppHeaderParser(generate_umat_decls=False)
 
-        self.add_class( ['class ' + self.Module, '', [], []]) # [ 'class/struct cname', ':bases', [modlist] [props] ]
+        module_ci = self.add_class( ['class ' + self.Module, '', [], []]) # [ 'class/struct cname', ':bases', [modlist] [props] ]
+        module_ci.header_import = module + '.hpp'
 
         # scan the headers and build more descriptive maps of classes, consts, functions
         includes = []
         for hdr in common_headers:
             logging.info("\n===== Common header : %s =====", hdr)
-            includes.append('#include "' + hdr + '"')
+            includes.append(header_import(hdr))
         for hdr in srcfiles:
             decls = parser.parse(hdr)
             self.namespaces = parser.namespaces
             logging.info("\n\n===== Header: %s =====", hdr)
             logging.info("Namespaces: %s", parser.namespaces)
             if decls:
-                includes.append('#include "' + hdr + '"')
+                includes.append(header_import(hdr))
             else:
                 logging.info("Ignore header: %s", hdr)
             for decl in decls:
                 logging.info("\n--- Incoming ---\n%s", pformat(decl[:5], 4)) # without docstring
                 name = decl[0]
                 if name.startswith("struct") or name.startswith("class"):
-                    self.add_class(decl)
+                    ci = self.add_class(decl)
+                    if ci:
+                        ci.header_import = header_import(hdr)
                 elif name.startswith("const"):
                     self.add_const(decl)
                 elif name.startswith("enum"):
@@ -1190,13 +1203,26 @@ $unrefined_call$epilogue$ret
 
     def gen_class(self, ci, module, extension_implementations, extension_signatures):
         logging.info("%s", ci)
-        if module in AdditionalImports and (ci.name in AdditionalImports[module] or "*" in AdditionalImports[module]):
-            additional_imports = []
+        additional_imports = []
+        if module in AdditionalImports:
             if "*" in AdditionalImports[module]:
                 additional_imports += AdditionalImports[module]["*"]
             if ci.name in AdditionalImports[module]:
                 additional_imports += AdditionalImports[module][ci.name]
-            ci.additionalImports.write("\n".join(["#import %s" % h for h in additional_imports]))
+        if hasattr(ci, 'header_import'):
+            h = '"{}"'.format(ci.header_import)
+            if h not in additional_imports:
+                additional_imports.append(h)
+
+        h = '"{}.hpp"'.format(module)
+        if h in additional_imports:
+            additional_imports.remove(h)
+        h = '"opencv2/{}.hpp"'.format(module)
+        if h not in additional_imports:
+            additional_imports.insert(0, h)
+
+        if additional_imports:
+            ci.additionalImports.write('\n'.join(['#import %s' % h for h in additional_imports]))
 
         # constants
         wrote_consts_pragma = False
diff --git a/modules/objc/generator/templates/objc_class_header.template b/modules/objc/generator/templates/objc_class_header.template
index 0bad670685..77697e8c93 100644
--- a/modules/objc/generator/templates/objc_class_header.template
+++ b/modules/objc/generator/templates/objc_class_header.template
@@ -4,7 +4,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+//#import "opencv.hpp"
 $additionalImports
 #else
 #define CV_EXPORTS
diff --git a/modules/objc/generator/templates/objc_module_header.template b/modules/objc/generator/templates/objc_module_header.template
index fa9e7df6a3..88f45a11cf 100644
--- a/modules/objc/generator/templates/objc_module_header.template
+++ b/modules/objc/generator/templates/objc_module_header.template
@@ -4,7 +4,7 @@
 #pragma once
 
 #ifdef __cplusplus
-#import "opencv.hpp"
+//#import "opencv.hpp"
 $additionalImports
 #else
 #define CV_EXPORTS
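
The `header_import()` helper introduced above trims an absolute header path down to the `opencv2/...` form used in `#import` statements. A minimal standalone sketch of that behaviour (the input paths are made up for illustration):

```
def header_import(hdr):
    # strip everything up to and including '/include/' (9 characters)
    pos = hdr.find('/include/')
    return hdr[pos + 9 if pos >= 0 else 0:]

assert header_import('/work/opencv/modules/core/include/opencv2/core.hpp') == 'opencv2/core.hpp'
assert header_import('opencv2/core.hpp') == 'opencv2/core.hpp'  # no '/include/': unchanged
```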

From 24f2b7dd3f4a0c756d0ca922df4a089eae9d777d Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 16 Nov 2020 05:22:45 +0000
Subject: [PATCH 097/152] objc(test): repair binary resource files

---
 modules/objc/generator/gen_objc.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index e6637a7c4c..61a89d1d67 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -1368,6 +1368,8 @@ typedef NS_ENUM(int, {2}) {{
             readme_out.write(readme_body)
         if framework_name != "OpenCV":
             for dirname, dirs, files in os.walk(os.path.join(testdir, "test")):
+                if dirname.endswith('/resources'):
+                    continue  # don't touch resource binary files
                 for filename in files:
                     filepath = os.path.join(dirname, filename)
                     with io.open(filepath, encoding="utf-8", errors="ignore") as file:
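
The two added lines guard the loop that rewrites framework names inside generated test sources: anything under a `resources` directory is binary test data, and reading it back as UTF-8 with `errors="ignore"` would silently corrupt it. A rough sketch of the corrected traversal (the rename step is assumed; only the skip condition comes from the patch):

```
import io
import os

def rewrite_test_sources(testdir, old_name, new_name):
    for dirname, dirs, files in os.walk(os.path.join(testdir, 'test')):
        if dirname.endswith('/resources'):
            continue  # binary resource files: re-encoding them as UTF-8 would corrupt them
        for filename in files:
            filepath = os.path.join(dirname, filename)
            with io.open(filepath, encoding='utf-8', errors='ignore') as f:
                body = f.read()
            with io.open(filepath, 'w', encoding='utf-8') as f:
                f.write(body.replace(old_name, new_name))
```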

From dde3cb3b99d83b1e0cf9691f91aa1d6bfa61f808 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 16 Nov 2020 04:30:45 +0000
Subject: [PATCH 098/152] java: workaround handling of base class

---
 modules/java/generator/gen_java.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py
index 6c604ed04b..e41117558a 100755
--- a/modules/java/generator/gen_java.py
+++ b/modules/java/generator/gen_java.py
@@ -272,8 +272,16 @@ class ClassInfo(GeneralInfo):
 
         self.base = ''
         if decl[1]:
-            #self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
-            self.base = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "")
+            # FIXIT Use generator to find type properly instead of hacks below
+            base_class = re.sub(r"^: ", "", decl[1])
+            base_class = re.sub(r"^cv::", "", base_class)
+            base_class = base_class.replace('::', '.')
+            base_info = ClassInfo(('class {}'.format(base_class), '', [], [], None, None), [self.namespace])
+            base_type_name = base_info.name
+            if base_type_name not in type_dict:
+                base_type_name = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "")
+            self.base = base_type_name
+            self.addImports(self.base)
 
     def __repr__(self):
         return Template("CLASS $namespace::$classpath.$name : $base").substitute(**self.__dict__)

From 58268b6eef993383fd0ff41fd1a7664983d17a72 Mon Sep 17 00:00:00 2001
From: Ruslan Garnov <ruslan.garnov@intel.com>
Date: Wed, 28 Oct 2020 21:52:53 +0300
Subject: [PATCH 099/152] Added ND GMatDesc serialization test

---
 modules/gapi/test/s11n/gapi_s11n_tests.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
index 2fc1e46253..e332552b1d 100644
--- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp
+++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp
@@ -365,6 +365,12 @@ TEST_F(S11N_Basic, Test_MatDesc) {
     EXPECT_EQ(v, get<cv::GMatDesc>());
 }
 
+TEST_F(S11N_Basic, Test_MatDescND) {
+    cv::GMatDesc v = { CV_8U, {1,1,224,224} };
+    put(v);
+    EXPECT_EQ(v, get<cv::GMatDesc>());
+}
+
 TEST_F(S11N_Basic, Test_MetaArg_MatDesc) {
     cv::GMatDesc desc = { CV_8U, 1,{ 320,240 } };
     auto v = cv::GMetaArg{ desc };
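
The new case follows the same round-trip pattern as the surrounding S11N tests: serialize a value, deserialize it, and expect equality; the only novelty is the N-dimensional `{1,1,224,224}` descriptor. The pattern itself, sketched with `pickle` standing in for the G-API byte stream:

```
import pickle

def roundtrip(value):
    return pickle.loads(pickle.dumps(value))

desc = {'depth': 'CV_8U', 'dims': [1, 1, 224, 224]}  # stand-in for cv::GMatDesc
assert roundtrip(desc) == desc
```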

From 06477743abd51f6e2eba774fcb8204a007e87435 Mon Sep 17 00:00:00 2001
From: Maxim Pashchenkov <maxim.pashchenkov@intel.com>
Date: Mon, 16 Nov 2020 22:24:55 +0300
Subject: [PATCH 100/152] Merge pull request #18744 from
 mpashchenkov:mp/onnx-dynamic-input-tensor

G-API: ONNX. Support tensor input for CNN with dynamic input

* Added support for dynamic input tensor, refactored one-input/one-output tests

* Added multiple input/output fixture, test for mobilenet

* Removed whitespace

* Removed mistake in inferROI

* Small fixes

* One more fix

* Code cleanup

* Code cleanup X2

* bb rstrt

* Fix review comments

* One more fix review comments

* Mistake
---
 .../gapi/src/backends/onnx/gonnxbackend.cpp   |  12 +-
 .../gapi/test/infer/gapi_infer_onnx_test.cpp  | 503 ++++++++++++------
 2 files changed, 359 insertions(+), 156 deletions(-)

diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
index c81e032969..7ab386ecab 100644
--- a/modules/gapi/src/backends/onnx/gonnxbackend.cpp
+++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
@@ -167,8 +167,16 @@ inline void preprocess(const cv::Mat& src,
         // No layout or dimension transformations done here!
         // TODO: This needs to be aligned across all NN backends.
         GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data");
-        GAPI_Assert(toORT(src.size) == ti.dims && "32F tensor dimensions should match with NN input");
-        GAPI_Assert(!ti.is_dynamic && "Dynamic inputs are not supported for this case");
+        const auto tensor_dims = toORT(src.size);
+        if (tensor_dims.size() == ti.dims.size()) {
+            for (size_t i = 0; i < ti.dims.size(); ++i) {
+                GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) &&
+                            "32F tensor dimensions should match with all non-dynamic NN input dimensions");
+            }
+        } else {
+            GAPI_Assert(false && "32F tensor size should match with NN input");
+        }
+
         dst = src;
     } else {
         // 8U input: full preprocessing path
diff --git a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
index ebb8020e9a..782e1b093a 100644
--- a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
+++ b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
@@ -12,29 +12,26 @@
 #include <onnxruntime_cxx_api.h>
 #include <ade/util/iota_range.hpp>
 
+#include <opencv2/gapi/own/convert.hpp>
 #include <opencv2/gapi/infer/onnx.hpp>
 
 namespace {
-
 struct ONNXInitPath {
     ONNXInitPath() {
         const char* env_path = getenv("OPENCV_GAPI_ONNX_MODEL_PATH");
-        if (env_path)
+        if (env_path) {
             cvtest::addDataSearchPath(env_path);
+        }
     }
 };
 static ONNXInitPath g_init_path;
 
-cv::Mat initMatrixRandU(int type, cv::Size sz_in)
-{
-    cv::Mat in_mat1 = cv::Mat(sz_in, type);
+cv::Mat initMatrixRandU(const int type, const cv::Size& sz_in) {
+    const cv::Mat in_mat1 = cv::Mat(sz_in, type);
 
-    if (CV_MAT_DEPTH(type) < CV_32F)
-    {
+    if (CV_MAT_DEPTH(type) < CV_32F) {
         cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
-    }
-    else
-    {
+    } else {
         const int fscale = 256;  // avoid bits near ULP, generate stable test input
         cv::Mat in_mat32s(in_mat1.size(), CV_MAKE_TYPE(CV_32S, CV_MAT_CN(type)));
         cv::randu(in_mat32s, cv::Scalar::all(0), cv::Scalar::all(255 * fscale));
@@ -42,111 +39,238 @@ cv::Mat initMatrixRandU(int type, cv::Size sz_in)
     }
     return in_mat1;
 }
-}
+} // anonymous namespace
 namespace opencv_test
 {
 namespace {
 // FIXME: taken from the DNN module
-void normAssert(cv::InputArray ref, cv::InputArray test,
+void normAssert(const cv::InputArray& ref, const cv::InputArray& test,
                 const char *comment /*= ""*/,
-                double l1 = 0.00001, double lInf = 0.0001)
-{
-    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
+                const double l1 = 0.00001, const double lInf = 0.0001) {
+    const double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
     EXPECT_LE(normL1, l1) << comment;
 
-    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
+    const double normInf = cvtest::norm(ref, test, cv::NORM_INF);
     EXPECT_LE(normInf, lInf) << comment;
 }
 
-std::string findModel(const std::string &model_name)
-{
-    return findDataFile("vision/classification/squeezenet/model/" + model_name + ".onnx", false);
+inline std::string findModel(const std::string &model_name) {
+    return findDataFile("vision/" + model_name + ".onnx", false);
 }
 
-inline void preprocess(const cv::Mat& src,
-                             cv::Mat& dst,
-                       const cv::Scalar& mean,
-                       const cv::Scalar& std) {
-    int new_h = 224;
-    int new_w = 224;
-    cv::Mat tmp, nmat, cvt;
-    cv::resize(src, dst, cv::Size(new_w, new_h));
-    dst.convertTo(cvt, CV_32F, 1.f / 255);
-    nmat = cvt - mean;
-    tmp = nmat / std;
-    dst.create(cv::Size(new_w, new_h * src.channels()), CV_32F);
+inline void toCHW(const cv::Mat& src, cv::Mat& dst) {
+    dst.create(cv::Size(src.cols, src.rows * src.channels()), CV_32F);
     std::vector<cv::Mat> planes;
     for (int i = 0; i < src.channels(); ++i) {
-        planes.push_back(dst.rowRange(i * new_h, (i + 1) * new_h));
+        planes.push_back(dst.rowRange(i * src.rows, (i + 1) * src.rows));
     }
-    cv::split(tmp, planes);
+    cv::split(src, planes);
 }
 
-void InferONNX(const std::string& model_path,
-               const cv::Mat& in,
-                     cv::Mat& out,
-               const cv::Scalar& mean,
-               const cv::Scalar& std)
-{
-    // FIXME: It must be a FIXTURE test!
-    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
-    Ort::SessionOptions session_options;
-    Ort::Session session(env, model_path.data(), session_options);
-    auto input_node_dims = //    0 - one input
-        session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
-    auto output_node_dims = //    0 - one output
-        session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
-    Ort::AllocatorWithDefaultOptions allocator;
-    char* in_node_name_p = session.GetInputName(0, allocator);
-    char* out_node_name_p = session.GetOutputName(0, allocator);
-    std::string in_node_name(in_node_name_p);
-    std::string out_node_name(out_node_name_p);
-    allocator.Free(in_node_name_p);
-    allocator.Free(out_node_name_p);
-
-    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
-    cv::Mat dst;
-    preprocess(in, dst, mean, std);
-
-    out.create(std::vector<int>(output_node_dims.begin(),
-                                output_node_dims.end()), CV_32F); // empty output Mat
-    auto in_tensor = Ort::Value::CreateTensor<float>(memory_info,
-                                                     dst.ptr<float>(),
-                                                     dst.total(),
-                                                     input_node_dims.data(),
-                                                     input_node_dims.size());
-    auto out_tensor = Ort::Value::CreateTensor<float>(memory_info,
-                                                      out.ptr<float>(),
-                                                      out.total(),
-                                                      output_node_dims.data(),
-                                                      output_node_dims.size());
-    std::vector<const char *> in_names = {in_node_name.data()};
-    std::vector<const char *> out_names = {out_node_name.data()};
-    session.Run(Ort::RunOptions{nullptr},
-                in_names.data(),
-                &in_tensor,
-                session.GetInputCount(),
-                out_names.data(),
-                &out_tensor,
-                session.GetOutputCount());
+inline int toCV(const ONNXTensorElementDataType prec) {
+    switch (prec) {
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U;
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F;
+    default: GAPI_Assert(false && "Unsupported data type");
+    }
+    return -1;
 }
 
-} // anonymous namespace
+inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
+    return cv::to_own<int64_t>(sz);
+}
 
-TEST(ONNX, Infer)
-{
-    cv::Mat in_mat1, out_gapi, out_onnx;
-    std::string model_path = findModel("squeezenet1.0-9");
-    // NOTE: All tests chek "random" image
-    // Ideally it should be a real image
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+inline std::vector<const char*> getCharNames(const std::vector<std::string>& names) {
+    std::vector<const char*> out_vec;
+    for (const auto& el : names) {
+        out_vec.push_back(el.data());
+    }
+    return out_vec;
+}
 
-    cv::Scalar mean = { 0.485, 0.456, 0.406 };
-    cv::Scalar std  = { 0.229, 0.224, 0.225 };
+inline void copyToOut(const cv::Mat& in, cv::Mat& out) {
+    GAPI_Assert(in.depth() == CV_32F);
+    GAPI_Assert(in.size == out.size);
+    const float* const inptr = in.ptr<float>();
+    float* const optr = out.ptr<float>();
+    const int size = in.total();
+    for (int i = 0; i < size; ++i) {
+        optr[i] = inptr[i];
+    }
+}
 
-    // ONNX_API code
-    InferONNX(model_path, in_mat1, out_onnx, mean, std);
+void remapYolo(const std::unordered_map<std::string, cv::Mat> &onnx,
+                      std::unordered_map<std::string, cv::Mat> &gapi) {
+    GAPI_Assert(onnx.size() == 1u);
+    GAPI_Assert(gapi.size() == 1u);
+    // Result from Run method
+    const cv::Mat& in = onnx.begin()->second;
+    // Configured output
+    cv::Mat& out = gapi.begin()->second;
+    // Simple copy
+    copyToOut(in, out);
+}
+
+void remapSsdPorts(const std::unordered_map<std::string, cv::Mat> &onnx,
+                           std::unordered_map<std::string, cv::Mat> &gapi) {
+    // Result from Run method
+    const cv::Mat& in_num     = onnx.at("num_detections:0");
+    const cv::Mat& in_boxes   = onnx.at("detection_boxes:0");
+    const cv::Mat& in_scores  = onnx.at("detection_scores:0");
+    const cv::Mat& in_classes = onnx.at("detection_classes:0");
+    // Configured outputs
+    cv::Mat& out_boxes   = gapi.at("out1");
+    cv::Mat& out_classes = gapi.at("out2");
+    cv::Mat& out_scores  = gapi.at("out3");
+    cv::Mat& out_num     = gapi.at("out4");
+    // Simple copy for outputs
+    copyToOut(in_num, out_num);
+    copyToOut(in_boxes, out_boxes);
+    copyToOut(in_scores, out_scores);
+    copyToOut(in_classes, out_classes);
+}
+
+class ONNXtest : public ::testing::Test {
+public:
+    std::string model_path;
+    size_t num_in, num_out;
+    std::vector<cv::Mat> out_gapi;
+    std::vector<cv::Mat> out_onnx;
+    cv::Mat in_mat1;
 
+    ONNXtest() {
+        env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
+        memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+        out_gapi.resize(1);
+        out_onnx.resize(1);
+        // FIXME: All tests check "random" image
+        // Ideally it should be a real image
+        in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+    }
+
+    template<typename T>
+    void infer(const std::vector<cv::Mat>& ins,
+                     std::vector<cv::Mat>& outs) {
+        // Prepare session
+        session = Ort::Session(env, model_path.data(), session_options);
+        num_in = session.GetInputCount();
+        num_out = session.GetOutputCount();
+        GAPI_Assert(num_in == ins.size());
+        in_node_names.clear();
+        out_node_names.clear();
+        // Inputs Run params
+        std::vector<Ort::Value> in_tensors;
+        for(size_t i = 0; i < num_in; ++i) {
+            char* in_node_name_p = session.GetInputName(i, allocator);
+            in_node_names.push_back(std::string(in_node_name_p));
+            allocator.Free(in_node_name_p);
+            in_node_dims = toORT(ins[i].size);
+            in_tensors.emplace_back(Ort::Value::CreateTensor<T>(memory_info,
+                                                                const_cast<T*>(ins[i].ptr<T>()),
+                                                                ins[i].total(),
+                                                                in_node_dims.data(),
+                                                                in_node_dims.size()));
+        }
+        // Outputs Run params
+        for(size_t i = 0; i < num_out; ++i) {
+            char* out_node_name_p = session.GetOutputName(i, allocator);
+            out_node_names.push_back(std::string(out_node_name_p));
+            allocator.Free(out_node_name_p);
+        }
+        // Input/output order by names
+        const auto in_run_names  = getCharNames(in_node_names);
+        const auto out_run_names = getCharNames(out_node_names);
+        // Run
+        auto result = session.Run(Ort::RunOptions{nullptr},
+                                  in_run_names.data(),
+                                  &in_tensors.front(),
+                                  num_in,
+                                  out_run_names.data(),
+                                  num_out);
+        // Copy outputs
+        GAPI_Assert(result.size() == num_out);
+        outs.resize(num_out);
+        for (size_t i = 0; i < num_out; ++i) {
+            const auto info = result[i].GetTensorTypeAndShapeInfo();
+            const auto shape = info.GetShape();
+            const auto type = info.GetElementType();
+            cv::Mat mt(std::vector<int>(shape.begin(), shape.end()), toCV(type),
+                       reinterpret_cast<void*>(result[i].GetTensorMutableData<uint8_t*>()));
+            mt.copyTo(outs[i]);
+        }
+    }
+    // One input/output overload
+    template<typename T>
+    void infer(const cv::Mat& in, cv::Mat& out) {
+        std::vector<cv::Mat> result;
+        infer<T>({in}, result);
+        GAPI_Assert(result.size() == 1u);
+        out = result.front();
+    }
+
+    void validate() {
+        GAPI_Assert(!out_gapi.empty() && !out_onnx.empty());
+        ASSERT_EQ(out_gapi.size(), out_onnx.size());
+        const auto size = out_gapi.size();
+        for (size_t i = 0; i < size; ++i) {
+            normAssert(out_onnx[i], out_gapi[i], "Test outputs");
+        }
+    }
+
+    void useModel(const std::string& model_name) {
+        model_path = findModel(model_name);
+    }
+private:
+    Ort::Env env{nullptr};
+    Ort::MemoryInfo memory_info{nullptr};
+    Ort::AllocatorWithDefaultOptions allocator;
+    Ort::SessionOptions session_options;
+    Ort::Session session{nullptr};
+
+    std::vector<int64_t> in_node_dims;
+    std::vector<std::string> in_node_names;
+    std::vector<std::string> out_node_names;
+};
+
+class ONNXClassificationTest : public ONNXtest {
+public:
+    const cv::Scalar mean = { 0.485, 0.456, 0.406 };
+    const cv::Scalar std  = { 0.229, 0.224, 0.225 };
+
+    void preprocess(const cv::Mat& src, cv::Mat& dst) {
+        const int new_h = 224;
+        const int new_w = 224;
+        cv::Mat tmp, cvt, rsz;
+        cv::resize(src, rsz, cv::Size(new_w, new_h));
+        rsz.convertTo(cvt, CV_32F, 1.f / 255);
+        tmp = (cvt - mean) / std;
+        toCHW(tmp, dst);
+        dst = dst.reshape(1, {1, 3, new_h, new_w});
+    }
+};
+
+class ONNXGRayScaleTest : public ONNXtest {
+public:
+    void preprocess(const cv::Mat& src, cv::Mat& dst) {
+        const int new_h = 64;
+        const int new_w = 64;
+        cv::Mat cvc, rsz, cvt;
+        cv::cvtColor(src, cvc, cv::COLOR_BGR2GRAY);
+        cv::resize(cvc, rsz, cv::Size(new_w, new_h));
+        rsz.convertTo(cvt, CV_32F);
+        toCHW(cvt, dst);
+        dst = dst.reshape(1, {1, 1, new_h, new_w});
+    }
+};
+} // anonymous namespace
+
+TEST_F(ONNXClassificationTest, Infer)
+{
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    // ONNX_API code
+    cv::Mat processed_mat;
+    preprocess(in_mat1, processed_mat);
+    infer<float>(processed_mat, out_onnx.front());
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
@@ -154,125 +278,196 @@ TEST(ONNX, Infer)
     cv::GComputation comp(cv::GIn(in), cv::GOut(out));
     // NOTE: We have to normalize U8 tensor
     // so cfgMeanStd() is here
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1),
-               cv::gout(out_gapi),
+               cv::gout(out_gapi.front()),
                cv::compile_args(cv::gapi::networks(net)));
-
     // Validate
-    ASSERT_EQ(1000u, out_onnx.total());
-    ASSERT_EQ(1000u, out_gapi.total());
-    normAssert(out_onnx, out_gapi, "Test classification output");
+    validate();
 }
 
-TEST(ONNX, InferROI)
+TEST_F(ONNXtest, InferTensor)
 {
-    cv::Mat in_mat1, out_gapi, out_onnx;
-    std::string model_path = findModel("squeezenet1.0-9");
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean
-    cv::Scalar std  = { 0.229, 0.224, 0.225 }; // squeeznet std
-
-    cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250});
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    // Create tensor
+    // FIXME: Test checks "random" image
+    // Ideally it should be a real image
+    const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{224, 224});
+    const std::vector<int> dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols};
+    const cv::Mat tensor(dims, CV_32F, rand_mat.data);
     // ONNX_API code
-    InferONNX(model_path, in_mat1(ROI), out_onnx, mean, std);
+    infer<float>(tensor, out_onnx.front());
+    // G_API code
+    G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<SqueezNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path };
+    comp.apply(cv::gin(tensor),
+               cv::gout(out_gapi.front()),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
 
+TEST_F(ONNXClassificationTest, InferROI)
+{
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    const cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250});
+    // ONNX_API code
+    cv::Mat roi_mat;
+    preprocess(in_mat1(ROI), roi_mat);
+    infer<float>(roi_mat, out_onnx.front());
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
     cv::GOpaque<cv::Rect> rect;
     cv::GMat out = cv::gapi::infer<SqueezNet>(rect, in);
     cv::GComputation comp(cv::GIn(in, rect), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1, ROI),
-               cv::gout(out_gapi),
+               cv::gout(out_gapi.front()),
                cv::compile_args(cv::gapi::networks(net)));
-
     // Validate
-    ASSERT_EQ(1000u, out_onnx.total());
-    ASSERT_EQ(1000u, out_gapi.total());
-    normAssert(out_onnx, out_gapi, "Test classification output");
+    validate();
 }
 
-TEST(ONNX, InferROIList)
+TEST_F(ONNXClassificationTest, InferROIList)
 {
-    cv::Mat in_mat1;
-    std::string model_path = findModel("squeezenet1.0-9");
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean
-    cv::Scalar std  = { 0.229, 0.224, 0.225 }; // squeeznet std
-
-    std::vector<cv::Rect> rois = {
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    const std::vector<cv::Rect> rois = {
         cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
         cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
     };
-    std::vector<cv::Mat> out_gapi;
-    std::vector<cv::Mat> out_onnx(rois.size());
     // ONNX_API code
+    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
-        InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std);
+        cv::Mat roi_mat;
+        preprocess(in_mat1(rois[i]), roi_mat);
+        infer<float>(roi_mat, out_onnx[i]);
     }
-
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
     cv::GArray<cv::Rect> rr;
     cv::GArray<cv::GMat> out = cv::gapi::infer<SqueezNet>(rr, in);
     cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1, rois),
                cv::gout(out_gapi),
                cv::compile_args(cv::gapi::networks(net)));
-
     // Validate
-    for (size_t i = 0; i < rois.size(); ++i) {
-        ASSERT_EQ(1000u, out_onnx[i].total());
-        ASSERT_EQ(1000u, out_gapi[i].total());
-        normAssert(out_onnx[i], out_gapi[i], "Test classification output");
-    }
+    validate();
 }
 
-TEST(ONNX, Infer2ROIList)
+TEST_F(ONNXClassificationTest, Infer2ROIList)
 {
-    cv::Mat in_mat1;
-    std::string model_path = findModel("squeezenet1.0-9");
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean
-    cv::Scalar std  = { 0.229, 0.224, 0.225 }; // squeeznet std
-
-    std::vector<cv::Rect> rois = {
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    const std::vector<cv::Rect> rois = {
         cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
         cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
     };
-    std::vector<cv::Mat> out_gapi;
-    std::vector<cv::Mat> out_onnx(rois.size());
     // ONNX_API code
+    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
-        InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std);
+        cv::Mat roi_mat;
+        preprocess(in_mat1(rois[i]), roi_mat);
+        infer<float>(roi_mat, out_onnx[i]);
     }
-
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
     cv::GArray<cv::Rect> rr;
-    cv::GArray<cv::GMat> out = cv::gapi::infer2<SqueezNet>(in,rr);
+    cv::GArray<cv::GMat> out = cv::gapi::infer2<SqueezNet>(in, rr);
     cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1, rois),
                cv::gout(out_gapi),
                cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
+TEST_F(ONNXtest, InferDynamicInputTensor)
+{
+    useModel("object_detection_segmentation/tiny-yolov2/model/tinyyolov2-8");
+    // Create tensor
+    // FIXME: Test checks "random" image
+    // Ideally it should be a real image
+    const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{416, 416});
+    const std::vector<int> dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols};
+    cv::Mat tensor(dims, CV_32F, rand_mat.data);
+    const cv::Mat in_tensor = tensor / 255.f;
+    // ONNX_API code
+    infer<float>(in_tensor, out_onnx.front());
+    // G_API code
+    G_API_NET(YoloNet, <cv::GMat(cv::GMat)>, "YoloNet");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<YoloNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 125, 13, 13}}}, remapYolo)
+        .cfgOutputLayers({"out"});
+    comp.apply(cv::gin(in_tensor),
+               cv::gout(out_gapi.front()),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
 
+TEST_F(ONNXGRayScaleTest, InferImage)
+{
+    useModel("body_analysis/emotion_ferplus/model/emotion-ferplus-8");
+    // ONNX_API code
+    cv::Mat prep_mat;
+    preprocess(in_mat1, prep_mat);
+    infer<float>(prep_mat, out_onnx.front());
+    // G_API code
+    G_API_NET(EmotionNet, <cv::GMat(cv::GMat)>, "emotion-ferplus");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<EmotionNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<EmotionNet> { model_path }
+        .cfgNormalize({ false }); // model accepts 0..255 range in FP32;
+    comp.apply(cv::gin(in_mat1),
+               cv::gout(out_gapi.front()),
+               cv::compile_args(cv::gapi::networks(net)));
     // Validate
-    for (size_t i = 0; i < rois.size(); ++i) {
-        ASSERT_EQ(1000u, out_onnx[i].total());
-        ASSERT_EQ(1000u, out_gapi[i].total());
-        normAssert(out_onnx[i], out_gapi[i], "Test classification output");
-    }
+    validate();
 }
 
+TEST_F(ONNXtest, InferMultOutput)
+{
+    useModel("object_detection_segmentation/ssd-mobilenetv1/model/ssd_mobilenet_v1_10");
+    // ONNX_API code
+    const auto prep_mat = in_mat1.reshape(1, {1, in_mat1.rows, in_mat1.cols, in_mat1.channels()});
+    infer<uint8_t>({prep_mat}, out_onnx);
+    // G_API code
+    using SSDOut = std::tuple<cv::GMat, cv::GMat, cv::GMat, cv::GMat>;
+    G_API_NET(MobileNet, <SSDOut(cv::GMat)>, "ssd_mobilenet");
+    cv::GMat in;
+    cv::GMat out1, out2, out3, out4;
+    std::tie(out1, out2, out3, out4) = cv::gapi::infer<MobileNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3, out4));
+    auto net = cv::gapi::onnx::Params<MobileNet>{model_path}
+        .cfgOutputLayers({"out1", "out2", "out3", "out4"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 100, 4}},
+                      cv::GMatDesc{CV_32F, {1, 100}},
+                      cv::GMatDesc{CV_32F, {1, 100}},
+                      cv::GMatDesc{CV_32F, {1, 1}}}, remapSsdPorts);
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(in_mat1),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2], out_gapi[3]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
 } // namespace opencv_test
 
 #endif //  HAVE_ONNX
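
The relaxed assertion in `preprocess()` is the core of this change: instead of requiring the tensor shape to equal the model's input shape exactly, a model dimension of -1 is treated as dynamic and matches anything, while every other dimension must still agree. The rule in isolation (a sketch; the dims are illustrative):

```
def dims_compatible(tensor_dims, model_dims):
    if len(tensor_dims) != len(model_dims):
        return False  # rank must match; only individual dims may be dynamic
    return all(m == -1 or m == t for t, m in zip(tensor_dims, model_dims))

assert dims_compatible([1, 3, 416, 416], [-1, 3, 416, 416])      # dynamic batch
assert not dims_compatible([1, 3, 224, 224], [1, 3, 416, 416])   # fixed dim mismatch
assert not dims_compatible([3, 416, 416], [-1, 3, 416, 416])     # rank mismatch
```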

From 4c9e3723e8a39c4779721cc404a41917dffaf068 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sun, 15 Nov 2020 19:22:05 +0000
Subject: [PATCH 101/152] objc: skip unsupported inner namespaces

---
 modules/objc/generator/gen_objc.py | 53 +++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 12 deletions(-)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index 1975cb46fe..469b82b938 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -27,6 +27,10 @@ updated_files = 0
 
 module_imports = []
 
+# list of namespaces, which should be skipped by wrapper generator
+# the list is loaded from misc/objc/gen_dict.json defined for the module only
+namespace_ignore_list = []
+
 # list of class names, which should be skipped by wrapper generator
 # the list is loaded from misc/objc/gen_dict.json defined for the module and its dependencies
 class_ignore_list = []
@@ -89,6 +93,14 @@ method_dict = {
 
 modules = []
 
+
+class SkipSymbolException(Exception):
+    def __init__(self, text):
+        self.t = text
+    def __str__(self):
+        return self.t
+
+
 def read_contents(fname):
     with open(fname, 'r') as f:
         data = f.read()
@@ -122,6 +134,10 @@ class GeneralInfo():
     def __init__(self, type, decl, namespaces):
         self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces)
 
+        for ns_ignore in namespace_ignore_list:
+            if self.symbol_id.startswith(ns_ignore + '.'):
+                raise SkipSymbolException('ignored namespace ({}): {}'.format(ns_ignore, self.symbol_id))
+
         # parse doxygen comments
         self.params={}
 
@@ -709,6 +725,10 @@ class ObjectiveCWrapperGenerator(object):
         if self.isWrapped(name) and not classinfo.base:
             logging.warning('duplicated: %s', classinfo)
             return None
+        if name in self.classes:  # TODO implement inner namespaces
+            if self.classes[name].symbol_id != classinfo.symbol_id:
+                logging.warning('duplicated under new id: {} (was {})'.format(classinfo.symbol_id, self.classes[name].symbol_id))
+                return None
         self.classes[name] = classinfo
         if name in type_dict and not classinfo.base:
             logging.warning('duplicated: %s', classinfo)
@@ -812,7 +832,12 @@ class ObjectiveCWrapperGenerator(object):
         elif not self.isWrapped(classname):
             logging.warning('not found: %s', fi)
         else:
-            self.getClass(classname).addMethod(fi)
+            ci = self.getClass(classname)
+            if ci.symbol_id != fi.symbol_id[0:fi.symbol_id.rfind('.')] and ci.symbol_id != self.Module:
+                # TODO fix this (inner namespaces)
+                logging.warning('SKIP: mismatched class: {} (class: {})'.format(fi.symbol_id, ci.symbol_id))
+                return
+            ci.addMethod(fi)
             logging.info('ok: %s', fi)
             # calc args with def val
             cnt = len([a for a in fi.args if a.defval])
@@ -867,17 +892,20 @@ class ObjectiveCWrapperGenerator(object):
             for decl in decls:
                 logging.info("\n--- Incoming ---\n%s", pformat(decl[:5], 4)) # without docstring
                 name = decl[0]
-                if name.startswith("struct") or name.startswith("class"):
-                    ci = self.add_class(decl)
-                    if ci:
-                        ci.header_import = header_import(hdr)
-                elif name.startswith("const"):
-                    self.add_const(decl)
-                elif name.startswith("enum"):
-                    # enum
-                    self.add_enum(decl)
-                else: # function
-                    self.add_func(decl)
+                try:
+                    if name.startswith("struct") or name.startswith("class"):
+                        ci = self.add_class(decl)
+                        if ci:
+                            ci.header_import = header_import(hdr)
+                    elif name.startswith("const"):
+                        self.add_const(decl)
+                    elif name.startswith("enum"):
+                        # enum
+                        self.add_enum(decl)
+                    else: # function
+                        self.add_func(decl)
+                except SkipSymbolException as e:
+                    logging.info('SKIP: {} due to {}'.format(name, e))
         self.classes[self.Module].member_classes += manual_classes
 
         logging.info("\n\n===== Generating... =====")
@@ -1602,6 +1630,7 @@ if __name__ == "__main__":
         if os.path.exists(gendict_fname):
             with open(gendict_fname) as f:
                 gen_type_dict = json.load(f)
+            namespace_ignore_list = gen_type_dict.get("namespace_ignore_list", [])
             class_ignore_list += gen_type_dict.get("class_ignore_list", [])
             enum_ignore_list += gen_type_dict.get("enum_ignore_list", [])
             const_ignore_list += gen_type_dict.get("const_ignore_list", [])
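
A declaration whose fully qualified id falls under an ignored namespace now raises `SkipSymbolException` inside `GeneralInfo.__init__`, and the parse loop catches it and logs the skip instead of emitting a broken wrapper. In miniature (the `cv.gapi.own` entry is a hypothetical ignore-list item):

```
namespace_ignore_list = ['cv.gapi.own']  # hypothetical entry from misc/objc/gen_dict.json

class SkipSymbolException(Exception):
    pass

def check_symbol(symbol_id):
    for ns_ignore in namespace_ignore_list:
        if symbol_id.startswith(ns_ignore + '.'):
            raise SkipSymbolException('ignored namespace ({}): {}'.format(ns_ignore, symbol_id))

try:
    check_symbol('cv.gapi.own.Mat')
except SkipSymbolException as e:
    print('SKIP due to', e)  # the generator logs this and moves to the next declaration
```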

From 464d53bb167f1a3476d8daba32a7f3d5fcd8ecc7 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 16 Nov 2020 21:34:42 +0000
Subject: [PATCH 102/152] python: emit "string" => "std::string"

---
 modules/python/src2/gen2.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py
index 233587c9cb..243442cbdd 100755
--- a/modules/python/src2/gen2.py
+++ b/modules/python/src2/gen2.py
@@ -201,7 +201,8 @@ simple_argtype_mapping = {
     "int": ArgTypeInfo("int", FormatStrings.int, "0", True),
     "float": ArgTypeInfo("float", FormatStrings.float, "0.f", True),
     "double": ArgTypeInfo("double", FormatStrings.double, "0", True),
-    "c_string": ArgTypeInfo("char*", FormatStrings.string, '(char*)""')
+    "c_string": ArgTypeInfo("char*", FormatStrings.string, '(char*)""'),
+    "string": ArgTypeInfo("std::string", FormatStrings.object, None, True),
 }
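
With this entry, parameters parsed as plain `string` get an explicit `std::string` C++ type and an object-style converter instead of falling through to the generic path. A sketch of how such a lookup table is typically consulted (the format strings and the fallback rule here are assumptions for illustration):

```
from collections import namedtuple

ArgTypeInfo = namedtuple('ArgTypeInfo',
                         ['atype', 'format_str', 'default_value', 'strict_conversion'])

simple_argtype_mapping = {
    "c_string": ArgTypeInfo("char*", "s", '(char*)""', False),
    "string":   ArgTypeInfo("std::string", "O", None, True),  # the new entry
}

def lookup(tp):
    # unmapped types fall back to being wrapped as generic objects
    return simple_argtype_mapping.get(tp, ArgTypeInfo(tp, "O", None, True))

assert lookup("string").atype == "std::string"
```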
 
 

From 3a184ae6778fb4a2f2baf1395d07707799ff2c95 Mon Sep 17 00:00:00 2001
From: Liubov Batanina <piccione-mail@yandex.ru>
Date: Tue, 17 Nov 2020 10:14:41 +0300
Subject: [PATCH 103/152] [ONNX] Added handler for int32 tensors

---
 modules/dnn/src/onnx/onnx_graph_simplifier.cpp | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
index e8b237cab4..e7856cf1a9 100644
--- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
+++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
@@ -513,6 +513,19 @@ Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto)
         CV_Assert(!field.empty());
         Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1);
     }
+    else if (datatype == opencv_onnx::TensorProto_DataType_INT32)
+    {
+        if (!tensor_proto.int32_data().empty())
+        {
+            const ::google::protobuf::RepeatedField<int32_t> field = tensor_proto.int32_data();
+            Mat(sizes, CV_32SC1, (void*)field.data()).copyTo(blob);
+        }
+        else
+        {
+            char* val = const_cast<char*>(tensor_proto.raw_data().c_str());
+            Mat(sizes, CV_32SC1, val).copyTo(blob);
+        }
+    }
     else if (datatype == opencv_onnx::TensorProto_DataType_INT64)
     {
         blob.create(sizes, CV_32SC1);
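
ONNX stores INT32 initializers either in the `int32_data` repeated field or packed into `raw_data` bytes, so both sources need a branch. The equivalent logic in NumPy terms (a sketch; `tensor_proto` stands for a parsed `onnx.TensorProto`):

```
import numpy as np

def int32_blob(tensor_proto, sizes):
    if tensor_proto.int32_data:        # repeated int32 field is populated
        arr = np.asarray(tensor_proto.int32_data, dtype=np.int32)
    else:                              # otherwise the payload is raw little-endian bytes
        arr = np.frombuffer(tensor_proto.raw_data, dtype=np.int32)
    return arr.reshape(sizes)
```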

From 2b82f8f12c2ac6f66872e0e6316f8f0b21b6ee13 Mon Sep 17 00:00:00 2001
From: Sergei Slashchinin <62052793+sl-sergei@users.noreply.github.com>
Date: Tue, 17 Nov 2020 12:52:08 +0300
Subject: [PATCH 104/152] Merge pull request #18296 from sl-sergei:fix_16783

Fix loading issue for Faster RCNN model from #16783

* Add a reproducer with multi-output Gather

* Fix an issue with ONNX graph simplifier

* fix build

* Move checks to correct class

* Minor changes for better code appearance
---
 .../dnn/src/onnx/onnx_graph_simplifier.cpp    | 34 +++++++++++++++++++
 modules/dnn/test/test_onnx_importer.cpp       |  5 +++
 2 files changed, 39 insertions(+)

diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
index e8b237cab4..30c0b26ead 100644
--- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
+++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
@@ -260,6 +260,40 @@ public:
         addNodeToMatch("Cast", gather);
         setFusedNode("Gather", input, index);
     }
+
+    virtual bool match(const Ptr<ImportGraphWrapper>& net, int nodeId,
+                       std::vector<int>& matchedNodesIds,
+                       std::vector<int>& targetNodesIds) CV_OVERRIDE
+    {
+        bool retVal = Subgraph::match(net, nodeId, matchedNodesIds, targetNodesIds);
+        size_t matchedNodesNum = matchedNodesIds.size();
+        // Now we check if merging can be made for these Gather and Cast nodes
+        if (!retVal || matchedNodesNum < 2)
+            return retVal;
+        else {
+            int nodeToMatch = matchedNodesIds[matchedNodesNum - 1];
+            const Ptr<ImportNodeWrapper> node = net->getNode(nodeToMatch);
+            if (node->getType() == "Cast") {
+                int inpNodeId = matchedNodesIds[matchedNodesNum - 2];
+                const Ptr<ImportNodeWrapper> inpNode = net->getNode(inpNodeId);
+                if (inpNode->getType() == "Gather") {
+                    int numNodes = net->getNumNodes();
+                    std::string inpNodeName = node->getInputName(0);
+                    for (int i = 0; i < numNodes; ++i) {
+                        const Ptr<ImportNodeWrapper> node_to_check = net->getNode(i);
+                        int numInp = node_to_check->getNumInputs();
+                        for (int inp = 0; inp < numInp; ++inp) {
+                            if (i != nodeToMatch && inpNodeName == node_to_check->getInputName(inp)) {
+                                // Another node has the same input node, so it cannot be merged.
+                                return false;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        return retVal;
+    }
 };
 
 class ExpandSubgraph : public Subgraph
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 5c6de55da5..14d2d28522 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -705,6 +705,11 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
     normAssert(ref, out, "", default_l1, default_lInf);
 }
 
+TEST_P(Test_ONNX_layers, GatherMultiOutput)
+{
+    testONNXModels("gather_multi_output");
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
 
 class Test_ONNX_nets : public Test_ONNX_layers
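
The overridden `match()` adds a safety check before the Gather+Cast pair is fused: if any other node also consumes the Gather's output (the multi-output case from #16783), fusion would orphan that consumer, so the match is rejected. The check reduced to a sketch over a toy node table (names are hypothetical):

```
def can_fuse_gather_cast(nodes, cast_id):
    gather_out = nodes[cast_id]['inputs'][0]  # the Cast reads the Gather's output
    for node_id, node in nodes.items():
        if node_id != cast_id and gather_out in node['inputs']:
            return False  # another consumer exists: fusing would break it
    return True

nodes = {
    'gather': {'inputs': ['data', 'indices']},
    'cast':   {'inputs': ['gather_out']},
    'shape':  {'inputs': ['gather_out']},  # second consumer of the Gather output
}
assert not can_fuse_gather_cast(nodes, 'cast')
```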

From 32e7ef8a3d2520e42a0ae1118872caf637ad945a Mon Sep 17 00:00:00 2001
From: Sergey Slashchinin <sergei.slashchinin@xperience.ai>
Date: Tue, 17 Nov 2020 13:31:04 +0300
Subject: [PATCH 105/152] Add fixes and tests for different layers

---
 modules/dnn/include/opencv2/dnn/dnn.hpp  |  3 +
 modules/dnn/src/dnn.cpp                  | 55 ++++++++++++++++-
 modules/dnn/src/layers/pooling_layer.cpp | 49 ++++++++++-----
 modules/dnn/src/layers/reshape_layer.cpp | 51 +++++++++++++++-
 modules/dnn/src/layers/slice_layer.cpp   | 13 +++-
 modules/dnn/src/onnx/onnx_importer.cpp   | 76 ++++++++++++++++++++----
 modules/dnn/test/test_onnx_importer.cpp  | 21 +++++++
 7 files changed, 240 insertions(+), 28 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 98a825940d..9cb7089bdd 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -354,9 +354,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
                                      const int requiredOutputs,
                                      std::vector<MatShape> &outputs,
                                      std::vector<MatShape> &internals) const;
+
         virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                                const std::vector<MatShape> &outputs) const {CV_UNUSED(inputs); CV_UNUSED(outputs); return 0;}
 
+        virtual bool updateMemoryShapes(const std::vector<MatShape> &inputs);
+
         CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
         CV_PROP String type; //!< Type name which was used for creating layer by layer factory.
         CV_PROP int preferableTarget; //!< prefer target for layer forwarding
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index c789638793..efafd5d325 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -1119,6 +1119,7 @@ struct Net::Impl : public detail::NetImplBase
         preferableBackend = DNN_BACKEND_DEFAULT;
         preferableTarget = DNN_TARGET_CPU;
         skipInfEngineInit = false;
+        hasDynamicShapes = false;
     }
 
     Ptr<DataLayer> netInputLayer;
@@ -1130,6 +1131,7 @@ struct Net::Impl : public detail::NetImplBase
     int preferableTarget;
     String halideConfigFile;
     bool skipInfEngineInit;
+    bool hasDynamicShapes;
     // Map host data to backend specific wrapper.
     std::map<void*, Ptr<BackendWrapper> > backendWrappers;
 
@@ -3074,6 +3076,46 @@ struct Net::Impl : public detail::NetImplBase
         shapes = inOutShapes[layerId];
     }
 
+    void updateLayersShapes()
+    {
+        CV_Assert(!layers[0].outputBlobs.empty());
+        ShapesVec inputShapes;
+        for(int i = 0; i < layers[0].outputBlobs.size(); i++)
+        {
+            Mat& inp = layers[0].outputBlobs[i];
+            CV_Assert(inp.total());
+            if (preferableBackend == DNN_BACKEND_OPENCV &&
+                preferableTarget == DNN_TARGET_OPENCL_FP16)
+            {
+                layers[0].outputBlobs[i].create(inp.dims, inp.size, CV_16S);
+            }
+            inputShapes.push_back(shape(inp));
+        }
+        LayersShapesMap layersShapes;
+        layersShapes[0].in = inputShapes;
+        for (MapIdToLayerData::iterator it = layers.begin();
+             it != layers.end(); it++)
+        {
+            int layerId = it->first;
+            std::vector<LayerPin>& inputLayerIds = it->second.inputBlobsId;
+            if (layersShapes[layerId].in.empty())
+            {
+                for(int i = 0; i < inputLayerIds.size(); i++)
+                {
+                    int inputLayerId = inputLayerIds[i].lid;
+                    LayersShapesMap::iterator inputIt = layersShapes.find(inputLayerId);
+                    if(inputIt == layersShapes.end() || inputIt->second.out.empty())
+                    {
+                        getLayerShapesRecursively(inputLayerId, layersShapes);
+                    }
+                    const MatShape& shape = layersShapes[inputLayerId].out[inputLayerIds[i].oid];
+                    layersShapes[layerId].in.push_back(shape);
+                }
+                it->second.layerInstance->updateMemoryShapes(layersShapes[layerId].in);
+            }
+        }
+    }
+
     LayerPin getLatestLayerPin(const std::vector<LayerPin>& pins)
     {
         return *std::max_element(pins.begin(), pins.end());
@@ -3487,6 +3529,8 @@ int Net::addLayer(const String &name, const String &type, LayerParams &params)
     int id = ++impl->lastLayerId;
     impl->layerNameToId.insert(std::make_pair(name, id));
     impl->layers.insert(std::make_pair(id, LayerData(id, name, type, params)));
+    if (params.get<bool>("has_dynamic_shapes", false))
+        impl->hasDynamicShapes = true;
 
     return id;
 }
@@ -3818,8 +3862,13 @@ void Net::setInput(InputArray blob, const String& name, double scalefactor, cons
     bool oldShape = prevShape == blobShape;
 
     blob_.copyTo(impl->netInputLayer->inputsData[pin.oid]);
-    if (!oldShape)
+    if (!oldShape) {
         ld.outputBlobs[pin.oid] = impl->netInputLayer->inputsData[pin.oid];
+        if (impl->hasDynamicShapes)
+        {
+            impl->updateLayersShapes();
+        }
+    }
 
     if (!ld.outputBlobsWrappers[pin.oid].empty())
     {
@@ -4746,6 +4795,10 @@ bool Layer::getMemoryShapes(const std::vector<MatShape> &inputs,
     return false;
 }
 
+bool Layer::updateMemoryShapes(const std::vector<MatShape> &inputs)
+{
+    return true;
+}
 //////////////////////////////////////////////////////////////////////////
 
 static Mutex& getLayerFactoryMutex()
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index fd08fdbeb3..98417620ed 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -88,6 +88,9 @@ public:
         stride = Size(1, 1);
         pad_t = pad_l = pad_b = pad_r = 0;
 
+        hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
+        shapesInitialized = !hasDynamicShapes;
+
         if (params.has("pool") || params.has("kernel_size") ||
             params.has("kernel_w") || params.has("kernel_h"))
         {
@@ -1043,26 +1046,34 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
             outShape.push_back(pooledSize.height);
             outShape.push_back(pooledSize.width);
         }
-        else if (padMode.empty())
+        else
         {
-            for (int i = 0; i < local_kernel.size(); i++) {
-                float dst = (float)(inpShape[i] + pads_begin[i] + pads_end[i] - local_kernel[i]) / strides[i];
-                outShape.push_back(1 + (ceilMode ? ceil(dst) : floor(dst)));
+            if (hasDynamicShapes && !shapesInitialized)
+            {
+                // Just copy input shapes for width and height to prevent errors at the loading stage
+                for (int i = 0; i < inpShape.size(); i++)
+                    outShape.push_back(inpShape[i]);
             }
+            else if (padMode.empty())
+            {
+                for (int i = 0; i < local_kernel.size(); i++) {
+                    float dst = (float) (inpShape[i] + pads_begin[i] + pads_end[i] - local_kernel[i]) / strides[i];
+                    outShape.push_back(1 + (ceilMode ? ceil(dst) : floor(dst)));
+                }
 
-            // If we have padding, ensure that the last pooling starts strictly
-            // inside the image (instead of at the padding); otherwise clip the last.
-            for (int i = 0; i < pads_end.size(); i++) {
-                if (pads_end[i] && (outShape[2 + i] - 1) * strides[i] >= inpShape[i] + pads_end[i]) {
-                    --outShape[2 + i];
-                    CV_Assert((outShape[2 + i] - 1) * strides[i] < inpShape[i] + pads_end[i]);
+                // If we have padding, ensure that the last pooling starts strictly
+                // inside the image (instead of at the padding); otherwise clip the last.
+                for (int i = 0; i < pads_end.size(); i++) {
+                    if (pads_end[i] && (outShape[2 + i] - 1) * strides[i] >= inpShape[i] + pads_end[i]) {
+                        --outShape[2 + i];
+                        CV_Assert((outShape[2 + i] - 1) * strides[i] < inpShape[i] + pads_end[i]);
+                    }
                 }
+            } else {
+                getConvPoolOutParams(inpShape, local_kernel, strides, padMode,
+                                     std::vector<size_t>(local_kernel.size(), 1), outShape);
             }
         }
-        else
-        {
-            getConvPoolOutParams(inpShape, local_kernel, strides, padMode, std::vector<size_t>(local_kernel.size(), 1), outShape);
-        }
         if (type == ROI)
         {
             CV_Assert(inputs.size() == 2);
@@ -1083,6 +1094,14 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         return false;
     }
 
+    bool updateMemoryShapes(const std::vector<MatShape> &inputs) CV_OVERRIDE
+    {
+        int dims = inputs[0].size();
+        CV_Assert(inputs[0][dims - 1] > 0 && inputs[0][dims - 2] > 0);
+        shapesInitialized = true;
+        return true;
+    }
+
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                            const std::vector<MatShape> &outputs) const CV_OVERRIDE
     {
@@ -1114,6 +1133,8 @@ private:
         ROI,   // RoI pooling, https://arxiv.org/pdf/1504.08083.pdf
         PSROI  // Position-sensitive RoI pooling, https://arxiv.org/pdf/1605.06409.pdf
     };
+    bool hasDynamicShapes;
+    bool shapesInitialized;
 };
 
 Ptr<PoolingLayer> PoolingLayer::create(const LayerParams& params)
diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp
index a85a4e4a2f..642e7c52f6 100644
--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -164,6 +164,9 @@ public:
         setParamsFrom(params);
         int axis = params.get<int>("axis", 0);
         int numAxes = params.get<int>("num_axes", -1);
+        hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
+        shapesInitialized = !hasDynamicShapes;
+
         CV_Assert(numAxes >= -1);
         newShapeRange = (numAxes == -1) ? Range(axis, INT_MAX) : Range(axis, axis + numAxes);
 
@@ -176,6 +179,25 @@ public:
             for (i = 0; i < dims; i++)
                 newShapeDesc[i] = paramShape.get<int>(i);
         }
+        if (hasDynamicShapes)
+        {
+            dynamicShapes.clear();
+            inputIndices.clear();
+            if (params.has("dynamic_axes")) {
+                CV_Assert(params.has("input_indices"));
+                const DictValue &dynamicAxes = params.get("dynamic_axes");
+                const DictValue &dynamicInputShapes = params.get("input_indices");
+                int i, dims = dynamicAxes.size();
+                CV_Assert(dims == dynamicInputShapes.size());
+                CV_Assert(dims > 0);
+                dynamicShapes.resize(dims);
+                inputIndices.resize(dims);
+                for (i = 0; i < dims; i++) {
+                    dynamicShapes[i] = dynamicAxes.get<int>(i);
+                    inputIndices[i] = dynamicInputShapes.get<int>(i);
+                }
+            }
+        }
     }
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
@@ -189,13 +211,21 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
+
         if (inputs.size() == 1 || inputs.size() == requiredOutputs)
         {
             outputs.clear();
             for (size_t i = 0; i < inputs.size(); i++)
             {
-                outputs.push_back(MatShape());
-                computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
+                if (hasDynamicShapes && !shapesInitialized)
+                {
+                    outputs.push_back(newShapeDesc);
+                }
+                else
+                {
+                    outputs.push_back(MatShape());
+                    computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
+                }
             }
         }
         else
@@ -206,6 +236,19 @@ public:
         return true;
     }
 
+    bool updateMemoryShapes(const std::vector<MatShape> &inputs) CV_OVERRIDE
+    {
+        if (hasDynamicShapes)
+        {
+            for (int i = 0; i < dynamicShapes.size(); ++i)
+            {
+                newShapeDesc[dynamicShapes[i]] = inputs[0][inputIndices[i]];
+            }
+        }
+        shapesInitialized = true;
+        return true;
+    }
+
     void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
     {
         std::vector<Mat> outputs;
@@ -287,6 +330,10 @@ public:
 
 private:
     std::vector<MatShape> outShapes;
+    std::vector<int> dynamicShapes; // Which axes' shapes are dynamic and require reinitialization with a new input
+    std::vector<int> inputIndices; // Which input axes are needed to compute the correct output shape
+    bool hasDynamicShapes;
+    bool shapesInitialized;
 };
 
 Ptr<ReshapeLayer> ReshapeLayer::create(const LayerParams& params)
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index 9994677cb5..fd314b7c57 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -66,6 +66,8 @@ public:
         setParamsFrom(params);
         axis = params.get<int>("axis", 1);
         num_split = params.get<int>("num_split", 0);
+        hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
+        shapesInitialized = !hasDynamicShapes;
         if (params.has("slice_point"))
         {
             CV_Assert(!params.has("begin") && !params.has("size") && !params.has("end"));
@@ -143,7 +145,8 @@ public:
                 CV_Assert(sliceRanges[i].size() <= inpShape.size());
                 for (int j = 0; j < sliceRanges[i].size(); ++j)
                 {
-                    outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
+                    if (shapesInitialized || inpShape[j] > 0)
+                        outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
                 }
             }
         }
@@ -158,6 +161,12 @@ public:
         return false;
     }
 
+    bool updateMemoryShapes(const std::vector<MatShape> &inputs) CV_OVERRIDE
+    {
+        shapesInitialized = true;
+        return true;
+    }
+
     void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
     {
 #ifdef HAVE_OPENCL
@@ -564,6 +573,8 @@ public:
 protected:
     // The actual non-negative values determined from @p sliceRanges depends on input size.
     std::vector<std::vector<Range> > finalSliceRanges;
+    bool hasDynamicShapes;
+    bool shapesInitialized;
 };
 
 class CropLayerImpl CV_FINAL : public SliceLayerImpl
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 9443336305..756c8a5580 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -64,6 +64,7 @@ public:
     ONNXImporter(Net& net, const char *onnxFile)
         : dstNet(net)
     {
+        hasDynamicShapes = false;
         CV_Assert(onnxFile);
         CV_LOG_DEBUG(NULL, "DNN/ONNX: processing ONNX model from file: " << onnxFile);
 
@@ -84,6 +85,7 @@ public:
     ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
         : dstNet(net)
     {
+        hasDynamicShapes = false;
         CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)");
 
         struct _Buf : public std::streambuf
@@ -115,6 +117,7 @@ protected:
     std::map<std::string, Mat> constBlobs;
 
     std::map<std::string, MatShape> outShapes;  // List of internal blobs shapes.
+    bool hasDynamicShapes;  // Whether the model has inputs with dynamic shapes
     typedef std::map<std::string, MatShape>::iterator IterShape_t;
 
     std::map<std::string, LayerInfo> layer_id;
@@ -413,8 +416,10 @@ void ONNXImporter::populateNet()
         for (int j = 0; j < inpShape.size(); ++j)
         {
             inpShape[j] = tensorShape.dim(j).dim_value();
+            if (!tensorShape.dim(j).dim_param().empty())
+                hasDynamicShapes = true;
         }
-        if (!inpShape.empty())
+        if (!inpShape.empty() && !hasDynamicShapes)
         {
             inpShape[0] = std::max(inpShape[0], 1); // It's OK to have undetermined batch size
         }
@@ -461,6 +466,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 
         layerParams.name = name;
         layerParams.type = layer_type;
+        layerParams.set("has_dynamic_shapes", hasDynamicShapes);
 
         if (layer_type == "MaxPool")
         {
@@ -1276,6 +1282,20 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             {
                 layerParams.type = "Reshape";
                 layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
+                if (hasDynamicShapes)
+                {
+                    std::vector<int> dynamicAxes;
+                    std::vector<int> inputIndices;
+                    for (int index = 0; index < inpShape.size(); ++index)
+                    {
+                        if (!maskedAxes[index])
+                            inputIndices.push_back(index);
+                    }
+                    for (int index = 0; index < outShape.size(); ++index)
+                        dynamicAxes.push_back(index);
+                    layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
+                    layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
+                }
             }
             else
                 layerParams.type = "Identity";
@@ -1338,6 +1358,19 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             outShape.insert(outShape.begin() + axis, 1);
             layerParams.type = "Reshape";
             layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
+            if (hasDynamicShapes)
+            {
+                std::vector<int> dynamicAxes;
+                std::vector<int> inputIndices;
+                for (int index = 0; index < outShape.size(); ++index) {
+                    if (index != axis)
+                        dynamicAxes.push_back(index);
+                }
+                for (int index = 0; index < inpShape.size(); ++index)
+                    inputIndices.push_back(index);
+                layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
+                layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
+            }
         }
         else if (layer_type == "Expand")
         {
@@ -1625,6 +1658,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end.data(), end.size());
                 sliceLp.set("begin", paramBegin);
                 sliceLp.set("end", paramEnd);
+                sliceLp.set("has_dynamic_shapes", hasDynamicShapes);
 
                 if (inpShape.size() > 1)
                 {
@@ -1637,6 +1671,17 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                     layerParams.type = "Reshape";
                     layerParams.set("axis", 0);
                     layerParams.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));
+                    if (hasDynamicShapes)
+                    {
+                        std::vector<int> dynamicAxes;
+                        std::vector<int> inputIndices;
+                        for (int index = 0; index < inpShape.size(); ++index)
+                            dynamicAxes.push_back(index);
+                        for (int index = 0; index < inpShape.size(); ++index)
+                            inputIndices.push_back(index);
+                        layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
+                        layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
+                    }
                     node_proto.set_input(0, sliceLp.name);
                 }
                 else
@@ -1676,7 +1721,11 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             for (int i = 1; i < node_proto.input_size(); i++)
                 CV_Assert(layer_id.find(node_proto.input(i)) == layer_id.end());
 
-            String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
+            String interp_mode;
+            if (layerParams.has("coordinate_transformation_mode"))
+                interp_mode = layerParams.get<String>("coordinate_transformation_mode");
+            else
+                interp_mode = layerParams.get<String>("mode");
             CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");
 
             layerParams.set("align_corners", interp_mode == "align_corners");
@@ -1688,16 +1737,23 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 shapes.convertTo(shapes, CV_32S);
             int height = shapes.at<int>(2);
             int width  = shapes.at<int>(3);
-            if (node_proto.input_size() == 3)
+            if (hasDynamicShapes)
             {
-                IterShape_t shapeIt = outShapes.find(node_proto.input(0));
-                CV_Assert(shapeIt != outShapes.end());
-                MatShape scales = shapeIt->second;
-                height *= scales[2];
-                width  *= scales[3];
+                layerParams.set("zoom_factor_x", width);
+                layerParams.set("zoom_factor_y", height);
+            }
+            else
+            {
+                if (node_proto.input_size() == 3) {
+                    IterShape_t shapeIt = outShapes.find(node_proto.input(0));
+                    CV_Assert(shapeIt != outShapes.end());
+                    MatShape scales = shapeIt->second;
+                    height *= scales[2];
+                    width *= scales[3];
+                }
+                layerParams.set("width", width);
+                layerParams.set("height", height);
             }
-            layerParams.set("width", width);
-            layerParams.set("height", height);
 
             if (layerParams.get<String>("mode") == "linear") {
                 layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 14d2d28522..9ddc17c97c 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -710,6 +710,27 @@ TEST_P(Test_ONNX_layers, GatherMultiOutput)
     testONNXModels("gather_multi_output");
 }
 
+TEST_P(Test_ONNX_layers, DynamicAxes)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("squeeze_and_conv_dynamic_axes");
+    testONNXModels("unsqueeze_and_conv_dynamic_axes");
+    testONNXModels("gather_dynamic_axes");
+    testONNXModels("gather_scalar_dynamic_axes");
+    testONNXModels("slice_dynamic_axes");
+    testONNXModels("slice_opset_11_dynamic_axes");
+    testONNXModels("resize_opset11_torch1.6_dynamic_axes");
+    testONNXModels("average_pooling_dynamic_axes");
+    testONNXModels("maxpooling_sigmoid_dynamic_axes");
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
 
 class Test_ONNX_nets : public Test_ONNX_layers

From 23baf1a75e1fd70e300769654afec0024047fe7b Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sat, 31 Oct 2020 14:08:13 +0000
Subject: [PATCH 106/152] dnn: fix High-Level public API (cv::dnn::Model class)

- proxy selected Net methods only (don't derive from Net directly); see the usage sketch below
- default Model ctor is protected
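
A rough usage sketch under the reworked API (model/config file names are
placeholders): Net-level settings now go through explicit proxy methods,
and internal/test code reaches the underlying network via getNetwork_().

```cpp
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <vector>

void run(const cv::Mat& frame)
{
    cv::dnn::Model model("model.caffemodel", "model.prototxt");
    model.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);  // proxied to Net
    model.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);       // proxied to Net
    model.setInputParams(1.0, cv::Size(224, 224), cv::Scalar(),
                         /*swapRB=*/true, /*crop=*/false);

    std::vector<cv::Mat> outs;
    model.predict(frame, outs);  // predict() is const now

    // Internal/tests usage only: Net is no longer a base class of Model.
    cv::dnn::Net& net = model.getNetwork_();
    CV_UNUSED(net);
}
```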
---
 modules/dnn/include/opencv2/dnn/dnn.hpp     |  43 +++--
 modules/dnn/include/opencv2/dnn/version.hpp |   2 +-
 modules/dnn/src/model.cpp                   | 188 ++++++++++++++------
 modules/dnn/test/test_caffe_importer.cpp    |   2 +-
 4 files changed, 172 insertions(+), 63 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 3b12508c74..bf39ad8289 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -1072,14 +1072,17 @@ CV__DNN_INLINE_NS_BEGIN
       * Model creates net from file with trained weights and config,
       * sets preprocessing input and runs forward pass.
       */
-     class CV_EXPORTS_W_SIMPLE Model : public Net
+     class CV_EXPORTS_W_SIMPLE Model
      {
      public:
-         /**
-          * @brief Default constructor.
-          */
+         CV_DEPRECATED_EXTERNAL  // avoid using in C++ code, will be moved to "protected" (need to fix bindings first)
          Model();
 
+         Model(const Model&) = default;
+         Model(Model&&) = default;
+         Model& operator=(const Model&) = default;
+         Model& operator=(Model&&) = default;
+
          /**
           * @brief Create model from deep learning network represented in one of the supported formats.
           * An order of @p model and @p config arguments does not matter.
@@ -1100,13 +1103,12 @@ CV__DNN_INLINE_NS_BEGIN
          */
          CV_WRAP Model& setInputSize(const Size& size);
 
-         /** @brief Set input size for frame.
+         /** @overload
          *  @param[in] width New input width.
          *  @param[in] height New input height.
-         *  @note If shape of the new blob less than 0,
-         *  then frame size not change.
          */
-         CV_WRAP Model& setInputSize(int width, int height);
+         CV_WRAP inline
+         Model& setInputSize(int width, int height) { return setInputSize(Size(width, height)); }
 
          /** @brief Set mean value for frame.
           *  @param[in] mean Scalar with mean values which are subtracted from channels.
@@ -1143,10 +1145,31 @@ CV__DNN_INLINE_NS_BEGIN
           *  @param[in]  frame  The input image.
           *  @param[out] outs Allocated output blobs, which will store results of the computation.
           */
-         CV_WRAP void predict(InputArray frame, OutputArrayOfArrays outs);
+         CV_WRAP void predict(InputArray frame, OutputArrayOfArrays outs) const;
+
+
+         // ============================== Net proxy methods ==============================
+         // Never expose methods with network implementation details, like:
+         // - addLayer, addLayerToPrev, connect, setInputsNames, setInputShape, setParam, getParam
+         // - getLayer*, getUnconnectedOutLayers, getUnconnectedOutLayersNames, getLayersShapes
+         // - forward* methods, setInput
+
+         /// @sa Net::setPreferableBackend
+         CV_WRAP Model& setPreferableBackend(dnn::Backend backendId);
+         /// @sa Net::setPreferableTarget
+         CV_WRAP Model& setPreferableTarget(dnn::Target targetId);
+
+         CV_DEPRECATED_EXTERNAL
+         operator Net&() const { return getNetwork_(); }
+
+     //protected: - internal/tests usage only
+         Net& getNetwork_() const;
+         inline Net& getNetwork_() { return const_cast<const Model*>(this)->getNetwork_(); }
 
-     protected:
          struct Impl;
+         inline Impl* getImpl() const { return impl.get(); }
+         inline Impl& getImplRef() const { CV_DbgAssert(impl); return *impl.get(); }
+     protected:
          Ptr<Impl> impl;
      };
 
diff --git a/modules/dnn/include/opencv2/dnn/version.hpp b/modules/dnn/include/opencv2/dnn/version.hpp
index f91b44d142..62ecadb6f7 100644
--- a/modules/dnn/include/opencv2/dnn/version.hpp
+++ b/modules/dnn/include/opencv2/dnn/version.hpp
@@ -6,7 +6,7 @@
 #define OPENCV_DNN_VERSION_HPP
 
 /// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20200908
+#define OPENCV_DNN_API_VERSION 20201117
 
 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
 #define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
diff --git a/modules/dnn/src/model.cpp b/modules/dnn/src/model.cpp
index 677228bcf2..acee29e680 100644
--- a/modules/dnn/src/model.cpp
+++ b/modules/dnn/src/model.cpp
@@ -15,6 +15,9 @@ namespace dnn {
 
 struct Model::Impl
 {
+//protected:
+    Net    net;
+
     Size   size;
     Scalar mean;
     double  scale = 1.0;
@@ -23,7 +26,70 @@ struct Model::Impl
     Mat    blob;
     std::vector<String> outNames;
 
-    void predict(Net& net, const Mat& frame, OutputArrayOfArrays outs)
+public:
+    virtual ~Impl() {}
+    Impl() {}
+    Impl(const Impl&) = delete;
+    Impl(Impl&&) = delete;
+
+    virtual Net& getNetwork() const { return const_cast<Net&>(net); }
+
+    virtual void setPreferableBackend(Backend backendId) { net.setPreferableBackend(backendId); }
+    virtual void setPreferableTarget(Target targetId) { net.setPreferableTarget(targetId); }
+
+    /*virtual*/
+    void initNet(const Net& network)
+    {
+        net = network;
+
+        outNames = net.getUnconnectedOutLayersNames();
+        std::vector<MatShape> inLayerShapes;
+        std::vector<MatShape> outLayerShapes;
+        net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
+        if (!inLayerShapes.empty() && inLayerShapes[0].size() == 4)
+            size = Size(inLayerShapes[0][3], inLayerShapes[0][2]);
+        else
+            size = Size();
+    }
+
+    /*virtual*/
+    void setInputParams(double scale_, const Size& size_, const Scalar& mean_,
+                        bool swapRB_, bool crop_)
+    {
+        size = size_;
+        mean = mean_;
+        scale = scale_;
+        crop = crop_;
+        swapRB = swapRB_;
+    }
+    /*virtual*/
+    void setInputSize(const Size& size_)
+    {
+        size = size_;
+    }
+    /*virtual*/
+    void setInputMean(const Scalar& mean_)
+    {
+        mean = mean_;
+    }
+    /*virtual*/
+    void setInputScale(double scale_)
+    {
+        scale = scale_;
+    }
+    /*virtual*/
+    void setInputCrop(bool crop_)
+    {
+        crop = crop_;
+    }
+    /*virtual*/
+    void setInputSwapRB(bool swapRB_)
+    {
+        swapRB = swapRB_;
+    }
+
+    /*virtual*/
+    void processFrame(InputArray frame, OutputArrayOfArrays outs)
     {
         if (size.empty())
             CV_Error(Error::StsBadSize, "Input size not specified");
@@ -34,96 +100,115 @@ struct Model::Impl
         // Faster-RCNN or R-FCN
         if (net.getLayer(0)->outputNameToIndex("im_info") != -1)
         {
-            Mat imInfo = (Mat_<float>(1, 3) << size.height, size.width, 1.6f);
+            Mat imInfo(Matx31f(size.height, size.width, 1.6f));
             net.setInput(imInfo, "im_info");
         }
         net.forward(outs, outNames);
     }
 };
 
-Model::Model() : impl(new Impl) {}
+Model::Model()
+    : impl(makePtr<Impl>())
+{
+    // nothing
+}
 
 Model::Model(const String& model, const String& config)
-    : Net(readNet(model, config)), impl(new Impl)
+    : Model()
 {
-    impl->outNames = getUnconnectedOutLayersNames();
-    std::vector<MatShape> inLayerShapes;
-    std::vector<MatShape> outLayerShapes;
-    getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
-    if (!inLayerShapes.empty() && inLayerShapes[0].size() == 4)
-        impl->size = Size(inLayerShapes[0][3], inLayerShapes[0][2]);
-};
+    impl->initNet(readNet(model, config));
+}
 
-Model::Model(const Net& network) : Net(network), impl(new Impl)
+Model::Model(const Net& network)
+    : Model()
 {
-    impl->outNames = getUnconnectedOutLayersNames();
-    std::vector<MatShape> inLayerShapes;
-    std::vector<MatShape> outLayerShapes;
-    getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
-    if (!inLayerShapes.empty() && inLayerShapes[0].size() == 4)
-        impl->size = Size(inLayerShapes[0][3], inLayerShapes[0][2]);
-};
+    impl->initNet(network);
+}
 
-Model& Model::setInputSize(const Size& size)
+Net& Model::getNetwork_() const
+{
+    CV_DbgAssert(impl);
+    return impl->getNetwork();
+}
+
+Model& Model::setPreferableBackend(Backend backendId)
+{
+    CV_DbgAssert(impl);
+    impl->setPreferableBackend(backendId);
+    return *this;
+}
+Model& Model::setPreferableTarget(Target targetId)
 {
-    impl->size = size;
+    CV_DbgAssert(impl);
+    impl->setPreferableTarget(targetId);
     return *this;
 }
 
-Model& Model::setInputSize(int width, int height)
+Model& Model::setInputSize(const Size& size)
 {
-    impl->size = Size(width, height);
+    CV_DbgAssert(impl);
+    impl->setInputSize(size);
     return *this;
 }
 
 Model& Model::setInputMean(const Scalar& mean)
 {
-    impl->mean = mean;
+    CV_DbgAssert(impl);
+    impl->setInputMean(mean);
     return *this;
 }
 
 Model& Model::setInputScale(double scale)
 {
-    impl->scale = scale;
+    CV_DbgAssert(impl);
+    impl->setInputScale(scale);
     return *this;
 }
 
 Model& Model::setInputCrop(bool crop)
 {
-    impl->crop = crop;
+    CV_DbgAssert(impl);
+    impl->setInputCrop(crop);
     return *this;
 }
 
 Model& Model::setInputSwapRB(bool swapRB)
 {
-    impl->swapRB = swapRB;
+    CV_DbgAssert(impl);
+    impl->setInputSwapRB(swapRB);
     return *this;
 }
 
 void Model::setInputParams(double scale, const Size& size, const Scalar& mean,
                            bool swapRB, bool crop)
 {
-    impl->size = size;
-    impl->mean = mean;
-    impl->scale = scale;
-    impl->crop = crop;
-    impl->swapRB = swapRB;
+    CV_DbgAssert(impl);
+    impl->setInputParams(scale, size, mean, swapRB, crop);
 }
 
-void Model::predict(InputArray frame, OutputArrayOfArrays outs)
+void Model::predict(InputArray frame, OutputArrayOfArrays outs) const
 {
-    impl->predict(*this, frame.getMat(), outs);
+    CV_DbgAssert(impl);
+    impl->processFrame(frame, outs);
 }
 
+
 ClassificationModel::ClassificationModel(const String& model, const String& config)
-    : Model(model, config) {};
+    : Model(model, config)
+{
+    // nothing
+}
 
-ClassificationModel::ClassificationModel(const Net& network) : Model(network) {};
+ClassificationModel::ClassificationModel(const Net& network)
+    : Model(network)
+{
+    // nothing
+}
 
 std::pair<int, float> ClassificationModel::classify(InputArray frame)
 {
     std::vector<Mat> outs;
-    impl->predict(*this, frame.getMat(), outs);
+    impl->processFrame(frame, outs);
     CV_Assert(outs.size() == 1);
 
     double conf;
@@ -145,11 +230,11 @@ KeypointsModel::KeypointsModel(const Net& network) : Model(network) {};
 std::vector<Point2f> KeypointsModel::estimate(InputArray frame, float thresh)
 {
 
-    int frameHeight = frame.getMat().size[0];
-    int frameWidth = frame.getMat().size[1];
+    int frameHeight = frame.rows();
+    int frameWidth = frame.cols();
     std::vector<Mat> outs;
 
-    impl->predict(*this, frame.getMat(), outs);
+    impl->processFrame(frame, outs);
     CV_Assert(outs.size() == 1);
     Mat output = outs[0];
 
@@ -202,9 +287,8 @@ SegmentationModel::SegmentationModel(const Net& network) : Model(network) {};
 
 void SegmentationModel::segment(InputArray frame, OutputArray mask)
 {
-
     std::vector<Mat> outs;
-    impl->predict(*this, frame.getMat(), outs);
+    impl->processFrame(frame, outs);
     CV_Assert(outs.size() == 1);
     Mat score = outs[0];
 
@@ -250,12 +334,14 @@ void disableRegionNMS(Net& net)
 }
 
 DetectionModel::DetectionModel(const String& model, const String& config)
-    : Model(model, config) {
-      disableRegionNMS(*this);
+    : Model(model, config)
+{
+    disableRegionNMS(getNetwork_());  // FIXIT Move to DetectionModel::Impl::initNet()
 }
 
-DetectionModel::DetectionModel(const Net& network) : Model(network) {
-    disableRegionNMS(*this);
+DetectionModel::DetectionModel(const Net& network) : Model(network)
+{
+    disableRegionNMS(getNetwork_());  // FIXIT Move to DetectionModel::Impl::initNet()
 }
 
 void DetectionModel::detect(InputArray frame, CV_OUT std::vector<int>& classIds,
@@ -263,7 +349,7 @@ void DetectionModel::detect(InputArray frame, CV_OUT std::vector<int>& classIds,
                             float confThreshold, float nmsThreshold)
 {
     std::vector<Mat> detections;
-    impl->predict(*this, frame.getMat(), detections);
+    impl->processFrame(frame, detections);
 
     boxes.clear();
     confidences.clear();
@@ -271,15 +357,15 @@ void DetectionModel::detect(InputArray frame, CV_OUT std::vector<int>& classIds,
 
     int frameWidth  = frame.cols();
     int frameHeight = frame.rows();
-    if (getLayer(0)->outputNameToIndex("im_info") != -1)
+    if (getNetwork_().getLayer(0)->outputNameToIndex("im_info") != -1)
     {
         frameWidth = impl->size.width;
         frameHeight = impl->size.height;
     }
 
-    std::vector<String> layerNames = getLayerNames();
-    int lastLayerId = getLayerId(layerNames.back());
-    Ptr<Layer> lastLayer = getLayer(lastLayerId);
+    std::vector<String> layerNames = getNetwork_().getLayerNames();
+    int lastLayerId = getNetwork_().getLayerId(layerNames.back());
+    Ptr<Layer> lastLayer = getNetwork_().getLayer(lastLayerId);
 
     if (lastLayer->type == "DetectionOutput")
     {
diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp
index e1ffa762de..5440f4734f 100644
--- a/modules/dnn/test/test_caffe_importer.cpp
+++ b/modules/dnn/test/test_caffe_importer.cpp
@@ -563,7 +563,7 @@ TEST_P(Test_Caffe_nets, DenseNet_121)
     }
     normAssert(outs[0], ref, "", l1, lInf);
     if (target != DNN_TARGET_MYRIAD || getInferenceEngineVPUType() != CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        expectNoFallbacksFromIE(model);
+        expectNoFallbacksFromIE(model.getNetwork_());
 }
 
 TEST(Test_Caffe, multiple_inputs)

From 3cdf9264545b1ce47e9cc19ae25e23d2950c53e9 Mon Sep 17 00:00:00 2001
From: Sergey Slashchinin <sergei.slashchinin@xperience.ai>
Date: Tue, 17 Nov 2020 14:33:39 +0300
Subject: [PATCH 107/152] disable Conv1d test on NGRAPH/MYRIAD

---
 modules/dnn/test/test_onnx_importer.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 14d2d28522..c115be728a 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -685,6 +685,10 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight)
 
 TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
 {
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
     String basename = "conv1d_variable_wb";
     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
     ASSERT_FALSE(net.empty());

From 72d06080c6fc8009e38eb991c05ad075b44be5e5 Mon Sep 17 00:00:00 2001
From: Liubov Batanina <piccione-mail@yandex.ru>
Date: Tue, 17 Nov 2020 14:45:36 +0300
Subject: [PATCH 108/152] [ONNX] Added Reduce ops for batch and channel
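
Reductions over a single batch or channel axis are lowered to a Reshape
followed by a pooling stage: trailing dimensions are flattened so the
reduced axis lands in a pooling position, and the pooling kernel spans that
axis. A hand-traced sketch of the shape arithmetic (standalone code, not
part of the importer):

```cpp
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> inpShape = {2, 3, 4, 5};  // N x C x H x W
    const int axis = 1;                        // reduce over channels

    std::vector<int> newShape = inpShape;
    newShape[axis + 1] = 4 * 5;                      // total(newShape, axis + 1) -> {2, 3, 20, 5}
    newShape.resize(axis + 2);                       // {2, 3, 20}
    newShape.insert(newShape.begin(), 2 - axis, 1);  // {1, 2, 3, 20}

    // A pooling kernel of {C, 1} = {3, 1} then collapses the channel axis
    // of the reshaped blob; axis = 0 works the same way for the batch.
    std::printf("reshape to %dx%dx%dx%d, pool kernel %dx1\n",
                newShape[0], newShape[1], newShape[2], newShape[3],
                inpShape[axis]);
    return 0;
}
```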

---
 modules/dnn/src/onnx/onnx_importer.cpp  | 33 ++++++++++++++++++++++---
 modules/dnn/test/test_onnx_importer.cpp |  4 ++-
 2 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 9443336305..32f7f02e9d 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -551,11 +551,36 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 
                     CV_Assert(axes.size() <= inpShape.size() - 2);
                     std::vector<int> kernel_size(inpShape.size() - 2, 1);
-                    for (int i = 0; i < axes.size(); i++) {
-                        int axis = clamp(axes.get<int>(i), inpShape.size());
-                        CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
-                        kernel_size[axis - 2] = inpShape[axis];
+                    if (axes.size() == 1 && (clamp(axes.get<int>(0), inpShape.size()) <= 1))
+                    {
+                        int axis = clamp(axes.get<int>(0), inpShape.size());
+                        MatShape newShape = inpShape;
+                        newShape[axis + 1] = total(newShape, axis + 1);
+                        newShape.resize(axis + 2);
+                        newShape.insert(newShape.begin(), 2 - axis, 1);
+
+                        LayerParams reshapeLp;
+                        reshapeLp.type = "Reshape";
+                        reshapeLp.name = layerParams.name + "/reshape";
+                        CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
+                        reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], newShape.size()));
+
+                        node_proto.set_output(0, reshapeLp.name);
+                        addLayer(reshapeLp, node_proto);
+
+                        kernel_size.resize(2);
+                        kernel_size[0] = inpShape[axis];
+                        node_proto.set_input(0, node_proto.output(0));
+                    }
+                    else
+                    {
+                        for (int i = 0; i < axes.size(); i++) {
+                            int axis = clamp(axes.get<int>(i), inpShape.size());
+                            CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
+                            kernel_size[axis - 2] = inpShape[axis];
+                        }
                     }
+
                     LayerParams poolLp = layerParams;
                     poolLp.name = layerParams.name + "/avg";
                     CV_Assert(layer_id.find(poolLp.name) == layer_id.end());
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 14d2d28522..1c5d2e5289 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -267,9 +267,11 @@ TEST_P(Test_ONNX_layers, ReduceSum)
     testONNXModels("reduce_sum");
 }
 
-TEST_P(Test_ONNX_layers, ReduceMaxGlobal)
+TEST_P(Test_ONNX_layers, ReduceMax)
 {
     testONNXModels("reduce_max");
+    testONNXModels("reduce_max_axis_0");
+    testONNXModels("reduce_max_axis_1");
 }
 
 TEST_P(Test_ONNX_layers, Scale)

From 2b558a3787dc441b11e6c52d5a461b0e2e05795b Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Wed, 11 Nov 2020 17:57:53 +0000
Subject: [PATCH 109/152] core: fix F16C compilation check
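
FP16 conversions are now gated on CV_FP16 (driven by __F16C__) rather than
CV_AVX2, and the AVX2 load/expand and pack/store intrinsics get a scalar
fallback when F16C is unavailable. A small sketch that exercises the fixed
path (behavior only, no new API):

```cpp
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // float -> FP16 -> float; compiles to _mm_cvtps_ph/_mm_cvtph_ps when
    // CV_FP16 is set (e.g. built with -mf16c), scalar conversion otherwise.
    float x = 0.333333f;
    cv::float16_t h(x);
    float y = (float)h;
    std::printf("%.6f -> %.6f\n", x, y);
    return 0;
}
```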

---
 .../include/opencv2/core/cv_cpu_dispatch.h    |  5 +++++
 modules/core/include/opencv2/core/cvdef.h     |  4 ++--
 .../include/opencv2/core/hal/intrin_avx.hpp   | 21 +++++++++++++++++++
 modules/core/src/convert.simd.hpp             |  5 +++++
 modules/core/test/test_intrin.cpp             |  8 +++++--
 modules/core/test/test_intrin_utils.hpp       |  6 +++---
 6 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/modules/core/include/opencv2/core/cv_cpu_dispatch.h b/modules/core/include/opencv2/core/cv_cpu_dispatch.h
index 42651aed5e..540fbb605c 100644
--- a/modules/core/include/opencv2/core/cv_cpu_dispatch.h
+++ b/modules/core/include/opencv2/core/cv_cpu_dispatch.h
@@ -216,6 +216,11 @@ struct VZeroUpperGuard {
 #  define CV_VSX 1
 #endif
 
+#ifdef __F16C__
+#  include <immintrin.h>
+#  define CV_FP16 1
+#endif
+
 #endif // !__OPENCV_BUILD && !__CUDACC (Compatibility code)
 
 
diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h
index 5bd3af33a4..6488b8bd4f 100644
--- a/modules/core/include/opencv2/core/cvdef.h
+++ b/modules/core/include/opencv2/core/cvdef.h
@@ -765,7 +765,7 @@ protected:
     float16_t() {}
     explicit float16_t(float x)
     {
-    #if CV_AVX2
+    #if CV_FP16
         __m128 v = _mm_load_ss(&x);
         w = (ushort)_mm_cvtsi128_si32(_mm_cvtps_ph(v, 0));
     #else
@@ -796,7 +796,7 @@ protected:
 
     operator float() const
     {
-    #if CV_AVX2
+    #if CV_FP16
         float f;
         _mm_store_ss(&f, _mm_cvtph_ps(_mm_cvtsi32_si128(w)));
         return f;
diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp b/modules/core/include/opencv2/core/hal/intrin_avx.hpp
index 5dc5bb567d..54e8927192 100644
--- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp
@@ -3121,18 +3121,39 @@ OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float32x8, float, f32, v_uint32x8, un
 OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int64x4, int64, s64, v_uint64x4, uint64, u64)
 OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float64x4, double, f64, v_uint64x4, uint64, u64)
 
+//
 // FP16
+//
+
 inline v_float32x8 v256_load_expand(const float16_t* ptr)
 {
+#if CV_FP16
     return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr)));
+#else
+    float CV_DECL_ALIGNED(32) buf[8];
+    for (int i = 0; i < 8; i++)
+        buf[i] = (float)ptr[i];
+    return v256_load_aligned(buf);
+#endif
 }
 
 inline void v_pack_store(float16_t* ptr, const v_float32x8& a)
 {
+#if CV_FP16
     __m128i ah = _mm256_cvtps_ph(a.val, 0);
     _mm_storeu_si128((__m128i*)ptr, ah);
+#else
+    float CV_DECL_ALIGNED(32) buf[8];
+    v_store_aligned(buf, a);
+    for (int i = 0; i < 8; i++)
+        ptr[i] = float16_t(buf[i]);
+#endif
 }
 
+//
+// end of FP16
+//
+
 inline void v256_cleanup() { _mm256_zeroall(); }
 
 CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
diff --git a/modules/core/src/convert.simd.hpp b/modules/core/src/convert.simd.hpp
index a16a1a8405..4af5533870 100644
--- a/modules/core/src/convert.simd.hpp
+++ b/modules/core/src/convert.simd.hpp
@@ -5,6 +5,11 @@
 #include "precomp.hpp"
 #include "convert.hpp"
 
+#if !defined(OPENCV_SUPRESS_WARNING_AVX2_WITHOUT_FP16C) && \
+    (defined(__GNUC__) && defined(__AVX2__) && !defined(__F16C__))
+#warning "Non-optimal compiler flags: AVX2 without FP16. Generated code is very slow. Consider adding the '-mf16c' compiler option."
+#endif
+
 namespace cv {
 CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
 
diff --git a/modules/core/test/test_intrin.cpp b/modules/core/test/test_intrin.cpp
index 321fa64264..71d61e14e0 100644
--- a/modules/core/test/test_intrin.cpp
+++ b/modules/core/test/test_intrin.cpp
@@ -126,9 +126,11 @@ DEFINE_SIMD_TESTS(256, AVX512_SKX)
 
 TEST(hal_intrin256, float16x16_FP16)
 {
+#if CV_TRY_FP16
     //CV_CPU_CALL_FP16_(test_hal_intrin_float16, ());
     CV_CPU_CALL_AVX2_(test_hal_intrin_float16, ());
-    throw SkipTestException("Unsupported hardware: FP16 is not available");
+#endif
+    throw SkipTestException("Unsupported: FP16 is not available");
 }
 
 
@@ -142,8 +144,10 @@ namespace intrin512 {
 
 TEST(hal_intrin512, float16x32_FP16)
 {
+#if CV_TRY_FP16
     CV_CPU_CALL_AVX512_SKX_(test_hal_intrin_float16, ());
-    throw SkipTestException("Unsupported hardware: FP16 is not available");
+#endif
+    throw SkipTestException("Unsupported: FP16 is not available");
 }
 
 
diff --git a/modules/core/test/test_intrin_utils.hpp b/modules/core/test/test_intrin_utils.hpp
index 6731091463..84da496b42 100644
--- a/modules/core/test/test_intrin_utils.hpp
+++ b/modules/core/test/test_intrin_utils.hpp
@@ -1902,21 +1902,21 @@ void test_hal_intrin_float64()
 #endif
 }
 
-#if CV_FP16
 void test_hal_intrin_float16()
 {
     DUMP_ENTRY(v_float16);
 #if CV_FP16
     TheTest<v_float32>()
         .test_loadstore_fp16_f32()
-#endif
 #if CV_SIMD_FP16
         .test_loadstore_fp16()
         .test_float_cvt_fp16()
 #endif
         ;
-}
+#else
+    std::cout << "SKIP: CV_FP16 is not available" << std::endl;
 #endif
+}
 
 /*#if defined(CV_CPU_DISPATCH_MODE_FP16) && CV_CPU_DISPATCH_MODE == FP16
 void test_hal_intrin_float16()

From b866d0dc388dd9705c1171cfbbe16e7aebe1f84c Mon Sep 17 00:00:00 2001
From: Dmitry Matveev <dmitry.matveev@intel.com>
Date: Tue, 17 Nov 2020 17:04:19 +0300
Subject: [PATCH 110/152] Merge pull request #18793 from
 dmatveev:dm/in_graph_metadata

G-API: Introduce runtime in-graph metadata (usage sketch below)

* G-API: In-graph metadata -- initial implementation

* G-API: Finish the in-graph metadata implementation for Streaming

* G-API: Fix standalone build & warnings for in-graph metadata

* G-API: In-graph meta -- fixed review comments

* G-API: Fix issues with desync causing failing tests
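
A rough pipeline sketch using the new accessors from streaming/meta.hpp
(the video path is a placeholder):

```cpp
#include <cstdint>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/gapi/streaming/meta.hpp>

int main()
{
    cv::GMat in;
    cv::GMat out = cv::gapi::copy(in);
    cv::GOpaque<int64_t> id = cv::gapi::streaming::seq_id(in);
    cv::GOpaque<int64_t> ts = cv::gapi::streaming::timestamp(in);

    auto pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out, id, ts))
                        .compileStreaming();
    pipeline.setSource(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>("video.avi"));
    pipeline.start();

    cv::Mat frame;
    int64_t seq = 0, stamp = 0;
    while (pipeline.pull(cv::gout(frame, seq, stamp)))
    {
        // seq is the frame's sequence number; stamp is its capture timestamp
        // (microseconds), both tagged by the source and carried through the graph.
    }
    return 0;
}
```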
---
 modules/gapi/CMakeLists.txt                   |  10 +-
 modules/gapi/include/opencv2/gapi/garg.hpp    |  67 +++++-
 modules/gapi/include/opencv2/gapi/gopaque.hpp |  13 ++
 .../include/opencv2/gapi/streaming/cap.hpp    |  30 ++-
 .../include/opencv2/gapi/streaming/meta.hpp   |  79 +++++++
 modules/gapi/src/api/gbackend.cpp             |  38 +++-
 modules/gapi/src/api/grunarg.cpp              |  33 +++
 modules/gapi/src/backends/common/gbackend.hpp |  11 +
 .../gapi/src/backends/common/gmetabackend.cpp | 105 ++++++++++
 .../gapi/src/backends/common/gmetabackend.hpp |  16 ++
 modules/gapi/src/compiler/gcompiler.cpp       |   4 +-
 modules/gapi/src/compiler/gislandmodel.cpp    |  35 +++-
 modules/gapi/src/compiler/gislandmodel.hpp    |   4 +
 modules/gapi/src/executor/gexecutor.cpp       |  53 ++++-
 .../gapi/src/executor/gstreamingexecutor.cpp  |  43 +++-
 modules/gapi/test/gapi_graph_meta_tests.cpp   | 195 ++++++++++++++++++
 16 files changed, 681 insertions(+), 55 deletions(-)
 create mode 100644 modules/gapi/include/opencv2/gapi/streaming/meta.hpp
 create mode 100644 modules/gapi/src/api/grunarg.cpp
 create mode 100644 modules/gapi/src/backends/common/gmetabackend.cpp
 create mode 100644 modules/gapi/src/backends/common/gmetabackend.hpp
 create mode 100644 modules/gapi/test/gapi_graph_meta_tests.cpp

diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index d95f255951..ee275fe1af 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -57,6 +57,7 @@ file(GLOB gapi_ext_hdrs
 
 set(gapi_srcs
     # Front-end part
+    src/api/grunarg.cpp
     src/api/gorigin.cpp
     src/api/gmat.cpp
     src/api/garray.cpp
@@ -131,18 +132,19 @@ set(gapi_srcs
     src/backends/ie/giebackend.cpp
     src/backends/ie/giebackend/giewrapper.cpp
 
-    # ONNX Backend.
+    # ONNX backend
     src/backends/onnx/gonnxbackend.cpp
 
-    # Render Backend.
+    # Render backend
     src/backends/render/grenderocv.cpp
     src/backends/render/ft_render.cpp
 
-    #PlaidML Backend
+    # PlaidML Backend
     src/backends/plaidml/gplaidmlcore.cpp
     src/backends/plaidml/gplaidmlbackend.cpp
 
-    # Compound
+    # Common backend code
+    src/backends/common/gmetabackend.cpp
     src/backends/common/gcompoundbackend.cpp
     src/backends/common/gcompoundkernel.cpp
 
diff --git a/modules/gapi/include/opencv2/gapi/garg.hpp b/modules/gapi/include/opencv2/gapi/garg.hpp
index 67ce0d990c..0838573b56 100644
--- a/modules/gapi/include/opencv2/gapi/garg.hpp
+++ b/modules/gapi/include/opencv2/gapi/garg.hpp
@@ -9,12 +9,14 @@
 #define OPENCV_GAPI_GARG_HPP
 
 #include <vector>
+#include <unordered_map>
 #include <type_traits>
 
 #include <opencv2/gapi/opencv_includes.hpp>
 #include <opencv2/gapi/own/mat.hpp>
 #include <opencv2/gapi/media.hpp>
 
+#include <opencv2/gapi/util/util.hpp>
 #include <opencv2/gapi/util/any.hpp>
 #include <opencv2/gapi/util/variant.hpp>
 
@@ -93,7 +95,7 @@ using GArgs = std::vector<GArg>;
 
 // FIXME: Express as M<GProtoArg...>::type
 // FIXME: Move to a separate file!
-using GRunArg  = util::variant<
+using GRunArgBase  = util::variant<
 #if !defined(GAPI_STANDALONE)
     cv::UMat,
 #endif // !defined(GAPI_STANDALONE)
@@ -105,6 +107,61 @@ using GRunArg  = util::variant<
     cv::detail::OpaqueRef,
     cv::MediaFrame
     >;
+
+namespace detail {
+template<typename,typename>
+struct in_variant;
+
+template<typename T, typename... Types>
+struct in_variant<T, util::variant<Types...> >
+    : std::integral_constant<bool, cv::detail::contains<T, Types...>::value > {
+};
+} // namespace detail
+
+struct GAPI_EXPORTS GRunArg: public GRunArgBase
+{
+    // Metadata information here
+    using Meta = std::unordered_map<std::string, util::any>;
+    Meta meta;
+
+    // Mimic the old GRunArg semantics here, from the times when
+    // GRunArg was an alias to variant<>
+    GRunArg();
+    GRunArg(const cv::GRunArg &arg);
+    GRunArg(cv::GRunArg &&arg);
+
+    GRunArg& operator= (const GRunArg &arg);
+    GRunArg& operator= (GRunArg &&arg);
+
+    template <typename T>
+    GRunArg(const T &t,
+            const Meta &m = Meta{},
+            typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, int>::type = 0)
+        : GRunArgBase(t)
+        , meta(m)
+    {
+    }
+    template <typename T>
+    GRunArg(T &&t,
+            const Meta &m = Meta{},
+            typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, int>::type = 0)
+        : GRunArgBase(std::move(t))
+        , meta(m)
+    {
+    }
+    template <typename T> auto operator= (const T &t)
+        -> typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, cv::GRunArg>::type&
+    {
+        GRunArgBase::operator=(t);
+        return *this;
+    }
+    template <typename T> auto operator= (T&& t)
+        -> typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, cv::GRunArg>::type&
+    {
+        GRunArgBase::operator=(std::move(t));
+        return *this;
+    }
+};
 using GRunArgs = std::vector<GRunArg>;
 
 // TODO: Think about the addition operator
@@ -129,11 +186,13 @@ namespace gapi
 namespace wip
 {
 /**
- * @brief This aggregate type represents all types which G-API can handle (via variant).
+ * @brief This aggregate type represents all types which G-API can
+ * handle (via variant).
  *
- * It only exists to overcome C++ language limitations (where a `using`-defined class can't be forward-declared).
+ * It only exists to overcome C++ language limitations (where a
+ * `using`-defined class can't be forward-declared).
  */
-struct Data: public GRunArg
+struct GAPI_EXPORTS Data: public GRunArg
 {
     using GRunArg::GRunArg;
     template <typename T>
diff --git a/modules/gapi/include/opencv2/gapi/gopaque.hpp b/modules/gapi/include/opencv2/gapi/gopaque.hpp
index 6ab28910d6..6117971768 100644
--- a/modules/gapi/include/opencv2/gapi/gopaque.hpp
+++ b/modules/gapi/include/opencv2/gapi/gopaque.hpp
@@ -15,6 +15,7 @@
 #include <opencv2/gapi/own/exports.hpp>
 #include <opencv2/gapi/opencv_includes.hpp>
 
+#include <opencv2/gapi/util/any.hpp>
 #include <opencv2/gapi/util/variant.hpp>
 #include <opencv2/gapi/util/throw.hpp>
 #include <opencv2/gapi/util/type_traits.hpp>
@@ -119,6 +120,7 @@ namespace detail
 
         virtual void mov(BasicOpaqueRef &ref) = 0;
         virtual const void* ptr() const = 0;
+        virtual void set(const cv::util::any &a) = 0;
     };
 
     template<typename T> class OpaqueRefT final: public BasicOpaqueRef
@@ -212,6 +214,10 @@ namespace detail
         }
 
         virtual const void* ptr() const override { return &rref(); }
+
+        virtual void set(const cv::util::any &a) override {
+            wref() = util::any_cast<T>(a);
+        }
     };
 
     // This class strips type information from OpaqueRefT<> and makes it usable
@@ -285,6 +291,13 @@ namespace detail
 
         // May be used to uniquely identify this object internally
         const void *ptr() const { return m_ref->ptr(); }
+
+        // Introduced for in-graph meta handling
+        OpaqueRef& operator= (const cv::util::any &a)
+        {
+            m_ref->set(a);
+            return *this;
+        }
     };
 } // namespace detail
 
diff --git a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp
index 9781ef1ffb..aad6af618c 100644
--- a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp
+++ b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp
@@ -21,9 +21,11 @@
  * Note for developers: please don't put videoio dependency in G-API
  * because of this file.
  */
+#include <chrono>
 
 #include <opencv2/videoio.hpp>
 #include <opencv2/gapi/garg.hpp>
+#include <opencv2/gapi/streaming/meta.hpp>
 
 namespace cv {
 namespace gapi {
@@ -55,6 +57,7 @@ protected:
     cv::VideoCapture cap;
     cv::Mat first;
     bool first_pulled = false;
+    int64_t counter = 0;
 
     void prep()
     {
@@ -80,19 +83,26 @@ protected:
             GAPI_Assert(!first.empty());
             first_pulled = true;
             data = first; // no need to clone here since it was cloned already
-            return true;
         }
-
-        if (!cap.isOpened()) return false;
-
-        cv::Mat frame;
-        if (!cap.read(frame))
+        else
         {
-            // end-of-stream happened
-            return false;
+            if (!cap.isOpened()) return false;
+
+            cv::Mat frame;
+            if (!cap.read(frame))
+            {
+                // end-of-stream happened
+                return false;
+            }
+            // Same reason to clone as in prep()
+            data = frame.clone();
         }
-        // Same reason to clone as in prep()
-        data = frame.clone();
+        // Tag data with seq_id/ts
+        const auto now = std::chrono::system_clock::now();
+        const auto dur = std::chrono::duration_cast<std::chrono::microseconds>
+            (now.time_since_epoch());
+        data.meta[cv::gapi::streaming::meta_tag::timestamp] = int64_t{dur.count()};
+        data.meta[cv::gapi::streaming::meta_tag::seq_id]    = int64_t{counter++};
         return true;
     }
 
diff --git a/modules/gapi/include/opencv2/gapi/streaming/meta.hpp b/modules/gapi/include/opencv2/gapi/streaming/meta.hpp
new file mode 100644
index 0000000000..cbcfc3aa37
--- /dev/null
+++ b/modules/gapi/include/opencv2/gapi/streaming/meta.hpp
@@ -0,0 +1,79 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+
+#ifndef OPENCV_GAPI_GSTREAMING_META_HPP
+#define OPENCV_GAPI_GSTREAMING_META_HPP
+
+#include <opencv2/gapi/gopaque.hpp>
+#include <opencv2/gapi/gcall.hpp>
+#include <opencv2/gapi/gkernel.hpp>
+#include <opencv2/gapi/gtype_traits.hpp>
+
+namespace cv {
+namespace gapi {
+namespace streaming {
+
+// FIXME: the name is debatable
+namespace meta_tag {
+static constexpr const char * timestamp = "org.opencv.gapi.meta.timestamp";
+static constexpr const char * seq_id    = "org.opencv.gapi.meta.seq_id";
+} // namespace meta_tag
+
+namespace detail {
+struct GMeta {
+    static const char *id() {
+        return "org.opencv.streaming.meta";
+    }
+    // A universal yield for meta(), same as in GDesync
+    template<typename... R, int... IIs>
+    static std::tuple<R...> yield(cv::GCall &call, cv::detail::Seq<IIs...>) {
+        return std::make_tuple(cv::detail::Yield<R>::yield(call, IIs)...);
+    }
+    // Also a universal outMeta stub here
+    static GMetaArgs getOutMeta(const GMetaArgs &args, const GArgs &) {
+        return args;
+    }
+};
+} // namespace detail
+
+template<typename T, typename G>
+cv::GOpaque<T> meta(G g, const std::string &tag) {
+    using O = cv::GOpaque<T>;
+    cv::GKernel k{
+          detail::GMeta::id()                    // kernel id
+        , tag                                    // kernel tag. Use meta tag here
+        , &detail::GMeta::getOutMeta             // outMeta callback
+        , {cv::detail::GTypeTraits<O>::shape}    // output Shape
+        , {cv::detail::GTypeTraits<G>::op_kind}  // input data kinds
+        , {cv::detail::GObtainCtor<O>::get()}    // output template ctors
+    };
+    cv::GCall call(std::move(k));
+    call.pass(g);
+    return std::get<0>(detail::GMeta::yield<O>(call, cv::detail::MkSeq<1>::type()));
+}
+
+template<typename G>
+cv::GOpaque<int64_t> timestamp(G g) {
+    return meta<int64_t>(g, meta_tag::timestamp);
+}
+
+template<typename G>
+cv::GOpaque<int64_t> seq_id(G g) {
+    return meta<int64_t>(g, meta_tag::seq_id);
+}
+
+template<typename G>
+cv::GOpaque<int64_t> seqNo(G g) {
+    // Old name, compatibility only
+    return seq_id(g);
+}
+
+} // namespace streaming
+} // namespace gapi
+} // namespace cv
+
+#endif // OPENCV_GAPI_GSTREAMING_META_HPP
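A usage sketch for the accessors declared in this header (it mirrors the tests added later in this patch; cv::gapi::blur is only an example operation):

```cpp
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/streaming/meta.hpp>

cv::GComputation make_graph() {
    cv::GMat in;
    cv::GMat blurred        = cv::gapi::blur(in, cv::Size(3,3));
    // seq_id/timestamp of the frame "blurred" was computed from:
    cv::GOpaque<int64_t> id = cv::gapi::streaming::seq_id(blurred);
    cv::GOpaque<int64_t> ts = cv::gapi::streaming::timestamp(blurred);
    return cv::GComputation(cv::GIn(in), cv::GOut(blurred, id, ts));
}
```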
diff --git a/modules/gapi/src/api/gbackend.cpp b/modules/gapi/src/api/gbackend.cpp
index 6b8d0fcbee..fd4a5eb38b 100644
--- a/modules/gapi/src/api/gbackend.cpp
+++ b/modules/gapi/src/api/gbackend.cpp
@@ -143,6 +143,14 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handle
         if (handleRMat == HandleRMat::SKIP) return;
         GAPI_Assert(arg.index() == GRunArg::index_of<cv::RMat>());
         bindRMat(mag, rc, util::get<cv::RMat>(arg), RMat::Access::R);
+
+        // FIXME: Here meta may (in fact, WILL) be copied multiple times!
+        // Replace it with a reference-counted object?
+        mag.meta<cv::RMat>()[rc.id] = arg.meta;
+        mag.meta<cv::Mat>()[rc.id] = arg.meta;
+#if !defined(GAPI_STANDALONE)
+        mag.meta<cv::UMat>()[rc.id] = arg.meta;
+#endif
         break;
     }
 
@@ -154,19 +162,23 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handle
         case GRunArg::index_of<cv::Scalar>() : mag_scalar = util::get<cv::Scalar>(arg);    break;
         default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
         }
+        mag.meta<cv::Scalar>()[rc.id] = arg.meta;
         break;
     }
 
     case GShape::GARRAY:
-        mag.template slot<cv::detail::VectorRef>()[rc.id] = util::get<cv::detail::VectorRef>(arg);
+        mag.slot<cv::detail::VectorRef>()[rc.id] = util::get<cv::detail::VectorRef>(arg);
+        mag.meta<cv::detail::VectorRef>()[rc.id] = arg.meta;
         break;
 
     case GShape::GOPAQUE:
-        mag.template slot<cv::detail::OpaqueRef>()[rc.id] = util::get<cv::detail::OpaqueRef>(arg);
+        mag.slot<cv::detail::OpaqueRef>()[rc.id] = util::get<cv::detail::OpaqueRef>(arg);
+        mag.meta<cv::detail::OpaqueRef>()[rc.id] = arg.meta;
         break;
 
     case GShape::GFRAME:
-        mag.template slot<cv::MediaFrame>()[rc.id] = util::get<cv::MediaFrame>(arg);
+        mag.slot<cv::MediaFrame>()[rc.id] = util::get<cv::MediaFrame>(arg);
+        mag.meta<cv::MediaFrame>()[rc.id] = arg.meta;
         break;
 
     default:
@@ -250,13 +262,23 @@ cv::GRunArg getArg(const Mag& mag, const RcDesc &ref)
     // Wrap associated CPU object (either host or an internal one)
     switch (ref.shape)
     {
-    case GShape::GMAT:    return GRunArg(mag.template slot<cv::RMat>().at(ref.id));
-    case GShape::GSCALAR: return GRunArg(mag.template slot<cv::Scalar>().at(ref.id));
+    case GShape::GMAT:
+        return GRunArg(mag.slot<cv::RMat>().at(ref.id),
+                       mag.meta<cv::RMat>().at(ref.id));
+    case GShape::GSCALAR:
+        return GRunArg(mag.slot<cv::Scalar>().at(ref.id),
+                       mag.meta<cv::Scalar>().at(ref.id));
     // Note: .at() is intentional for GArray and GOpaque as objects MUST be already there
     //   (and constructed by either bindIn/Out or resetInternal)
-    case GShape::GARRAY:  return GRunArg(mag.template slot<cv::detail::VectorRef>().at(ref.id));
-    case GShape::GOPAQUE: return GRunArg(mag.template slot<cv::detail::OpaqueRef>().at(ref.id));
-    case GShape::GFRAME:  return GRunArg(mag.template slot<cv::MediaFrame>().at(ref.id));
+    case GShape::GARRAY:
+        return GRunArg(mag.slot<cv::detail::VectorRef>().at(ref.id),
+                       mag.meta<cv::detail::VectorRef>().at(ref.id));
+    case GShape::GOPAQUE:
+        return GRunArg(mag.slot<cv::detail::OpaqueRef>().at(ref.id),
+                       mag.meta<cv::detail::OpaqueRef>().at(ref.id));
+    case GShape::GFRAME:
+        return GRunArg(mag.slot<cv::MediaFrame>().at(ref.id),
+                       mag.meta<cv::MediaFrame>().at(ref.id));
     default:
         util::throw_error(std::logic_error("Unsupported GShape type"));
         break;
diff --git a/modules/gapi/src/api/grunarg.cpp b/modules/gapi/src/api/grunarg.cpp
new file mode 100644
index 0000000000..30ae2adbc0
--- /dev/null
+++ b/modules/gapi/src/api/grunarg.cpp
@@ -0,0 +1,33 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "precomp.hpp"
+#include <opencv2/gapi/garg.hpp>
+
+cv::GRunArg::GRunArg() {
+}
+
+cv::GRunArg::GRunArg(const cv::GRunArg &arg)
+    : cv::GRunArgBase(static_cast<const cv::GRunArgBase&>(arg))
+    , meta(arg.meta) {
+}
+
+cv::GRunArg::GRunArg(cv::GRunArg &&arg)
+    : cv::GRunArgBase(static_cast<cv::GRunArgBase&&>(arg)) // move the base; a const-qualified move would copy
+    , meta(std::move(arg.meta)) {
+}
+
+cv::GRunArg& cv::GRunArg::operator= (const cv::GRunArg &arg) {
+    cv::GRunArgBase::operator=(static_cast<const cv::GRunArgBase&>(arg));
+    meta = arg.meta;
+    return *this;
+}
+
+cv::GRunArg& cv::GRunArg::operator= (cv::GRunArg &&arg) {
+    cv::GRunArgBase::operator=(static_cast<cv::GRunArgBase&&>(arg)); // move, not copy, the base
+    meta = std::move(arg.meta);
+    return *this;
+}
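A quick behavioral sketch of what these members provide: the payload and the meta map travel together on copy and move. The tag string and values below are purely illustrative:

```cpp
#include <opencv2/gapi/garg.hpp>
#include <opencv2/core.hpp>

void copy_keeps_meta() {
    cv::Mat m = cv::Mat::eye(4, 4, CV_8UC1);
    cv::GRunArg a{m};
    a.meta["org.opencv.gapi.meta.seq_id"] = int64_t{7};

    cv::GRunArg b = a;  // copy ctor above: copies both the payload and meta
    // b.meta now holds the same seq_id entry as a.meta
}
```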
diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp
index 4914715fa7..576168db53 100644
--- a/modules/gapi/src/backends/common/gbackend.hpp
+++ b/modules/gapi/src/backends/common/gbackend.hpp
@@ -62,6 +62,8 @@ namespace magazine {
     template<typename... Ts> struct Class
     {
         template<typename T> using MapT = std::unordered_map<int, T>;
+        using MapM = std::unordered_map<int, GRunArg::Meta>;
+
         template<typename T>       MapT<T>& slot()
         {
             return std::get<ade::util::type_list_index<T, Ts...>::value>(slots);
@@ -70,8 +72,17 @@ namespace magazine {
         {
             return std::get<ade::util::type_list_index<T, Ts...>::value>(slots);
         }
+        template<typename T> MapM& meta()
+        {
+            return metas[ade::util::type_list_index<T, Ts...>::value];
+        }
+        template<typename T> const MapM& meta() const
+        {
+            return metas[ade::util::type_list_index<T, Ts...>::value];
+        }
     private:
         std::tuple<MapT<Ts>...> slots;
+        std::array<MapM, sizeof...(Ts)> metas;
     };
 
 } // namespace magazine
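The storage added above keeps one meta map per stored type, located at compile time by the type's position in the parameter pack (ade::util::type_list_index in the real code). A standalone model of the same layout with a hand-rolled index finder; the int value type stands in for GRunArg::Meta's util::any:

```cpp
#include <array>
#include <cstddef>
#include <string>
#include <tuple>
#include <unordered_map>

// Compile-time position of T within Ts... (analog of type_list_index).
template<typename T, typename... Ts> struct index_of;
template<typename T, typename... Ts>
struct index_of<T, T, Ts...> { static constexpr std::size_t value = 0; };
template<typename T, typename U, typename... Ts>
struct index_of<T, U, Ts...> { static constexpr std::size_t value = 1 + index_of<T, Ts...>::value; };

template<typename... Ts> struct Magazine {
    using Meta = std::unordered_map<std::string, int>; // stand-in for GRunArg::Meta
    template<typename T> std::unordered_map<int, T>& slot()
        { return std::get<index_of<T, Ts...>::value>(slots); }
    template<typename T> std::unordered_map<int, Meta>& meta()
        { return metas[index_of<T, Ts...>::value]; }
private:
    std::tuple<std::unordered_map<int, Ts>...> slots;
    std::array<std::unordered_map<int, Meta>, sizeof...(Ts)> metas;
};

int main() {
    Magazine<int, double> mag;
    mag.slot<double>()[3] = 2.5;       // the double slot, picked by type
    mag.meta<double>()[3]["tag"] = 7;  // its meta, stored at the same index
    return 0;
}
```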
diff --git a/modules/gapi/src/backends/common/gmetabackend.cpp b/modules/gapi/src/backends/common/gmetabackend.cpp
new file mode 100644
index 0000000000..5364152b65
--- /dev/null
+++ b/modules/gapi/src/backends/common/gmetabackend.cpp
@@ -0,0 +1,105 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "precomp.hpp"
+
+#include <opencv2/gapi/gcommon.hpp>        // compile args
+#include <opencv2/gapi/util/any.hpp>       // any
+#include <opencv2/gapi/streaming/meta.hpp> // GMeta
+
+#include "compiler/gobjref.hpp"            // RcDesc
+#include "compiler/gmodel.hpp"             // GModel, Op
+#include "backends/common/gbackend.hpp"
+#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
+
+#include "backends/common/gmetabackend.hpp"
+
+namespace {
+
+class GraphMetaExecutable final: public cv::gimpl::GIslandExecutable {
+    std::string m_meta_tag;
+
+public:
+    GraphMetaExecutable(const ade::Graph& g,
+                        const std::vector<ade::NodeHandle>& nodes);
+    bool canReshape() const override;
+    void reshape(ade::Graph&, const cv::GCompileArgs&) override;
+
+    void run(std::vector<InObj> &&input_objs,
+             std::vector<OutObj> &&output_objs) override;
+};
+
+bool GraphMetaExecutable::canReshape() const {
+    return true;
+}
+void GraphMetaExecutable::reshape(ade::Graph&, const cv::GCompileArgs&) {
+    // do nothing here
+}
+
+GraphMetaExecutable::GraphMetaExecutable(const ade::Graph& g,
+                                         const std::vector<ade::NodeHandle>& nodes) {
+    // There must be exactly one node in the graph
+    GAPI_Assert(nodes.size() == 1u);
+
+    cv::gimpl::GModel::ConstGraph cg(g);
+    const auto &op = cg.metadata(nodes[0]).get<cv::gimpl::Op>();
+    GAPI_Assert(op.k.name == cv::gapi::streaming::detail::GMeta::id());
+    m_meta_tag = op.k.tag;
+}
+
+void GraphMetaExecutable::run(std::vector<InObj>  &&input_objs,
+                              std::vector<OutObj> &&output_objs) {
+    GAPI_Assert(input_objs.size() == 1u);
+    GAPI_Assert(output_objs.size() == 1u);
+
+    const cv::GRunArg in_arg = input_objs[0].second;
+    cv::GRunArgP out_arg = output_objs[0].second;
+
+    auto it = in_arg.meta.find(m_meta_tag);
+    if (it == in_arg.meta.end()) {
+        cv::util::throw_error
+            (std::logic_error("Run-time meta "
+                              + m_meta_tag
+                              + " is not found in object "
+                              + std::to_string(static_cast<int>(input_objs[0].first.shape))
+                              + "/"
+                              + std::to_string(input_objs[0].first.id)));
+    }
+    cv::util::get<cv::detail::OpaqueRef>(out_arg) = it->second;
+}
+
+class GraphMetaBackendImpl final: public cv::gapi::GBackend::Priv {
+    virtual void unpackKernel(ade::Graph            &,
+                              const ade::NodeHandle &,
+                              const cv::GKernelImpl &) override {
+        // Do nothing here
+    }
+
+    virtual EPtr compile(const ade::Graph& graph,
+                         const cv::GCompileArgs&,
+                         const std::vector<ade::NodeHandle>& nodes,
+                         const std::vector<cv::gimpl::Data>&,
+                         const std::vector<cv::gimpl::Data>&) const override {
+        return EPtr{new GraphMetaExecutable(graph, nodes)};
+    }
+};
+
+cv::gapi::GBackend graph_meta_backend() {
+    static cv::gapi::GBackend this_backend(std::make_shared<GraphMetaBackendImpl>());
+    return this_backend;
+}
+
+struct InGraphMetaKernel final: public cv::detail::KernelTag {
+    using API = cv::gapi::streaming::detail::GMeta;
+    static cv::gapi::GBackend backend() { return graph_meta_backend(); }
+    static int                kernel()  { return 42; }
+};
+
+} // anonymous namespace
+
+cv::gapi::GKernelPackage cv::gimpl::meta::kernels() {
+    return cv::gapi::kernels<InGraphMetaKernel>();
+}
diff --git a/modules/gapi/src/backends/common/gmetabackend.hpp b/modules/gapi/src/backends/common/gmetabackend.hpp
new file mode 100644
index 0000000000..56f61d0e3d
--- /dev/null
+++ b/modules/gapi/src/backends/common/gmetabackend.hpp
@@ -0,0 +1,16 @@
+#ifndef OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP
+#define OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP
+
+#include <opencv2/gapi/gkernel.hpp>
+
+namespace cv {
+namespace gimpl {
+namespace meta {
+
+cv::gapi::GKernelPackage kernels();
+
+} // namespace meta
+} // namespace gimpl
+} // namespace cv
+
+#endif // OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP
diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp
index eb75f44e0e..f6fa398c17 100644
--- a/modules/gapi/src/compiler/gcompiler.cpp
+++ b/modules/gapi/src/compiler/gcompiler.cpp
@@ -35,6 +35,7 @@
 #include "executor/gexecutor.hpp"
 #include "executor/gstreamingexecutor.hpp"
 #include "backends/common/gbackend.hpp"
+#include "backends/common/gmetabackend.hpp"
 
 // <FIXME:>
 #if !defined(GAPI_STANDALONE)
@@ -58,7 +59,8 @@ namespace
             for (const auto &b : pkg.backends()) {
                 aux_pkg = combine(aux_pkg, b.priv().auxiliaryKernels());
             }
-            return combine(pkg, aux_pkg);
+            // Always include built-in meta<> implementation
+            return combine(pkg, aux_pkg, cv::gimpl::meta::kernels());
         };
 
         auto has_use_only = cv::gapi::getCompileArg<cv::gapi::use_only>(args);
diff --git a/modules/gapi/src/compiler/gislandmodel.cpp b/modules/gapi/src/compiler/gislandmodel.cpp
index 9ffc605372..4d0feaea71 100644
--- a/modules/gapi/src/compiler/gislandmodel.cpp
+++ b/modules/gapi/src/compiler/gislandmodel.cpp
@@ -357,26 +357,21 @@ void GIslandExecutable::run(GIslandExecutable::IInput &in, GIslandExecutable::IO
     for (auto &&it: ade::util::zip(ade::util::toRange(in_desc),
                                    ade::util::toRange(in_vector)))
     {
-        // FIXME: Not every Island expects a cv::Mat instead of own::Mat on input
-        // This kludge should go as a result of de-ownification
         const cv::GRunArg& in_data_orig = std::get<1>(it);
         cv::GRunArg in_data;
-#if !defined(GAPI_STANDALONE)
         switch (in_data_orig.index())
         {
         case cv::GRunArg::index_of<cv::Mat>():
-            in_data = cv::GRunArg{cv::make_rmat<cv::gimpl::RMatAdapter>(cv::util::get<cv::Mat>(in_data_orig))};
-            break;
-        case cv::GRunArg::index_of<cv::Scalar>():
-            in_data = cv::GRunArg{(cv::util::get<cv::Scalar>(in_data_orig))};
+            // FIXME: This whole construct is ugly, from the way
+            // it is written to the very need for it in general
+            in_data = cv::GRunArg{ cv::make_rmat<cv::gimpl::RMatAdapter>(cv::util::get<cv::Mat>(in_data_orig))
+                                 , in_data_orig.meta
+                                 };
             break;
         default:
             in_data = in_data_orig;
             break;
         }
-#else
-        in_data = in_data_orig;
-#endif // GAPI_STANDALONE
         in_objs.emplace_back(std::get<0>(it), std::move(in_data));
     }
     for (auto &&it: ade::util::indexed(ade::util::toRange(out_desc)))
@@ -385,9 +380,27 @@ void GIslandExecutable::run(GIslandExecutable::IInput &in, GIslandExecutable::IO
                               out.get(ade::util::checked_cast<int>(ade::util::index(it))));
     }
     run(std::move(in_objs), std::move(out_objs));
+
+    // Propagate in-graph meta down the graph
+    // Note: this is not a complete implementation! For now this is a stub,
+    // and the proper implementation should come later.
+    //
+    // Propagating the meta information here has its pros and cons.
+    // Pros: it works here uniformly for both regular and streaming cases,
+    //   also for the majority of old-fashioned (synchronous) backends
+    // Cons: backends implementing the asynchronous run(IInput,IOutput)
+    //   won't get it out of the box
+    cv::GRunArg::Meta stub_meta;
+    for (auto &&in_arg : in_vector)
+    {
+        stub_meta.insert(in_arg.meta.begin(), in_arg.meta.end());
+    }
+    // Report output objects as "ready" to the executor, also post
+    // calculated in-graph meta for the objects
     for (auto &&it: out_objs)
     {
-        out.post(std::move(it.second)); // report output objects as "ready" to the executor
+        out.meta(it.second, stub_meta);
+        out.post(std::move(it.second));
     }
 }
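One subtlety in the merge above: std::unordered_map::insert() never overwrites an existing key, so when several inputs carry the same meta key, the value from the first input in in_vector wins. A standalone check of exactly that semantics (int stands in for the util::any values of GRunArg::Meta):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>

int main() {
    std::unordered_map<std::string, int> merged;
    const std::unordered_map<std::string, int> in1{{"seq_id", 1}};
    const std::unordered_map<std::string, int> in2{{"seq_id", 2}, {"ts", 7}};
    merged.insert(in1.begin(), in1.end());
    merged.insert(in2.begin(), in2.end()); // "seq_id" collision is ignored
    assert(merged.at("seq_id") == 1);      // first writer wins
    assert(merged.at("ts") == 7);
    return 0;
}
```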
 
diff --git a/modules/gapi/src/compiler/gislandmodel.hpp b/modules/gapi/src/compiler/gislandmodel.hpp
index c2e7b96d45..e8eb73692b 100644
--- a/modules/gapi/src/compiler/gislandmodel.hpp
+++ b/modules/gapi/src/compiler/gislandmodel.hpp
@@ -172,6 +172,10 @@ struct GIslandExecutable::IOutput: public GIslandExecutable::IODesc {
     virtual GRunArgP get(int idx) = 0;  // Allocate (wrap) a new data object for output idx
     virtual void post(GRunArgP&&) = 0;  // Release the object back to the framework (mark available)
     virtual void post(EndOfStream&&) = 0; // Post end-of-stream marker back to the framework
+
+    // Assign accumulated metadata to the given output object.
+    // This method can only be called after get() and before post().
+    virtual void meta(const GRunArgP&, const GRunArg::Meta &) = 0;
 };
 
 // GIslandEmitter - a backend-specific thing which feeds data into
diff --git a/modules/gapi/src/executor/gexecutor.cpp b/modules/gapi/src/executor/gexecutor.cpp
index d9f5cfafe6..66f3b24771 100644
--- a/modules/gapi/src/executor/gexecutor.cpp
+++ b/modules/gapi/src/executor/gexecutor.cpp
@@ -12,6 +12,8 @@
 #include <ade/util/zip_range.hpp>
 
 #include <opencv2/gapi/opencv_includes.hpp>
+
+#include "api/gproto_priv.hpp" // ptr(GRunArgP)
 #include "executor/gexecutor.hpp"
 #include "compiler/passes/passes.hpp"
 
@@ -105,6 +107,9 @@ void bindInArgExec(Mag& mag, const RcDesc &rc, const GRunArg &arg)
         mag_rmat = util::get<cv::RMat>(arg); break;
     default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
     }
+    // FIXME: we have to take extra care of meta here for this particular
+    // case, just because this function exists at all
+    mag.meta<cv::RMat>()[rc.id] = arg.meta;
 }
 
 void bindOutArgExec(Mag& mag, const RcDesc &rc, const GRunArgP &arg)
@@ -131,7 +136,7 @@ cv::GRunArgP getObjPtrExec(Mag& mag, const RcDesc &rc)
     {
         return getObjPtr(mag, rc);
     }
-    return GRunArgP(&mag.template slot<cv::RMat>()[rc.id]);
+    return GRunArgP(&mag.slot<cv::RMat>()[rc.id]);
 }
 
 void writeBackExec(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg)
@@ -155,6 +160,25 @@ void writeBackExec(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg)
     default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
     }
 }
+
+void assignMetaStubExec(Mag& mag, const RcDesc &rc, const cv::GRunArg::Meta &meta) {
+    switch (rc.shape)
+    {
+    case GShape::GARRAY:  mag.meta<cv::detail::VectorRef>()[rc.id] = meta; break;
+    case GShape::GOPAQUE: mag.meta<cv::detail::OpaqueRef>()[rc.id] = meta; break;
+    case GShape::GSCALAR: mag.meta<cv::Scalar>()[rc.id]            = meta; break;
+    case GShape::GFRAME:  mag.meta<cv::MediaFrame>()[rc.id]        = meta; break;
+    case GShape::GMAT:
+        mag.meta<cv::Mat>() [rc.id] = meta;
+        mag.meta<cv::RMat>()[rc.id] = meta;
+#if !defined(GAPI_STANDALONE)
+        mag.meta<cv::UMat>()[rc.id] = meta;
+#endif
+        break;
+    default: util::throw_error(std::logic_error("Unsupported GShape type")); break;
+    }
+}
+
 } // anonymous namespace
 }}} // namespace cv::gimpl::magazine
 
@@ -231,11 +255,28 @@ public:
 class cv::gimpl::GExecutor::Output final: public cv::gimpl::GIslandExecutable::IOutput
 {
     cv::gimpl::Mag &mag;
-    virtual GRunArgP get(int idx) override { return magazine::getObjPtrExec(mag, desc()[idx]); }
-    virtual void post(GRunArgP&&) override { } // Do nothing here
-    virtual void post(EndOfStream&&) override {} // Do nothing here too
+    std::unordered_map<const void*, int> out_idx;
+
+    GRunArgP get(int idx) override
+    {
+        auto r = magazine::getObjPtrExec(mag, desc()[idx]);
+        // Remember the output port for this output object
+        out_idx[cv::gimpl::proto::ptr(r)] = idx;
+        return r;
+    }
+    void post(GRunArgP&&) override { } // Do nothing here
+    void post(EndOfStream&&) override {} // Do nothing here too
+    void meta(const GRunArgP &out, const GRunArg::Meta &m) override
+    {
+        const auto idx = out_idx.at(cv::gimpl::proto::ptr(out));
+        magazine::assignMetaStubExec(mag, desc()[idx], m);
+    }
 public:
-    Output(cv::gimpl::Mag &m, const std::vector<RcDesc> &rcs) : mag(m) { set(rcs); }
+    Output(cv::gimpl::Mag &m, const std::vector<RcDesc> &rcs)
+        : mag(m)
+    {
+        set(rcs);
+    }
 };
 
 void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args)
@@ -330,7 +371,7 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args)
     // Run the script
     for (auto &op : m_ops)
     {
-        // (5)
+        // (5), (6)
         Input i{m_res, op.in_objects};
         Output o{m_res, op.out_objects};
         op.isl_exec->run(i, o);
diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp
index 653d20e712..58789889a3 100644
--- a/modules/gapi/src/executor/gstreamingexecutor.cpp
+++ b/modules/gapi/src/executor/gstreamingexecutor.cpp
@@ -350,16 +350,14 @@ bool QueueReader::getInputVector(std::vector<Q*> &in_queues,
             // value-initialized scalar)
             // It can also hold a constant value received with
             // Stop::Kind::CNST message (see above).
-            // FIXME: Variant move problem
-            isl_inputs[id] = const_cast<const cv::GRunArg&>(in_constants[id]);
+            isl_inputs[id] = in_constants[id];
             continue;
         }
 
         q->pop(m_cmd[id]);
         if (!cv::util::holds_alternative<Stop>(m_cmd[id]))
         {
-            // FIXME: Variant move problem
-            isl_inputs[id] = const_cast<const cv::GRunArg &>(cv::util::get<cv::GRunArg>(m_cmd[id]));
+            isl_inputs[id] = cv::util::get<cv::GRunArg>(m_cmd[id]);
         }
         else // A Stop sign
         {
@@ -382,7 +380,7 @@ bool QueueReader::getInputVector(std::vector<Q*> &in_queues,
                 // NEXT time (on a next call to getInputVector()), the
                 // "q==nullptr" check above will be triggered, but now
                 // we need to make it manually:
-                isl_inputs[id] = const_cast<const cv::GRunArg&>(in_constants[id]);
+                isl_inputs[id] = in_constants[id];
             }
             else
             {
@@ -666,8 +664,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput
             Cmd cmd;
             if (cv::util::holds_alternative<cv::GRunArg>(post_iter->data))
             {
-                // FIXME: That ugly VARIANT problem
-                cmd = Cmd{const_cast<const cv::GRunArg&>(cv::util::get<cv::GRunArg>(post_iter->data))};
+                cmd = Cmd{cv::util::get<cv::GRunArg>(post_iter->data)};
             }
             else
             {
@@ -677,8 +674,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput
             }
             for (auto &&q : m_out_queues[out_idx])
             {
-                // FIXME: This ugly VARIANT problem
-                q->push(const_cast<const Cmd&>(cmd));
+                q->push(cmd);
             }
             post_iter = m_postings[out_idx].erase(post_iter);
         }
@@ -708,6 +704,15 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput
             }
         }
     }
+    void meta(const cv::GRunArgP &out, const cv::GRunArg::Meta &m) override
+    {
+        const auto it = m_postIdx.find(cv::gimpl::proto::ptr(out));
+        GAPI_Assert(it != m_postIdx.end());
+
+        const auto out_iter = it->second.second;
+        cv::util::get<cv::GRunArg>(out_iter->data).meta = m;
+    }
+
 public:
     explicit StreamingOutput(const cv::GMetaArgs &metas,
                              std::vector< std::vector<Q*> > &out_queues,
@@ -769,6 +774,7 @@ void islandActorThread(std::vector<cv::gimpl::RcDesc> in_rcs,                //
 void collectorThread(std::vector<Q*>   in_queues,
                      std::vector<int>  in_mapping,
                      const std::size_t out_size,
+                     const bool        handle_stop,
                      Q&                out_queue)
 {
     // These flags are static now: regardless if the sync or
@@ -783,9 +789,14 @@ void collectorThread(std::vector<Q*>   in_queues,
     while (true)
     {
         cv::GRunArgs this_result(out_size);
-        if (!qr.getResultsVector(in_queues, in_mapping, out_size, this_result))
+        const bool ok = qr.getResultsVector(in_queues, in_mapping, out_size, this_result);
+        if (!ok)
         {
-            out_queue.push(Cmd{Stop{}});
+            if (handle_stop)
+            {
+                out_queue.push(Cmd{Stop{}});
+            }
+            // Terminate the thread anyway
             return;
         }
         out_queue.push(Cmd{Result{std::move(this_result), flags}});
@@ -1263,12 +1274,22 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
     // If there are desynchronized parts in the graph, there may be
     // multiple theads polling every separate (desynchronized)
     // branch in the graph individually.
+    const bool has_main_path = m_sink_sync.end() !=
+        std::find(m_sink_sync.begin(), m_sink_sync.end(), -1);
     for (auto &&info : m_collector_map) {
         m_threads.emplace_back(collectorThread,
                                info.second.queues,
                                info.second.mapping,
                                m_sink_queues.size(),
+                               has_main_path ? info.first == -1 : true, // see below (*)
                                std::ref(m_out_queue));
+
+        // (*) - there may be a problem with desynchronized paths when those work
+        // faster than the main path. In this case, the desync paths get the "Stop"
+        // message earlier and broadcast it downstream, so the pipeline gets stopped
+        // while there is still "main path" data to process. The new flag regulates it:
+        // - desync paths never post a Stop message if there is a main path;
+        // - if there is no main path, then any desync path can terminate the execution.
     }
     state = State::READY;
 }
diff --git a/modules/gapi/test/gapi_graph_meta_tests.cpp b/modules/gapi/test/gapi_graph_meta_tests.cpp
new file mode 100644
index 0000000000..73c0da3c9e
--- /dev/null
+++ b/modules/gapi/test/gapi_graph_meta_tests.cpp
@@ -0,0 +1,195 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include <tuple>
+#include <unordered_set>
+
+#include "test_precomp.hpp"
+#include "opencv2/gapi/streaming/meta.hpp"
+#include "opencv2/gapi/streaming/cap.hpp"
+
+namespace opencv_test {
+
+namespace {
+void initTestDataPath() {
+#ifndef WINRT
+    static bool initialized = false;
+    if (!initialized)
+    {
+        // Since G-API has no test data of its own (yet), it is taken from the common space
+        const char* testDataPath = getenv("OPENCV_TEST_DATA_PATH");
+        if (testDataPath != nullptr) {
+            cvtest::addDataSearchPath(testDataPath);
+            initialized = true;
+        }
+    }
+#endif // WINRT
+}
+} // anonymous namespace
+
+TEST(GraphMeta, Trad_AccessInput) {
+    cv::GMat in;
+    cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3));
+    cv::GOpaque<int> out2 = cv::gapi::streaming::meta<int>(in, "foo");
+    cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2));
+
+    cv::Mat in_mat = cv::Mat::eye(cv::Size(64, 64), CV_8UC1);
+    cv::Mat out_mat;
+    int out_meta = 0;
+
+    // manually set metadata in the input fields
+    auto inputs = cv::gin(in_mat);
+    inputs[0].meta["foo"] = 42;
+
+    graph.apply(std::move(inputs), cv::gout(out_mat, out_meta));
+    EXPECT_EQ(42, out_meta);
+}
+
+TEST(GraphMeta, Trad_AccessTmp) {
+    cv::GMat in;
+    cv::GMat tmp = cv::gapi::blur(in, cv::Size(3,3));
+    cv::GMat out1 = tmp+1;
+    cv::GOpaque<float> out2 = cv::gapi::streaming::meta<float>(tmp, "bar");
+    cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2));
+
+    cv::Mat in_mat = cv::Mat::eye(cv::Size(64, 64), CV_8UC1);
+    cv::Mat out_mat;
+    float out_meta = 0.f;
+
+    // manually set metadata in the input fields
+    auto inputs = cv::gin(in_mat);
+    inputs[0].meta["bar"] = 1.f;
+
+    graph.apply(std::move(inputs), cv::gout(out_mat, out_meta));
+    EXPECT_EQ(1.f, out_meta);
+}
+
+TEST(GraphMeta, Trad_AccessOutput) {
+    cv::GMat in;
+    cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3));
+    cv::GOpaque<std::string> out2 = cv::gapi::streaming::meta<std::string>(out1, "baz");
+    cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2));
+
+    cv::Mat in_mat = cv::Mat::eye(cv::Size(64, 64), CV_8UC1);
+    cv::Mat out_mat;
+    std::string out_meta;
+
+    // manually set metadata in the input fields
+    auto inputs = cv::gin(in_mat);
+
+    // NOTE: Assigning explicitly an std::string is important,
+    // otherwise a "const char*" will be stored and won't be
+    // translated properly by util::any since std::string is
+    // used within the graph.
+    inputs[0].meta["baz"] = std::string("opencv");
+
+    graph.apply(std::move(inputs), cv::gout(out_mat, out_meta));
+    EXPECT_EQ("opencv", out_meta);
+}
+
+TEST(GraphMeta, Streaming_AccessInput) {
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3));
+    cv::GOpaque<int64_t> out2 = cv::gapi::streaming::seq_id(in);
+    cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2));
+
+    auto ccomp = graph.compileStreaming();
+    ccomp.setSource<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi", false));
+    ccomp.start();
+
+    cv::Mat out_mat;
+    int64_t out_meta = 0;
+    int64_t expected_counter = 0;
+
+    while (ccomp.pull(cv::gout(out_mat, out_meta))) {
+        EXPECT_EQ(expected_counter, out_meta);
+        ++expected_counter;
+    }
+}
+
+TEST(GraphMeta, Streaming_AccessOutput) {
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3));
+    cv::GOpaque<int64_t> out2 = cv::gapi::streaming::seq_id(out1);
+    cv::GOpaque<int64_t> out3 = cv::gapi::streaming::timestamp(out1);
+    cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2, out3));
+
+    auto ccomp = graph.compileStreaming();
+    ccomp.setSource<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi", false));
+    ccomp.start();
+
+    cv::Mat out_mat;
+    int64_t out_meta = 0;
+    int64_t out_timestamp = 0;
+    int64_t expected_counter = 0;
+    int64_t prev_timestamp = -1;
+
+    while (ccomp.pull(cv::gout(out_mat, out_meta, out_timestamp))) {
+        EXPECT_EQ(expected_counter, out_meta);
+        ++expected_counter;
+
+        EXPECT_NE(prev_timestamp, out_timestamp);
+        prev_timestamp = out_timestamp;
+    }
+}
+
+TEST(GraphMeta, Streaming_AccessDesync) {
+    initTestDataPath();
+
+    cv::GMat in;
+    cv::GOpaque<int64_t> out1 = cv::gapi::streaming::seq_id(in);
+    cv::GOpaque<int64_t> out2 = cv::gapi::streaming::timestamp(in);
+    cv::GMat             out3 = cv::gapi::blur(in, cv::Size(3,3));
+
+    cv::GMat tmp = cv::gapi::streaming::desync(in);
+    cv::GScalar mean = cv::gapi::mean(tmp);
+    cv::GOpaque<int64_t> out4 = cv::gapi::streaming::seq_id(mean);
+    cv::GOpaque<int64_t> out5 = cv::gapi::streaming::timestamp(mean);
+    cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2, out3, out4, out5));
+
+    auto ccomp = graph.compileStreaming();
+    ccomp.setSource<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi", false));
+    ccomp.start();
+
+    cv::optional<int64_t> out_sync_id;
+    cv::optional<int64_t> out_sync_ts;
+    cv::optional<cv::Mat> out_sync_mat;
+
+    cv::optional<int64_t> out_desync_id;
+    cv::optional<int64_t> out_desync_ts;
+
+    std::unordered_set<int64_t> sync_ids;
+    std::unordered_set<int64_t> desync_ids;
+
+    while (ccomp.pull(cv::gout(out_sync_id, out_sync_ts, out_sync_mat,
+                               out_desync_id, out_desync_ts))) {
+        if (out_sync_id.has_value()) {
+            CV_Assert(out_sync_ts.has_value());
+            CV_Assert(out_sync_mat.has_value());
+            sync_ids.insert(out_sync_id.value());
+        }
+        if (out_desync_id.has_value()) {
+            CV_Assert(out_desync_ts.has_value());
+            desync_ids.insert(out_desync_id.value());
+        }
+    }
+    // Visually report that everything is really ok
+    std::cout << sync_ids.size() << " vs " << desync_ids.size() << std::endl;
+
+    // The desync path should generate fewer objects than the synchronized one
+    EXPECT_GE(sync_ids.size(), desync_ids.size());
+
+    // ...but all desynchronized IDs must be present in the synchronized set
+    for (auto &&d_id : desync_ids) {
+        EXPECT_TRUE(sync_ids.count(d_id) > 0);
+    }
+}
+
+} // namespace opencv_test

From fe9a8ebea2a6f0cfe1ac3c720548695e5d3a6ce2 Mon Sep 17 00:00:00 2001
From: shioko <ichisadashioko@gmail.com>
Date: Tue, 17 Nov 2020 15:02:55 +0000
Subject: [PATCH 111/152] Fix typo 'Applicatioin'

---
 apps/interactive-calibration/parametersController.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/apps/interactive-calibration/parametersController.cpp b/apps/interactive-calibration/parametersController.cpp
index c76b915c63..3bcf5b86e9 100644
--- a/apps/interactive-calibration/parametersController.cpp
+++ b/apps/interactive-calibration/parametersController.cpp
@@ -32,7 +32,7 @@ bool calib::parametersController::loadFromFile(const std::string &inputFileName)
 
     if(!reader.isOpened()) {
         std::cerr << "Warning: Unable to open " << inputFileName <<
-                     " Applicatioin stated with default advanced parameters" << std::endl;
+                     " Application started with default advanced parameters" << std::endl;
         return true;
     }
 

From 2c6a2f0381b6b51d93b7db7907c55e5c944b6ce9 Mon Sep 17 00:00:00 2001
From: Orest Chura <orest.chura@intel.com>
Date: Tue, 17 Nov 2020 18:59:59 +0300
Subject: [PATCH 112/152] Merge pull request #18790 from OrestChura:oc/fitLine

[G-API]: fitLine() Standard Kernel Implementation

* fitLine API (Mat, 32S, 32F) (2D, 3D)

* Complete fitLine kernel & accuracy tests
 - initialization for vectors of cv::Point, and for Mats via vectors, added
 - comparison functions for Vec<T, n> added:
   - straight average difference comparison
   - comparison by equation for a 2D line
 - stream overload for cv::DistanceTypes added

* Fix precommit warnings

* Fix docs

* Address comments
Try to fix warning

* Disable warning in tests
---
 modules/gapi/include/opencv2/gapi/imgproc.hpp | 213 +++++++++++++++++-
 modules/gapi/src/api/kernels_imgproc.cpp      |  48 ++++
 modules/gapi/src/backends/cpu/gcpuimgproc.cpp |  80 +++++++
 .../gapi/test/common/gapi_imgproc_tests.hpp   |  16 ++
 .../test/common/gapi_imgproc_tests_inl.hpp    | 186 +++++++++++++++
 .../gapi/test/common/gapi_tests_common.hpp    | 205 +++++++++++++++++
 .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp  |  68 ++++++
 7 files changed, 815 insertions(+), 1 deletion(-)

diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index 0e4254cb87..e41c2507f2 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -46,7 +46,7 @@ void validateFindingContoursMeta(const int depth, const int chan, const int mode
 
 // Checks if the passed mat is a set of n-dimentional points of the given depth
 bool isPointsVector(const int chan, const cv::Size &size, const int depth,
-                    const int n, const int ddepth)
+                    const int n, const int ddepth = -1)
 {
     return (ddepth == depth || ddepth < 0) &&
            ((chan == n && (size.height == 1 || size.width == 1)) ||
@@ -234,6 +234,70 @@ namespace imgproc {
         }
     };
 
+    G_TYPED_KERNEL(GFitLine2DMat, <GOpaque<Vec4f>(GMat,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine2DMat") {
+        static GOpaqueDesc outMeta(GMatDesc in,DistanceTypes,double,double,double) {
+            GAPI_Assert(isPointsVector(in.chan, in.size, in.depth, 2, -1));
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine2DVector32S,
+                   <GOpaque<Vec4f>(GArray<Point2i>,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine2DVector32S") {
+        static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine2DVector32F,
+                   <GOpaque<Vec4f>(GArray<Point2f>,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine2DVector32F") {
+        static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine2DVector64F,
+                   <GOpaque<Vec4f>(GArray<Point2d>,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine2DVector64F") {
+        static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine3DMat, <GOpaque<Vec6f>(GMat,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine3DMat") {
+        static GOpaqueDesc outMeta(GMatDesc in,int,double,double,double) {
+            GAPI_Assert(isPointsVector(in.chan, in.size, in.depth, 3, -1));
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine3DVector32S,
+                   <GOpaque<Vec6f>(GArray<Point3i>,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine3DVector32S") {
+        static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine3DVector32F,
+                   <GOpaque<Vec6f>(GArray<Point3f>,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine3DVector32F") {
+        static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
+            return empty_gopaque_desc();
+        }
+    };
+
+    G_TYPED_KERNEL(GFitLine3DVector64F,
+                   <GOpaque<Vec6f>(GArray<Point3d>,DistanceTypes,double,double,double)>,
+                   "org.opencv.imgproc.shape.fitLine3DVector64F") {
+        static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
+            return empty_gopaque_desc();
+        }
+    };
+
     G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
         static GMatDesc outMeta(GMatDesc in) {
             return in; // type still remains CV_8UC3;
@@ -1111,6 +1175,153 @@ Calculates the up-right bounding rectangle of a point set.
  */
 GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2f>& src);
 
+/** @brief Fits a line to a 2D point set.
+
+The function fits a line to a 2D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
+\f$r_i\f$ is the distance between the \f$i^{th}\f$ point and the line, and \f$\rho(r)\f$ is a distance
+function, one of the following:
+-  DIST_L2
+\f[\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\f]
+- DIST_L1
+\f[\rho (r) = r\f]
+- DIST_L12
+\f[\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
+- DIST_FAIR
+\f[\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\f]
+- DIST_WELSCH
+\f[\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\f]
+- DIST_HUBER
+\f[\rho (r) =  \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
+
+The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
+that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
+weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DMat"
+
+@param src Input set of 2D points stored in one of the possible containers: Mat,
+std::vector<cv::Point2i>, std::vector<cv::Point2f>, std::vector<cv::Point2d>.
+
+@note In case an N-dimensional point set is given, the Mat should be 2-dimensional: a single
+row or column with N channels, or N columns with a single channel.
+
+@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
+and @ref DIST_C are not supported.
+@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
+is chosen.
+@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
+line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
+@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
+If it is 0, a default value is chosen.
+
+@return Output line parameters: a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0),
+where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line.
+ */
+GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GMat& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @overload
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32S"
+
+ */
+GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2i>& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @overload
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32F"
+
+ */
+GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2f>& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @overload
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector64F"
+
+ */
+GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2d>& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @brief Fits a line to a 3D point set.
+
+The function fits a line to a 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
+\f$r_i\f$ is the distance between the \f$i^{th}\f$ point and the line, and \f$\rho(r)\f$ is a distance
+function, one of the following:
+-  DIST_L2
+\f[\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\f]
+- DIST_L1
+\f[\rho (r) = r\f]
+- DIST_L12
+\f[\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
+- DIST_FAIR
+\f[\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\f]
+- DIST_WELSCH
+\f[\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\f]
+- DIST_HUBER
+\f[\rho (r) =  \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
+
+The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
+that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
+weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DMat"
+
+@param src Input set of 3D points stored in one of the possible containers: Mat,
+std::vector<cv::Point3i>, std::vector<cv::Point3f>, std::vector<cv::Point3d>.
+
+@note In case an N-dimensional point set is given, the Mat should be 2-dimensional: a single
+row or column with N channels, or N columns with a single channel.
+
+@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
+and @ref DIST_C are not supported.
+@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
+is chosen.
+@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
+line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
+@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
+If it is 0, a default value is chosen.
+
+@return Output line parameters: a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0),
+where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on
+the line.
+ */
+GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GMat& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @overload
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32S"
+
+ */
+GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3i>& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @overload
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32F"
+
+ */
+GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3f>& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
+/** @overload
+
+@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector64F"
+
+ */
+GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3d>& src, const DistanceTypes distType,
+                                      const double param = 0., const double reps = 0.,
+                                      const double aeps = 0.);
+
 //! @} gapi_shape
 
 //! @addtogroup gapi_colorconvert
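A usage sketch for the new kernels (it mirrors the accuracy tests added below): fitting a 2D line to a vector of integer points, with param/reps/aeps left at their defaults:

```cpp
#include <vector>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>

void fit_example() {
    const std::vector<cv::Point2i> pts = {{0,0}, {1,1}, {2,2}, {3,3}};

    cv::GArray<cv::Point2i> in;
    cv::GOpaque<cv::Vec4f> out = cv::gapi::fitLine2D(in, cv::DIST_L2);
    cv::GComputation c(cv::GIn(in), cv::GOut(out));

    cv::Vec4f line;                         // (vx, vy, x0, y0)
    c.apply(cv::gin(pts), cv::gout(line));
}
```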
diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp
index faf8de54c7..41085a7ebf 100644
--- a/modules/gapi/src/api/kernels_imgproc.cpp
+++ b/modules/gapi/src/api/kernels_imgproc.cpp
@@ -164,6 +164,54 @@ GOpaque<Rect> boundingRect(const GArray<Point2f>& src)
     return imgproc::GBoundingRectVector32F::on(src);
 }
 
+GOpaque<Vec4f> fitLine2D(const GMat& src, const DistanceTypes distType, const double param,
+                         const double reps, const double aeps)
+{
+    return imgproc::GFitLine2DMat::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec4f> fitLine2D(const GArray<Point2i>& src, const DistanceTypes distType,
+                         const double param, const double reps, const double aeps)
+{
+    return imgproc::GFitLine2DVector32S::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec4f> fitLine2D(const GArray<Point2f>& src, const DistanceTypes distType,
+                         const double param, const double reps, const double aeps)
+{
+    return imgproc::GFitLine2DVector32F::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec4f> fitLine2D(const GArray<Point2d>& src, const DistanceTypes distType,
+                         const double param, const double reps, const double aeps)
+{
+    return imgproc::GFitLine2DVector64F::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec6f> fitLine3D(const GMat& src, const DistanceTypes distType, const double param,
+                         const double reps, const double aeps)
+{
+    return imgproc::GFitLine3DMat::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec6f> fitLine3D(const GArray<Point3i>& src, const DistanceTypes distType,
+                         const double param, const double reps, const double aeps)
+{
+    return imgproc::GFitLine3DVector32S::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec6f> fitLine3D(const GArray<Point3f>& src, const DistanceTypes distType,
+                         const double param, const double reps, const double aeps)
+{
+    return imgproc::GFitLine3DVector32F::on(src, distType, param, reps, aeps);
+}
+
+GOpaque<Vec6f> fitLine3D(const GArray<Point3d>& src, const DistanceTypes distType,
+                         const double param, const double reps, const double aeps)
+{
+    return imgproc::GFitLine3DVector64F::on(src, distType, param, reps, aeps);
+}
+
 GMat BGR2RGB(const GMat& src)
 {
     return imgproc::GBGR2RGB::on(src);
diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
index 9eca0f12f0..6cbf0d32f0 100644
--- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
+++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp
@@ -285,6 +285,78 @@ GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVecto
     }
 };
 
+GAPI_OCV_KERNEL(GCPUFitLine2DMat, cv::gapi::imgproc::GFitLine2DMat)
+{
+    static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param,
+                    const double reps, const double aeps, cv::Vec4f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine2DVector32S, cv::gapi::imgproc::GFitLine2DVector32S)
+{
+    static void run(const std::vector<cv::Point2i>& in, const cv::DistanceTypes distType,
+                    const double param, const double reps, const double aeps, cv::Vec4f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine2DVector32F, cv::gapi::imgproc::GFitLine2DVector32F)
+{
+    static void run(const std::vector<cv::Point2f>& in, const cv::DistanceTypes distType,
+                    const double param, const double reps, const double aeps, cv::Vec4f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine2DVector64F, cv::gapi::imgproc::GFitLine2DVector64F)
+{
+    static void run(const std::vector<cv::Point2d>& in, const cv::DistanceTypes distType,
+                    const double param, const double reps, const double aeps, cv::Vec4f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine3DMat, cv::gapi::imgproc::GFitLine3DMat)
+{
+    static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param,
+                    const double reps, const double aeps, cv::Vec6f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine3DVector32S, cv::gapi::imgproc::GFitLine3DVector32S)
+{
+    static void run(const std::vector<cv::Point3i>& in, const cv::DistanceTypes distType,
+                    const double param, const double reps, const double aeps, cv::Vec6f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine3DVector32F, cv::gapi::imgproc::GFitLine3DVector32F)
+{
+    static void run(const std::vector<cv::Point3f>& in, const cv::DistanceTypes distType,
+                    const double param, const double reps, const double aeps, cv::Vec6f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
+GAPI_OCV_KERNEL(GCPUFitLine3DVector64F, cv::gapi::imgproc::GFitLine3DVector64F)
+{
+    static void run(const std::vector<cv::Point3d>& in, const cv::DistanceTypes distType,
+                    const double param, const double reps, const double aeps, cv::Vec6f& out)
+    {
+        cv::fitLine(in, out, distType, param, reps, aeps);
+    }
+};
+
 GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB)
 {
     static void run(const cv::Mat& in, cv::Mat &out)
@@ -569,6 +641,14 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
         , GCPUBoundingRectMat
         , GCPUBoundingRectVector32S
         , GCPUBoundingRectVector32F
+        , GCPUFitLine2DMat
+        , GCPUFitLine2DVector32S
+        , GCPUFitLine2DVector32F
+        , GCPUFitLine2DVector64F
+        , GCPUFitLine3DMat
+        , GCPUFitLine3DVector32S
+        , GCPUFitLine3DVector32F
+        , GCPUFitLine3DVector64F
         , GCPUYUV2RGB
         , GCPUBGR2I420
         , GCPURGB2I420
diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp
index b27da28c87..b48b7b6732 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp
@@ -83,6 +83,22 @@ GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRec
 GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
 GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
+GAPI_TEST_FIXTURE(FitLine2DMatVectorTest, initMatByPointsVectorRandU<cv::Point_>,
+                  FIXTURE_API(CompareVecs<float, 4>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine2DVector32STest, initNothing,
+                  FIXTURE_API(CompareVecs<float, 4>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine2DVector32FTest, initNothing,
+                  FIXTURE_API(CompareVecs<float, 4>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine2DVector64FTest, initNothing,
+                  FIXTURE_API(CompareVecs<float, 4>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine3DMatVectorTest, initMatByPointsVectorRandU<cv::Point3_>,
+                  FIXTURE_API(CompareVecs<float, 6>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine3DVector32STest, initNothing,
+                  FIXTURE_API(CompareVecs<float, 6>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine3DVector32FTest, initNothing,
+                  FIXTURE_API(CompareVecs<float, 6>,cv::DistanceTypes), 2, cmpF, distType)
+GAPI_TEST_FIXTURE(FitLine3DVector64FTest, initNothing,
+                  FIXTURE_API(CompareVecs<float, 6>,cv::DistanceTypes), 2, cmpF, distType)
 GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(RGB2YUVTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
 GAPI_TEST_FIXTURE(BGR2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
index 91e676c5e7..2a4f2e64ea 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp
@@ -752,6 +752,192 @@ TEST_P(BoundingRectVector32FTest, AccuracyTest)
     }
 }
 
+TEST_P(FitLine2DMatVectorTest, AccuracyTest)
+{
+    cv::Vec4f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_mat1), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_mat1, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine2DVector32STest, AccuracyTest)
+{
+    cv::Vec4f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    std::vector<cv::Point2i> in_vec;
+    initPointsVectorRandU(sz.width, in_vec);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point2i> in;
+    auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine2DVector32FTest, AccuracyTest)
+{
+    cv::Vec4f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    std::vector<cv::Point2f> in_vec;
+    initPointsVectorRandU(sz.width, in_vec);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point2f> in;
+    auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine2DVector64FTest, AccuracyTest)
+{
+    cv::Vec4f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    std::vector<cv::Point2d> in_vec;
+    initPointsVectorRandU(sz.width, in_vec);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point2d> in;
+    auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine3DMatVectorTest, AccuracyTest)
+{
+    cv::Vec6f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GMat in;
+    auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_mat1), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_mat1, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine3DVector32STest, AccuracyTest)
+{
+    cv::Vec6f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    std::vector<cv::Point3i> in_vec;
+    initPointsVectorRandU(sz.width, in_vec);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point3i> in;
+    auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine3DVector32FTest, AccuracyTest)
+{
+    cv::Vec6f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    std::vector<cv::Point3f> in_vec;
+    initPointsVectorRandU(sz.width, in_vec);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point3f> in;
+    auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
+TEST_P(FitLine3DVector64FTest, AccuracyTest)
+{
+    cv::Vec6f out_vec_gapi, out_vec_ocv;
+    double paramDefault = 0., repsDefault = 0., aepsDefault = 0.;
+
+    std::vector<cv::Point3d> in_vec;
+    initPointsVectorRandU(sz.width, in_vec);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    cv::GArray<cv::Point3d> in;
+    auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault);
+
+    cv::GComputation c(cv::GIn(in), cv::GOut(out));
+    c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs());
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv));
+    }
+}
+
 TEST_P(BGR2RGBTest, AccuracyTest)
 {
     // G-API code //////////////////////////////////////////////////////////////
diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp
index 948476fa10..514fa2be38 100644
--- a/modules/gapi/test/common/gapi_tests_common.hpp
+++ b/modules/gapi/test/common/gapi_tests_common.hpp
@@ -74,6 +74,50 @@ namespace
         }
 #endif // WINRT
     }
+
+    template <typename T> inline void initPointRandU(cv::RNG &rng, cv::Point_<T>& pt)
+    {
+        GAPI_Assert(std::is_integral<T>::value);
+        pt = cv::Point_<T>(static_cast<T>(static_cast<char>(rng(CHAR_MAX + 1U))),
+                           static_cast<T>(static_cast<char>(rng(CHAR_MAX + 1U))));
+    }
+
+    template <typename T> inline void initPointRandU(cv::RNG &rng, cv::Point3_<T>& pt)
+    {
+        GAPI_Assert(std::is_integral<T>::value);
+        pt = cv::Point3_<T>(static_cast<T>(static_cast<char>(rng(CHAR_MAX + 1U))),
+                            static_cast<T>(static_cast<char>(rng(CHAR_MAX + 1U))),
+                            static_cast<T>(static_cast<char>(rng(CHAR_MAX + 1U))));
+    }
+
+    template <typename F> inline void initFloatPointRandU(cv::RNG &rng, cv::Point_<F> &pt)
+    {
+        GAPI_Assert(std::is_floating_point<F>::value);
+        static const int fscale = 256;  // avoid bits near ULP, generate stable test input
+        pt = cv::Point_<F>(rng.uniform(0, 255 * fscale) / static_cast<F>(fscale),
+                           rng.uniform(0, 255 * fscale) / static_cast<F>(fscale));
+    }
+
+    template<> inline void initPointRandU(cv::RNG &rng, cv::Point2f &pt)
+    { initFloatPointRandU(rng, pt); }
+
+    template<> inline void initPointRandU(cv::RNG &rng, cv::Point2d &pt)
+    { initFloatPointRandU(rng, pt); }
+
+    template <typename F> inline void initFloatPointRandU(cv::RNG &rng, cv::Point3_<F> &pt)
+    {
+        GAPI_Assert(std::is_floating_point<F>::value);
+        static const int fscale = 256;  // avoid bits near ULP, generate stable test input
+        pt = cv::Point3_<F>(rng.uniform(0, 255 * fscale) / static_cast<F>(fscale),
+                            rng.uniform(0, 255 * fscale) / static_cast<F>(fscale),
+                            rng.uniform(0, 255 * fscale) / static_cast<F>(fscale));
+    }
+
+    template<> inline void initPointRandU(cv::RNG &rng, cv::Point3f &pt)
+    { initFloatPointRandU(rng, pt); }
+
+    template<> inline void initPointRandU(cv::RNG &rng, cv::Point3d &pt)
+    { initFloatPointRandU(rng, pt); }
 } // namespace
 
 namespace opencv_test
@@ -279,6 +323,80 @@ public:
         }
     }
 
+    template <typename T>
+    inline void initPointRandU(cv::RNG& rng, T& pt)
+    { ::initPointRandU(rng, pt); }
+
+// Disable unreachable code warning for MSVS 2015
+#if defined _MSC_VER && _MSC_VER < 1910 /*MSVS 2017*/
+#pragma warning(push)
+#pragma warning(disable: 4702)
+#endif
+    // initialize std::vector<cv::Point_<T>>/std::vector<cv::Point3_<T>>
+    template <typename T, template <typename> class Pt>
+    void initPointsVectorRandU(const int sz_in, std::vector<Pt<T>> &vec_)
+    {
+        cv::RNG& rng = theRNG();
+
+        vec_.clear();
+        vec_.reserve(sz_in);
+
+        for (int i = 0; i < sz_in; i++)
+        {
+            Pt<T> pt;
+            initPointRandU(rng, pt);
+            vec_.emplace_back(pt);
+        }
+    }
+#if defined _MSC_VER && _MSC_VER < 1910 /*MSVS 2017*/
+#pragma warning(pop)
+#endif
+
+    template<typename Pt>
+    inline void initMatByPointsVectorRandU(const cv::Size &sz_in)
+    {
+        std::vector<Pt> in_vector;
+        initPointsVectorRandU(sz_in.width, in_vector);
+        in_mat1 = cv::Mat(in_vector, true);
+    }
+
+    // initialize Mat by a vector of Points
+    template<template <typename> class Pt>
+    inline void initMatByPointsVectorRandU(int type, cv::Size sz_in, int)
+    {
+        int depth = CV_MAT_DEPTH(type);
+        switch (depth)
+        {
+        case CV_8U:
+            initMatByPointsVectorRandU<Pt<uchar>>(sz_in);
+            break;
+        case CV_8S:
+            initMatByPointsVectorRandU<Pt<char>>(sz_in);
+            break;
+        case CV_16U:
+            initMatByPointsVectorRandU<Pt<ushort>>(sz_in);
+            break;
+        case CV_16S:
+            initMatByPointsVectorRandU<Pt<short>>(sz_in);
+            break;
+        case CV_32S:
+            initMatByPointsVectorRandU<Pt<int>>(sz_in);
+            break;
+        case CV_32F:
+            initMatByPointsVectorRandU<Pt<float>>(sz_in);
+            break;
+        case CV_64F:
+            initMatByPointsVectorRandU<Pt<double>>(sz_in);
+            break;
+        case CV_16F:
+            initMatByPointsVectorRandU<Pt<cv::float16_t>>(sz_in);
+            break;
+        default:
+            GAPI_Assert(false && "Unsupported depth");
+            break;
+        }
+    }
+
     // empty function intended to show that nothing is to be initialized via TestFunctional methods
     void initNothing(int, cv::Size, int, bool = true) {}
 };
@@ -469,6 +587,9 @@ template<typename Elem>
 using compare_vector_f = std::function<bool(const std::vector<Elem> &a,
                                             const std::vector<Elem> &b)>;
 
+template<typename Elem, int cn>
+using compare_vec_f = std::function<bool(const cv::Vec<Elem, cn> &a, const cv::Vec<Elem, cn> &b)>;
+
 template<typename T1, typename T2>
 struct CompareF
 {
@@ -495,6 +616,9 @@ using CompareRects = CompareF<cv::Rect, cv::Rect>;
 template<typename Elem>
 using CompareVectors = CompareF<std::vector<Elem>, std::vector<Elem>>;
 
+template<typename Elem, int cn>
+using CompareVecs = CompareF<cv::Vec<Elem, cn>, cv::Vec<Elem, cn>>;
+
 template<typename T>
 struct Wrappable
 {
@@ -580,6 +704,27 @@ struct WrappableVector
     }
 };
 
+template<typename T, typename Elem, int cn>
+struct WrappableVec
+{
+    compare_vec_f<Elem, cn> to_compare_f()
+    {
+        T t = *static_cast<T* const>(this);
+        return [t](const cv::Vec<Elem, cn> &a, const cv::Vec<Elem, cn> &b)
+        {
+            return t(a, b);
+        };
+    }
+
+    CompareVecs<Elem, cn> to_compare_obj()
+    {
+        T t = *static_cast<T* const>(this);
+        std::stringstream ss;
+        ss << t;
+        return CompareVecs<Elem, cn>(to_compare_f(), ss.str());
+    }
+};
+
 
 class AbsExact : public Wrappable<AbsExact>
 {
@@ -855,6 +1000,41 @@ public:
         return os << "AbsExactVector()";
     }
 };
+
+template<typename Elem, int cn>
+class RelDiffToleranceVec : public WrappableVec<RelDiffToleranceVec<Elem, cn>, Elem, cn>
+{
+public:
+    RelDiffToleranceVec(double tol) : _tol(tol) {}
+    bool operator() (const cv::Vec<Elem, cn> &in1, const cv::Vec<Elem, cn> &in2) const
+    {
+        double abs_err  = cv::norm(in1, in2, cv::NORM_L1);
+        double in2_norm = cv::norm(in2, cv::NORM_L1);
+        // Checks to avoid dividing by zero
+        double err = abs_err ? abs_err / (in2_norm ? in2_norm : cv::norm(in1, cv::NORM_L1))
+                             : abs_err;
+        if (err > _tol)
+        {
+            std::cout << "RelDiffToleranceVec error: err=" << err << "  tolerance=" << _tol;
+            for (int i = 0; i < cn; i++)
+            {
+                std::cout << " in1[" << i << "]=" << in1[i] << " in2[" << i << "]=" << in2[i];
+            }
+            std::cout << std::endl;
+            return false;
+        }
+        else
+        {
+            return true;
+        }
+    }
+    friend std::ostream& operator<<(std::ostream& os, const RelDiffToleranceVec<Elem, cn>& obj)
+    {
+        return os << "RelDiffToleranceVec(" << std::to_string(obj._tol) << ")";
+    }
+private:
+    double _tol;
+};
 } // namespace opencv_test
 
 namespace
@@ -879,6 +1059,12 @@ inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_vec
 {
     return os << "compare_vector_f";
 }
+
+template<typename Elem, int cn>
+inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_vec_f<Elem, cn>&)
+{
+    return os << "compare_vec_f";
+}
 }  // anonymous namespace
 
 // Note: namespace must match the namespace of the type of the printed object
@@ -969,6 +1155,25 @@ inline std::ostream& operator<<(std::ostream& os, MorphTypes op)
 #undef CASE
     return os;
 }
+
+inline std::ostream& operator<<(std::ostream& os, DistanceTypes op)
+{
+#define CASE(v) case DistanceTypes::v: os << #v; break
+    switch (op)
+    {
+        CASE(DIST_USER);
+        CASE(DIST_L1);
+        CASE(DIST_L2);
+        CASE(DIST_C);
+        CASE(DIST_L12);
+        CASE(DIST_FAIR);
+        CASE(DIST_WELSCH);
+        CASE(DIST_HUBER);
+        default: GAPI_Assert(false && "unknown DistanceTypes value");
+    }
+#undef CASE
+    return os;
+}
 }  // namespace cv
 
 #endif //OPENCV_GAPI_TESTS_COMMON_HPP
diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
index cea0e0da32..884bf0dbae 100644
--- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
+++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp
@@ -337,6 +337,74 @@ INSTANTIATE_TEST_CASE_P(BoundingRectVector32STestCPU, BoundingRectVector32STest,
                                  Values(IMGPROC_CPU),
                                  Values(IoUToleranceRect(1e-5).to_compare_obj())));
 
+INSTANTIATE_TEST_CASE_P(FitLine2DMatVectorTestCPU, FitLine2DMatVectorTest,
+                        Combine(Values(CV_8U, CV_8S, CV_16U, CV_16S,
+                                       CV_32S, CV_32F, CV_64F),
+                                Values(cv::Size(8, 0), cv::Size(1024, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 4>(0.01).to_compare_obj()),
+                                Values(DIST_L1, DIST_L2, DIST_L12, DIST_FAIR,
+                                       DIST_WELSCH, DIST_HUBER)));
+
+INSTANTIATE_TEST_CASE_P(FitLine2DVector32STestCPU, FitLine2DVector32STest,
+                        Combine(Values(-1),
+                                Values(cv::Size(8, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 4>(0.01).to_compare_obj()),
+                                Values(DIST_L1)));
+
+INSTANTIATE_TEST_CASE_P(FitLine2DVector32FTestCPU, FitLine2DVector32FTest,
+                        Combine(Values(-1),
+                                Values(cv::Size(8, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 4>(0.01).to_compare_obj()),
+                                Values(DIST_L1)));
+
+INSTANTIATE_TEST_CASE_P(FitLine2DVector64FTestCPU, FitLine2DVector64FTest,
+                        Combine(Values(-1),
+                                Values(cv::Size(8, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 4>(0.01).to_compare_obj()),
+                                Values(DIST_L1)));
+
+INSTANTIATE_TEST_CASE_P(FitLine3DMatVectorTestCPU, FitLine3DMatVectorTest,
+                        Combine(Values(CV_8UC1, CV_8SC1, CV_16UC1, CV_16SC1,
+                                       CV_32SC1, CV_32FC1, CV_64FC1),
+                                Values(cv::Size(8, 0), cv::Size(1024, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 6>(0.01).to_compare_obj()),
+                                Values(DIST_L1, DIST_L2, DIST_L12, DIST_FAIR,
+                                       DIST_WELSCH, DIST_HUBER)));
+
+INSTANTIATE_TEST_CASE_P(FitLine3DVector32STestCPU, FitLine3DVector32STest,
+                        Combine(Values(-1),
+                                Values(cv::Size(8, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 6>(0.01).to_compare_obj()),
+                                Values(DIST_L1)));
+
+INSTANTIATE_TEST_CASE_P(FitLine3DVector32FTestCPU, FitLine3DVector32FTest,
+                        Combine(Values(-1),
+                                Values(cv::Size(8, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 6>(0.01).to_compare_obj()),
+                                Values(DIST_L1)));
+
+INSTANTIATE_TEST_CASE_P(FitLine3DVector64FTestCPU, FitLine3DVector64FTest,
+                        Combine(Values(-1),
+                                Values(cv::Size(8, 0)),
+                                Values(-1),
+                                Values(IMGPROC_CPU),
+                                Values(RelDiffToleranceVec<float, 6>(0.01).to_compare_obj()),
+                                Values(DIST_L1)));
+
 INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest,
                         Combine(Values(CV_8UC3),
                                 Values(cv::Size(1280, 720),

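A minimal sketch of invoking the new 2D overload through the CPU backend,
mirroring the accuracy tests above (assumes this patch is applied; the sketch
itself is not part of the patch):

```cpp
#include <vector>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/cpu/imgproc.hpp>

int main()
{
    // A few roughly collinear points to fit.
    std::vector<cv::Point2f> pts = {{0.f, 0.1f}, {1.f, 1.f}, {2.f, 2.1f}, {3.f, 2.9f}};

    cv::GArray<cv::Point2f> in;
    auto out = cv::gapi::fitLine2D(in, cv::DIST_L2, 0., 0., 0.);

    cv::GComputation c(cv::GIn(in), cv::GOut(out));

    cv::Vec4f line;  // (vx, vy, x0, y0), same layout as cv::fitLine
    c.apply(cv::gin(pts), cv::gout(line),
            cv::compile_args(cv::gapi::imgproc::cpu::kernels()));
    return 0;
}
```
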
From d266fee8bb0ecdd472e8a89597e8c8c0ac4507cb Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 17 Nov 2020 18:16:34 +0000
Subject: [PATCH 113/152] 3rdparty: libjpeg-turbo 2.0.5 => 2.0.6

https://github.com/libjpeg-turbo/libjpeg-turbo/releases/tag/2.0.6
---
 3rdparty/libjpeg-turbo/CMakeLists.txt |  4 +-
 3rdparty/libjpeg-turbo/LICENSE.md     |  2 +-
 3rdparty/libjpeg-turbo/README.ijg     | 22 ++++-------
 3rdparty/libjpeg-turbo/README.md      | 21 +++++-----
 3rdparty/libjpeg-turbo/src/jchuff.c   |  4 +-
 3rdparty/libjpeg-turbo/src/jcinit.c   |  5 ++-
 3rdparty/libjpeg-turbo/src/jcphuff.c  |  4 +-
 3rdparty/libjpeg-turbo/src/jctrans.c  |  5 ++-
 3rdparty/libjpeg-turbo/src/jdapistd.c | 45 +++++++++++++++++-----
 3rdparty/libjpeg-turbo/src/jdcoefct.c |  8 ++--
 3rdparty/libjpeg-turbo/src/jdcolor.c  |  9 ++---
 3rdparty/libjpeg-turbo/src/jdmerge.c  | 55 +++++++--------------------
 3rdparty/libjpeg-turbo/src/jdmerge.h  | 47 +++++++++++++++++++++++
 3rdparty/libjpeg-turbo/src/jdmrg565.c | 10 ++---
 3rdparty/libjpeg-turbo/src/jdmrgext.c |  6 +--
 3rdparty/libjpeg-turbo/src/jdtrans.c  |  5 ++-
 3rdparty/libjpeg-turbo/src/jfdctint.c |  4 +-
 3rdparty/libjpeg-turbo/src/jidctint.c |  4 +-
 3rdparty/libjpeg-turbo/src/jmorecfg.h |  8 ++--
 3rdparty/libjpeg-turbo/src/jpegcomp.h |  3 +-
 3rdparty/libjpeg-turbo/src/jpeglib.h  |  8 ++--
 3rdparty/libjpeg-turbo/src/jquant2.c  |  6 +--
 3rdparty/libjpeg-turbo/src/jversion.h | 14 ++++---
 23 files changed, 174 insertions(+), 125 deletions(-)
 create mode 100644 3rdparty/libjpeg-turbo/src/jdmerge.h

diff --git a/3rdparty/libjpeg-turbo/CMakeLists.txt b/3rdparty/libjpeg-turbo/CMakeLists.txt
index 8da98b6020..901669a4a8 100644
--- a/3rdparty/libjpeg-turbo/CMakeLists.txt
+++ b/3rdparty/libjpeg-turbo/CMakeLists.txt
@@ -4,9 +4,9 @@ ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter -Wsign-compare -Wshorten-6
 
 set(VERSION_MAJOR 2)
 set(VERSION_MINOR 0)
-set(VERSION_REVISION 5)
+set(VERSION_REVISION 6)
 set(VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION})
-set(LIBJPEG_TURBO_VERSION_NUMBER 2000005)
+set(LIBJPEG_TURBO_VERSION_NUMBER 2000006)
 
 string(TIMESTAMP BUILD "opencv-${OPENCV_VERSION}-libjpeg-turbo")
 if(CMAKE_BUILD_TYPE STREQUAL "Debug")
diff --git a/3rdparty/libjpeg-turbo/LICENSE.md b/3rdparty/libjpeg-turbo/LICENSE.md
index 5ca512b34d..99c9aadcc4 100644
--- a/3rdparty/libjpeg-turbo/LICENSE.md
+++ b/3rdparty/libjpeg-turbo/LICENSE.md
@@ -91,7 +91,7 @@ best of our understanding.
 The Modified (3-clause) BSD License
 ===================================
 
-Copyright (C)2009-2019 D. R. Commander.  All Rights Reserved.
+Copyright (C)2009-2020 D. R. Commander.  All Rights Reserved.
 Copyright (C)2015 Viktor Szathmáry.  All Rights Reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/3rdparty/libjpeg-turbo/README.ijg b/3rdparty/libjpeg-turbo/README.ijg
index 2e39f965c2..d681cf1273 100644
--- a/3rdparty/libjpeg-turbo/README.ijg
+++ b/3rdparty/libjpeg-turbo/README.ijg
@@ -223,12 +223,12 @@ https://www.iso.org/standard/54989.html and http://www.itu.int/rec/T-REC-T.871.
 A PDF file of the older JFIF 1.02 specification is available at
 http://www.w3.org/Graphics/JPEG/jfif3.pdf.
 
-The TIFF 6.0 file format specification can be obtained by FTP from
-ftp://ftp.sgi.com/graphics/tiff/TIFF6.ps.gz.  The JPEG incorporation scheme
-found in the TIFF 6.0 spec of 3-June-92 has a number of serious problems.
-IJG does not recommend use of the TIFF 6.0 design (TIFF Compression tag 6).
-Instead, we recommend the JPEG design proposed by TIFF Technical Note #2
-(Compression tag 7).  Copies of this Note can be obtained from
+The TIFF 6.0 file format specification can be obtained from
+http://mirrors.ctan.org/graphics/tiff/TIFF6.ps.gz.  The JPEG incorporation
+scheme found in the TIFF 6.0 spec of 3-June-92 has a number of serious
+problems.  IJG does not recommend use of the TIFF 6.0 design (TIFF Compression
+tag 6).  Instead, we recommend the JPEG design proposed by TIFF Technical Note
+#2 (Compression tag 7).  Copies of this Note can be obtained from
 http://www.ijg.org/files/.  It is expected that the next revision
 of the TIFF spec will replace the 6.0 JPEG design with the Note's design.
 Although IJG's own code does not support TIFF/JPEG, the free libtiff library
@@ -243,14 +243,8 @@ The most recent released version can always be found there in
 directory "files".
 
 The JPEG FAQ (Frequently Asked Questions) article is a source of some
-general information about JPEG.
-It is available on the World Wide Web at http://www.faqs.org/faqs/jpeg-faq/
-and other news.answers archive sites, including the official news.answers
-archive at rtfm.mit.edu: ftp://rtfm.mit.edu/pub/usenet/news.answers/jpeg-faq/.
-If you don't have Web or FTP access, send e-mail to mail-server@rtfm.mit.edu
-with body
-        send usenet/news.answers/jpeg-faq/part1
-        send usenet/news.answers/jpeg-faq/part2
+general information about JPEG.  It is available at
+http://www.faqs.org/faqs/jpeg-faq.
 
 
 FILE FORMAT COMPATIBILITY
diff --git a/3rdparty/libjpeg-turbo/README.md b/3rdparty/libjpeg-turbo/README.md
index e7ff743a47..90a4a43ee1 100644
--- a/3rdparty/libjpeg-turbo/README.md
+++ b/3rdparty/libjpeg-turbo/README.md
@@ -2,7 +2,7 @@ Background
 ==========
 
 libjpeg-turbo is a JPEG image codec that uses SIMD instructions to accelerate
-baseline JPEG compression and decompression on x86, x86-64, ARM, PowerPC, and
+baseline JPEG compression and decompression on x86, x86-64, Arm, PowerPC, and
 MIPS systems, as well as progressive JPEG compression on x86 and x86-64
 systems.  On such systems, libjpeg-turbo is generally 2-6x as fast as libjpeg,
 all else being equal.  On other types of systems, libjpeg-turbo can still
@@ -179,8 +179,8 @@ supported and which aren't.
 
 NOTE:  As of this writing, extensive research has been conducted into the
 usefulness of DCT scaling as a means of data reduction and SmartScale as a
-means of quality improvement.  The reader is invited to peruse the research at
-<http://www.libjpeg-turbo.org/About/SmartScale> and draw his/her own conclusions,
+means of quality improvement.  Readers are invited to peruse the research at
+<http://www.libjpeg-turbo.org/About/SmartScale> and draw their own conclusions,
 but it is the general belief of our project that these features have not
 demonstrated sufficient usefulness to justify inclusion in libjpeg-turbo.
 
@@ -287,12 +287,13 @@ following reasons:
   (and slightly faster) floating point IDCT algorithm introduced in libjpeg
   v8a as opposed to the algorithm used in libjpeg v6b.  It should be noted,
   however, that this algorithm basically brings the accuracy of the floating
-  point IDCT in line with the accuracy of the slow integer IDCT.  The floating
-  point DCT/IDCT algorithms are mainly a legacy feature, and they do not
-  produce significantly more accuracy than the slow integer algorithms (to put
-  numbers on this, the typical difference in PNSR between the two algorithms
-  is less than 0.10 dB, whereas changing the quality level by 1 in the upper
-  range of the quality scale is typically more like a 1.0 dB difference.)
+  point IDCT in line with the accuracy of the accurate integer IDCT.  The
+  floating point DCT/IDCT algorithms are mainly a legacy feature, and they do
+  not produce significantly more accuracy than the accurate integer algorithms
+  (to put numbers on this, the typical difference in PSNR between the two
+  algorithms is less than 0.10 dB, whereas changing the quality level by 1 in
+  the upper range of the quality scale is typically more like a 1.0 dB
+  difference.)
 
 - If the floating point algorithms in libjpeg-turbo are not implemented using
   SIMD instructions on a particular platform, then the accuracy of the
@@ -340,7 +341,7 @@ The algorithm used by the SIMD-accelerated quantization function cannot produce
 correct results whenever the fast integer forward DCT is used along with a JPEG
 quality of 98-100.  Thus, libjpeg-turbo must use the non-SIMD quantization
 function in those cases.  This causes performance to drop by as much as 40%.
-It is therefore strongly advised that you use the slow integer forward DCT
+It is therefore strongly advised that you use the accurate integer forward DCT
 whenever encoding images with a JPEG quality of 98 or higher.
 
 
diff --git a/3rdparty/libjpeg-turbo/src/jchuff.c b/3rdparty/libjpeg-turbo/src/jchuff.c
index cb05055d99..db85ce114f 100644
--- a/3rdparty/libjpeg-turbo/src/jchuff.c
+++ b/3rdparty/libjpeg-turbo/src/jchuff.c
@@ -34,10 +34,10 @@
  * memory footprint by 64k, which is important for some mobile applications
  * that create many isolated instances of libjpeg-turbo (web browsers, for
  * instance.)  This may improve performance on some mobile platforms as well.
- * This feature is enabled by default only on ARM processors, because some x86
+ * This feature is enabled by default only on Arm processors, because some x86
  * chips have a slow implementation of bsr, and the use of clz/bsr cannot be
  * shown to have a significant performance impact even on the x86 chips that
- * have a fast implementation of it.  When building for ARMv6, you can
+ * have a fast implementation of it.  When building for Armv6, you can
  * explicitly disable the use of clz/bsr by adding -mthumb to the compiler
  * flags (this defines __thumb__).
  */
diff --git a/3rdparty/libjpeg-turbo/src/jcinit.c b/3rdparty/libjpeg-turbo/src/jcinit.c
index 78aa465786..157353a22e 100644
--- a/3rdparty/libjpeg-turbo/src/jcinit.c
+++ b/3rdparty/libjpeg-turbo/src/jcinit.c
@@ -1,8 +1,10 @@
 /*
  * jcinit.c
  *
+ * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1991-1997, Thomas G. Lane.
- * This file is part of the Independent JPEG Group's software.
+ * libjpeg-turbo Modifications:
+ * Copyright (C) 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -19,6 +21,7 @@
 #define JPEG_INTERNALS
 #include "jinclude.h"
 #include "jpeglib.h"
+#include "jpegcomp.h"
 
 
 /*
diff --git a/3rdparty/libjpeg-turbo/src/jcphuff.c b/3rdparty/libjpeg-turbo/src/jcphuff.c
index 8c4efaf16c..a8b94bed84 100644
--- a/3rdparty/libjpeg-turbo/src/jcphuff.c
+++ b/3rdparty/libjpeg-turbo/src/jcphuff.c
@@ -43,10 +43,10 @@
  * memory footprint by 64k, which is important for some mobile applications
  * that create many isolated instances of libjpeg-turbo (web browsers, for
  * instance.)  This may improve performance on some mobile platforms as well.
- * This feature is enabled by default only on ARM processors, because some x86
+ * This feature is enabled by default only on Arm processors, because some x86
  * chips have a slow implementation of bsr, and the use of clz/bsr cannot be
  * shown to have a significant performance impact even on the x86 chips that
- * have a fast implementation of it.  When building for ARMv6, you can
+ * have a fast implementation of it.  When building for Armv6, you can
  * explicitly disable the use of clz/bsr by adding -mthumb to the compiler
  * flags (this defines __thumb__).
  */
diff --git a/3rdparty/libjpeg-turbo/src/jctrans.c b/3rdparty/libjpeg-turbo/src/jctrans.c
index ce70a30940..ab6a2186db 100644
--- a/3rdparty/libjpeg-turbo/src/jctrans.c
+++ b/3rdparty/libjpeg-turbo/src/jctrans.c
@@ -4,8 +4,8 @@
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1995-1998, Thomas G. Lane.
  * Modified 2000-2009 by Guido Vollbeding.
- * It was modified by The libjpeg-turbo Project to include only code relevant
- * to libjpeg-turbo.
+ * libjpeg-turbo Modifications:
+ * Copyright (C) 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -17,6 +17,7 @@
 #define JPEG_INTERNALS
 #include "jinclude.h"
 #include "jpeglib.h"
+#include "jpegcomp.h"
 
 
 /* Forward declarations */
diff --git a/3rdparty/libjpeg-turbo/src/jdapistd.c b/3rdparty/libjpeg-turbo/src/jdapistd.c
index 2c808fa564..38bd1110d9 100644
--- a/3rdparty/libjpeg-turbo/src/jdapistd.c
+++ b/3rdparty/libjpeg-turbo/src/jdapistd.c
@@ -4,7 +4,7 @@
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1994-1996, Thomas G. Lane.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2010, 2015-2018, D. R. Commander.
+ * Copyright (C) 2010, 2015-2018, 2020, D. R. Commander.
  * Copyright (C) 2015, Google, Inc.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
@@ -21,6 +21,8 @@
 #include "jinclude.h"
 #include "jdmainct.h"
 #include "jdcoefct.h"
+#include "jdmaster.h"
+#include "jdmerge.h"
 #include "jdsample.h"
 #include "jmemsys.h"
 
@@ -316,6 +318,8 @@ LOCAL(void)
 read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
 {
   JDIMENSION n;
+  my_master_ptr master = (my_master_ptr)cinfo->master;
+  JSAMPARRAY scanlines = NULL;
   void (*color_convert) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                          JDIMENSION input_row, JSAMPARRAY output_buf,
                          int num_rows) = NULL;
@@ -332,8 +336,13 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
     cinfo->cquantize->color_quantize = noop_quantize;
   }
 
+  if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
+    my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+    scanlines = &upsample->spare_row;
+  }
+
   for (n = 0; n < num_lines; n++)
-    jpeg_read_scanlines(cinfo, NULL, 1);
+    jpeg_read_scanlines(cinfo, scanlines, 1);
 
   if (color_convert)
     cinfo->cconvert->color_convert = color_convert;
@@ -353,6 +362,12 @@ increment_simple_rowgroup_ctr(j_decompress_ptr cinfo, JDIMENSION rows)
 {
   JDIMENSION rows_left;
   my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
+  my_master_ptr master = (my_master_ptr)cinfo->master;
+
+  if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
+    read_and_discard_scanlines(cinfo, rows);
+    return;
+  }
 
   /* Increment the counter to the next row group after the skipped rows. */
   main_ptr->rowgroup_ctr += rows / cinfo->max_v_samp_factor;
@@ -382,21 +397,27 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
 {
   my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
   my_coef_ptr coef = (my_coef_ptr)cinfo->coef;
+  my_master_ptr master = (my_master_ptr)cinfo->master;
   my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
   JDIMENSION i, x;
   int y;
   JDIMENSION lines_per_iMCU_row, lines_left_in_iMCU_row, lines_after_iMCU_row;
   JDIMENSION lines_to_skip, lines_to_read;
 
+  /* Two-pass color quantization is not supported. */
+  if (cinfo->quantize_colors && cinfo->two_pass_quantize)
+    ERREXIT(cinfo, JERR_NOTIMPL);
+
   if (cinfo->global_state != DSTATE_SCANNING)
     ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
 
   /* Do not skip past the bottom of the image. */
   if (cinfo->output_scanline + num_lines >= cinfo->output_height) {
+    num_lines = cinfo->output_height - cinfo->output_scanline;
     cinfo->output_scanline = cinfo->output_height;
     (*cinfo->inputctl->finish_input_pass) (cinfo);
     cinfo->inputctl->eoi_reached = TRUE;
-    return cinfo->output_height - cinfo->output_scanline;
+    return num_lines;
   }
 
   if (num_lines == 0)
@@ -445,8 +466,10 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
     main_ptr->buffer_full = FALSE;
     main_ptr->rowgroup_ctr = 0;
     main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
-    upsample->next_row_out = cinfo->max_v_samp_factor;
-    upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+    if (!master->using_merged_upsample) {
+      upsample->next_row_out = cinfo->max_v_samp_factor;
+      upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+    }
   }
 
   /* Skipping is much simpler when context rows are not required. */
@@ -458,8 +481,10 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
       cinfo->output_scanline += lines_left_in_iMCU_row;
       main_ptr->buffer_full = FALSE;
       main_ptr->rowgroup_ctr = 0;
-      upsample->next_row_out = cinfo->max_v_samp_factor;
-      upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+      if (!master->using_merged_upsample) {
+        upsample->next_row_out = cinfo->max_v_samp_factor;
+        upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+      }
     }
   }
 
@@ -494,7 +519,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
       cinfo->output_iMCU_row += lines_to_skip / lines_per_iMCU_row;
       increment_simple_rowgroup_ctr(cinfo, lines_to_read);
     }
-    upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+    if (!master->using_merged_upsample)
+      upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
     return num_lines;
   }
 
@@ -535,7 +561,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
    * bit odd, since "rows_to_go" seems to be redundantly keeping track of
    * output_scanline.
    */
-  upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+  if (!master->using_merged_upsample)
+    upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
 
   /* Always skip the requested number of lines. */
   return num_lines;
diff --git a/3rdparty/libjpeg-turbo/src/jdcoefct.c b/3rdparty/libjpeg-turbo/src/jdcoefct.c
index 723a9ac2be..2ba6aa11e4 100644
--- a/3rdparty/libjpeg-turbo/src/jdcoefct.c
+++ b/3rdparty/libjpeg-turbo/src/jdcoefct.c
@@ -6,7 +6,7 @@
  * libjpeg-turbo Modifications:
  * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
  * Copyright (C) 2010, 2015-2016, D. R. Commander.
- * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, 2020, Google, Inc.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -495,11 +495,13 @@ decompress_smooth_data(j_decompress_ptr cinfo, JSAMPIMAGE output_buf)
       if (first_row && block_row == 0)
         prev_block_row = buffer_ptr;
       else
-        prev_block_row = buffer[block_row - 1];
+        prev_block_row = buffer[block_row - 1] +
+                         cinfo->master->first_MCU_col[ci];
       if (last_row && block_row == block_rows - 1)
         next_block_row = buffer_ptr;
       else
-        next_block_row = buffer[block_row + 1];
+        next_block_row = buffer[block_row + 1] +
+                         cinfo->master->first_MCU_col[ci];
       /* We fetch the surrounding DC values using a sliding-register approach.
        * Initialize all nine here so as to do the right thing on narrow pics.
        */
diff --git a/3rdparty/libjpeg-turbo/src/jdcolor.c b/3rdparty/libjpeg-turbo/src/jdcolor.c
index dc0e3b6c0e..d3ae40c7da 100644
--- a/3rdparty/libjpeg-turbo/src/jdcolor.c
+++ b/3rdparty/libjpeg-turbo/src/jdcolor.c
@@ -571,11 +571,10 @@ ycck_cmyk_convert(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
  * RGB565 conversion
  */
 
-#define PACK_SHORT_565_LE(r, g, b)  ((((r) << 8) & 0xF800) | \
-                                     (((g) << 3) & 0x7E0) | ((b) >> 3))
-#define PACK_SHORT_565_BE(r, g, b)  (((r) & 0xF8) | ((g) >> 5) | \
-                                     (((g) << 11) & 0xE000) | \
-                                     (((b) << 5) & 0x1F00))
+#define PACK_SHORT_565_LE(r, g, b) \
+  ((((r) << 8) & 0xF800) | (((g) << 3) & 0x7E0) | ((b) >> 3))
+#define PACK_SHORT_565_BE(r, g, b) \
+  (((r) & 0xF8) | ((g) >> 5) | (((g) << 11) & 0xE000) | (((b) << 5) & 0x1F00))
 
 #define PACK_TWO_PIXELS_LE(l, r)    ((r << 16) | l)
 #define PACK_TWO_PIXELS_BE(l, r)    ((l << 16) | r)
diff --git a/3rdparty/libjpeg-turbo/src/jdmerge.c b/3rdparty/libjpeg-turbo/src/jdmerge.c
index dff5a35087..3a456d6581 100644
--- a/3rdparty/libjpeg-turbo/src/jdmerge.c
+++ b/3rdparty/libjpeg-turbo/src/jdmerge.c
@@ -5,7 +5,7 @@
  * Copyright (C) 1994-1996, Thomas G. Lane.
  * libjpeg-turbo Modifications:
  * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
- * Copyright (C) 2009, 2011, 2014-2015, D. R. Commander.
+ * Copyright (C) 2009, 2011, 2014-2015, 2020, D. R. Commander.
  * Copyright (C) 2013, Linaro Limited.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
@@ -40,41 +40,13 @@
 #define JPEG_INTERNALS
 #include "jinclude.h"
 #include "jpeglib.h"
+#include "jdmerge.h"
 #include "jsimd.h"
 #include "jconfigint.h"
 
 #ifdef UPSAMPLE_MERGING_SUPPORTED
 
 
-/* Private subobject */
-
-typedef struct {
-  struct jpeg_upsampler pub;    /* public fields */
-
-  /* Pointer to routine to do actual upsampling/conversion of one row group */
-  void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
-                    JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
-
-  /* Private state for YCC->RGB conversion */
-  int *Cr_r_tab;                /* => table for Cr to R conversion */
-  int *Cb_b_tab;                /* => table for Cb to B conversion */
-  JLONG *Cr_g_tab;              /* => table for Cr to G conversion */
-  JLONG *Cb_g_tab;              /* => table for Cb to G conversion */
-
-  /* For 2:1 vertical sampling, we produce two output rows at a time.
-   * We need a "spare" row buffer to hold the second output row if the
-   * application provides just a one-row buffer; we also use the spare
-   * to discard the dummy last row if the image height is odd.
-   */
-  JSAMPROW spare_row;
-  boolean spare_full;           /* T if spare buffer is occupied */
-
-  JDIMENSION out_row_width;     /* samples per output row */
-  JDIMENSION rows_to_go;        /* counts rows remaining in image */
-} my_upsampler;
-
-typedef my_upsampler *my_upsample_ptr;
-
 #define SCALEBITS       16      /* speediest right-shift on some machines */
 #define ONE_HALF        ((JLONG)1 << (SCALEBITS - 1))
 #define FIX(x)          ((JLONG)((x) * (1L << SCALEBITS) + 0.5))
@@ -189,7 +161,7 @@ typedef my_upsampler *my_upsample_ptr;
 LOCAL(void)
 build_ycc_rgb_table(j_decompress_ptr cinfo)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   int i;
   JLONG x;
   SHIFT_TEMPS
@@ -232,7 +204,7 @@ build_ycc_rgb_table(j_decompress_ptr cinfo)
 METHODDEF(void)
 start_pass_merged_upsample(j_decompress_ptr cinfo)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
 
   /* Mark the spare buffer empty */
   upsample->spare_full = FALSE;
@@ -254,7 +226,7 @@ merged_2v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                    JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
 /* 2:1 vertical sampling case: may need a spare row. */
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   JSAMPROW work_ptrs[2];
   JDIMENSION num_rows;          /* number of rows returned to caller */
 
@@ -305,7 +277,7 @@ merged_1v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                    JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
 /* 1:1 vertical sampling case: much easier, never need a spare row. */
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
 
   /* Just do the upsampling. */
   (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr,
@@ -420,11 +392,10 @@ h2v2_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
  * RGB565 conversion
  */
 
-#define PACK_SHORT_565_LE(r, g, b)  ((((r) << 8) & 0xF800) | \
-                                     (((g) << 3) & 0x7E0) | ((b) >> 3))
-#define PACK_SHORT_565_BE(r, g, b)  (((r) & 0xF8) | ((g) >> 5) | \
-                                     (((g) << 11) & 0xE000) | \
-                                     (((b) << 5) & 0x1F00))
+#define PACK_SHORT_565_LE(r, g, b) \
+  ((((r) << 8) & 0xF800) | (((g) << 3) & 0x7E0) | ((b) >> 3))
+#define PACK_SHORT_565_BE(r, g, b) \
+  (((r) & 0xF8) | ((g) >> 5) | (((g) << 11) & 0xE000) | (((b) << 5) & 0x1F00))
 
 #define PACK_TWO_PIXELS_LE(l, r)    ((r << 16) | l)
 #define PACK_TWO_PIXELS_BE(l, r)    ((l << 16) | r)
@@ -566,11 +537,11 @@ h2v2_merged_upsample_565D(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
 GLOBAL(void)
 jinit_merged_upsampler(j_decompress_ptr cinfo)
 {
-  my_upsample_ptr upsample;
+  my_merged_upsample_ptr upsample;
 
-  upsample = (my_upsample_ptr)
+  upsample = (my_merged_upsample_ptr)
     (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
-                                sizeof(my_upsampler));
+                                sizeof(my_merged_upsampler));
   cinfo->upsample = (struct jpeg_upsampler *)upsample;
   upsample->pub.start_pass = start_pass_merged_upsample;
   upsample->pub.need_context_rows = FALSE;
diff --git a/3rdparty/libjpeg-turbo/src/jdmerge.h b/3rdparty/libjpeg-turbo/src/jdmerge.h
new file mode 100644
index 0000000000..b583396b10
--- /dev/null
+++ b/3rdparty/libjpeg-turbo/src/jdmerge.h
@@ -0,0 +1,47 @@
+/*
+ * jdmerge.h
+ *
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+ * Copyright (C) 2020, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ */
+
+#define JPEG_INTERNALS
+#include "jpeglib.h"
+
+#ifdef UPSAMPLE_MERGING_SUPPORTED
+
+
+/* Private subobject */
+
+typedef struct {
+  struct jpeg_upsampler pub;    /* public fields */
+
+  /* Pointer to routine to do actual upsampling/conversion of one row group */
+  void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+                    JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
+
+  /* Private state for YCC->RGB conversion */
+  int *Cr_r_tab;                /* => table for Cr to R conversion */
+  int *Cb_b_tab;                /* => table for Cb to B conversion */
+  JLONG *Cr_g_tab;              /* => table for Cr to G conversion */
+  JLONG *Cb_g_tab;              /* => table for Cb to G conversion */
+
+  /* For 2:1 vertical sampling, we produce two output rows at a time.
+   * We need a "spare" row buffer to hold the second output row if the
+   * application provides just a one-row buffer; we also use the spare
+   * to discard the dummy last row if the image height is odd.
+   */
+  JSAMPROW spare_row;
+  boolean spare_full;           /* T if spare buffer is occupied */
+
+  JDIMENSION out_row_width;     /* samples per output row */
+  JDIMENSION rows_to_go;        /* counts rows remaining in image */
+} my_merged_upsampler;
+
+typedef my_merged_upsampler *my_merged_upsample_ptr;
+
+#endif /* UPSAMPLE_MERGING_SUPPORTED */
diff --git a/3rdparty/libjpeg-turbo/src/jdmrg565.c b/3rdparty/libjpeg-turbo/src/jdmrg565.c
index 1b87e3718d..53f1e16700 100644
--- a/3rdparty/libjpeg-turbo/src/jdmrg565.c
+++ b/3rdparty/libjpeg-turbo/src/jdmrg565.c
@@ -5,7 +5,7 @@
  * Copyright (C) 1994-1996, Thomas G. Lane.
  * libjpeg-turbo Modifications:
  * Copyright (C) 2013, Linaro Limited.
- * Copyright (C) 2014-2015, 2018, D. R. Commander.
+ * Copyright (C) 2014-2015, 2018, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -19,7 +19,7 @@ h2v1_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                                   JDIMENSION in_row_group_ctr,
                                   JSAMPARRAY output_buf)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   register int y, cred, cgreen, cblue;
   int cb, cr;
   register JSAMPROW outptr;
@@ -90,7 +90,7 @@ h2v1_merged_upsample_565D_internal(j_decompress_ptr cinfo,
                                    JDIMENSION in_row_group_ctr,
                                    JSAMPARRAY output_buf)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   register int y, cred, cgreen, cblue;
   int cb, cr;
   register JSAMPROW outptr;
@@ -163,7 +163,7 @@ h2v2_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                                   JDIMENSION in_row_group_ctr,
                                   JSAMPARRAY output_buf)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   register int y, cred, cgreen, cblue;
   int cb, cr;
   register JSAMPROW outptr0, outptr1;
@@ -259,7 +259,7 @@ h2v2_merged_upsample_565D_internal(j_decompress_ptr cinfo,
                                    JDIMENSION in_row_group_ctr,
                                    JSAMPARRAY output_buf)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   register int y, cred, cgreen, cblue;
   int cb, cr;
   register JSAMPROW outptr0, outptr1;
diff --git a/3rdparty/libjpeg-turbo/src/jdmrgext.c b/3rdparty/libjpeg-turbo/src/jdmrgext.c
index b1c27df56a..c9a44d8219 100644
--- a/3rdparty/libjpeg-turbo/src/jdmrgext.c
+++ b/3rdparty/libjpeg-turbo/src/jdmrgext.c
@@ -4,7 +4,7 @@
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1994-1996, Thomas G. Lane.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2011, 2015, D. R. Commander.
+ * Copyright (C) 2011, 2015, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -25,7 +25,7 @@ h2v1_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                               JDIMENSION in_row_group_ctr,
                               JSAMPARRAY output_buf)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   register int y, cred, cgreen, cblue;
   int cb, cr;
   register JSAMPROW outptr;
@@ -97,7 +97,7 @@ h2v2_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                               JDIMENSION in_row_group_ctr,
                               JSAMPARRAY output_buf)
 {
-  my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
   register int y, cred, cgreen, cblue;
   int cb, cr;
   register JSAMPROW outptr0, outptr1;
diff --git a/3rdparty/libjpeg-turbo/src/jdtrans.c b/3rdparty/libjpeg-turbo/src/jdtrans.c
index 56713efe64..d7ec4b83b3 100644
--- a/3rdparty/libjpeg-turbo/src/jdtrans.c
+++ b/3rdparty/libjpeg-turbo/src/jdtrans.c
@@ -3,8 +3,8 @@
  *
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1995-1997, Thomas G. Lane.
- * It was modified by The libjpeg-turbo Project to include only code relevant
- * to libjpeg-turbo.
+ * libjpeg-turbo Modifications:
+ * Copyright (C) 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -16,6 +16,7 @@
 #define JPEG_INTERNALS
 #include "jinclude.h"
 #include "jpeglib.h"
+#include "jpegcomp.h"
 
 
 /* Forward declarations */
diff --git a/3rdparty/libjpeg-turbo/src/jfdctint.c b/3rdparty/libjpeg-turbo/src/jfdctint.c
index b47c3061ac..c95a3a7fb8 100644
--- a/3rdparty/libjpeg-turbo/src/jfdctint.c
+++ b/3rdparty/libjpeg-turbo/src/jfdctint.c
@@ -4,11 +4,11 @@
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1991-1996, Thomas G. Lane.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2015, D. R. Commander.
+ * Copyright (C) 2015, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
- * This file contains a slow-but-accurate integer implementation of the
+ * This file contains a slower but more accurate integer implementation of the
  * forward DCT (Discrete Cosine Transform).
  *
  * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT
diff --git a/3rdparty/libjpeg-turbo/src/jidctint.c b/3rdparty/libjpeg-turbo/src/jidctint.c
index 98425d5fd0..50f385da33 100644
--- a/3rdparty/libjpeg-turbo/src/jidctint.c
+++ b/3rdparty/libjpeg-turbo/src/jidctint.c
@@ -5,11 +5,11 @@
  * Copyright (C) 1991-1998, Thomas G. Lane.
  * Modification developed 2002-2009 by Guido Vollbeding.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2015, D. R. Commander.
+ * Copyright (C) 2015, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
- * This file contains a slow-but-accurate integer implementation of the
+ * This file contains a slower but more accurate integer implementation of the
  * inverse DCT (Discrete Cosine Transform).  In the IJG code, this routine
  * must also perform dequantization of the input coefficients.
  *
diff --git a/3rdparty/libjpeg-turbo/src/jmorecfg.h b/3rdparty/libjpeg-turbo/src/jmorecfg.h
index d0b930079a..aa29f0f9f1 100644
--- a/3rdparty/libjpeg-turbo/src/jmorecfg.h
+++ b/3rdparty/libjpeg-turbo/src/jmorecfg.h
@@ -5,7 +5,7 @@
  * Copyright (C) 1991-1997, Thomas G. Lane.
  * Modified 1997-2009 by Guido Vollbeding.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2009, 2011, 2014-2015, 2018, D. R. Commander.
+ * Copyright (C) 2009, 2011, 2014-2015, 2018, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -273,9 +273,9 @@ typedef int boolean;
 
 /* Capability options common to encoder and decoder: */
 
-#define DCT_ISLOW_SUPPORTED     /* slow but accurate integer algorithm */
-#define DCT_IFAST_SUPPORTED     /* faster, less accurate integer method */
-#define DCT_FLOAT_SUPPORTED     /* floating-point: accurate, fast on fast HW */
+#define DCT_ISLOW_SUPPORTED     /* accurate integer method */
+#define DCT_IFAST_SUPPORTED     /* less accurate int method [legacy feature] */
+#define DCT_FLOAT_SUPPORTED     /* floating-point method [legacy feature] */
 
 /* Encoder capability options: */
 
diff --git a/3rdparty/libjpeg-turbo/src/jpegcomp.h b/3rdparty/libjpeg-turbo/src/jpegcomp.h
index b32d544bf1..c4834ac0df 100644
--- a/3rdparty/libjpeg-turbo/src/jpegcomp.h
+++ b/3rdparty/libjpeg-turbo/src/jpegcomp.h
@@ -1,7 +1,7 @@
 /*
  * jpegcomp.h
  *
- * Copyright (C) 2010, D. R. Commander.
+ * Copyright (C) 2010, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -19,6 +19,7 @@
 #define _min_DCT_v_scaled_size  min_DCT_v_scaled_size
 #define _jpeg_width  jpeg_width
 #define _jpeg_height  jpeg_height
+#define JERR_ARITH_NOTIMPL  JERR_NOT_COMPILED
 #else
 #define _DCT_scaled_size  DCT_scaled_size
 #define _DCT_h_scaled_size  DCT_scaled_size
diff --git a/3rdparty/libjpeg-turbo/src/jpeglib.h b/3rdparty/libjpeg-turbo/src/jpeglib.h
index 33f8ad2791..d7664f0630 100644
--- a/3rdparty/libjpeg-turbo/src/jpeglib.h
+++ b/3rdparty/libjpeg-turbo/src/jpeglib.h
@@ -5,7 +5,7 @@
  * Copyright (C) 1991-1998, Thomas G. Lane.
  * Modified 2002-2009 by Guido Vollbeding.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2009-2011, 2013-2014, 2016-2017, D. R. Commander.
+ * Copyright (C) 2009-2011, 2013-2014, 2016-2017, 2020, D. R. Commander.
  * Copyright (C) 2015, Google, Inc.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
@@ -244,9 +244,9 @@ typedef enum {
 /* DCT/IDCT algorithm options. */
 
 typedef enum {
-  JDCT_ISLOW,             /* slow but accurate integer algorithm */
-  JDCT_IFAST,             /* faster, less accurate integer method */
-  JDCT_FLOAT              /* floating-point: accurate, fast on fast HW */
+  JDCT_ISLOW,             /* accurate integer method */
+  JDCT_IFAST,             /* less accurate integer method [legacy feature] */
+  JDCT_FLOAT              /* floating-point method [legacy feature] */
 } J_DCT_METHOD;
 
 #ifndef JDCT_DEFAULT            /* may be overridden in jconfig.h */
diff --git a/3rdparty/libjpeg-turbo/src/jquant2.c b/3rdparty/libjpeg-turbo/src/jquant2.c
index 0ce0ca5472..6570613bb9 100644
--- a/3rdparty/libjpeg-turbo/src/jquant2.c
+++ b/3rdparty/libjpeg-turbo/src/jquant2.c
@@ -4,7 +4,7 @@
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1991-1996, Thomas G. Lane.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2009, 2014-2015, D. R. Commander.
+ * Copyright (C) 2009, 2014-2015, 2020, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -1145,7 +1145,7 @@ start_pass_2_quant(j_decompress_ptr cinfo, boolean is_pre_scan)
   int i;
 
   /* Only F-S dithering or no dithering is supported. */
-  /* If user asks for ordered dither, give him F-S. */
+  /* If user asks for ordered dither, give them F-S. */
   if (cinfo->dither_mode != JDITHER_NONE)
     cinfo->dither_mode = JDITHER_FS;
 
@@ -1263,7 +1263,7 @@ jinit_2pass_quantizer(j_decompress_ptr cinfo)
     cquantize->sv_colormap = NULL;
 
   /* Only F-S dithering or no dithering is supported. */
-  /* If user asks for ordered dither, give him F-S. */
+  /* If user asks for ordered dither, give them F-S. */
   if (cinfo->dither_mode != JDITHER_NONE)
     cinfo->dither_mode = JDITHER_FS;
 
diff --git a/3rdparty/libjpeg-turbo/src/jversion.h b/3rdparty/libjpeg-turbo/src/jversion.h
index ab4a2c5703..4462b94104 100644
--- a/3rdparty/libjpeg-turbo/src/jversion.h
+++ b/3rdparty/libjpeg-turbo/src/jversion.h
@@ -30,23 +30,25 @@
  * NOTE: It is our convention to place the authors in the following order:
  * - libjpeg-turbo authors (2009-) in descending order of the date of their
  *   most recent contribution to the project, then in ascending order of the
- *   date of their first contribution to the project
+ *   date of their first contribution to the project, then in alphabetical
+ *   order
  * - Upstream authors in descending order of the date of the first inclusion of
  *   their code
  */
 
 #define JCOPYRIGHT \
   "Copyright (C) 2009-2020 D. R. Commander\n" \
-  "Copyright (C) 2011-2016 Siarhei Siamashka\n" \
+  "Copyright (C) 2015, 2020 Google, Inc.\n" \
+  "Copyright (C) 2019 Arm Limited\n" \
   "Copyright (C) 2015-2016, 2018 Matthieu Darbois\n" \
+  "Copyright (C) 2011-2016 Siarhei Siamashka\n" \
   "Copyright (C) 2015 Intel Corporation\n" \
-  "Copyright (C) 2015 Google, Inc.\n" \
+  "Copyright (C) 2013-2014 Linaro Limited\n" \
   "Copyright (C) 2013-2014 MIPS Technologies, Inc.\n" \
-  "Copyright (C) 2013 Linaro Limited\n" \
+  "Copyright (C) 2009, 2012 Pierre Ossman for Cendio AB\n" \
   "Copyright (C) 2009-2011 Nokia Corporation and/or its subsidiary(-ies)\n" \
-  "Copyright (C) 2009 Pierre Ossman for Cendio AB\n" \
   "Copyright (C) 1999-2006 MIYASAKA Masaru\n" \
-  "Copyright (C) 1991-2016 Thomas G. Lane, Guido Vollbeding"
+  "Copyright (C) 1991-2017 Thomas G. Lane, Guido Vollbeding"
 
 #define JCOPYRIGHT_SHORT \
   "Copyright (C) 1991-2020 The libjpeg-turbo Project and many others"

From a316b11aaa5baab3a810fa804425f53a3a0a5ca7 Mon Sep 17 00:00:00 2001
From: Omar Alzaibaq <66904570+Omar-AE@users.noreply.github.com>
Date: Tue, 17 Nov 2020 21:47:24 +0200
Subject: [PATCH 114/152] Merge pull request #18220 from Omar-AE:hddl-supported

* added HDDL VPU support

* changed to return true in one line if any device is connected

* dnn: use releaseHDDLPlugin()

* dnn(hddl): fix conditions
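
For context, a minimal usage sketch (not part of this patch): selecting the
new target through the public DNN API. The model file names are placeholders,
and an HDDL device with the Inference Engine backend is assumed to be
available.

```
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/utils/inference_engine.hpp>

int main()
{
    // Placeholder IR model files; any IE-compatible network would do.
    cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");

    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_HDDL);  // target added by this patch

    // Dummy 224x224 BGR input; a real application would load an image.
    cv::Mat img(224, 224, CV_8UC3, cv::Scalar::all(0));
    net.setInput(cv::dnn::blobFromImage(img));
    cv::Mat out = net.forward();

    cv::dnn::releaseHDDLPlugin();  // helper added by this patch
    return 0;
}
```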
---
 modules/dnn/include/opencv2/dnn/dnn.hpp       |  4 +-
 .../opencv2/dnn/utils/inference_engine.hpp    |  5 ++
 modules/dnn/perf/perf_net.cpp                 |  4 +-
 modules/dnn/src/dnn.cpp                       | 19 ++++++-
 modules/dnn/src/ie_ngraph.cpp                 |  5 +-
 modules/dnn/src/layers/blank_layer.cpp        |  2 +-
 modules/dnn/src/layers/convolution_layer.cpp  |  5 +-
 modules/dnn/src/layers/mvn_layer.cpp          |  5 +-
 .../dnn/src/layers/normalize_bbox_layer.cpp   |  3 +-
 modules/dnn/src/layers/padding_layer.cpp      |  5 +-
 modules/dnn/src/layers/pooling_layer.cpp      |  2 +-
 modules/dnn/src/layers/proposal_layer.cpp     | 10 +++-
 modules/dnn/src/layers/slice_layer.cpp        |  2 +-
 modules/dnn/src/op_inf_engine.cpp             | 52 ++++++++++++++-----
 modules/dnn/test/test_common.impl.hpp         | 11 ++--
 modules/dnn/test/test_ie_models.cpp           |  2 +
 samples/dnn/classification.py                 |  5 +-
 samples/dnn/human_parsing.py                  |  5 +-
 samples/dnn/object_detection.py               |  5 +-
 samples/dnn/segmentation.py                   |  5 +-
 samples/dnn/virtual_try_on.py                 |  5 +-
 21 files changed, 116 insertions(+), 45 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 3b12508c74..78662544d8 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -93,7 +93,8 @@ CV__DNN_INLINE_NS_BEGIN
         DNN_TARGET_VULKAN,
         DNN_TARGET_FPGA,  //!< FPGA device with CPU fallbacks using Inference Engine's Heterogeneous plugin.
         DNN_TARGET_CUDA,
-        DNN_TARGET_CUDA_FP16
+        DNN_TARGET_CUDA_FP16,
+        DNN_TARGET_HDDL
     };
 
     CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
@@ -571,6 +572,7 @@ CV__DNN_INLINE_NS_BEGIN
          * | DNN_TARGET_FPGA        |                    |                            + |                    |                   |
          * | DNN_TARGET_CUDA        |                    |                              |                    |                 + |
          * | DNN_TARGET_CUDA_FP16   |                    |                              |                    |                 + |
+         * | DNN_TARGET_HDDL        |                    |                            + |                    |                   |
          */
         CV_WRAP void setPreferableTarget(int targetId);
 
diff --git a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
index 7db93a916d..29882b92b0 100644
--- a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
+++ b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
@@ -58,6 +58,11 @@ CV_EXPORTS_W void resetMyriadDevice();
 CV_EXPORTS_W cv::String getInferenceEngineVPUType();
 
 
+/** @brief Release an HDDL plugin.
+ */
+CV_EXPORTS_W void releaseHDDLPlugin();
+
+
 CV__DNN_INLINE_NS_END
 }} // namespace
 
diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp
index 600193915d..aef3bc2c31 100644
--- a/modules/dnn/perf/perf_net.cpp
+++ b/modules/dnn/perf/perf_net.cpp
@@ -130,7 +130,7 @@ PERF_TEST_P_(DNNTestNetwork, OpenFace)
     if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_HDDL))
         throw SkipTestException("");
 #endif
     processNet("dnn/openface_nn4.small2.v1.t7", "", "",
@@ -172,7 +172,7 @@ PERF_TEST_P_(DNNTestNetwork, DenseNet_121)
 PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
 {
     if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD))
+        (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_HDDL)))
         throw SkipTestException("");
     // The same .caffemodel but modified .prototxt
     // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 46ee650ef9..6ec3fdadbd 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -122,6 +122,8 @@ public:
         {
             if (std::string::npos != i->find("MYRIAD") && target == DNN_TARGET_MYRIAD)
                 return true;
+            if (std::string::npos != i->find("HDDL") && target == DNN_TARGET_HDDL)
+                return true;
             else if (std::string::npos != i->find("FPGA") && target == DNN_TARGET_FPGA)
                 return true;
             else if (std::string::npos != i->find("CPU") && target == DNN_TARGET_CPU)
@@ -184,6 +186,14 @@ private:
 #endif
 #ifdef HAVE_DNN_NGRAPH
             backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
+#endif
+        }
+        if (checkIETarget(DNN_TARGET_HDDL)) {
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_HDDL));
+#endif
+#ifdef HAVE_DNN_NGRAPH
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_HDDL));
 #endif
         }
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
@@ -1379,6 +1389,7 @@ struct Net::Impl : public detail::NetImplBase
                   preferableTarget == DNN_TARGET_OPENCL ||
                   preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                   preferableTarget == DNN_TARGET_MYRIAD ||
+                  preferableTarget == DNN_TARGET_HDDL ||
                   preferableTarget == DNN_TARGET_FPGA
             );
         }
@@ -1813,7 +1824,7 @@ struct Net::Impl : public detail::NetImplBase
                                     INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
                                     supportsCPUFallback;
                 // TODO: there is a bug in Myriad plugin with custom layers shape infer.
-                if (preferableTarget == DNN_TARGET_MYRIAD)
+                if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
                 {
                     for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
                     {
@@ -1823,6 +1834,7 @@ struct Net::Impl : public detail::NetImplBase
 
                 // TODO: fix these workarounds
                 if (preferableTarget == DNN_TARGET_MYRIAD ||
+                    preferableTarget == DNN_TARGET_HDDL ||
                     preferableTarget == DNN_TARGET_OPENCL ||
                     preferableTarget == DNN_TARGET_OPENCL_FP16)
                     customizable &= ld.type != "Concat";
@@ -1910,6 +1922,7 @@ struct Net::Impl : public detail::NetImplBase
             // Convert weights in FP16 for specific targets.
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                  preferableTarget == DNN_TARGET_MYRIAD ||
+                 preferableTarget == DNN_TARGET_HDDL ||
                  preferableTarget == DNN_TARGET_FPGA) && !fused)
             {
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
@@ -2104,7 +2117,7 @@ struct Net::Impl : public detail::NetImplBase
                 bool customizable = ld.id != 0 && supportsCPUFallback;
 
                 // TODO: there is a bug in Myriad plugin with custom layers shape infer.
-                if (preferableTarget == DNN_TARGET_MYRIAD)
+                if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
                 {
                     for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
                     {
@@ -2114,6 +2127,7 @@ struct Net::Impl : public detail::NetImplBase
 
                 // TODO: fix these workarounds
                 if (preferableTarget == DNN_TARGET_MYRIAD ||
+                    preferableTarget == DNN_TARGET_HDDL ||
                     preferableTarget == DNN_TARGET_OPENCL ||
                     preferableTarget == DNN_TARGET_OPENCL_FP16)
                     customizable &= ld.type != "Concat";
@@ -4541,6 +4555,7 @@ string Net::Impl::dump()
             case DNN_TARGET_OPENCL: out << "OCL"; colorId = 1; break;
             case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16"; colorId = 2; break;
             case DNN_TARGET_MYRIAD: out << "MYRIAD"; colorId = 3; break;
+            case DNN_TARGET_HDDL: out << "HDDL"; colorId = 8; break;
             case DNN_TARGET_VULKAN: out << "VULKAN"; colorId = 7; break;
             case DNN_TARGET_FPGA: out << "FPGA"; colorId = 4; break;
             case DNN_TARGET_CUDA: out << "CUDA"; colorId = 5; break;
diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
index 84b984ac97..c646c1fe3a 100644
--- a/modules/dnn/src/ie_ngraph.cpp
+++ b/modules/dnn/src/ie_ngraph.cpp
@@ -556,6 +556,9 @@ void InfEngineNgraphNet::init(Target targetId)
         case DNN_TARGET_MYRIAD:
             device_name = "MYRIAD";
             break;
+        case DNN_TARGET_HDDL:
+            device_name = "HDDL";
+            break;
         case DNN_TARGET_FPGA:
             device_name = "FPGA";
             break;
@@ -683,7 +686,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
 #endif
         }
         std::map<std::string, std::string> config;
-        if (device_name == "MYRIAD") {
+        if (device_name == "MYRIAD" || device_name == "HDDL") {
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
             config.emplace("MYRIAD_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
 #else
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 5acdc3fa1e..5f93b45886 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -125,7 +125,7 @@ public:
 
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
-        if (preferableTarget == DNN_TARGET_MYRIAD)
+        if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
         {
             ieLayer.setType("Copy");
         }
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 82766509b3..02495f45ea 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -325,9 +325,10 @@ public:
                 return false;
             if (ksize == 3)
                 return preferableTarget == DNN_TARGET_CPU;
-            if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
+            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+            if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
                 return false;
-            return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
+            return (!isMyriad || dilation.width == dilation.height);
         }
 #endif
         if (backendId == DNN_BACKEND_OPENCV)
diff --git a/modules/dnn/src/layers/mvn_layer.cpp b/modules/dnn/src/layers/mvn_layer.cpp
index 4a096ce19c..db986bc897 100644
--- a/modules/dnn/src/layers/mvn_layer.cpp
+++ b/modules/dnn/src/layers/mvn_layer.cpp
@@ -126,7 +126,10 @@ public:
     {
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-            return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f);
+        {
+            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+            return !zeroDev && (!isMyriad || eps <= 1e-7f);
+        }
 #endif
 #ifdef HAVE_DNN_NGRAPH
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index 3d33511d17..a979fdedb6 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -75,7 +75,8 @@ public:
             if (pnorm != 2)
                 return false;
 
-            if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && preferableTarget == DNN_TARGET_MYRIAD)
+            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+            if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && isMyriad)
                 return !acrossSpatial;
 
             return startAxis == 1;
diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp
index c83cf026de..b286133419 100644
--- a/modules/dnn/src/layers/padding_layer.cpp
+++ b/modules/dnn/src/layers/padding_layer.cpp
@@ -103,9 +103,12 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
             return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
-                   (preferableTarget != DNN_TARGET_MYRIAD ||
+                   (!isMyriad ||
                     (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+        }
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 0e07e5352e..ef3d10e68b 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -202,7 +202,7 @@ public:
                 return false;
             if (kernel_size.size() == 3)
                 return preferableTarget == DNN_TARGET_CPU;
-            if (preferableTarget == DNN_TARGET_MYRIAD) {
+            if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL) {
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
                 if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) {
                     return !isMyriadX();
diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp
index 990cfeda30..4658e7b41f 100644
--- a/modules/dnn/src/layers/proposal_layer.cpp
+++ b/modules/dnn/src/layers/proposal_layer.cpp
@@ -95,8 +95,14 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && preferableTarget != DNN_TARGET_MYRIAD);
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+            return !isMyriad;
+        }
+#endif
+        return backendId == DNN_BACKEND_OPENCV;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index 6deabb5884..f73a6440ef 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -492,7 +492,7 @@ public:
         std::vector<size_t> axes, offsets, dims;
         int from, to, step;
         int numDims = finalSliceRanges[0].size();
-        if (preferableTarget == DNN_TARGET_MYRIAD)
+        if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
         {
             from = axis;
             to = numDims;
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index 745d86ef3c..b7cdc2ad94 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -367,6 +367,7 @@ void InfEngineBackendNet::init(Target targetId)
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
             // Inference Engine determines network precision by ports.
             InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
+                                            targetId == DNN_TARGET_HDDL ||
                                             targetId == DNN_TARGET_OPENCL_FP16) ?
                                            InferenceEngine::Precision::FP16 :
                                            InferenceEngine::Precision::FP32;
@@ -391,6 +392,9 @@ void InfEngineBackendNet::init(Target targetId)
         case DNN_TARGET_MYRIAD:
             device_name = "MYRIAD";
             break;
+        case DNN_TARGET_HDDL:
+            device_name = "HDDL";
+            break;
         case DNN_TARGET_FPGA:
             device_name = "FPGA";
             break;
@@ -652,20 +656,20 @@ InferenceEngine::Core& getCore(const std::string& id)
 #endif
 
 #if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
-static bool detectMyriadX_()
+static bool detectMyriadX_(std::string device)
 {
     AutoLock lock(getInitializationMutex());
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
     // Lightweight detection
-    InferenceEngine::Core& ie = getCore("MYRIAD");
+    InferenceEngine::Core& ie = getCore(device);
     const std::vector<std::string> devices = ie.GetAvailableDevices();
     for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
     {
-        if (i->find("MYRIAD") != std::string::npos)
+        if (i->find(device) != std::string::npos)
         {
             const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
             CV_LOG_INFO(NULL, "Myriad device: " << name);
-            return name.find("MyriadX") != std::string::npos  || name.find("Myriad X") != std::string::npos;
+            return name.find("MyriadX") != std::string::npos || name.find("Myriad X") != std::string::npos || name.find("HDDL") != std::string::npos;
         }
     }
     return false;
@@ -702,13 +706,13 @@ static bool detectMyriadX_()
     InferenceEngine::InferenceEnginePluginPtr enginePtr;
     {
         auto& sharedPlugins = getSharedPlugins();
-        auto pluginIt = sharedPlugins.find("MYRIAD");
+        auto pluginIt = sharedPlugins.find(device);
         if (pluginIt != sharedPlugins.end()) {
             enginePtr = pluginIt->second;
         } else {
             auto dispatcher = InferenceEngine::PluginDispatcher({""});
-            enginePtr = dispatcher.getPluginByDevice("MYRIAD");
-            sharedPlugins["MYRIAD"] = enginePtr;
+            enginePtr = dispatcher.getPluginByDevice(device);
+            sharedPlugins[device] = enginePtr;
         }
     }
     auto plugin = InferenceEngine::InferencePlugin(enginePtr);
@@ -719,9 +723,9 @@ static bool detectMyriadX_()
     try
     {
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
-        auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
+        auto netExec = getCore(device).LoadNetwork(cnn, device, {{"VPU_PLATFORM", "VPU_2480"}});
 #else
-        auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
+        auto netExec = getCore(device).LoadNetwork(cnn, device, {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
 #endif
 #endif
         auto infRequest = netExec.CreateInferRequest();
@@ -1155,11 +1159,30 @@ void resetMyriadDevice()
 #endif  // HAVE_INF_ENGINE
 }
 
+void releaseHDDLPlugin()
+{
+#ifdef HAVE_INF_ENGINE
+    AutoLock lock(getInitializationMutex());
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+    getSharedPlugins().erase("HDDL");
+#else
+    // Unregister both "HDDL" and "HETERO:HDDL,CPU" plugins
+    InferenceEngine::Core& ie = getCore("HDDL");
+    try
+    {
+        ie.UnregisterPlugin("HDDL");
+        ie.UnregisterPlugin("HETERO");
+    }
+    catch (...) {}
+#endif
+#endif  // HAVE_INF_ENGINE
+}
+
 #ifdef HAVE_INF_ENGINE
 bool isMyriadX()
 {
-     static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
-     return myriadX;
+    static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
+    return myriadX;
 }
 
 static std::string getInferenceEngineVPUType_()
@@ -1170,10 +1193,11 @@ static std::string getInferenceEngineVPUType_()
 #if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
         param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
 #else
-        CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
+        CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X or HDDL. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
         try {
-            bool isMyriadX_ = detectMyriadX_();
-            if (isMyriadX_)
+            bool isMyriadX_ = detectMyriadX_("MYRIAD");
+            bool isHDDL_ = detectMyriadX_("HDDL");
+            if (isMyriadX_ || isHDDL_)
             {
                 param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
             }
diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp
index 09b3d2e61c..cf1b558391 100644
--- a/modules/dnn/test/test_common.impl.hpp
+++ b/modules/dnn/test/test_common.impl.hpp
@@ -40,6 +40,7 @@ void PrintTo(const cv::dnn::Target& v, std::ostream* os)
     case DNN_TARGET_OPENCL: *os << "OCL"; return;
     case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
     case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
+    case DNN_TARGET_HDDL: *os << "HDDL"; return;
     case DNN_TARGET_VULKAN: *os << "VULKAN"; return;
     case DNN_TARGET_FPGA: *os << "FPGA"; return;
     case DNN_TARGET_CUDA: *os << "CUDA"; return;
@@ -221,7 +222,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
         {
-            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+            if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
                 continue;
             targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
         }
@@ -231,7 +232,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
         {
-            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+            if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
                 continue;
             targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
         }
@@ -281,7 +282,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
         {
-            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+            if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
                 continue;
             targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
         }
@@ -291,7 +292,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
         {
-            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+            if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
                 continue;
             targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
         }
@@ -323,7 +324,7 @@ static bool validateVPUType_()
     bool have_vpu_target = false;
     for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
     {
-        if (*i == DNN_TARGET_MYRIAD)
+        if (*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL)
         {
             have_vpu_target = true;
             break;
diff --git a/modules/dnn/test/test_ie_models.cpp b/modules/dnn/test/test_ie_models.cpp
index bd36c86d6c..b285e91d96 100644
--- a/modules/dnn/test/test_ie_models.cpp
+++ b/modules/dnn/test/test_ie_models.cpp
@@ -340,6 +340,8 @@ TEST_P(DNNTestOpenVINO, models)
     // Single Myriad device cannot be shared across multiple processes.
     if (targetId == DNN_TARGET_MYRIAD)
         resetMyriadDevice();
+    if (targetId == DNN_TARGET_HDDL)
+        releaseHDDLPlugin();
     EXPECT_NO_THROW(runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap)) << "runIE";
     EXPECT_NO_THROW(runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap)) << "runCV";
 
diff --git a/samples/dnn/classification.py b/samples/dnn/classification.py
index 5a2373d363..1c6908a2bc 100644
--- a/samples/dnn/classification.py
+++ b/samples/dnn/classification.py
@@ -5,7 +5,7 @@ import numpy as np
 from common import *
 
 backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
 
 parser = argparse.ArgumentParser(add_help=False)
 parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -25,7 +25,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
                          '%d: CPU target (by default), '
                          '%d: OpenCL, '
                          '%d: OpenCL fp16 (half-float precision), '
-                         '%d: VPU' % targets)
+                         '%d: NCS2 VPU, '
+                         '%d: HDDL VPU' % targets)
 args, _ = parser.parse_known_args()
 add_preproc_args(args.zoo, parser, 'classification')
 parser = argparse.ArgumentParser(parents=[parser],
diff --git a/samples/dnn/human_parsing.py b/samples/dnn/human_parsing.py
index f84d2038e4..09371fe4a9 100644
--- a/samples/dnn/human_parsing.py
+++ b/samples/dnn/human_parsing.py
@@ -46,7 +46,7 @@ import cv2 as cv
 
 
 backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
 
 
 def preprocess(image):
@@ -168,7 +168,8 @@ if __name__ == '__main__':
                              '%d: CPU target (by default), '
                              '%d: OpenCL, '
                              '%d: OpenCL fp16 (half-float precision), '
-                             '%d: VPU' % targets)
+                             '%d: NCS2 VPU, '
+                             '%d: HDDL VPU' % targets)
     args, _ = parser.parse_known_args()
 
     if not os.path.isfile(args.model):
diff --git a/samples/dnn/object_detection.py b/samples/dnn/object_detection.py
index babac0dbe8..ec8bf82866 100644
--- a/samples/dnn/object_detection.py
+++ b/samples/dnn/object_detection.py
@@ -15,7 +15,7 @@ from tf_text_graph_ssd import createSSDGraph
 from tf_text_graph_faster_rcnn import createFasterRCNNGraph
 
 backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
 
 parser = argparse.ArgumentParser(add_help=False)
 parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -41,7 +41,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
                          '%d: CPU target (by default), '
                          '%d: OpenCL, '
                          '%d: OpenCL fp16 (half-float precision), '
-                         '%d: VPU' % targets)
+                         '%d: NCS2 VPU, '
+                         '%d: HDDL VPU' % targets)
 parser.add_argument('--async', type=int, default=0,
                     dest='asyncN',
                     help='Number of asynchronous forwards at the same time. '
diff --git a/samples/dnn/segmentation.py b/samples/dnn/segmentation.py
index 1a228c63aa..8eeb59ba14 100644
--- a/samples/dnn/segmentation.py
+++ b/samples/dnn/segmentation.py
@@ -6,7 +6,7 @@ import sys
 from common import *
 
 backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
 
 parser = argparse.ArgumentParser(add_help=False)
 parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -28,7 +28,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
                          '%d: CPU target (by default), '
                          '%d: OpenCL, '
                          '%d: OpenCL fp16 (half-float precision), '
-                         '%d: VPU' % targets)
+                         '%d: NCS2 VPU, '
+                         '%d: HDDL VPU' % targets)
 args, _ = parser.parse_known_args()
 add_preproc_args(args.zoo, parser, 'segmentation')
 parser = argparse.ArgumentParser(parents=[parser],
diff --git a/samples/dnn/virtual_try_on.py b/samples/dnn/virtual_try_on.py
index e4f2e518ec..d1cdd4e021 100644
--- a/samples/dnn/virtual_try_on.py
+++ b/samples/dnn/virtual_try_on.py
@@ -17,7 +17,7 @@ from common import findFile
 from human_parsing import parse_human
 
 backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
 
 parser = argparse.ArgumentParser(description='Use this script to run virtual try-on using CP-VTON',
                                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -39,7 +39,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
                             '%d: CPU target (by default), '
                             '%d: OpenCL, '
                             '%d: OpenCL fp16 (half-float precision), '
-                            '%d: VPU' % targets)
+                            '%d: NCS2 VPU, '
+                            '%d: HDDL VPU' % targets)
 args, _ = parser.parse_known_args()
 
 

From 474a67231c5445f84186bf95c45a07be903b058a Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 17 Nov 2020 19:52:07 +0000
Subject: [PATCH 115/152] dnn(test): skip gather_multi_output test on Myriad

---
 modules/dnn/test/test_onnx_importer.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 14d2d28522..f0128945cc 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -707,6 +707,11 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
 
 TEST_P(Test_ONNX_layers, GatherMultiOutput)
 {
+#if defined(INF_ENGINE_RELEASE)
+    if (target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE);
+#endif
+
     testONNXModels("gather_multi_output");
 }
 

From 9485113923a318f8e313531efe210cfc6123d691 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 16 Nov 2020 22:10:23 +0000
Subject: [PATCH 116/152] pre: OpenCV 3.4.13 (version++)

---
 .../cross_referencing/tutorial_cross_referencing.markdown     | 4 ++--
 modules/core/include/opencv2/core/version.hpp                 | 4 ++--
 modules/dnn/include/opencv2/dnn/dnn.hpp                       | 4 ++--
 modules/python/package/setup.py                               | 2 +-
 platforms/android/build_sdk.py                                | 2 +-
 platforms/android/service/readme.txt                          | 2 +-
 platforms/maven/opencv-it/pom.xml                             | 2 +-
 platforms/maven/opencv/pom.xml                                | 2 +-
 platforms/maven/pom.xml                                       | 2 +-
 9 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown b/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown
index bf5f62a013..f99387eb84 100644
--- a/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown
+++ b/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown
@@ -39,14 +39,14 @@ Open your Doxyfile using your favorite text editor and search for the key
 `TAGFILES`. Change it as follows:
 
 @code
-TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/3.4.12
+TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/3.4.13
 @endcode
 
 If you had other definitions already, you can append the line using a `\`:
 
 @code
 TAGFILES = ./docs/doxygen-tags/libstdc++.tag=https://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen \
-           ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/3.4.12
+           ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/3.4.13
 @endcode
 
 Doxygen can now use the information from the tag file to link to the OpenCV
diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp
index 95d31926fc..16e0fb2a4c 100644
--- a/modules/core/include/opencv2/core/version.hpp
+++ b/modules/core/include/opencv2/core/version.hpp
@@ -7,8 +7,8 @@
 
 #define CV_VERSION_MAJOR    3
 #define CV_VERSION_MINOR    4
-#define CV_VERSION_REVISION 12
-#define CV_VERSION_STATUS   "-dev"
+#define CV_VERSION_REVISION 13
+#define CV_VERSION_STATUS   "-pre"
 
 #define CVAUX_STR_EXP(__A)  #__A
 #define CVAUX_STR(__A)      CVAUX_STR_EXP(__A)
diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 9cb7089bdd..b2dd0e6e16 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -47,9 +47,9 @@
 #include "opencv2/core/async.hpp"
 
 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS
-#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v19 {
+#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v20 {
 #define CV__DNN_EXPERIMENTAL_NS_END }
-namespace cv { namespace dnn { namespace experimental_dnn_34_v19 { } using namespace experimental_dnn_34_v19; }}
+namespace cv { namespace dnn { namespace experimental_dnn_34_v20 { } using namespace experimental_dnn_34_v20; }}
 #else
 #define CV__DNN_EXPERIMENTAL_NS_BEGIN
 #define CV__DNN_EXPERIMENTAL_NS_END
diff --git a/modules/python/package/setup.py b/modules/python/package/setup.py
index d6d74d5e3e..5e4b0daa57 100644
--- a/modules/python/package/setup.py
+++ b/modules/python/package/setup.py
@@ -9,7 +9,7 @@ def main():
     os.chdir(SCRIPT_DIR)
 
     package_name = 'opencv'
-    package_version = os.environ.get('OPENCV_VERSION', '3.4.12')  # TODO
+    package_version = os.environ.get('OPENCV_VERSION', '3.4.13')  # TODO
 
     long_description = 'Open Source Computer Vision Library Python bindings'  # TODO
 
diff --git a/platforms/android/build_sdk.py b/platforms/android/build_sdk.py
index 86c534d732..a0054eaf5e 100755
--- a/platforms/android/build_sdk.py
+++ b/platforms/android/build_sdk.py
@@ -269,7 +269,7 @@ class Builder:
         # Add extra data
         apkxmldest = check_dir(os.path.join(apkdest, "res", "xml"), create=True)
         apklibdest = check_dir(os.path.join(apkdest, "libs", abi.name), create=True)
-        for ver, d in self.extra_packs + [("3.4.12", os.path.join(self.libdest, "lib"))]:
+        for ver, d in self.extra_packs + [("3.4.13", os.path.join(self.libdest, "lib"))]:
             r = ET.Element("library", attrib={"version": ver})
             log.info("Adding libraries from %s", d)
 
diff --git a/platforms/android/service/readme.txt b/platforms/android/service/readme.txt
index 42df960bbd..769864b5ab 100644
--- a/platforms/android/service/readme.txt
+++ b/platforms/android/service/readme.txt
@@ -12,7 +12,7 @@ manually using adb tool:
 
     adb install <path-to-OpenCV-sdk>/apk/OpenCV_<version>_Manager_<app_version>_<platform>.apk
 
-Example: OpenCV_3.4.12-dev_Manager_3.49_armeabi-v7a.apk
+Example: OpenCV_3.4.13-dev_Manager_3.49_armeabi-v7a.apk
 
 Use the list of platforms below to determine proper OpenCV Manager package for your device:
 
diff --git a/platforms/maven/opencv-it/pom.xml b/platforms/maven/opencv-it/pom.xml
index 4feab3e367..c043363773 100644
--- a/platforms/maven/opencv-it/pom.xml
+++ b/platforms/maven/opencv-it/pom.xml
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opencv</groupId>
         <artifactId>opencv-parent</artifactId>
-        <version>3.4.12</version>
+        <version>3.4.13</version>
     </parent>
     <groupId>org.opencv</groupId>
     <artifactId>opencv-it</artifactId>
diff --git a/platforms/maven/opencv/pom.xml b/platforms/maven/opencv/pom.xml
index 29856097f2..705a4da98a 100644
--- a/platforms/maven/opencv/pom.xml
+++ b/platforms/maven/opencv/pom.xml
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opencv</groupId>
         <artifactId>opencv-parent</artifactId>
-        <version>3.4.12</version>
+        <version>3.4.13</version>
     </parent>
     <groupId>org.opencv</groupId>
     <artifactId>opencv</artifactId>
diff --git a/platforms/maven/pom.xml b/platforms/maven/pom.xml
index b8f00d4f49..771635ecb6 100644
--- a/platforms/maven/pom.xml
+++ b/platforms/maven/pom.xml
@@ -3,7 +3,7 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>org.opencv</groupId>
     <artifactId>opencv-parent</artifactId>
-    <version>3.4.12</version>
+    <version>3.4.13</version>
     <packaging>pom</packaging>
     <name>OpenCV Parent POM</name>
     <licenses>

From 94e8a08d1d587a929e0557b751bc0c3d505d1b97 Mon Sep 17 00:00:00 2001
From: Maxim Pashchenkov <maxim.pashchenkov@intel.com>
Date: Wed, 18 Nov 2020 01:57:57 +0300
Subject: [PATCH 117/152] Merge pull request #18819 from
 mpashchenkov:mp/ocv-gapi-skip-centos-tests

G-API: Adding skips for failing streaming tests

* Skip tests

* Pathfinding

* Pathfinding part 2

* Pathfinding part 3

* Fix review comments
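
The same guard is applied at every setSource() call site. A minimal sketch of
the idiom, written as a hypothetical helper for illustration (the patch
inlines it at each call site; findDataFile() and SkipTestException come from
the OpenCV test framework):

```
// ccomp is a compiled streaming computation; relPath is resolved against
// OPENCV_TEST_DATA_PATH by findDataFile().
template <typename Compiled>
static void setVideoSourceOrSkip(Compiled& ccomp, const std::string& relPath)
{
    auto path = findDataFile(relPath);
    try {
        ccomp.setSource(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
    } catch (...) {
        // A missing video file or codec should skip the test, not fail it.
        throw SkipTestException("Video file can not be opened");
    }
}
```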
---
 .../cpu/gapi_ocv_stateful_kernel_tests.cpp    |  40 ++--
 .../test/streaming/gapi_streaming_tests.cpp   | 183 +++++++++++++-----
 2 files changed, 161 insertions(+), 62 deletions(-)

diff --git a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp
index fe6a1f94af..416c141076 100644
--- a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp
+++ b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp
@@ -51,9 +51,9 @@ namespace
         {
             // Since G-API has no own test data (yet), it is taken from the common space
             const char* testDataPath = getenv("OPENCV_TEST_DATA_PATH");
-            GAPI_Assert(testDataPath != nullptr);
-
-            cvtest::addDataSearchPath(testDataPath);
+            if (testDataPath) {
+                cvtest::addDataSearchPath(testDataPath);
+            }
             initialized = true;
         }
 #endif // WINRT
@@ -192,8 +192,12 @@ TEST(StatefulKernel, StateIsAutoResetForNewStream)
     // Compilation & testing
     auto ccomp = c.compileStreaming(cv::compile_args(pkg));
 
-    ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>
-                               (findDataFile("cv/video/768x576.avi")));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     ccomp.start();
     EXPECT_TRUE(ccomp.running());
 
@@ -204,8 +208,12 @@ TEST(StatefulKernel, StateIsAutoResetForNewStream)
     }
     EXPECT_FALSE(ccomp.running());
 
-    ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>
-                               (findDataFile("cv/video/1920x1080.avi")));
+    path = findDataFile("cv/video/1920x1080.avi");
+    try {
+        ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     ccomp.start();
     EXPECT_TRUE(ccomp.running());
 
@@ -335,14 +343,22 @@ TEST(StatefulKernel, StateIsInitViaCompArgsInStreaming)
                            cv::compile_args(pkg, BackSubStateParams { "knn" }));
 
     // Testing G-API Background Subtractor in streaming mode
-    gapiBackSub.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>
-                               (findDataFile("cv/video/768x576.avi")));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        gapiBackSub.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     // Allowing 1% difference of all pixels between G-API and reference OpenCV results
     testBackSubInStreaming(gapiBackSub, 1);
 
-    // Additionally, test the case when the new stream happens
-    gapiBackSub.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>
-                               (findDataFile("cv/video/1920x1080.avi")));
+    path = findDataFile("cv/video/1920x1080.avi");
+    try {
+        // Additionally, test the case when the new stream happens
+        gapiBackSub.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     // Allowing 5% difference of all pixels between G-API and reference OpenCV results
     testBackSubInStreaming(gapiBackSub, 5);
 }
diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
index 69b85c0d34..8370aee262 100644
--- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp
+++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
@@ -34,9 +34,9 @@ void initTestDataPath()
     {
         // Since G-API has no own test data (yet), it is taken from the common space
         const char* testDataPath = getenv("OPENCV_TEST_DATA_PATH");
-        GAPI_Assert(testDataPath != nullptr);
-
-        cvtest::addDataSearchPath(testDataPath);
+        if (testDataPath) {
+            cvtest::addDataSearchPath(testDataPath);
+        }
         initialized = true;
     }
 #endif // WINRT
@@ -202,8 +202,12 @@ TEST_P(GAPI_Streaming, SmokeTest_VideoInput_GMat)
     EXPECT_TRUE(ccomp);
     EXPECT_FALSE(ccomp.running());
 
-    ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")));
-
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     ccomp.start();
     EXPECT_TRUE(ccomp.running());
 
@@ -272,8 +276,13 @@ TEST_P(GAPI_Streaming, SmokeTest_StartRestart)
     EXPECT_FALSE(ccomp.running());
 
     // Run 1
+    auto path = findDataFile("cv/video/768x576.avi");
     std::size_t num_frames1 = 0u;
-    ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")));
+    try {
+        ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     ccomp.start();
     EXPECT_TRUE(ccomp.running());
 
@@ -284,7 +293,11 @@ TEST_P(GAPI_Streaming, SmokeTest_StartRestart)
 
     // Run 2
     std::size_t num_frames2 = 0u;
-    ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")));
+    try {
+        ccomp.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     ccomp.start();
     EXPECT_TRUE(ccomp.running());
     while (ccomp.pull(cv::gout(out1, out2))) num_frames2++;
@@ -306,7 +319,12 @@ TEST_P(GAPI_Streaming, SmokeTest_VideoConstSource_NoHang)
     }).compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
                         cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
 
-    refc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        refc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     refc.start();
     std::size_t ref_frames = 0u;
     cv::Mat tmp;
@@ -325,7 +343,7 @@ TEST_P(GAPI_Streaming, SmokeTest_VideoConstSource_NoHang)
 
     cv::Mat in_const = cv::Mat::eye(cv::Size(256,256), CV_8UC3);
     testc.setSource(cv::gin(in_const,
-                            gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi"))));
+                            gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
     testc.start();
     std::size_t test_frames = 0u;
     while (testc.pull(cv::gout(tmp))) test_frames++;
@@ -348,8 +366,12 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta)
     cv::Mat tmp;
 
     // Test with one video source
-    auto in_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi"));
-    testc.setSource(cv::gin(in_const, in_src));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        testc.setSource(cv::gin(in_const, gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     testc.start();
 
     std::size_t test_frames = 0u;
@@ -357,8 +379,12 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta)
     EXPECT_EQ(100u, test_frames);
 
     // Now test with another one
-    in_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/1920x1080.avi"));
-    testc.setSource(cv::gin(in_const, in_src));
+    path = findDataFile("cv/video/1920x1080.avi");
+    try {
+        testc.setSource(cv::gin(in_const, gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     testc.start();
 
     test_frames = 0u;
@@ -411,8 +437,12 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_VideoScalar)
 
     cv::Mat tmp;
     // Test with one video source and scalar
-    auto in_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi"));
-    testc.setSource(cv::gin(in_src, cv::Scalar{1.25}));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        testc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path), cv::Scalar{1.25}));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     testc.start();
 
     std::size_t test_frames = 0u;
@@ -420,8 +450,12 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_VideoScalar)
     EXPECT_EQ(100u, test_frames);
 
     // Now test with another one video source and scalar
-    in_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/1920x1080.avi"));
-    testc.setSource(cv::gin(in_src, cv::Scalar{0.75}));
+    path = findDataFile("cv/video/1920x1080.avi");
+    try {
+        testc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path), cv::Scalar{0.75}));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     testc.start();
 
     test_frames = 0u;
@@ -516,9 +550,13 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_VideoArray)
 
     cv::Mat tmp;
     // Test with one video source and vector
-    auto in_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi"));
+    auto path = findDataFile("cv/video/768x576.avi");
     std::vector<int> first_in_vec(768*3, 1);
-    testc.setSource(cv::gin(in_src, first_in_vec));
+    try {
+        testc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path), first_in_vec));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     testc.start();
 
     std::size_t test_frames = 0u;
@@ -526,9 +564,13 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_VideoArray)
     EXPECT_EQ(100u, test_frames);
 
     // Now test with another one
-    in_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/1920x1080.avi"));
+    path = findDataFile("cv/video/1920x1080.avi");
     std::vector<int> second_in_vec(1920*3, 1);
-    testc.setSource(cv::gin(in_src, second_in_vec));
+    try {
+        testc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path), second_in_vec));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     testc.start();
 
     test_frames = 0u;
@@ -647,8 +689,13 @@ TEST(GAPI_Streaming_Types, XChangeScalar)
 
     // Compile streaming pipeline
     auto sc = c.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
-                                 cv::compile_args(cv::gapi::use_only{kernels}));
-    sc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")));
+                                cv::compile_args(cv::gapi::use_only{kernels}));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     sc.start();
 
     cv::Mat in_frame;
@@ -708,8 +755,13 @@ TEST(GAPI_Streaming_Types, XChangeVector)
     auto sc = c.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
                                  cv::GMatDesc{CV_8U,3,cv::Size{576,576}},
                                  cv::compile_args(cv::gapi::use_only{kernels}));
-    sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")),
-                         in_eye));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path),
+                             in_eye));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     sc.start();
 
     cv::Mat in_frame;
@@ -737,8 +789,13 @@ TEST(GAPI_Streaming_Types, OutputScalar)
     auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out))
         .compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}});
 
-    const auto video_path = findDataFile("cv/video/768x576.avi");
-    sc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(video_path));
+    std::string video_path;
+    video_path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(video_path));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     sc.start();
 
     cv::VideoCapture cap;
@@ -783,8 +840,13 @@ TEST(GAPI_Streaming_Types, OutputVector)
     };
 
     cv::Mat in_eye = cv::Mat::eye(cv::Size(256, 256), CV_8UC3);
-    const auto video_path = findDataFile("cv/video/768x576.avi");
-    sc.setSource(cv::gin(in_eye, gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(video_path)));
+    std::string video_path;
+    video_path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(cv::gin(in_eye, gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(video_path)));
+    } catch(...) {
+        throw SkipTestException("Video file can not be opened");
+    }
     sc.start();
 
     cv::VideoCapture cap;
@@ -936,17 +998,22 @@ struct GAPI_Streaming_Unit: public ::testing::Test {
 
 TEST_F(GAPI_Streaming_Unit, TestTwoVideoSourcesFail)
 {
-    const auto c_ptr = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi"));
     auto c_desc = cv::GMatDesc{CV_8U,3,{768,576}};
     auto m_desc = cv::descr_of(m);
-
-    sc = cc.compileStreaming(c_desc, m_desc);
-    EXPECT_NO_THROW(sc.setSource(cv::gin(c_ptr, m)));
-
-    sc = cc.compileStreaming(m_desc, c_desc);
-    EXPECT_NO_THROW(sc.setSource(cv::gin(m, c_ptr)));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc = cc.compileStreaming(c_desc, m_desc);
+        // FIXME: it should be EXPECT_NO_THROW()
+        sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path), m));
+        sc = cc.compileStreaming(m_desc, c_desc);
+        // FIXME: it should be EXPECT_NO_THROW()
+        sc.setSource(cv::gin(m, gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
 
     sc = cc.compileStreaming(c_desc, c_desc);
+    auto c_ptr = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path);
     EXPECT_ANY_THROW(sc.setSource(cv::gin(c_ptr, c_ptr)));
 }
 
@@ -1015,11 +1082,15 @@ TEST_F(GAPI_Streaming_Unit, StartStopStress_Video)
     sc = cc.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
                              cv::GMatDesc{CV_8U,3,cv::Size{768,576}});
     m = cv::Mat::eye(cv::Size{768,576}, CV_8UC3);
+    auto path = findDataFile("cv/video/768x576.avi");
     for (int i = 0; i < 100; i++)
     {
-        auto src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi"));
         sc.stop();
-        sc.setSource(cv::gin(src, m));
+        try {
+            sc.setSource(cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path), m));
+        } catch(...) {
+            throw SkipTestException("Video file cannot be opened");
+        }
         sc.start();
         cv::Mat out;
         for (int j = 0; j < 5; j++) EXPECT_TRUE(sc.pull(cv::gout(out)));
@@ -1158,9 +1229,12 @@ TEST(GAPI_Streaming_Desync, SmokeTest_Streaming)
 
     auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
         .compileStreaming(cv::compile_args(cv::gapi::kernels<OCVDelay>()));
-    auto sc_file = findDataFile("cv/video/768x576.avi");
-    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
-    sc.setSource(cv::gin(sc_src));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     sc.start();
 
     std::size_t out1_hits = 0u;
@@ -1195,9 +1269,12 @@ TEST(GAPI_Streaming_Desync, SmokeTest_Streaming_TwoParts)
     // The code should compile and execute well (desynchronized parts don't cross)
     auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2, out3))
         .compileStreaming();
-    auto sc_file = findDataFile("cv/video/768x576.avi");
-    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
-    sc.setSource(cv::gin(sc_src));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     sc.start();
 
     std::size_t test_frames = 0u;
@@ -1323,9 +1400,12 @@ TEST(GAPI_Streaming_Desync, Negative_SynchronizedPull)
     auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
         .compileStreaming();
 
-    auto sc_file = findDataFile("cv/video/768x576.avi");
-    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
-    sc.setSource(cv::gin(sc_src));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     sc.start();
 
     cv::Mat o1, o2;
@@ -1345,9 +1425,12 @@ TEST(GAPI_Streaming_Desync, UseSpecialPull)
     auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2))
         .compileStreaming();
 
-    auto sc_file = findDataFile("cv/video/768x576.avi");
-    auto sc_src = gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(sc_file);
-    sc.setSource(cv::gin(sc_src));
+    auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        sc.setSource(cv::gin(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path)));
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     sc.start();
 
     cv::optional<cv::Mat> o1, o2;

From aab6362705e9afcd99dacb137a011b51061affcd Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Wed, 18 Nov 2020 14:04:15 +0300
Subject: [PATCH 118/152] Merge pull request #18838 from
 alalek:video_tracking_api

Tracking API: move to video/tracking.hpp

* video(tracking): moved code from opencv_contrib/tracking module

- Tracker API
- MIL, GOTURN trackers
- applied clang-format

* video(tracking): cleanup unused code

* samples: add tracker.py sample

* video(tracking): avoid div by zero

* static analyzer
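
A minimal usage sketch of the relocated API (an illustration, not part of
this patch; the video path and initial ROI below are placeholders):

```
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video/tracking.hpp>  // Tracker / TrackerMIL now live here

int main()
{
    cv::VideoCapture cap("input.avi");  // placeholder video file
    cv::Mat frame;
    if (!cap.read(frame))
        return 1;

    // MIL needs no external model files; for GOTURN, fill
    // TrackerGOTURN::Params::modelTxt / modelBin instead.
    cv::Ptr<cv::Tracker> tracker = cv::TrackerMIL::create();
    tracker->init(frame, cv::Rect(100, 100, 80, 80));  // placeholder initial box

    while (cap.read(frame))
    {
        cv::Rect box;
        if (tracker->update(frame, box))  // false: target not located in this frame
            cv::rectangle(frame, box, cv::Scalar(0, 255, 0), 2);
    }
    return 0;
}
```

samples/python/tracker.py added by this patch demonstrates the same flow
from Python.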
---
 modules/video/CMakeLists.txt                  |  12 +-
 modules/video/doc/video.bib                   |  40 +-
 .../opencv2/video/detail/tracking.private.hpp | 406 ++++++++++++
 .../video/detail/tracking_feature.private.hpp | 168 +++++
 .../video/include/opencv2/video/tracking.hpp  | 115 ++++
 .../misc/java/test/TrackerCreateTest.java     |  32 +
 modules/video/misc/python/pyopencv_video.hpp  |   4 +
 .../video/misc/python/test/test_tracking.py   |  19 +
 modules/video/perf/perf_main.cpp              |  17 +-
 modules/video/perf/perf_trackers.cpp          | 104 ++++
 .../src/tracking/detail/tracker_feature.cpp   |  25 +
 .../detail/tracker_feature_haar.impl.hpp      | 121 ++++
 .../tracking/detail/tracker_feature_set.cpp   |  60 ++
 .../src/tracking/detail/tracker_mil_model.cpp |  85 +++
 .../src/tracking/detail/tracker_mil_model.hpp |  67 ++
 .../src/tracking/detail/tracker_mil_state.cpp | 159 +++++
 .../src/tracking/detail/tracker_mil_state.hpp |  87 +++
 .../src/tracking/detail/tracker_model.cpp     | 132 ++++
 .../src/tracking/detail/tracker_sampler.cpp   |  68 ++
 .../detail/tracker_sampler_algorithm.cpp      | 124 ++++
 .../detail/tracker_state_estimator.cpp        |  37 ++
 .../src/tracking/detail/tracking_feature.cpp  | 582 ++++++++++++++++++
 .../tracking/detail/tracking_online_mil.cpp   | 356 +++++++++++
 .../tracking/detail/tracking_online_mil.hpp   |  79 +++
 modules/video/src/tracking/tracker.cpp        |  19 +
 modules/video/src/tracking/tracker_goturn.cpp | 140 +++++
 modules/video/src/tracking/tracker_mil.cpp    | 227 +++++++
 modules/video/test/test_main.cpp              |  17 +-
 modules/video/test/test_trackers.cpp          |  97 +++
 modules/video/test/test_trackers.impl.hpp     | 368 +++++++++++
 samples/python/tracker.py                     |  80 +++
 31 files changed, 3843 insertions(+), 4 deletions(-)
 create mode 100644 modules/video/include/opencv2/video/detail/tracking.private.hpp
 create mode 100644 modules/video/include/opencv2/video/detail/tracking_feature.private.hpp
 create mode 100644 modules/video/misc/java/test/TrackerCreateTest.java
 create mode 100644 modules/video/misc/python/pyopencv_video.hpp
 create mode 100644 modules/video/misc/python/test/test_tracking.py
 create mode 100644 modules/video/perf/perf_trackers.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_feature.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_feature_haar.impl.hpp
 create mode 100644 modules/video/src/tracking/detail/tracker_feature_set.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_mil_model.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_mil_model.hpp
 create mode 100644 modules/video/src/tracking/detail/tracker_mil_state.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_mil_state.hpp
 create mode 100644 modules/video/src/tracking/detail/tracker_model.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_sampler.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_sampler_algorithm.cpp
 create mode 100644 modules/video/src/tracking/detail/tracker_state_estimator.cpp
 create mode 100644 modules/video/src/tracking/detail/tracking_feature.cpp
 create mode 100644 modules/video/src/tracking/detail/tracking_online_mil.cpp
 create mode 100644 modules/video/src/tracking/detail/tracking_online_mil.hpp
 create mode 100644 modules/video/src/tracking/tracker.cpp
 create mode 100644 modules/video/src/tracking/tracker_goturn.cpp
 create mode 100644 modules/video/src/tracking/tracker_mil.cpp
 create mode 100644 modules/video/test/test_trackers.cpp
 create mode 100644 modules/video/test/test_trackers.impl.hpp
 create mode 100644 samples/python/tracker.py

diff --git a/modules/video/CMakeLists.txt b/modules/video/CMakeLists.txt
index e25f0b7e0e..8499de9169 100644
--- a/modules/video/CMakeLists.txt
+++ b/modules/video/CMakeLists.txt
@@ -1,2 +1,12 @@
 set(the_description "Video Analysis")
-ocv_define_module(video opencv_imgproc OPTIONAL opencv_calib3d WRAP java objc python js)
+ocv_define_module(video
+    opencv_imgproc
+    OPTIONAL
+      opencv_calib3d
+      opencv_dnn
+    WRAP
+      java
+      objc
+      python
+      js
+)
diff --git a/modules/video/doc/video.bib b/modules/video/doc/video.bib
index 46116bb931..e78c348a46 100644
--- a/modules/video/doc/video.bib
+++ b/modules/video/doc/video.bib
@@ -1,6 +1,44 @@
+@article{AAM,
+  title={Adaptive appearance modeling for video tracking: survey and evaluation},
+  author={Salti, Samuele and Cavallaro, Andrea and Di Stefano, Luigi},
+  journal={IEEE Transactions on Image Processing},
+  volume={21},
+  number={10},
+  pages={4334--4348},
+  year={2012},
+  publisher={IEEE}
+}
+
+@article{AMVOT,
+  title={A survey of appearance models in visual object tracking},
+  author={Li, Xi and Hu, Weiming and Shen, Chunhua and Zhang, Zhongfei and Dick, Anthony and Hengel, Anton Van Den},
+  journal={ACM Transactions on Intelligent Systems and Technology (TIST)},
+  volume={4},
+  number={4},
+  pages={58},
+  year={2013},
+  publisher={ACM}
+}
+
+@inproceedings{GOTURN,
+  title={Learning to Track at 100 FPS with Deep Regression Networks},
+  author={Held, David and Thrun, Sebastian and Savarese, Silvio},
+  booktitle={European Conference on Computer Vision (ECCV)},
+  year={2016}
+}
+
 @inproceedings{Kroeger2016,
   author={Till Kroeger and Radu Timofte and Dengxin Dai and Luc Van Gool},
   title={Fast Optical Flow using Dense Inverse Search},
   booktitle={Proceedings of the European Conference on Computer Vision ({ECCV})},
-  year = {2016}
+  year={2016}
+}
+
+@inproceedings{MIL,
+  title={Visual tracking with online multiple instance learning},
+  author={Babenko, Boris and Yang, Ming-Hsuan and Belongie, Serge},
+  booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
+  pages={983--990},
+  year={2009},
+  organization={IEEE}
 }
diff --git a/modules/video/include/opencv2/video/detail/tracking.private.hpp b/modules/video/include/opencv2/video/detail/tracking.private.hpp
new file mode 100644
index 0000000000..1e6107900d
--- /dev/null
+++ b/modules/video/include/opencv2/video/detail/tracking.private.hpp
@@ -0,0 +1,406 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_VIDEO_DETAIL_TRACKING_HPP
+#define OPENCV_VIDEO_DETAIL_TRACKING_HPP
+
+/*
+ * Partially based on:
+ * ====================================================================================================================
+ *  - [AAM] S. Salti, A. Cavallaro, L. Di Stefano, Adaptive Appearance Modeling for Video Tracking: Survey and Evaluation
+ *  - [AMVOT] X. Li, W. Hu, C. Shen, Z. Zhang, A. Dick, A. van den Hengel, A Survey of Appearance Models in Visual Object Tracking
+ *
+ * This Tracking API has been designed with PlantUML. If you modify this API please change UML files under modules/tracking/doc/uml
+ *
+ */
+
+#include "opencv2/core.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+/** @addtogroup tracking_detail
+@{
+*/
+
+/************************************ TrackerFeature Base Classes ************************************/
+
+/** @brief Abstract base class for TrackerFeature that represents the feature.
+*/
+class CV_EXPORTS TrackerFeature
+{
+public:
+    virtual ~TrackerFeature();
+
+    /** @brief Compute the features in the images collection
+    @param images The images
+    @param response The output response
+    */
+    void compute(const std::vector<Mat>& images, Mat& response);
+
+protected:
+    virtual bool computeImpl(const std::vector<Mat>& images, Mat& response) = 0;
+};
+
+/** @brief Class that manages the extraction and selection of features
+
+@cite AAM Feature Extraction and Feature Set Refinement (Feature Processing and Feature Selection).
+See table I and section III C @cite AMVOT Appearance modelling -\> Visual representation (Table II,
+section 3.1 - 3.2)
+
+TrackerFeatureSet is an aggregation of TrackerFeature
+
+@sa
+   TrackerFeature
+
+*/
+class CV_EXPORTS TrackerFeatureSet
+{
+public:
+    TrackerFeatureSet();
+
+    ~TrackerFeatureSet();
+
+    /** @brief Extract features from the images collection
+    @param images The input images
+    */
+    void extraction(const std::vector<Mat>& images);
+
+    /** @brief Add a TrackerFeature to the collection. Return true if the TrackerFeature was added, false otherwise
+    @param feature The TrackerFeature class
+    */
+    bool addTrackerFeature(const Ptr<TrackerFeature>& feature);
+
+    /** @brief Get the TrackerFeature collection (TrackerFeature name, TrackerFeature pointer)
+    */
+    const std::vector<Ptr<TrackerFeature>>& getTrackerFeatures() const;
+
+    /** @brief Get the responses
+    @note Be sure to call extraction before getResponses
+    */
+    const std::vector<Mat>& getResponses() const;
+
+private:
+    void clearResponses();
+    bool blockAddTrackerFeature;
+
+    std::vector<Ptr<TrackerFeature>> features;  // list of features
+    std::vector<Mat> responses;  // list of response after compute
+};
+
+/************************************ TrackerSampler Base Classes ************************************/
+
+/** @brief Abstract base class for TrackerSamplerAlgorithm that represents the algorithm for the specific
+sampler.
+*/
+class CV_EXPORTS TrackerSamplerAlgorithm
+{
+public:
+    virtual ~TrackerSamplerAlgorithm();
+
+    /** @brief Computes the regions starting from a position in an image.
+
+    Return true if samples are computed, false otherwise
+
+    @param image The current frame
+    @param boundingBox The bounding box from which regions can be calculated
+
+    @param sample The computed samples @cite AAM Fig. 1 variable Sk
+    */
+    virtual bool sampling(const Mat& image, const Rect& boundingBox, std::vector<Mat>& sample) = 0;
+};
+
+/** @brief Class that manages the sampler in order to select regions for updating the model of the tracker
+
+@cite AAM Sampling and Labeling. See table I and section III B
+
+TrackerSampler is an aggregation of TrackerSamplerAlgorithm
+@sa
+   TrackerSamplerAlgorithm
+ */
+class CV_EXPORTS TrackerSampler
+{
+public:
+    TrackerSampler();
+
+    ~TrackerSampler();
+
+    /** @brief Computes the regions starting from a position in an image
+    @param image The current frame
+    @param boundingBox The bounding box from which regions can be calculated
+    */
+    void sampling(const Mat& image, Rect boundingBox);
+
+    /** @brief Return the collection of the TrackerSamplerAlgorithm
+    */
+    const std::vector<Ptr<TrackerSamplerAlgorithm>>& getSamplers() const;
+
+    /** @brief Return the samples from all TrackerSamplerAlgorithm, @cite AAM Fig. 1 variable Sk
+    */
+    const std::vector<Mat>& getSamples() const;
+
+    /** @brief Add a TrackerSamplerAlgorithm to the collection. Return true if the sampler was added, false otherwise
+    @param sampler The TrackerSamplerAlgorithm
+    */
+    bool addTrackerSamplerAlgorithm(const Ptr<TrackerSamplerAlgorithm>& sampler);
+
+private:
+    std::vector<Ptr<TrackerSamplerAlgorithm>> samplers;
+    std::vector<Mat> samples;
+    bool blockAddTrackerSampler;
+
+    void clearSamples();
+};
+
+/************************************ TrackerModel Base Classes ************************************/
+
+/** @brief Abstract base class for TrackerTargetState that represents a possible state of the target.
+
+See @cite AAM \f$\hat{x}^{i}_{k}\f$ all the state candidates.
+
+Inherit this class with your target state; in your own implementation you can add scale variation,
+width, height, orientation, etc.
+*/
+class CV_EXPORTS TrackerTargetState
+{
+public:
+    virtual ~TrackerTargetState() {};
+    /** @brief Get the position
+    * @return The position
+    */
+    Point2f getTargetPosition() const;
+
+    /** @brief Set the position
+    * @param position The position
+    */
+    void setTargetPosition(const Point2f& position);
+    /** @brief Get the width of the target
+    * @return The width of the target
+    */
+    int getTargetWidth() const;
+
+    /** @brief Set the width of the target
+    * @param width The width of the target
+    */
+    void setTargetWidth(int width);
+    /** @brief Get the height of the target
+    * @return The height of the target
+    */
+    int getTargetHeight() const;
+
+    /** @brief Set the height of the target
+    * @param height The height of the target
+    */
+    void setTargetHeight(int height);
+
+protected:
+    Point2f targetPosition;
+    int targetWidth;
+    int targetHeight;
+};
+
+/** @brief Represents the model of the target at frame \f$k\f$ (all states and scores)
+
+See @cite AAM The set of the pair \f$\langle \hat{x}^{i}_{k}, C^{i}_{k} \rangle\f$
+@sa TrackerTargetState
+*/
+typedef std::vector<std::pair<Ptr<TrackerTargetState>, float>> ConfidenceMap;
+
+/** @brief Represents the estimated states for all frames
+
+@cite AAM \f$x_{k}\f$ is the trajectory of the target up to time \f$k\f$
+
+@sa TrackerTargetState
+*/
+typedef std::vector<Ptr<TrackerTargetState>> Trajectory;
+
+/** @brief Abstract base class for TrackerStateEstimator that estimates the most likely target state.
+
+See @cite AAM State estimator
+
+See @cite AMVOT Statistical modeling (Fig. 3), Table III (generative) - IV (discriminative) - V (hybrid)
+*/
+class CV_EXPORTS TrackerStateEstimator
+{
+public:
+    virtual ~TrackerStateEstimator();
+
+    /** @brief Estimate the most likely target state, return the estimated state
+    @param confidenceMaps The overall appearance model as a list of ConfidenceMap
+    */
+    Ptr<TrackerTargetState> estimate(const std::vector<ConfidenceMap>& confidenceMaps);
+
+    /** @brief Update the ConfidenceMap with the scores
+    @param confidenceMaps The overall appearance model as a list of ConfidenceMap
+    */
+    void update(std::vector<ConfidenceMap>& confidenceMaps);
+
+    /** @brief Create TrackerStateEstimator by tracker state estimator type
+    @param trackeStateEstimatorType The TrackerStateEstimator name
+
+    The modes available now:
+
+    -   "BOOSTING" -- Boosting-based discriminative appearance models. See @cite AMVOT section 4.4
+
+    The modes available soon:
+
+    -   "SVM" -- SVM-based discriminative appearance models. See @cite AMVOT section 4.5
+    */
+    static Ptr<TrackerStateEstimator> create(const String& trackeStateEstimatorType);
+
+    /** @brief Get the name of the specific TrackerStateEstimator
+    */
+    String getClassName() const;
+
+protected:
+    virtual Ptr<TrackerTargetState> estimateImpl(const std::vector<ConfidenceMap>& confidenceMaps) = 0;
+    virtual void updateImpl(std::vector<ConfidenceMap>& confidenceMaps) = 0;
+    String className;
+};
+
+/** @brief Abstract class that represents the model of the target.
+
+It must be instantiated by a specialized tracker
+
+See @cite AAM Ak
+
+Inherit this with your TrackerModel
+*/
+class CV_EXPORTS TrackerModel
+{
+public:
+    TrackerModel();
+
+    virtual ~TrackerModel();
+
+    /** @brief Set the TrackerStateEstimator, return true if the tracker state estimator was added, false otherwise
+    @param trackerStateEstimator The TrackerStateEstimator
+    @note You can add only one TrackerStateEstimator
+    */
+    bool setTrackerStateEstimator(Ptr<TrackerStateEstimator> trackerStateEstimator);
+
+    /** @brief Estimate the most likely target location
+
+    @cite AAM ME, Model Estimation table I
+    @param responses Features extracted from TrackerFeatureSet
+    */
+    void modelEstimation(const std::vector<Mat>& responses);
+
+    /** @brief Update the model
+
+    @cite AAM MU, Model Update table I
+    */
+    void modelUpdate();
+
+    /** @brief Run the TrackerStateEstimator, return true if it is possible to estimate a new state, false otherwise
+    */
+    bool runStateEstimator();
+
+    /** @brief Set the current TrackerTargetState in the Trajectory
+    @param lastTargetState The current TrackerTargetState
+    */
+    void setLastTargetState(const Ptr<TrackerTargetState>& lastTargetState);
+
+    /** @brief Get the last TrackerTargetState from Trajectory
+    */
+    Ptr<TrackerTargetState> getLastTargetState() const;
+
+    /** @brief Get the list of the ConfidenceMap
+    */
+    const std::vector<ConfidenceMap>& getConfidenceMaps() const;
+
+    /** @brief Get the last ConfidenceMap for the current frame
+    */
+    const ConfidenceMap& getLastConfidenceMap() const;
+
+    /** @brief Get the TrackerStateEstimator
+    */
+    Ptr<TrackerStateEstimator> getTrackerStateEstimator() const;
+
+private:
+    void clearCurrentConfidenceMap();
+
+protected:
+    std::vector<ConfidenceMap> confidenceMaps;
+    Ptr<TrackerStateEstimator> stateEstimator;
+    ConfidenceMap currentConfidenceMap;
+    Trajectory trajectory;
+    int maxCMLength;
+
+    virtual void modelEstimationImpl(const std::vector<Mat>& responses) = 0;
+    virtual void modelUpdateImpl() = 0;
+};
+
+/************************************ Specific TrackerStateEstimator Classes ************************************/
+
+// None
+
+/************************************ Specific TrackerSamplerAlgorithm Classes ************************************/
+
+/** @brief TrackerSampler based on CSC (current state centered), used by MIL algorithm TrackerMIL
+ */
+class CV_EXPORTS TrackerSamplerCSC : public TrackerSamplerAlgorithm
+{
+public:
+    ~TrackerSamplerCSC();
+
+    enum MODE
+    {
+        MODE_INIT_POS = 1,  //!< mode for init positive samples
+        MODE_INIT_NEG = 2,  //!< mode for init negative samples
+        MODE_TRACK_POS = 3,  //!< mode for update positive samples
+        MODE_TRACK_NEG = 4,  //!< mode for update negative samples
+        MODE_DETECT = 5  //!< mode for detect samples
+    };
+
+    struct CV_EXPORTS Params
+    {
+        Params();
+        float initInRad;  //!< radius for gathering positive instances during init
+        float trackInPosRad;  //!< radius for gathering positive instances during tracking
+        float searchWinSize;  //!< size of search window
+        int initMaxNegNum;  //!< # negative samples to use during init
+        int trackMaxPosNum;  //!< # positive samples to use during training
+        int trackMaxNegNum;  //!< # negative samples to use during training
+    };
+
+    /** @brief Constructor
+    @param parameters TrackerSamplerCSC parameters TrackerSamplerCSC::Params
+    */
+    TrackerSamplerCSC(const TrackerSamplerCSC::Params& parameters = TrackerSamplerCSC::Params());
+
+    /** @brief Set the sampling mode of TrackerSamplerCSC
+    @param samplingMode The sampling mode
+
+    The modes are:
+
+    -   "MODE_INIT_POS = 1" -- for the positive sampling in initialization step
+    -   "MODE_INIT_NEG = 2" -- for the negative sampling in initialization step
+    -   "MODE_TRACK_POS = 3" -- for the positive sampling in update step
+    -   "MODE_TRACK_NEG = 4" -- for the negative sampling in update step
+    -   "MODE_DETECT = 5" -- for the sampling in detection step
+    */
+    void setMode(int samplingMode);
+
+    bool sampling(const Mat& image, const Rect& boundingBox, std::vector<Mat>& sample) CV_OVERRIDE;
+
+private:
+    Params params;
+    int mode;
+    RNG rng;
+
+    std::vector<Mat> sampleImage(const Mat& img, int x, int y, int w, int h, float inrad, float outrad = 0, int maxnum = 1000000);
+};
+
+//! @}
+
+}}}  // namespace cv::detail::tracking
+
+#endif  // OPENCV_VIDEO_DETAIL_TRACKING_HPP
diff --git a/modules/video/include/opencv2/video/detail/tracking_feature.private.hpp b/modules/video/include/opencv2/video/detail/tracking_feature.private.hpp
new file mode 100644
index 0000000000..659b467abc
--- /dev/null
+++ b/modules/video/include/opencv2/video/detail/tracking_feature.private.hpp
@@ -0,0 +1,168 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_VIDEO_DETAIL_TRACKING_FEATURE_HPP
+#define OPENCV_VIDEO_DETAIL_TRACKING_FEATURE_HPP
+
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+
+/*
+ * TODO This implementation is based on apps/traincascade/
+ * TODO Changed CvHaarEvaluator based on ADABOOSTING implementation (Grabner et al.)
+ */
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+//! @addtogroup tracking_detail
+//! @{
+
+inline namespace feature {
+
+class CvParams
+{
+public:
+    CvParams();
+    virtual ~CvParams()
+    {
+    }
+};
+
+class CvFeatureParams : public CvParams
+{
+public:
+    enum FeatureType
+    {
+        HAAR = 0,
+        LBP = 1,
+        HOG = 2
+    };
+
+    CvFeatureParams();
+    static Ptr<CvFeatureParams> create(CvFeatureParams::FeatureType featureType);
+    int maxCatCount;  // 0 in case of numerical features
+    int featSize;  // 1 in case of simple features (HAAR, LBP) and N_BINS(9)*N_CELLS(4) in case of Dalal's HOG features
+    int numFeatures;
+};
+
+class CvFeatureEvaluator
+{
+public:
+    virtual ~CvFeatureEvaluator()
+    {
+    }
+    virtual void init(const CvFeatureParams* _featureParams, int _maxSampleCount, Size _winSize);
+    virtual void setImage(const Mat& img, uchar clsLabel, int idx);
+    static Ptr<CvFeatureEvaluator> create(CvFeatureParams::FeatureType type);
+
+    int getNumFeatures() const
+    {
+        return numFeatures;
+    }
+    int getMaxCatCount() const
+    {
+        return featureParams->maxCatCount;
+    }
+    int getFeatureSize() const
+    {
+        return featureParams->featSize;
+    }
+    const Mat& getCls() const
+    {
+        return cls;
+    }
+    float getCls(int si) const
+    {
+        return cls.at<float>(si, 0);
+    }
+
+protected:
+    virtual void generateFeatures() = 0;
+
+    int npos, nneg;
+    int numFeatures;
+    Size winSize;
+    CvFeatureParams* featureParams;
+    Mat cls;
+};
+
+class CvHaarFeatureParams : public CvFeatureParams
+{
+public:
+    CvHaarFeatureParams();
+    bool isIntegral;
+};
+
+class CvHaarEvaluator : public CvFeatureEvaluator
+{
+public:
+    class FeatureHaar
+    {
+
+    public:
+        FeatureHaar(Size patchSize);
+        bool eval(const Mat& image, Rect ROI, float* result) const;
+        inline int getNumAreas() const { return m_numAreas; }
+        inline const std::vector<float>& getWeights() const { return m_weights; }
+        inline const std::vector<Rect>& getAreas() const { return m_areas; }
+
+    private:
+        int m_type;
+        int m_numAreas;
+        std::vector<float> m_weights;
+        float m_initMean;
+        float m_initSigma;
+        void generateRandomFeature(Size imageSize);
+        float getSum(const Mat& image, Rect imgROI) const;
+        std::vector<Rect> m_areas;  // areas within the patch over which to compute the feature
+        cv::Size m_initSize;  // size of the patch used during training
+        cv::Size m_curSize;  // size of the patches currently under investigation
+        float m_scaleFactorHeight;  // scaling factor in vertical direction
+        float m_scaleFactorWidth;  // scaling factor in horizontal direction
+        std::vector<Rect> m_scaleAreas;  // areas after scaling
+        std::vector<float> m_scaleWeights;  // weights after scaling
+    };
+
+    virtual void init(const CvFeatureParams* _featureParams, int _maxSampleCount, Size _winSize) CV_OVERRIDE;
+    virtual void setImage(const Mat& img, uchar clsLabel = 0, int idx = 1) CV_OVERRIDE;
+    inline const std::vector<CvHaarEvaluator::FeatureHaar>& getFeatures() const { return features; }
+    inline CvHaarEvaluator::FeatureHaar& getFeatures(int idx)
+    {
+        return features[idx];
+    }
+    inline void setWinSize(Size patchSize) { winSize = patchSize; }
+    inline Size getWinSize() const { return winSize; }
+    virtual void generateFeatures() CV_OVERRIDE;
+
+    /**
+    * \brief Overload the original generateFeatures in order to limit the number of features
+    * @param numFeatures Number of features
+    */
+    virtual void generateFeatures(int numFeatures);
+
+protected:
+    bool isIntegral;
+
+    /* TODO Added from MIL implementation */
+    Mat _ii_img;
+    void compute_integral(const cv::Mat& img, std::vector<cv::Mat_<float>>& ii_imgs)
+    {
+        Mat ii_img;
+        integral(img, ii_img, CV_32F);
+        split(ii_img, ii_imgs);
+    }
+
+    std::vector<FeatureHaar> features;
+    Mat sum; /* sum images (each row represents image) */
+};
+
+}  // namespace feature
+
+//! @}
+
+}}}  // namespace cv::detail::tracking
+
+#endif
diff --git a/modules/video/include/opencv2/video/tracking.hpp b/modules/video/include/opencv2/video/tracking.hpp
index e5852eb190..b44f6855f8 100644
--- a/modules/video/include/opencv2/video/tracking.hpp
+++ b/modules/video/include/opencv2/video/tracking.hpp
@@ -705,6 +705,121 @@ public:
             double minEigThreshold = 1e-4);
 };
 
+
+
+
+/** @brief Base abstract class for the long-term tracker
+ */
+class CV_EXPORTS_W Tracker
+{
+protected:
+    Tracker();
+public:
+    virtual ~Tracker();
+
+    /** @brief Initialize the tracker with a known bounding box that surrounds the target
+    @param image The initial frame
+    @param boundingBox The initial bounding box
+    */
+    CV_WRAP virtual
+    void init(InputArray image, const Rect& boundingBox) = 0;
+
+    /** @brief Update the tracker, find the new most likely bounding box for the target
+    @param image The current frame
+    @param boundingBox The bounding box that represents the new target location if true was returned;
+    not modified otherwise
+
+    @return True means that the target was located and false means that the tracker cannot locate the
+    target in the current frame. Note that the latter *does not* imply that the tracker has failed;
+    the target may indeed be missing from the frame (say, out of sight)
+    */
+    CV_WRAP virtual
+    bool update(InputArray image, CV_OUT Rect& boundingBox) = 0;
+};
+
+
+
+/** @brief The MIL algorithm trains a classifier in an online manner to separate the object from the
+background.
+
+Multiple Instance Learning avoids the drift problem for robust tracking. The implementation is
+based on @cite MIL .
+
+Original code can be found here <http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml>
+ */
+class CV_EXPORTS_W TrackerMIL : public Tracker
+{
+protected:
+    TrackerMIL();  // use ::create()
+public:
+    virtual ~TrackerMIL() CV_OVERRIDE;
+
+    struct CV_EXPORTS_W_SIMPLE Params
+    {
+        CV_WRAP Params();
+        //parameters for sampler
+        CV_PROP_RW float samplerInitInRadius;  //!< radius for gathering positive instances during init
+        CV_PROP_RW int samplerInitMaxNegNum;  //!< # negative samples to use during init
+        CV_PROP_RW float samplerSearchWinSize;  //!< size of search window
+        CV_PROP_RW float samplerTrackInRadius;  //!< radius for gathering positive instances during tracking
+        CV_PROP_RW int samplerTrackMaxPosNum;  //!< # positive samples to use during tracking
+        CV_PROP_RW int samplerTrackMaxNegNum;  //!< # negative samples to use during tracking
+        CV_PROP_RW int featureSetNumFeatures;  //!< # features
+    };
+
+    /** @brief Create MIL tracker instance
+     *  @param parameters MIL parameters TrackerMIL::Params
+     */
+    static CV_WRAP
+    Ptr<TrackerMIL> create(const TrackerMIL::Params &parameters = TrackerMIL::Params());
+
+    //void init(InputArray image, const Rect& boundingBox) CV_OVERRIDE;
+    //bool update(InputArray image, CV_OUT Rect& boundingBox) CV_OVERRIDE;
+};
+
+
+
+/** @brief The GOTURN (Generic Object Tracking Using Regression Networks) tracker
+ *
+ *  GOTURN (@cite GOTURN) is a kind of tracker based on Convolutional Neural Networks (CNN). While taking all the advantages of CNN trackers,
+ *  GOTURN is much faster because it is trained offline and needs no online fine-tuning.
+ *  The GOTURN tracker addresses the problem of single-target tracking: given a bounding box label of an object in the first frame of the video,
+ *  we track that object through the rest of the video. NOTE: The current GOTURN method does not handle occlusions; however, it is fairly
+ *  robust to viewpoint changes, lighting changes, and deformations.
+ *  The inputs of GOTURN are two RGB patches representing the Target and Search patches, resized to 227x227.
+ *  The outputs of GOTURN are the predicted bounding box coordinates, relative to the Search patch coordinate system, in the format X1,Y1,X2,Y2.
+ *  The original paper is here: <http://davheld.github.io/GOTURN/GOTURN.pdf>
+ *  as well as the original authors' implementation: <https://github.com/davheld/GOTURN#train-the-tracker>
+ *  The implementation of the training algorithm is placed separately here due to third-party dependencies:
+ *  <https://github.com/Auron-X/GOTURN_Training_Toolkit>
+ *  The GOTURN architecture goturn.prototxt and the trained model goturn.caffemodel are available in the opencv_extra GitHub repository.
+ */
+class CV_EXPORTS_W TrackerGOTURN : public Tracker
+{
+protected:
+    TrackerGOTURN();  // use ::create()
+public:
+    virtual ~TrackerGOTURN() CV_OVERRIDE;
+
+    struct CV_EXPORTS_W_SIMPLE Params
+    {
+        CV_WRAP Params();
+        CV_PROP_RW std::string modelTxt;
+        CV_PROP_RW std::string modelBin;
+    };
+
+    /** @brief Constructor
+    @param parameters GOTURN parameters TrackerGOTURN::Params
+    */
+    static CV_WRAP
+    Ptr<TrackerGOTURN> create(const TrackerGOTURN::Params& parameters = TrackerGOTURN::Params());
+
+    //void init(InputArray image, const Rect& boundingBox) CV_OVERRIDE;
+    //bool update(InputArray image, CV_OUT Rect& boundingBox) CV_OVERRIDE;
+};
+
+
+
 //! @} video_track
 
 } // cv
diff --git a/modules/video/misc/java/test/TrackerCreateTest.java b/modules/video/misc/java/test/TrackerCreateTest.java
new file mode 100644
index 0000000000..dad696bebf
--- /dev/null
+++ b/modules/video/misc/java/test/TrackerCreateTest.java
@@ -0,0 +1,32 @@
+package org.opencv.test.video;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvException;
+import org.opencv.test.OpenCVTestCase;
+
+import org.opencv.video.Tracker;
+import org.opencv.video.TrackerGOTURN;
+import org.opencv.video.TrackerMIL;
+
+public class TrackerCreateTest extends OpenCVTestCase {
+
+    @Override
+    protected void setUp() throws Exception {
+        super.setUp();
+    }
+
+
+    public void testCreateTrackerGOTURN() {
+        try {
+            Tracker tracker = TrackerGOTURN.create();
+            assert(tracker != null);
+        } catch (CvException e) {
+            // expected, model files may be missing
+        }
+    }
+
+    public void testCreateTrackerMIL() {
+        Tracker tracker = TrackerMIL.create();
+    }
+
+}
diff --git a/modules/video/misc/python/pyopencv_video.hpp b/modules/video/misc/python/pyopencv_video.hpp
new file mode 100644
index 0000000000..761905c8bf
--- /dev/null
+++ b/modules/video/misc/python/pyopencv_video.hpp
@@ -0,0 +1,4 @@
+#ifdef HAVE_OPENCV_VIDEO
+typedef TrackerMIL::Params TrackerMIL_Params;
+typedef TrackerGOTURN::Params TrackerGOTURN_Params;
+#endif
diff --git a/modules/video/misc/python/test/test_tracking.py b/modules/video/misc/python/test/test_tracking.py
new file mode 100644
index 0000000000..40f1570d9f
--- /dev/null
+++ b/modules/video/misc/python/test/test_tracking.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+import os
+import numpy as np
+import cv2 as cv
+
+from tests_common import NewOpenCVTests, unittest
+
+class tracking_test(NewOpenCVTests):
+
+    def test_createTracker(self):
+        t = cv.TrackerMIL_create()
+        try:
+            t = cv.TrackerGOTURN_create()
+        except cv.error as e:
+            pass  # may fail due to missing DL model files
+
+
+if __name__ == '__main__':
+    NewOpenCVTests.bootstrap()
diff --git a/modules/video/perf/perf_main.cpp b/modules/video/perf/perf_main.cpp
index b6fd57a32d..1879aeff90 100644
--- a/modules/video/perf/perf_main.cpp
+++ b/modules/video/perf/perf_main.cpp
@@ -4,4 +4,19 @@
     #include <hpx/hpx_main.hpp>
 #endif
 
-CV_PERF_TEST_MAIN(video)
+static
+void initTests()
+{
+    const char* extraTestDataPath =
+#ifdef WINRT
+        NULL;
+#else
+        getenv("OPENCV_DNN_TEST_DATA_PATH");
+#endif
+    if (extraTestDataPath)
+        cvtest::addDataSearchPath(extraTestDataPath);
+
+    cvtest::addDataSearchSubDirectory("");  // override "cv" prefix below to access without "../dnn" hacks
+}
+
+CV_PERF_TEST_MAIN(video, initTests())
diff --git a/modules/video/perf/perf_trackers.cpp b/modules/video/perf/perf_trackers.cpp
new file mode 100644
index 0000000000..44f5184693
--- /dev/null
+++ b/modules/video/perf/perf_trackers.cpp
@@ -0,0 +1,104 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "perf_precomp.hpp"
+
+namespace opencv_test { namespace {
+using namespace perf;
+
+typedef tuple<string, int, Rect> TrackingParams_t;
+
+std::vector<TrackingParams_t> getTrackingParams()
+{
+    std::vector<TrackingParams_t> params {
+        TrackingParams_t("david/data/david.webm", 300, Rect(163,62,47,56)),
+        TrackingParams_t("dudek/data/dudek.webm", 1, Rect(123,87,132,176)),
+        TrackingParams_t("faceocc2/data/faceocc2.webm", 1, Rect(118,57,82,98))
+    };
+    return params;
+}
+
+class Tracking : public perf::TestBaseWithParam<TrackingParams_t>
+{
+public:
+    template<typename ROI_t = Rect2d, typename Tracker>
+    void runTrackingTest(const Ptr<Tracker>& tracker, const TrackingParams_t& params);
+};
+
+template<typename ROI_t, typename Tracker>
+void Tracking::runTrackingTest(const Ptr<Tracker>& tracker, const TrackingParams_t& params)
+{
+    const int N = 10;
+    string video = get<0>(params);
+    int startFrame = get<1>(params);
+    //int endFrame = startFrame + N;
+    Rect boundingBox = get<2>(params);
+
+    string videoPath = findDataFile(std::string("cv/tracking/") + video);
+
+    VideoCapture c;
+    c.open(videoPath);
+    if (!c.isOpened())
+        throw SkipTestException("Can't open video file");
+#if 0
+    // c.set(CAP_PROP_POS_FRAMES, startFrame);
+#else
+    if (startFrame)
+        std::cout << "startFrame = " << startFrame << std::endl;
+    for (int i = 0; i < startFrame; i++)
+    {
+        Mat dummy_frame;
+        c >> dummy_frame;
+        ASSERT_FALSE(dummy_frame.empty()) << i << ": " << videoPath;
+    }
+#endif
+
+    // decode frames into memory (don't measure decoding performance)
+    std::vector<Mat> frames;
+    for (int i = 0; i < N; ++i)
+    {
+        Mat frame;
+        c >> frame;
+        ASSERT_FALSE(frame.empty()) << "i=" << i;
+        frames.push_back(frame);
+    }
+
+    std::cout << "frame size = " << frames[0].size() << std::endl;
+
+    PERF_SAMPLE_BEGIN();
+    {
+        tracker->init(frames[0], (ROI_t)boundingBox);
+        for (int i = 1; i < N; ++i)
+        {
+            ROI_t rc;
+            tracker->update(frames[i], rc);
+            ASSERT_FALSE(rc.empty());
+        }
+    }
+    PERF_SAMPLE_END();
+
+    SANITY_CHECK_NOTHING();
+}
+
+
+//==================================================================================================
+
+PERF_TEST_P(Tracking, MIL, testing::ValuesIn(getTrackingParams()))
+{
+    auto tracker = TrackerMIL::create();
+    runTrackingTest<Rect>(tracker, GetParam());
+}
+
+PERF_TEST_P(Tracking, GOTURN, testing::ValuesIn(getTrackingParams()))
+{
+    std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
+    std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
+    TrackerGOTURN::Params params;
+    params.modelTxt = model;
+    params.modelBin = weights;
+    auto tracker = TrackerGOTURN::create(params);
+    runTrackingTest<Rect>(tracker, GetParam());
+}
+
+}} // namespace
diff --git a/modules/video/src/tracking/detail/tracker_feature.cpp b/modules/video/src/tracking/detail/tracker_feature.cpp
new file mode 100644
index 0000000000..47651f6657
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_feature.cpp
@@ -0,0 +1,25 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+TrackerFeature::~TrackerFeature()
+{
+    // nothing
+}
+
+void TrackerFeature::compute(const std::vector<Mat>& images, Mat& response)
+{
+    if (images.empty())
+        return;
+
+    computeImpl(images, response);
+}
+
+}}}  // namespace cv::detail::tracking
\ No newline at end of file
diff --git a/modules/video/src/tracking/detail/tracker_feature_haar.impl.hpp b/modules/video/src/tracking/detail/tracker_feature_haar.impl.hpp
new file mode 100644
index 0000000000..6590abf34f
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_feature_haar.impl.hpp
@@ -0,0 +1,121 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+#include "opencv2/video/detail/tracking_feature.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+inline namespace internal {
+
+class TrackerFeatureHAAR : public TrackerFeature
+{
+public:
+    struct Params
+    {
+        Params();
+        int numFeatures;  //!< # of rects
+        Size rectSize;  //!< rect size
+        bool isIntegral;  //!< true if input images are integral, false otherwise
+    };
+
+    TrackerFeatureHAAR(const TrackerFeatureHAAR::Params& parameters = TrackerFeatureHAAR::Params());
+
+    virtual ~TrackerFeatureHAAR() CV_OVERRIDE {}
+
+protected:
+    bool computeImpl(const std::vector<Mat>& images, Mat& response) CV_OVERRIDE;
+
+private:
+    Params params;
+    Ptr<CvHaarEvaluator> featureEvaluator;
+};
+
+/**
+ * Parameters
+ */
+
+TrackerFeatureHAAR::Params::Params()
+{
+    numFeatures = 250;
+    rectSize = Size(100, 100);
+    isIntegral = false;
+}
+
+TrackerFeatureHAAR::TrackerFeatureHAAR(const TrackerFeatureHAAR::Params& parameters)
+    : params(parameters)
+{
+    CvHaarFeatureParams haarParams;
+    haarParams.numFeatures = params.numFeatures;
+    haarParams.isIntegral = params.isIntegral;
+    featureEvaluator = makePtr<CvHaarEvaluator>();
+    featureEvaluator->init(&haarParams, 1, params.rectSize);
+}
+
+class Parallel_compute : public cv::ParallelLoopBody
+{
+private:
+    Ptr<CvHaarEvaluator> featureEvaluator;
+    std::vector<Mat> images;
+    Mat response;
+    //std::vector<CvHaarEvaluator::FeatureHaar> features;
+public:
+    Parallel_compute(Ptr<CvHaarEvaluator>& fe, const std::vector<Mat>& img, Mat& resp)
+        : featureEvaluator(fe)
+        , images(img)
+        , response(resp)
+    {
+
+        //features = featureEvaluator->getFeatures();
+    }
+
+    virtual void operator()(const cv::Range& r) const CV_OVERRIDE
+    {
+        for (int jf = r.start; jf != r.end; ++jf)
+        {
+            int cols = images[jf].cols;
+            int rows = images[jf].rows;
+            for (int j = 0; j < featureEvaluator->getNumFeatures(); j++)
+            {
+                float res = 0;
+                featureEvaluator->getFeatures()[j].eval(images[jf], Rect(0, 0, cols, rows), &res);
+                (Mat_<float>(response))(j, jf) = res;
+            }
+        }
+    }
+};
+
+bool TrackerFeatureHAAR::computeImpl(const std::vector<Mat>& images, Mat& response)
+{
+    if (images.empty())
+    {
+        return false;
+    }
+
+    int numFeatures = featureEvaluator->getNumFeatures();
+
+    response = Mat_<float>(Size((int)images.size(), numFeatures));
+
+    std::vector<CvHaarEvaluator::FeatureHaar> f = featureEvaluator->getFeatures();
+    //for each sample compute #n_feature -> put each feature (n Rect) in response
+    parallel_for_(Range(0, (int)images.size()), Parallel_compute(featureEvaluator, images, response));
+
+    /*for ( size_t i = 0; i < images.size(); i++ )
+  {
+    int c = images[i].cols;
+    int r = images[i].rows;
+    for ( int j = 0; j < numFeatures; j++ )
+    {
+      float res = 0;
+      featureEvaluator->getFeatures( j ).eval( images[i], Rect( 0, 0, c, r ), &res );
+      ( Mat_<float>( response ) )( j, i ) = res;
+    }
+  }*/
+
+    return true;
+}
+
+}}}}  // namespace cv::detail::tracking::internal
diff --git a/modules/video/src/tracking/detail/tracker_feature_set.cpp b/modules/video/src/tracking/detail/tracker_feature_set.cpp
new file mode 100644
index 0000000000..43f3203c52
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_feature_set.cpp
@@ -0,0 +1,60 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+TrackerFeatureSet::TrackerFeatureSet()
+{
+    blockAddTrackerFeature = false;
+}
+
+TrackerFeatureSet::~TrackerFeatureSet()
+{
+    // nothing
+}
+
+void TrackerFeatureSet::extraction(const std::vector<Mat>& images)
+{
+    blockAddTrackerFeature = true;
+
+    clearResponses();
+    responses.resize(features.size());
+
+    for (size_t i = 0; i < features.size(); i++)
+    {
+        CV_DbgAssert(features[i]);
+        features[i]->compute(images, responses[i]);
+    }
+}
+
+bool TrackerFeatureSet::addTrackerFeature(const Ptr<TrackerFeature>& feature)
+{
+    CV_Assert(!blockAddTrackerFeature);
+    CV_Assert(feature);
+
+    features.push_back(feature);
+    return true;
+}
+
+const std::vector<Ptr<TrackerFeature>>& TrackerFeatureSet::getTrackerFeatures() const
+{
+    return features;
+}
+
+const std::vector<Mat>& TrackerFeatureSet::getResponses() const
+{
+    return responses;
+}
+
+void TrackerFeatureSet::clearResponses()
+{
+    responses.clear();
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracker_mil_model.cpp b/modules/video/src/tracking/detail/tracker_mil_model.cpp
new file mode 100644
index 0000000000..8769d66c09
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_mil_model.cpp
@@ -0,0 +1,85 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "tracker_mil_model.hpp"
+
+/**
+ * TrackerMILModel
+ */
+
+namespace cv {
+inline namespace tracking {
+namespace impl {
+
+TrackerMILModel::TrackerMILModel(const Rect& boundingBox)
+{
+    currentSample.clear();
+    mode = MODE_POSITIVE;
+    width = boundingBox.width;
+    height = boundingBox.height;
+
+    Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState> initState = Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState>(
+            new TrackerStateEstimatorMILBoosting::TrackerMILTargetState(Point2f((float)boundingBox.x, (float)boundingBox.y), boundingBox.width, boundingBox.height,
+                    true, Mat()));
+    trajectory.push_back(initState);
+}
+
+void TrackerMILModel::responseToConfidenceMap(const std::vector<Mat>& responses, ConfidenceMap& confidenceMap)
+{
+    if (currentSample.empty())
+    {
+        CV_Error(-1, "The samples in Model estimation are empty");
+    }
+
+    for (size_t i = 0; i < responses.size(); i++)
+    {
+        //for each column (one sample) there are #num_feature responses
+        //get information from currentSample
+        for (int j = 0; j < responses.at(i).cols; j++)
+        {
+
+            Size currentSize;
+            Point currentOfs;
+            currentSample.at(j).locateROI(currentSize, currentOfs);
+            bool foreground = false;
+            if (mode == MODE_POSITIVE || mode == MODE_ESTIMATON)
+            {
+                foreground = true;
+            }
+            else if (mode == MODE_NEGATIVE)
+            {
+                foreground = false;
+            }
+
+            //get the column of the HAAR responses
+            Mat singleResponse = responses.at(i).col(j);
+
+            //create the state
+            Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState> currentState = Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState>(
+                    new TrackerStateEstimatorMILBoosting::TrackerMILTargetState(currentOfs, width, height, foreground, singleResponse));
+
+            confidenceMap.push_back(std::make_pair(currentState, 0.0f));
+        }
+    }
+}
+
+void TrackerMILModel::modelEstimationImpl(const std::vector<Mat>& responses)
+{
+    responseToConfidenceMap(responses, currentConfidenceMap);
+}
+
+void TrackerMILModel::modelUpdateImpl()
+{
+}
+
+void TrackerMILModel::setMode(int trainingMode, const std::vector<Mat>& samples)
+{
+    currentSample.clear();
+    currentSample = samples;
+
+    mode = trainingMode;
+}
+
+}}}  // namespace cv::tracking::impl
diff --git a/modules/video/src/tracking/detail/tracker_mil_model.hpp b/modules/video/src/tracking/detail/tracker_mil_model.hpp
new file mode 100644
index 0000000000..04d9176298
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_mil_model.hpp
@@ -0,0 +1,67 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef __OPENCV_TRACKER_MIL_MODEL_HPP__
+#define __OPENCV_TRACKER_MIL_MODEL_HPP__
+
+#include "opencv2/video/detail/tracking.private.hpp"
+#include "tracker_mil_state.hpp"
+
+namespace cv {
+inline namespace tracking {
+namespace impl {
+
+using namespace cv::detail::tracking;
+
+/**
+ * \brief Implementation of TrackerModel for MIL algorithm
+ */
+class TrackerMILModel : public detail::TrackerModel
+{
+public:
+    enum
+    {
+        MODE_POSITIVE = 1,  // mode for positive features
+        MODE_NEGATIVE = 2,  // mode for negative features
+        MODE_ESTIMATON = 3  // mode for estimation step
+    };
+
+    /**
+   * \brief Constructor
+   * \param boundingBox The first boundingBox
+   */
+    TrackerMILModel(const Rect& boundingBox);
+
+    /**
+   * \brief Destructor
+   */
+    ~TrackerMILModel() {};
+
+    /**
+   * \brief Set the mode
+   */
+    void setMode(int trainingMode, const std::vector<Mat>& samples);
+
+    /**
+   * \brief Create the ConfidenceMap from a list of responses
+   * \param responses The list of the responses
+   * \param confidenceMap The output
+   */
+    void responseToConfidenceMap(const std::vector<Mat>& responses, ConfidenceMap& confidenceMap);
+
+protected:
+    void modelEstimationImpl(const std::vector<Mat>& responses) CV_OVERRIDE;
+    void modelUpdateImpl() CV_OVERRIDE;
+
+private:
+    int mode;
+    std::vector<Mat> currentSample;
+
+    int width;  //initial width of the boundingBox
+    int height;  //initial height of the boundingBox
+};
+
+}}}  // namespace cv::tracking::impl
+
+#endif
diff --git a/modules/video/src/tracking/detail/tracker_mil_state.cpp b/modules/video/src/tracking/detail/tracker_mil_state.cpp
new file mode 100644
index 0000000000..63591382b0
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_mil_state.cpp
@@ -0,0 +1,159 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+#include "tracker_mil_state.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+/**
+ * TrackerStateEstimatorMILBoosting::TrackerMILTargetState
+ */
+TrackerStateEstimatorMILBoosting::TrackerMILTargetState::TrackerMILTargetState(const Point2f& position, int width, int height, bool foreground,
+        const Mat& features)
+{
+    setTargetPosition(position);
+    setTargetWidth(width);
+    setTargetHeight(height);
+    setTargetFg(foreground);
+    setFeatures(features);
+}
+
+void TrackerStateEstimatorMILBoosting::TrackerMILTargetState::setTargetFg(bool foreground)
+{
+    isTarget = foreground;
+}
+
+void TrackerStateEstimatorMILBoosting::TrackerMILTargetState::setFeatures(const Mat& features)
+{
+    targetFeatures = features;
+}
+
+bool TrackerStateEstimatorMILBoosting::TrackerMILTargetState::isTargetFg() const
+{
+    return isTarget;
+}
+
+Mat TrackerStateEstimatorMILBoosting::TrackerMILTargetState::getFeatures() const
+{
+    return targetFeatures;
+}
+
+TrackerStateEstimatorMILBoosting::TrackerStateEstimatorMILBoosting(int nFeatures)
+{
+    className = "BOOSTING";
+    trained = false;
+    numFeatures = nFeatures;
+}
+
+TrackerStateEstimatorMILBoosting::~TrackerStateEstimatorMILBoosting()
+{
+}
+
+void TrackerStateEstimatorMILBoosting::setCurrentConfidenceMap(ConfidenceMap& confidenceMap)
+{
+    currentConfidenceMap.clear();
+    currentConfidenceMap = confidenceMap;
+}
+
+uint TrackerStateEstimatorMILBoosting::max_idx(const std::vector<float>& v)
+{
+    const float* findPtr = &(*std::max_element(v.begin(), v.end()));
+    const float* beginPtr = &(*v.begin());
+    return (uint)(findPtr - beginPtr);
+}
+
+Ptr<TrackerTargetState> TrackerStateEstimatorMILBoosting::estimateImpl(const std::vector<ConfidenceMap>& /*confidenceMaps*/)
+{
+    //run ClfMilBoost classify in order to compute next location
+    if (currentConfidenceMap.empty())
+        return Ptr<TrackerTargetState>();
+
+    Mat positiveStates;
+    Mat negativeStates;
+
+    prepareData(currentConfidenceMap, positiveStates, negativeStates);
+
+    std::vector<float> prob = boostMILModel.classify(positiveStates);
+
+    int bestind = max_idx(prob);
+    //float resp = prob[bestind];
+
+    return currentConfidenceMap.at(bestind).first;
+}
+
+void TrackerStateEstimatorMILBoosting::prepareData(const ConfidenceMap& confidenceMap, Mat& positive, Mat& negative)
+{
+
+    int posCounter = 0;
+    int negCounter = 0;
+
+    for (size_t i = 0; i < confidenceMap.size(); i++)
+    {
+        Ptr<TrackerMILTargetState> currentTargetState = confidenceMap.at(i).first.staticCast<TrackerMILTargetState>();
+        CV_DbgAssert(currentTargetState);
+        if (currentTargetState->isTargetFg())
+            posCounter++;
+        else
+            negCounter++;
+    }
+
+    positive.create(posCounter, numFeatures, CV_32FC1);
+    negative.create(negCounter, numFeatures, CV_32FC1);
+
+    //TODO change with mat fast access
+    //initialize trainData (positive and negative)
+
+    int pc = 0;
+    int nc = 0;
+    for (size_t i = 0; i < confidenceMap.size(); i++)
+    {
+        Ptr<TrackerMILTargetState> currentTargetState = confidenceMap.at(i).first.staticCast<TrackerMILTargetState>();
+        Mat stateFeatures = currentTargetState->getFeatures();
+
+        if (currentTargetState->isTargetFg())
+        {
+            for (int j = 0; j < stateFeatures.rows; j++)
+            {
+                //fill the positive trainData with the value of the feature j for sample i
+                positive.at<float>(pc, j) = stateFeatures.at<float>(j, 0);
+            }
+            pc++;
+        }
+        else
+        {
+            for (int j = 0; j < stateFeatures.rows; j++)
+            {
+                //fill the negative trainData with the value of the feature j for sample i
+                negative.at<float>(nc, j) = stateFeatures.at<float>(j, 0);
+            }
+            nc++;
+        }
+    }
+}
+
+void TrackerStateEstimatorMILBoosting::updateImpl(std::vector<ConfidenceMap>& confidenceMaps)
+{
+    if (!trained)
+    {
+        //this is the first time that the classifier is built
+        //init MIL
+        boostMILModel.init();
+        trained = true;
+    }
+
+    ConfidenceMap lastConfidenceMap = confidenceMaps.back();
+    Mat positiveStates;
+    Mat negativeStates;
+
+    prepareData(lastConfidenceMap, positiveStates, negativeStates);
+    //update MIL
+    boostMILModel.update(positiveStates, negativeStates);
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracker_mil_state.hpp b/modules/video/src/tracking/detail/tracker_mil_state.hpp
new file mode 100644
index 0000000000..e78b19dec2
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_mil_state.hpp
@@ -0,0 +1,87 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_VIDEO_DETAIL_TRACKING_MIL_STATE_HPP
+#define OPENCV_VIDEO_DETAIL_TRACKING_MIL_STATE_HPP
+
+#include "opencv2/video/detail/tracking.private.hpp"
+#include "tracking_online_mil.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+/** @brief TrackerStateEstimator based on online Multiple Instance Learning (MIL) boosting
+*/
+class CV_EXPORTS TrackerStateEstimatorMILBoosting : public TrackerStateEstimator
+{
+public:
+    /**
+    * Implementation of the target state for TrackerStateEstimatorMILBoosting
+    */
+    class TrackerMILTargetState : public TrackerTargetState
+    {
+
+    public:
+        /**
+        * \brief Constructor
+        * \param position Top left corner of the bounding box
+        * \param width Width of the bounding box
+        * \param height Height of the bounding box
+        * \param foreground Label: true for target (foreground), false for background
+        * \param features Features extracted from the sample
+        */
+        TrackerMILTargetState(const Point2f& position, int width, int height, bool foreground, const Mat& features);
+
+        ~TrackerMILTargetState() {}
+
+        /** @brief Set label: true for target foreground, false for background
+        @param foreground Label for background/foreground
+        */
+        void setTargetFg(bool foreground);
+        /** @brief Set the features extracted from TrackerFeatureSet
+        @param features The features extracted
+        */
+        void setFeatures(const Mat& features);
+        /** @brief Get the label. Return true for target foreground, false for background
+        */
+        bool isTargetFg() const;
+        /** @brief Get the features extracted
+        */
+        Mat getFeatures() const;
+
+    private:
+        bool isTarget;
+        Mat targetFeatures;
+    };
+
+    /** @brief Constructor
+    @param nFeatures Number of features for each sample
+    */
+    TrackerStateEstimatorMILBoosting(int nFeatures = 250);
+    ~TrackerStateEstimatorMILBoosting();
+
+    /** @brief Set the current confidenceMap
+    @param confidenceMap The current ConfidenceMap
+    */
+    void setCurrentConfidenceMap(ConfidenceMap& confidenceMap);
+
+protected:
+    Ptr<TrackerTargetState> estimateImpl(const std::vector<ConfidenceMap>& confidenceMaps) CV_OVERRIDE;
+    void updateImpl(std::vector<ConfidenceMap>& confidenceMaps) CV_OVERRIDE;
+
+private:
+    uint max_idx(const std::vector<float>& v);
+    void prepareData(const ConfidenceMap& confidenceMap, Mat& positive, Mat& negative);
+
+    ClfMilBoost boostMILModel;
+    bool trained;
+    int numFeatures;
+
+    ConfidenceMap currentConfidenceMap;
+};
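+
+/* Usage sketch (illustrative only; 'map' is a ConfidenceMap filled by the tracker
+ * model for the current frame):
+ *
+ *   TrackerStateEstimatorMILBoosting estimator(250);
+ *   estimator.setCurrentConfidenceMap(map);
+ *   std::vector<ConfidenceMap> maps = { map };
+ *   estimator.update(maps);                                   // train/update the MIL model
+ *   Ptr<TrackerTargetState> best = estimator.estimate(maps);  // most confident state
+ */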
+
+}}}  // namespace cv::detail::tracking
+
+#endif
diff --git a/modules/video/src/tracking/detail/tracker_model.cpp b/modules/video/src/tracking/detail/tracker_model.cpp
new file mode 100644
index 0000000000..c9ea424aaf
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_model.cpp
@@ -0,0 +1,132 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+TrackerModel::TrackerModel()
+{
+    stateEstimator = Ptr<TrackerStateEstimator>();
+    maxCMLength = 10;
+}
+
+TrackerModel::~TrackerModel()
+{
+    // nothing
+}
+
+bool TrackerModel::setTrackerStateEstimator(Ptr<TrackerStateEstimator> trackerStateEstimator)
+{
+    if (stateEstimator.get())
+    {
+        return false;
+    }
+
+    stateEstimator = trackerStateEstimator;
+    return true;
+}
+
+Ptr<TrackerStateEstimator> TrackerModel::getTrackerStateEstimator() const
+{
+    return stateEstimator;
+}
+
+void TrackerModel::modelEstimation(const std::vector<Mat>& responses)
+{
+    modelEstimationImpl(responses);
+}
+
+void TrackerModel::clearCurrentConfidenceMap()
+{
+    currentConfidenceMap.clear();
+}
+
+void TrackerModel::modelUpdate()
+{
+    modelUpdateImpl();
+
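+    //bounded history: once confidenceMaps/trajectory reach maxCMLength - 1 entries,
+    //drop the older half so memory stays bounded (maxCMLength == -1 disables pruning)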
+    if (maxCMLength != -1 && (int)confidenceMaps.size() >= maxCMLength - 1)
+    {
+        int l = maxCMLength / 2;
+        confidenceMaps.erase(confidenceMaps.begin(), confidenceMaps.begin() + l);
+    }
+    if (maxCMLength != -1 && (int)trajectory.size() >= maxCMLength - 1)
+    {
+        int l = maxCMLength / 2;
+        trajectory.erase(trajectory.begin(), trajectory.begin() + l);
+    }
+    confidenceMaps.push_back(currentConfidenceMap);
+    stateEstimator->update(confidenceMaps);
+
+    clearCurrentConfidenceMap();
+}
+
+bool TrackerModel::runStateEstimator()
+{
+    if (!stateEstimator)
+    {
+        CV_Error(Error::StsNullPtr, "Tracker state estimator is not set");
+    }
+    Ptr<TrackerTargetState> targetState = stateEstimator->estimate(confidenceMaps);
+    if (!targetState)
+        return false;
+
+    setLastTargetState(targetState);
+    return true;
+}
+
+void TrackerModel::setLastTargetState(const Ptr<TrackerTargetState>& lastTargetState)
+{
+    trajectory.push_back(lastTargetState);
+}
+
+Ptr<TrackerTargetState> TrackerModel::getLastTargetState() const
+{
+    return trajectory.back();
+}
+
+const std::vector<ConfidenceMap>& TrackerModel::getConfidenceMaps() const
+{
+    return confidenceMaps;
+}
+
+const ConfidenceMap& TrackerModel::getLastConfidenceMap() const
+{
+    return confidenceMaps.back();
+}
+
+Point2f TrackerTargetState::getTargetPosition() const
+{
+    return targetPosition;
+}
+
+void TrackerTargetState::setTargetPosition(const Point2f& position)
+{
+    targetPosition = position;
+}
+
+int TrackerTargetState::getTargetWidth() const
+{
+    return targetWidth;
+}
+
+void TrackerTargetState::setTargetWidth(int width)
+{
+    targetWidth = width;
+}
+int TrackerTargetState::getTargetHeight() const
+{
+    return targetHeight;
+}
+
+void TrackerTargetState::setTargetHeight(int height)
+{
+    targetHeight = height;
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracker_sampler.cpp b/modules/video/src/tracking/detail/tracker_sampler.cpp
new file mode 100644
index 0000000000..ec11656958
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_sampler.cpp
@@ -0,0 +1,68 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+
+#include "opencv2/video/detail/tracking.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+TrackerSampler::TrackerSampler()
+{
+    blockAddTrackerSampler = false;
+}
+
+TrackerSampler::~TrackerSampler()
+{
+    // nothing
+}
+
+void TrackerSampler::sampling(const Mat& image, Rect boundingBox)
+{
+    clearSamples();
+
+    for (size_t i = 0; i < samplers.size(); i++)
+    {
+        CV_DbgAssert(samplers[i]);
+        std::vector<Mat> current_samples;
+        samplers[i]->sampling(image, boundingBox, current_samples);
+
+        //append all current_samples to samples
+        samples.insert(samples.end(), current_samples.begin(), current_samples.end());
+    }
+
+    blockAddTrackerSampler = true;
+}
+
+bool TrackerSampler::addTrackerSamplerAlgorithm(const Ptr<TrackerSamplerAlgorithm>& sampler)
+{
+    CV_Assert(!blockAddTrackerSampler);
+    CV_Assert(sampler);
+
+    samplers.push_back(sampler);
+    return true;
+}
+
+const std::vector<Ptr<TrackerSamplerAlgorithm>>& TrackerSampler::getSamplers() const
+{
+    return samplers;
+}
+
+const std::vector<Mat>& TrackerSampler::getSamples() const
+{
+    return samples;
+}
+
+void TrackerSampler::clearSamples()
+{
+    samples.clear();
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracker_sampler_algorithm.cpp b/modules/video/src/tracking/detail/tracker_sampler_algorithm.cpp
new file mode 100644
index 0000000000..b5eb285e1a
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_sampler_algorithm.cpp
@@ -0,0 +1,124 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+TrackerSamplerAlgorithm::~TrackerSamplerAlgorithm()
+{
+    // nothing
+}
+
+TrackerSamplerCSC::Params::Params()
+{
+    initInRad = 3;
+    initMaxNegNum = 65;
+    searchWinSize = 25;
+    trackInPosRad = 4;
+    trackMaxNegNum = 65;
+    trackMaxPosNum = 100000;
+}
+
+TrackerSamplerCSC::TrackerSamplerCSC(const TrackerSamplerCSC::Params& parameters)
+    : params(parameters)
+{
+    mode = MODE_INIT_POS;
+    rng = theRNG();
+}
+
+TrackerSamplerCSC::~TrackerSamplerCSC()
+{
+    // nothing
+}
+
+bool TrackerSamplerCSC::sampling(const Mat& image, const Rect& boundingBox, std::vector<Mat>& sample)
+{
+    CV_Assert(!image.empty());
+
+    float inrad = 0;
+    float outrad = 0;
+    int maxnum = 0;
+
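+    //each mode samples patches whose centre offset lies in the annulus [outrad, inrad):
+    //  MODE_INIT_POS / MODE_TRACK_POS - small disc of positives around the target
+    //  MODE_INIT_NEG / MODE_TRACK_NEG - ring of negatives away from the target
+    //  MODE_DETECT                    - disc of candidates within the search window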
+    switch (mode)
+    {
+    case MODE_INIT_POS:
+        inrad = params.initInRad;
+        sample = sampleImage(image, boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, inrad);
+        break;
+    case MODE_INIT_NEG:
+        inrad = 2.0f * params.searchWinSize;
+        outrad = 1.5f * params.initInRad;
+        maxnum = params.initMaxNegNum;
+        sample = sampleImage(image, boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, inrad, outrad, maxnum);
+        break;
+    case MODE_TRACK_POS:
+        inrad = params.trackInPosRad;
+        outrad = 0;
+        maxnum = params.trackMaxPosNum;
+        sample = sampleImage(image, boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, inrad, outrad, maxnum);
+        break;
+    case MODE_TRACK_NEG:
+        inrad = 1.5f * params.searchWinSize;
+        outrad = params.trackInPosRad + 5;
+        maxnum = params.trackMaxNegNum;
+        sample = sampleImage(image, boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, inrad, outrad, maxnum);
+        break;
+    case MODE_DETECT:
+        inrad = params.searchWinSize;
+        sample = sampleImage(image, boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, inrad);
+        break;
+    default:
+        inrad = params.initInRad;
+        sample = sampleImage(image, boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, inrad);
+        break;
+    }
+    return true;  // samples for the requested mode were generated above
+}
+
+void TrackerSamplerCSC::setMode(int samplingMode)
+{
+    mode = samplingMode;
+}
+
+std::vector<Mat> TrackerSamplerCSC::sampleImage(const Mat& img, int x, int y, int w, int h, float inrad, float outrad, int maxnum)
+{
+    int rowsz = img.rows - h - 1;
+    int colsz = img.cols - w - 1;
+    float inradsq = inrad * inrad;
+    float outradsq = outrad * outrad;
+
+    int minrow = max(0, (int)y - (int)inrad);
+    int maxrow = min(rowsz - 1, (int)y + (int)inrad);
+    int mincol = max(0, (int)x - (int)inrad);
+    int maxcol = min(colsz - 1, (int)x + (int)inrad);
+
+    //fprintf(stderr,"inrad=%f minrow=%d maxrow=%d mincol=%d maxcol=%d\n",inrad,minrow,maxrow,mincol,maxcol);
+
+    std::vector<Mat> samples;
+    samples.resize((maxrow - minrow + 1) * (maxcol - mincol + 1));
+    int i = 0;
+
+    float prob = ((float)(maxnum)) / samples.size();
+
+    for (int r = minrow; r <= maxrow; r++)
+        for (int c = mincol; c <= maxcol; c++)
+        {
+            int dist = (y - r) * (y - r) + (x - c) * (x - c);
+            if (rng.uniform(0.f, 1.f) < prob && dist < inradsq && dist >= outradsq)
+            {
+                samples[i] = img(Rect(c, r, w, h));
+                i++;
+            }
+        }
+
+    samples.resize(min(i, maxnum));
+    return samples;
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracker_state_estimator.cpp b/modules/video/src/tracking/detail/tracker_state_estimator.cpp
new file mode 100644
index 0000000000..2410b5b076
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracker_state_estimator.cpp
@@ -0,0 +1,37 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+TrackerStateEstimator::~TrackerStateEstimator()
+{
+}
+
+Ptr<TrackerTargetState> TrackerStateEstimator::estimate(const std::vector<ConfidenceMap>& confidenceMaps)
+{
+    if (confidenceMaps.empty())
+        return Ptr<TrackerTargetState>();
+
+    return estimateImpl(confidenceMaps);
+}
+
+void TrackerStateEstimator::update(std::vector<ConfidenceMap>& confidenceMaps)
+{
+    if (confidenceMaps.empty())
+        return;
+
+    return updateImpl(confidenceMaps);
+}
+
+String TrackerStateEstimator::getClassName() const
+{
+    return className;
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracking_feature.cpp b/modules/video/src/tracking/detail/tracking_feature.cpp
new file mode 100644
index 0000000000..1850995fee
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracking_feature.cpp
@@ -0,0 +1,582 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "opencv2/video/detail/tracking.private.hpp"
+#include "opencv2/video/detail/tracking_feature.private.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+/*
+ * TODO This implementation is based on apps/traincascade/
+ * TODO Changed CvHaarEvaluator based on ADABOOSTING implementation (Grabner et al.)
+ */
+
+CvParams::CvParams()
+{
+    // nothing
+}
+
+//---------------------------- FeatureParams --------------------------------------
+
+CvFeatureParams::CvFeatureParams()
+    : maxCatCount(0)
+    , featSize(1)
+    , numFeatures(1)
+{
+    // nothing
+}
+
+//------------------------------------- FeatureEvaluator ---------------------------------------
+
+void CvFeatureEvaluator::init(const CvFeatureParams* _featureParams, int _maxSampleCount, Size _winSize)
+{
+    CV_Assert(_featureParams);
+    CV_Assert(_maxSampleCount > 0);
+    featureParams = (CvFeatureParams*)_featureParams;
+    winSize = _winSize;
+    numFeatures = _featureParams->numFeatures;
+    cls.create((int)_maxSampleCount, 1, CV_32FC1);
+    generateFeatures();
+}
+
+void CvFeatureEvaluator::setImage(const Mat& img, uchar clsLabel, int idx)
+{
+    winSize.width = img.cols;
+    winSize.height = img.rows;
+    //CV_Assert( img.cols == winSize.width );
+    //CV_Assert( img.rows == winSize.height );
+    CV_Assert(idx < cls.rows);
+    cls.ptr<float>(idx)[0] = clsLabel;
+}
+
+CvHaarFeatureParams::CvHaarFeatureParams()
+{
+    isIntegral = false;
+}
+
+//--------------------- HaarFeatureEvaluator ----------------
+
+void CvHaarEvaluator::init(const CvFeatureParams* _featureParams, int /*_maxSampleCount*/, Size _winSize)
+{
+    CV_Assert(_featureParams);
+    int cols = (_winSize.width + 1) * (_winSize.height + 1);
+    sum.create(1, cols, CV_32SC1);
+    isIntegral = ((CvHaarFeatureParams*)_featureParams)->isIntegral;
+    CvFeatureEvaluator::init(_featureParams, 1, _winSize);
+}
+
+void CvHaarEvaluator::setImage(const Mat& img, uchar /*clsLabel*/, int /*idx*/)
+{
+    CV_DbgAssert(!sum.empty());
+
+    winSize.width = img.cols;
+    winSize.height = img.rows;
+
+    CvFeatureEvaluator::setImage(img, 1, 0);
+    if (!isIntegral)
+    {
+        std::vector<Mat_<float>> ii_imgs;
+        compute_integral(img, ii_imgs);
+        _ii_img = ii_imgs[0];
+    }
+    else
+    {
+        _ii_img = img;
+    }
+}
+
+void CvHaarEvaluator::generateFeatures()
+{
+    generateFeatures(featureParams->numFeatures);
+}
+
+void CvHaarEvaluator::generateFeatures(int nFeatures)
+{
+    for (int i = 0; i < nFeatures; i++)
+    {
+        CvHaarEvaluator::FeatureHaar feature(Size(winSize.width, winSize.height));
+        features.push_back(feature);
+    }
+}
+
+#define INITSIGMA(numAreas) (static_cast<float>(sqrt(256.0f * 256.0f / 12.0f * (numAreas))))
+
+CvHaarEvaluator::FeatureHaar::FeatureHaar(Size patchSize)
+{
+    try
+    {
+        generateRandomFeature(patchSize);
+    }
+    catch (...)
+    {
+        // FIXIT
+        throw;
+    }
+}
+
+void CvHaarEvaluator::FeatureHaar::generateRandomFeature(Size patchSize)
+{
+    cv::Point2i position;
+    Size baseDim;
+    Size sizeFactor;
+    int area;
+
+    CV_Assert(!patchSize.empty());
+
+    //Size minSize = Size( 3, 3 );
+    int minArea = 9;
+
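+    //draw one of the classic Haar-like layouts (probabilities in probType below):
+    //two-rect edges, three-rect lines, a four-rect checkerboard and a 3x3
+    //center-surround; the remaining layouts currently have probability 0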
+    bool valid = false;
+    while (!valid)
+    {
+        //choose position and scale
+        position.y = rand() % (patchSize.height);
+        position.x = rand() % (patchSize.width);
+
+        baseDim.width = (int)((1 - sqrt(1 - (float)rand() * (float)(1.0 / RAND_MAX))) * patchSize.width);
+        baseDim.height = (int)((1 - sqrt(1 - (float)rand() * (float)(1.0 / RAND_MAX))) * patchSize.height);
+
+        //select types
+        //float probType[11] = {0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0909f, 0.0950f};
+        float probType[11] = { 0.2f, 0.2f, 0.2f, 0.2f, 0.2f, 0.2f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
+        float prob = (float)rand() * (float)(1.0 / RAND_MAX);
+
+        if (prob < probType[0])
+        {
+            //check if feature is valid
+            sizeFactor.height = 2;
+            sizeFactor.width = 1;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 1;
+            m_numAreas = 2;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x;
+            m_areas[1].y = position.y + baseDim.height;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1])
+        {
+            //check if feature is valid
+            sizeFactor.height = 1;
+            sizeFactor.width = 2;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 2;
+            m_numAreas = 2;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2])
+        {
+            //check if feature is valid
+            sizeFactor.height = 4;
+            sizeFactor.width = 1;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 3;
+            m_numAreas = 3;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -2;
+            m_weights[2] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x;
+            m_areas[1].y = position.y + baseDim.height;
+            m_areas[1].height = 2 * baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_areas[2].y = position.y + 3 * baseDim.height;
+            m_areas[2].x = position.x;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2] + probType[3])
+        {
+            //check if feature is valid
+            sizeFactor.height = 1;
+            sizeFactor.width = 4;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 3;
+            m_numAreas = 3;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -2;
+            m_weights[2] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = 2 * baseDim.width;
+            m_areas[2].y = position.y;
+            m_areas[2].x = position.x + 3 * baseDim.width;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2] + probType[3] + probType[4])
+        {
+            //check if feature is valid
+            sizeFactor.height = 2;
+            sizeFactor.width = 2;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 5;
+            m_numAreas = 4;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -1;
+            m_weights[2] = -1;
+            m_weights[3] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_areas[2].y = position.y + baseDim.height;
+            m_areas[2].x = position.x;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_areas[3].y = position.y + baseDim.height;
+            m_areas[3].x = position.x + baseDim.width;
+            m_areas[3].height = baseDim.height;
+            m_areas[3].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2] + probType[3] + probType[4] + probType[5])
+        {
+            //check if feature is valid
+            sizeFactor.height = 3;
+            sizeFactor.width = 3;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 6;
+            m_numAreas = 2;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -9;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = 3 * baseDim.height;
+            m_areas[0].width = 3 * baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y + baseDim.height;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_initMean = -8 * 128;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2] + probType[3] + probType[4] + probType[5] + probType[6])
+        {
+            //check if feature is valid
+            sizeFactor.height = 3;
+            sizeFactor.width = 1;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 7;
+            m_numAreas = 3;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -2;
+            m_weights[2] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x;
+            m_areas[1].y = position.y + baseDim.height;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_areas[2].y = position.y + baseDim.height * 2;
+            m_areas[2].x = position.x;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2] + probType[3] + probType[4] + probType[5] + probType[6] + probType[7])
+        {
+            //check if feature is valid
+            sizeFactor.height = 1;
+            sizeFactor.width = 3;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+
+            if (area < minArea)
+                continue;
+
+            m_type = 8;
+            m_numAreas = 3;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -2;
+            m_weights[2] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_areas[2].y = position.y;
+            m_areas[2].x = position.x + 2 * baseDim.width;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob < probType[0] + probType[1] + probType[2] + probType[3] + probType[4] + probType[5] + probType[6] + probType[7] + probType[8])
+        {
+            //check if feature is valid
+            sizeFactor.height = 3;
+            sizeFactor.width = 3;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 9;
+            m_numAreas = 2;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -2;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = 3 * baseDim.height;
+            m_areas[0].width = 3 * baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y + baseDim.height;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_initMean = 0;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob
+                < probType[0] + probType[1] + probType[2] + probType[3] + probType[4] + probType[5] + probType[6] + probType[7] + probType[8] + probType[9])
+        {
+            //check if feature is valid
+            sizeFactor.height = 3;
+            sizeFactor.width = 1;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 10;
+            m_numAreas = 3;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -1;
+            m_weights[2] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x;
+            m_areas[1].y = position.y + baseDim.height;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_areas[2].y = position.y + baseDim.height * 2;
+            m_areas[2].x = position.x;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_initMean = 128;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else if (prob
+                < probType[0] + probType[1] + probType[2] + probType[3] + probType[4] + probType[5] + probType[6] + probType[7] + probType[8] + probType[9]
+                        + probType[10])
+        {
+            //check if feature is valid
+            sizeFactor.height = 1;
+            sizeFactor.width = 3;
+            if (position.y + baseDim.height * sizeFactor.height >= patchSize.height || position.x + baseDim.width * sizeFactor.width >= patchSize.width)
+                continue;
+            area = baseDim.height * sizeFactor.height * baseDim.width * sizeFactor.width;
+            if (area < minArea)
+                continue;
+
+            m_type = 11;
+            m_numAreas = 3;
+            m_weights.resize(m_numAreas);
+            m_weights[0] = 1;
+            m_weights[1] = -1;
+            m_weights[2] = 1;
+            m_areas.resize(m_numAreas);
+            m_areas[0].x = position.x;
+            m_areas[0].y = position.y;
+            m_areas[0].height = baseDim.height;
+            m_areas[0].width = baseDim.width;
+            m_areas[1].x = position.x + baseDim.width;
+            m_areas[1].y = position.y;
+            m_areas[1].height = baseDim.height;
+            m_areas[1].width = baseDim.width;
+            m_areas[2].y = position.y;
+            m_areas[2].x = position.x + 2 * baseDim.width;
+            m_areas[2].height = baseDim.height;
+            m_areas[2].width = baseDim.width;
+            m_initMean = 128;
+            m_initSigma = INITSIGMA(m_numAreas);
+            valid = true;
+        }
+        else
+            CV_Error(Error::StsAssert, "unreachable: feature-type probabilities must sum to 1");
+    }
+
+    m_initSize = patchSize;
+    m_curSize = m_initSize;
+    m_scaleFactorWidth = m_scaleFactorHeight = 1.0f;
+    m_scaleAreas.resize(m_numAreas);
+    m_scaleWeights.resize(m_numAreas);
+    for (int curArea = 0; curArea < m_numAreas; curArea++)
+    {
+        m_scaleAreas[curArea] = m_areas[curArea];
+        m_scaleWeights[curArea] = (float)m_weights[curArea] / (float)(m_areas[curArea].width * m_areas[curArea].height);
+    }
+}
+
+bool CvHaarEvaluator::FeatureHaar::eval(const Mat& image, Rect /*ROI*/, float* result) const
+{
+    *result = 0.0f;
+
+    for (int curArea = 0; curArea < m_numAreas; curArea++)
+    {
+        *result += (float)getSum(image, Rect(m_areas[curArea].x, m_areas[curArea].y, m_areas[curArea].width, m_areas[curArea].height))
+                * m_scaleWeights[curArea];
+    }
+
+    /*
+   if( image->getUseVariance() )
+   {
+   float variance = (float) image->getVariance( ROI );
+   *result /= variance;
+   }
+   */
+
+    return true;
+}
+
+float CvHaarEvaluator::FeatureHaar::getSum(const Mat& image, Rect imageROI) const
+{
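+    //assumes 'image' is an integral image (CV_32S for 8-bit sources, CV_32F/CV_64F
+    //otherwise): the rectangle sum is II(y2,x2) - II(y1,x2) - II(y2,x1) + II(y1,x1)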
+    // left upper Origin
+    int OriginX = imageROI.x;
+    int OriginY = imageROI.y;
+
+    // Check and fix width and height
+    int Width = imageROI.width;
+    int Height = imageROI.height;
+
+    if (OriginX + Width >= image.cols - 1)
+        Width = (image.cols - 1) - OriginX;
+    if (OriginY + Height >= image.rows - 1)
+        Height = (image.rows - 1) - OriginY;
+
+    float value = 0;
+    int depth = image.depth();
+
+    if (depth == CV_8U || depth == CV_32S)
+        value = static_cast<float>(image.at<int>(OriginY + Height, OriginX + Width) + image.at<int>(OriginY, OriginX) - image.at<int>(OriginY, OriginX + Width)
+                - image.at<int>(OriginY + Height, OriginX));
+    else if (depth == CV_64F)
+        value = static_cast<float>(image.at<double>(OriginY + Height, OriginX + Width) + image.at<double>(OriginY, OriginX)
+                - image.at<double>(OriginY, OriginX + Width) - image.at<double>(OriginY + Height, OriginX));
+    else if (depth == CV_32F)
+        value = static_cast<float>(image.at<float>(OriginY + Height, OriginX + Width) + image.at<float>(OriginY, OriginX) - image.at<float>(OriginY, OriginX + Width)
+                - image.at<float>(OriginY + Height, OriginX));
+
+    return value;
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracking_online_mil.cpp b/modules/video/src/tracking/detail/tracking_online_mil.cpp
new file mode 100644
index 0000000000..c9472aa947
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracking_online_mil.cpp
@@ -0,0 +1,356 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../../precomp.hpp"
+#include "tracking_online_mil.hpp"
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+#define sign(s) (((s) > 0) ? 1 : (((s) < 0) ? -1 : 0))
+
+template <class T>
+class SortableElementRev
+{
+public:
+    T _val;
+    int _ind;
+    SortableElementRev()
+        : _val(), _ind(0)
+    {
+    }
+    SortableElementRev(T val, int ind)
+    {
+        _val = val;
+        _ind = ind;
+    }
+    bool operator<(const SortableElementRev<T>& b) const
+    {
+        return (_val < b._val);
+    }
+};
+
+static bool CompareSortableElementRev(const SortableElementRev<float>& i, const SortableElementRev<float>& j)
+{
+    return i._val < j._val;
+}
+
+template <class T>
+void sort_order_des(std::vector<T>& v, std::vector<int>& order)
+{
+    uint n = (uint)v.size();
+    std::vector<SortableElementRev<T>> v2;
+    v2.resize(n);
+    order.clear();
+    order.resize(n);
+    for (uint i = 0; i < n; i++)
+    {
+        v2[i]._ind = i;
+        v2[i]._val = v[i];
+    }
+    //std::sort( v2.begin(), v2.end() );
+    std::sort(v2.begin(), v2.end(), CompareSortableElementRev);
+    for (uint i = 0; i < n; i++)
+    {
+        order[i] = v2[i]._ind;
+        v[i] = v2[i]._val;
+    }
+}
+
+//implementations for strong classifier
+
+ClfMilBoost::Params::Params()
+{
+    _numSel = 50;
+    _numFeat = 250;
+    _lRate = 0.85f;
+}
+
+ClfMilBoost::ClfMilBoost()
+    : _numsamples(0)
+    , _counter(0)
+{
+    // nothing: _myParams is default-constructed, real parameters arrive via init()
+}
+
+ClfMilBoost::~ClfMilBoost()
+{
+    _selectors.clear();
+    for (size_t i = 0; i < _weakclf.size(); i++)
+        delete _weakclf.at(i);
+}
+
+void ClfMilBoost::init(const ClfMilBoost::Params& parameters)
+{
+    _myParams = parameters;
+    _numsamples = 0;
+
+    //_ftrs = Ftr::generate( _myParams->_ftrParams, _myParams->_numFeat );
+    // if( params->_storeFtrHistory )
+    //  Ftr::toViz( _ftrs, "haarftrs" );
+    _weakclf.resize(_myParams._numFeat);
+    for (int k = 0; k < _myParams._numFeat; k++)
+    {
+        _weakclf[k] = new ClfOnlineStump(k);
+        _weakclf[k]->_lRate = _myParams._lRate;
+    }
+    _counter = 0;
+}
+
+void ClfMilBoost::update(const Mat& posx, const Mat& negx)
+{
+    int numneg = negx.rows;
+    int numpos = posx.rows;
+
+    // compute ftrs
+    //if( !posx.ftrsComputed() )
+    //  Ftr::compute( posx, _ftrs );
+    //if( !negx.ftrsComputed() )
+    //  Ftr::compute( negx, _ftrs );
+
+    // initialize H
+    std::vector<float> Hpos(posx.rows, 0.0f);
+    std::vector<float> Hneg(negx.rows, 0.0f);
+
+    _selectors.clear();
+    std::vector<float> posw(posx.rows), negw(negx.rows);
+    std::vector<std::vector<float>> pospred(_weakclf.size()), negpred(_weakclf.size());
+
+    // train all weak classifiers without weights
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+    for (int m = 0; m < _myParams._numFeat; m++)
+    {
+        _weakclf[m]->update(posx, negx);
+        pospred[m] = _weakclf[m]->classifySetF(posx);
+        negpred[m] = _weakclf[m]->classifySetF(negx);
+    }
+
+    // pick the best features
+    for (int s = 0; s < _myParams._numSel; s++)
+    {
+
+        // compute errors/likl for all weak clfs
+        std::vector<float> poslikl(_weakclf.size(), 1.0f), neglikl(_weakclf.size()), likl(_weakclf.size());
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+        for (int w = 0; w < (int)_weakclf.size(); w++)
+        {
+            float lll = 1.0f;
+            for (int j = 0; j < numpos; j++)
+                lll *= (1 - sigmoid(Hpos[j] + pospred[w][j]));
+            poslikl[w] = (float)-log(1 - lll + 1e-5);
+
+            lll = 0.0f;
+            for (int j = 0; j < numneg; j++)
+                lll += (float)-log(1e-5f + 1 - sigmoid(Hneg[j] + negpred[w][j]));
+            neglikl[w] = lll;
+
+            likl[w] = poslikl[w] / numpos + neglikl[w] / numneg;
+        }
+
+        // pick best weak clf
+        std::vector<int> order;
+        sort_order_des(likl, order);
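+        //note: despite its name, sort_order_des sorts ascending, so order[0] holds
+        //the stump with the smallest total negative log-likelihood (the best one)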
+
+        // find best weakclf that isn't already included
+        for (uint k = 0; k < order.size(); k++)
+            if (std::count(_selectors.begin(), _selectors.end(), order[k]) == 0)
+            {
+                _selectors.push_back(order[k]);
+                break;
+            }
+
+            // update H = H + h_m
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+        for (int k = 0; k < posx.rows; k++)
+            Hpos[k] += pospred[_selectors[s]][k];
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+        for (int k = 0; k < negx.rows; k++)
+            Hneg[k] += negpred[_selectors[s]][k];
+    }
+
+    //if( _myParams->_storeFtrHistory )
+    //for ( uint j = 0; j < _selectors.size(); j++ )
+    // _ftrHist( _selectors[j], _counter ) = 1.0f / ( j + 1 );
+
+    _counter++;
+}
+
+std::vector<float> ClfMilBoost::classify(const Mat& x, bool logR)
+{
+    int numsamples = x.rows;
+    std::vector<float> res(numsamples);
+    std::vector<float> tr;
+
+    for (uint w = 0; w < _selectors.size(); w++)
+    {
+        tr = _weakclf[_selectors[w]]->classifySetF(x);
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+        for (int j = 0; j < numsamples; j++)
+        {
+            res[j] += tr[j];
+        }
+    }
+
+    // return probabilities or log odds ratio
+    if (!logR)
+    {
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+        for (int j = 0; j < (int)res.size(); j++)
+        {
+            res[j] = sigmoid(res[j]);
+        }
+    }
+
+    return res;
+}
+
+//implementations for weak classifier
+
+ClfOnlineStump::ClfOnlineStump()
+    : _mu0(0), _mu1(0), _sig0(0), _sig1(0)
+    , _q(0)
+    , _s(0)
+    , _log_n1(0), _log_n0(0)
+    , _e1(0), _e0(0)
+    , _lRate(0)
+{
+    _trained = false;
+    _ind = -1;
+    init();
+}
+
+ClfOnlineStump::ClfOnlineStump(int ind)
+    : _mu0(0), _mu1(0), _sig0(0), _sig1(0)
+    , _q(0)
+    , _s(0)
+    , _log_n1(0), _log_n0(0)
+    , _e1(0), _e0(0)
+    , _lRate(0)
+{
+    _trained = false;
+    _ind = ind;
+    init();
+}
+void ClfOnlineStump::init()
+{
+    _mu0 = 0;
+    _mu1 = 0;
+    _sig0 = 1;
+    _sig1 = 1;
+    _lRate = 0.85f;
+    _trained = false;
+}
+
+void ClfOnlineStump::update(const Mat& posx, const Mat& negx, const Mat_<float>& /*posw*/, const Mat_<float>& /*negw*/)
+{
+    //std::cout << " ClfOnlineStump::update" << _ind << std::endl;
+    float posmu = 0.0, negmu = 0.0;
+    if (posx.cols > 0)
+        posmu = float(mean(posx.col(_ind))[0]);
+    if (negx.cols > 0)
+        negmu = float(mean(negx.col(_ind))[0]);
+
+    if (_trained)
+    {
+        if (posx.cols > 0)
+        {
+            _mu1 = (_lRate * _mu1 + (1 - _lRate) * posmu);
+            cv::Mat diff = posx.col(_ind) - _mu1;
+            _sig1 = _lRate * _sig1 + (1 - _lRate) * float(mean(diff.mul(diff))[0]);
+        }
+        if (negx.cols > 0)
+        {
+            _mu0 = (_lRate * _mu0 + (1 - _lRate) * negmu);
+            cv::Mat diff = negx.col(_ind) - _mu0;
+            _sig0 = _lRate * _sig0 + (1 - _lRate) * float(mean(diff.mul(diff))[0]);
+        }
+
+        _q = (_mu1 - _mu0) / 2;
+        _s = sign(_mu1 - _mu0);
+        _log_n0 = std::log(float(1.0f / pow(_sig0, 0.5f)));
+        _log_n1 = std::log(float(1.0f / pow(_sig1, 0.5f)));
+        //_e1 = -1.0f/(2.0f*_sig1+1e-99f);
+        //_e0 = -1.0f/(2.0f*_sig0+1e-99f);
+        _e1 = -1.0f / (2.0f * _sig1 + std::numeric_limits<float>::min());
+        _e0 = -1.0f / (2.0f * _sig0 + std::numeric_limits<float>::min());
+    }
+    else
+    {
+        _trained = true;
+        if (posx.cols > 0)
+        {
+            _mu1 = posmu;
+            cv::Scalar scal_mean, scal_std_dev;
+            cv::meanStdDev(posx.col(_ind), scal_mean, scal_std_dev);
+            _sig1 = float(scal_std_dev[0]) * float(scal_std_dev[0]) + 1e-9f;
+        }
+
+        if (negx.cols > 0)
+        {
+            _mu0 = negmu;
+            cv::Scalar scal_mean, scal_std_dev;
+            cv::meanStdDev(negx.col(_ind), scal_mean, scal_std_dev);
+            _sig0 = float(scal_std_dev[0]) * float(scal_std_dev[0]) + 1e-9f;
+        }
+
+        _q = (_mu1 - _mu0) / 2;
+        _s = sign(_mu1 - _mu0);
+        _log_n0 = std::log(float(1.0f / pow(_sig0, 0.5f)));
+        _log_n1 = std::log(float(1.0f / pow(_sig1, 0.5f)));
+        //_e1 = -1.0f/(2.0f*_sig1+1e-99f);
+        //_e0 = -1.0f/(2.0f*_sig0+1e-99f);
+        _e1 = -1.0f / (2.0f * _sig1 + std::numeric_limits<float>::min());
+        _e0 = -1.0f / (2.0f * _sig0 + std::numeric_limits<float>::min());
+    }
+}
+
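+//each stump models its feature with one Gaussian per class (_mu0/_sig0 negative,
+//_mu1/_sig1 positive); classification compares the two Gaussian log-densities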
+bool ClfOnlineStump::classify(const Mat& x, int i)
+{
+    float xx = x.at<float>(i, _ind);
+    double log_p0 = (xx - _mu0) * (xx - _mu0) * _e0 + _log_n0;
+    double log_p1 = (xx - _mu1) * (xx - _mu1) * _e1 + _log_n1;
+    return log_p1 > log_p0;
+}
+
+float ClfOnlineStump::classifyF(const Mat& x, int i)
+{
+    float xx = x.at<float>(i, _ind);
+    double log_p0 = (xx - _mu0) * (xx - _mu0) * _e0 + _log_n0;
+    double log_p1 = (xx - _mu1) * (xx - _mu1) * _e1 + _log_n1;
+    return float(log_p1 - log_p0);
+}
+
+inline std::vector<float> ClfOnlineStump::classifySetF(const Mat& x)
+{
+    std::vector<float> res(x.rows);
+
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+    for (int k = 0; k < (int)res.size(); k++)
+    {
+        res[k] = classifyF(x, k);
+    }
+    return res;
+}
+
+}}}  // namespace cv::detail::tracking
diff --git a/modules/video/src/tracking/detail/tracking_online_mil.hpp b/modules/video/src/tracking/detail/tracking_online_mil.hpp
new file mode 100644
index 0000000000..b08a628296
--- /dev/null
+++ b/modules/video/src/tracking/detail/tracking_online_mil.hpp
@@ -0,0 +1,79 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_VIDEO_DETAIL_TRACKING_ONLINE_MIL_HPP
+#define OPENCV_VIDEO_DETAIL_TRACKING_ONLINE_MIL_HPP
+
+#include <limits>
+
+namespace cv {
+namespace detail {
+inline namespace tracking {
+
+//! @addtogroup tracking_detail
+//! @{
+
+//TODO based on the original implementation
+//http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml
+
+class ClfOnlineStump;
+
+class CV_EXPORTS ClfMilBoost
+{
+public:
+    struct CV_EXPORTS Params
+    {
+        Params();
+        int _numSel;
+        int _numFeat;
+        float _lRate;
+    };
+
+    ClfMilBoost();
+    ~ClfMilBoost();
+    void init(const ClfMilBoost::Params& parameters = ClfMilBoost::Params());
+    void update(const Mat& posx, const Mat& negx);
+    std::vector<float> classify(const Mat& x, bool logR = true);
+
+    inline float sigmoid(float x)
+    {
+        return 1.0f / (1.0f + exp(-x));
+    }
+
+private:
+    uint _numsamples;
+    ClfMilBoost::Params _myParams;
+    std::vector<int> _selectors;
+    std::vector<ClfOnlineStump*> _weakclf;
+    uint _counter;
+};
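+
+/* Usage sketch (illustrative only; posx/negx/x are CV_32F matrices with one row
+ * per sample and one column per feature):
+ *
+ *   ClfMilBoost boost;
+ *   boost.init();              // default Params: pick 50 stumps out of 250
+ *   boost.update(posx, negx);  // one online MIL boosting round
+ *   std::vector<float> p = boost.classify(x, false);  // per-row probabilities
+ */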
+
+class ClfOnlineStump
+{
+public:
+    float _mu0, _mu1, _sig0, _sig1;
+    float _q;
+    int _s;
+    float _log_n1, _log_n0;
+    float _e1, _e0;
+    float _lRate;
+
+    ClfOnlineStump();
+    ClfOnlineStump(int ind);
+    void init();
+    void update(const Mat& posx, const Mat& negx, const cv::Mat_<float>& posw = cv::Mat_<float>(), const cv::Mat_<float>& negw = cv::Mat_<float>());
+    bool classify(const Mat& x, int i);
+    float classifyF(const Mat& x, int i);
+    std::vector<float> classifySetF(const Mat& x);
+
+private:
+    bool _trained;
+    int _ind;
+};
+
+//! @}
+
+}}}  // namespace cv::detail::tracking
+
+#endif
diff --git a/modules/video/src/tracking/tracker.cpp b/modules/video/src/tracking/tracker.cpp
new file mode 100644
index 0000000000..ef2f416a4b
--- /dev/null
+++ b/modules/video/src/tracking/tracker.cpp
@@ -0,0 +1,19 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../precomp.hpp"
+
+namespace cv {
+
+Tracker::Tracker()
+{
+    // nothing
+}
+
+Tracker::~Tracker()
+{
+    // nothing
+}
+
+}  // namespace cv
diff --git a/modules/video/src/tracking/tracker_goturn.cpp b/modules/video/src/tracking/tracker_goturn.cpp
new file mode 100644
index 0000000000..a19f64994a
--- /dev/null
+++ b/modules/video/src/tracking/tracker_goturn.cpp
@@ -0,0 +1,140 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../precomp.hpp"
+
+#ifdef HAVE_OPENCV_DNN
+#include "opencv2/dnn.hpp"
+#endif
+
+namespace cv {
+
+TrackerGOTURN::TrackerGOTURN()
+{
+    // nothing
+}
+
+TrackerGOTURN::~TrackerGOTURN()
+{
+    // nothing
+}
+
+TrackerGOTURN::Params::Params()
+{
+    modelTxt = "goturn.prototxt";
+    modelBin = "goturn.caffemodel";
+}
+
+#ifdef HAVE_OPENCV_DNN
+
+class TrackerGOTURNImpl : public TrackerGOTURN
+{
+public:
+    TrackerGOTURNImpl(const TrackerGOTURN::Params& parameters)
+        : params(parameters)
+    {
+        // Load GOTURN architecture from *.prototxt and pretrained weights from *.caffemodel
+        net = dnn::readNetFromCaffe(params.modelTxt, params.modelBin);
+        CV_Assert(!net.empty());
+    }
+
+    void init(InputArray image, const Rect& boundingBox) CV_OVERRIDE;
+    bool update(InputArray image, Rect& boundingBox) CV_OVERRIDE;
+
+    void setBoundingBox(Rect boundingBox)
+    {
+        if (image_.empty())
+            CV_Error(Error::StsInternal, "Set image first");
+        boundingBox_ = boundingBox & Rect(Point(0, 0), image_.size());
+    }
+
+    TrackerGOTURN::Params params;
+
+    dnn::Net net;
+    Rect boundingBox_;
+    Mat image_;
+};
+
+void TrackerGOTURNImpl::init(InputArray image, const Rect& boundingBox)
+{
+    image_ = image.getMat().clone();
+    setBoundingBox(boundingBox);
+}
+
+bool TrackerGOTURNImpl::update(InputArray image, Rect& boundingBox)
+{
+    int INPUT_SIZE = 227;
+    //GOTURN regresses the current bounding box from the previous frame & box (kept in the model) and the current frame
+    InputArray curFrame = image;
+    Mat prevFrame = image_;
+    Rect2d prevBB = boundingBox_;
+    Rect curBB;
+
+    float padTargetPatch = 2.0f;
+    Rect2f searchPatchRect, targetPatchRect;
+    Point2f currCenter, prevCenter;
+    Mat prevFramePadded, curFramePadded;
+    Mat searchPatch, targetPatch;
+
+    prevCenter.x = (float)(prevBB.x + prevBB.width / 2);
+    prevCenter.y = (float)(prevBB.y + prevBB.height / 2);
+
+    targetPatchRect.width = (float)(prevBB.width * padTargetPatch);
+    targetPatchRect.height = (float)(prevBB.height * padTargetPatch);
+    targetPatchRect.x = (float)(prevCenter.x - prevBB.width * padTargetPatch / 2.0 + targetPatchRect.width);
+    targetPatchRect.y = (float)(prevCenter.y - prevBB.height * padTargetPatch / 2.0 + targetPatchRect.height);
+
+    targetPatchRect.width = std::min(targetPatchRect.width, (float)prevFrame.cols);
+    targetPatchRect.height = std::min(targetPatchRect.height, (float)prevFrame.rows);
+    targetPatchRect.x = std::max(-prevFrame.cols * 0.5f, std::min(targetPatchRect.x, prevFrame.cols * 1.5f));
+    targetPatchRect.y = std::max(-prevFrame.rows * 0.5f, std::min(targetPatchRect.y, prevFrame.rows * 1.5f));
+
+    copyMakeBorder(prevFrame, prevFramePadded, (int)targetPatchRect.height, (int)targetPatchRect.height, (int)targetPatchRect.width, (int)targetPatchRect.width, BORDER_REPLICATE);
+    targetPatch = prevFramePadded(targetPatchRect).clone();
+
+    copyMakeBorder(curFrame, curFramePadded, (int)targetPatchRect.height, (int)targetPatchRect.height, (int)targetPatchRect.width, (int)targetPatchRect.width, BORDER_REPLICATE);
+    searchPatch = curFramePadded(targetPatchRect).clone();
+
+    // Preprocess
+    // Resize
+    resize(targetPatch, targetPatch, Size(INPUT_SIZE, INPUT_SIZE), 0, 0, INTER_LINEAR_EXACT);
+    resize(searchPatch, searchPatch, Size(INPUT_SIZE, INPUT_SIZE), 0, 0, INTER_LINEAR_EXACT);
+
+    // Convert to Float type and subtract mean
+    Mat targetBlob = dnn::blobFromImage(targetPatch, 1.0f, Size(), Scalar::all(128), false);
+    Mat searchBlob = dnn::blobFromImage(searchPatch, 1.0f, Size(), Scalar::all(128), false);
+
+    net.setInput(targetBlob, "data1");
+    net.setInput(searchBlob, "data2");
+
+    Mat resMat = net.forward("scale").reshape(1, 1);
+
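+    //the network regresses (x1, y1, x2, y2) in the 227x227 patch frame; rescale to
+    //the patch size and undo the padding offset baked into targetPatchRect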
+    curBB.x = cvRound(targetPatchRect.x + (resMat.at<float>(0) * targetPatchRect.width / INPUT_SIZE) - targetPatchRect.width);
+    curBB.y = cvRound(targetPatchRect.y + (resMat.at<float>(1) * targetPatchRect.height / INPUT_SIZE) - targetPatchRect.height);
+    curBB.width = cvRound((resMat.at<float>(2) - resMat.at<float>(0)) * targetPatchRect.width / INPUT_SIZE);
+    curBB.height = cvRound((resMat.at<float>(3) - resMat.at<float>(1)) * targetPatchRect.height / INPUT_SIZE);
+
+    // Predicted BB
+    boundingBox = curBB & Rect(Point(0, 0), image_.size());
+
+    // Set new model image and BB from current frame
+    image_ = image.getMat().clone();
+    setBoundingBox(curBB);
+    return true;
+}
+
+Ptr<TrackerGOTURN> TrackerGOTURN::create(const TrackerGOTURN::Params& parameters)
+{
+    return makePtr<TrackerGOTURNImpl>(parameters);
+}
+
+#else  // HAVE_OPENCV_DNN
+Ptr<TrackerGOTURN> TrackerGOTURN::create(const TrackerGOTURN::Params& parameters)
+{
+    CV_UNUSED(parameters);
+    CV_Error(cv::Error::StsNotImplemented, "TrackerGOTURN requires OpenCV to be built with the opencv_dnn module");
+}
+#endif  // HAVE_OPENCV_DNN
+
+}  // namespace cv
diff --git a/modules/video/src/tracking/tracker_mil.cpp b/modules/video/src/tracking/tracker_mil.cpp
new file mode 100644
index 0000000000..ffe1be8483
--- /dev/null
+++ b/modules/video/src/tracking/tracker_mil.cpp
@@ -0,0 +1,227 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../precomp.hpp"
+#include "detail/tracker_mil_model.hpp"
+
+#include "detail/tracker_feature_haar.impl.hpp"
+
+namespace cv {
+inline namespace tracking {
+namespace impl {
+
+using cv::detail::tracking::internal::TrackerFeatureHAAR;
+
+
+class TrackerMILImpl CV_FINAL : public TrackerMIL
+{
+public:
+    TrackerMILImpl(const TrackerMIL::Params& parameters);
+
+    virtual void init(InputArray image, const Rect& boundingBox) CV_OVERRIDE;
+    virtual bool update(InputArray image, Rect& boundingBox) CV_OVERRIDE;
+
+    void compute_integral(const Mat& img, Mat& ii_img);
+
+    TrackerMIL::Params params;
+
+    Ptr<TrackerMILModel> model;
+    Ptr<TrackerSampler> sampler;
+    Ptr<TrackerFeatureSet> featureSet;
+};
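+
+/* Typical use through the public factory (sketch; 'frame', 'roi' and readNextFrame()
+ * stand in for the application's capture loop):
+ *
+ *   Ptr<TrackerMIL> tracker = TrackerMIL::create();
+ *   tracker->init(frame, roi);
+ *   while (readNextFrame(frame))
+ *   {
+ *       Rect box;
+ *       if (tracker->update(frame, box))
+ *           rectangle(frame, box, Scalar(0, 255, 0));
+ *   }
+ */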
+
+TrackerMILImpl::TrackerMILImpl(const TrackerMIL::Params& parameters)
+    : params(parameters)
+{
+    // nothing
+}
+
+void TrackerMILImpl::compute_integral(const Mat& img, Mat& ii_img)
+{
+    Mat ii;
+    std::vector<Mat> ii_imgs;
+    integral(img, ii, CV_32F);  // FIXIT split first
+    split(ii, ii_imgs);
+    ii_img = ii_imgs[0];
+}
+
+void TrackerMILImpl::init(InputArray image, const Rect& boundingBox)
+{
+    sampler = makePtr<TrackerSampler>();
+    featureSet = makePtr<TrackerFeatureSet>();
+
+    Mat intImage;
+    compute_integral(image.getMat(), intImage);
+    TrackerSamplerCSC::Params CSCparameters;
+    CSCparameters.initInRad = params.samplerInitInRadius;
+    CSCparameters.searchWinSize = params.samplerSearchWinSize;
+    CSCparameters.initMaxNegNum = params.samplerInitMaxNegNum;
+    CSCparameters.trackInPosRad = params.samplerTrackInRadius;
+    CSCparameters.trackMaxPosNum = params.samplerTrackMaxPosNum;
+    CSCparameters.trackMaxNegNum = params.samplerTrackMaxNegNum;
+
+    Ptr<TrackerSamplerAlgorithm> CSCSampler = makePtr<TrackerSamplerCSC>(CSCparameters);
+    CV_Assert(sampler->addTrackerSamplerAlgorithm(CSCSampler));
+
+    //or add CSC sampler with default parameters
+    //sampler->addTrackerSamplerAlgorithm( "CSC" );
+
+    //Positive sampling
+    CSCSampler.staticCast<TrackerSamplerCSC>()->setMode(TrackerSamplerCSC::MODE_INIT_POS);
+    sampler->sampling(intImage, boundingBox);
+    std::vector<Mat> posSamples = sampler->getSamples();
+
+    //Negative sampling
+    CSCSampler.staticCast<TrackerSamplerCSC>()->setMode(TrackerSamplerCSC::MODE_INIT_NEG);
+    sampler->sampling(intImage, boundingBox);
+    std::vector<Mat> negSamples = sampler->getSamples();
+
+    CV_Assert(!posSamples.empty());
+    CV_Assert(!negSamples.empty());
+
+    //compute HAAR features
+    TrackerFeatureHAAR::Params HAARparameters;
+    HAARparameters.numFeatures = params.featureSetNumFeatures;
+    HAARparameters.rectSize = Size((int)boundingBox.width, (int)boundingBox.height);
+    HAARparameters.isIntegral = true;
+    Ptr<TrackerFeature> trackerFeature = makePtr<TrackerFeatureHAAR>(HAARparameters);
+    featureSet->addTrackerFeature(trackerFeature);
+
+    featureSet->extraction(posSamples);
+    const std::vector<Mat> posResponse = featureSet->getResponses();
+
+    featureSet->extraction(negSamples);
+    const std::vector<Mat> negResponse = featureSet->getResponses();
+
+    model = makePtr<TrackerMILModel>(boundingBox);
+    Ptr<TrackerStateEstimatorMILBoosting> stateEstimator = makePtr<TrackerStateEstimatorMILBoosting>(params.featureSetNumFeatures);
+    model->setTrackerStateEstimator(stateEstimator);
+
+    //Run model estimation and update
+    model.staticCast<TrackerMILModel>()->setMode(TrackerMILModel::MODE_POSITIVE, posSamples);
+    model->modelEstimation(posResponse);
+    model.staticCast<TrackerMILModel>()->setMode(TrackerMILModel::MODE_NEGATIVE, negSamples);
+    model->modelEstimation(negResponse);
+    model->modelUpdate();
+}
+
+bool TrackerMILImpl::update(InputArray image, Rect& boundingBox)
+{
+    Mat intImage;
+    compute_integral(image.getMat(), intImage);
+
+    //get the last location [AAM] X(k-1)
+    Ptr<TrackerTargetState> lastLocation = model->getLastTargetState();
+    Rect lastBoundingBox((int)lastLocation->getTargetPosition().x, (int)lastLocation->getTargetPosition().y, lastLocation->getTargetWidth(),
+            lastLocation->getTargetHeight());
+
+    //sampling new frame based on last location
+    auto& samplers = sampler->getSamplers();
+    CV_Assert(!samplers.empty());
+    CV_Assert(samplers[0]);
+    samplers[0].staticCast<TrackerSamplerCSC>()->setMode(TrackerSamplerCSC::MODE_DETECT);
+    sampler->sampling(intImage, lastBoundingBox);
+    std::vector<Mat> detectSamples = sampler->getSamples();
+    if (detectSamples.empty())
+        return false;
+
+    /*//TODO debug samples
+   Mat f;
+   image.copyTo(f);
+
+   for( size_t i = 0; i < detectSamples.size(); i=i+10 )
+   {
+   Size sz;
+   Point off;
+   detectSamples.at(i).locateROI(sz, off);
+   rectangle(f, Rect(off.x,off.y,detectSamples.at(i).cols,detectSamples.at(i).rows), Scalar(255,0,0), 1);
+   }*/
+
+    //extract features from new samples
+    featureSet->extraction(detectSamples);
+    std::vector<Mat> response = featureSet->getResponses();
+
+    //predict new location
+    ConfidenceMap cmap;
+    model.staticCast<TrackerMILModel>()->setMode(TrackerMILModel::MODE_ESTIMATON, detectSamples);
+    model.staticCast<TrackerMILModel>()->responseToConfidenceMap(response, cmap);
+    model->getTrackerStateEstimator().staticCast<TrackerStateEstimatorMILBoosting>()->setCurrentConfidenceMap(cmap);
+
+    if (!model->runStateEstimator())
+    {
+        return false;
+    }
+
+    Ptr<TrackerTargetState> currentState = model->getLastTargetState();
+    boundingBox = Rect((int)currentState->getTargetPosition().x, (int)currentState->getTargetPosition().y, currentState->getTargetWidth(),
+            currentState->getTargetHeight());
+
+    /*//TODO debug
+   rectangle(f, lastBoundingBox, Scalar(0,255,0), 1);
+   rectangle(f, boundingBox, Scalar(0,0,255), 1);
+   imshow("f", f);
+   //waitKey( 0 );*/
+
+    //sampling new frame based on new location
+    //Positive sampling
+    samplers[0].staticCast<TrackerSamplerCSC>()->setMode(TrackerSamplerCSC::MODE_INIT_POS);
+    sampler->sampling(intImage, boundingBox);
+    std::vector<Mat> posSamples = sampler->getSamples();
+
+    //Negative sampling
+    samplers[0].staticCast<TrackerSamplerCSC>()->setMode(TrackerSamplerCSC::MODE_INIT_NEG);
+    sampler->sampling(intImage, boundingBox);
+    std::vector<Mat> negSamples = sampler->getSamples();
+
+    if (posSamples.empty() || negSamples.empty())
+        return false;
+
+    //extract features
+    featureSet->extraction(posSamples);
+    std::vector<Mat> posResponse = featureSet->getResponses();
+
+    featureSet->extraction(negSamples);
+    std::vector<Mat> negResponse = featureSet->getResponses();
+
+    //model estimate
+    model.staticCast<TrackerMILModel>()->setMode(TrackerMILModel::MODE_POSITIVE, posSamples);
+    model->modelEstimation(posResponse);
+    model.staticCast<TrackerMILModel>()->setMode(TrackerMILModel::MODE_NEGATIVE, negSamples);
+    model->modelEstimation(negResponse);
+
+    //model update
+    model->modelUpdate();
+
+    return true;
+}
+
+}}  // namespace tracking::impl
+
+TrackerMIL::Params::Params()
+{
+    samplerInitInRadius = 3;
+    samplerSearchWinSize = 25;
+    samplerInitMaxNegNum = 65;
+    samplerTrackInRadius = 4;
+    samplerTrackMaxPosNum = 100000;
+    samplerTrackMaxNegNum = 65;
+    featureSetNumFeatures = 250;
+}
+
+TrackerMIL::TrackerMIL()
+{
+    // nothing
+}
+
+TrackerMIL::~TrackerMIL()
+{
+    // nothing
+}
+
+Ptr<TrackerMIL> TrackerMIL::create(const TrackerMIL::Params& parameters)
+{
+    return makePtr<tracking::impl::TrackerMILImpl>(parameters);
+}
+
+}  // namespace cv
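
The MIL tracker follows the same `create`/`init`/`update` pattern. A short sketch using the `Params` fields defined above (the video path is a placeholder):

```cpp
// Sketch only: tweak a couple of the TrackerMIL::Params fields from this file,
// then run the usual init/update loop.
#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::TrackerMIL::Params params;        // defaults from TrackerMIL::Params::Params()
    params.samplerSearchWinSize = 25;     // CSC search window size
    params.featureSetNumFeatures = 250;   // number of Haar features

    cv::Ptr<cv::TrackerMIL> tracker = cv::TrackerMIL::create(params);

    cv::VideoCapture cap("input.avi");    // placeholder input video
    cv::Mat frame;
    cap >> frame;
    tracker->init(frame, cv::Rect(100, 100, 80, 80));  // initial bounding box

    cv::Rect roi;
    while (cap.read(frame))
    {
        if (!tracker->update(frame, roi)) // false when sampling/estimation fails
            break;
    }
    return 0;
}
```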
diff --git a/modules/video/test/test_main.cpp b/modules/video/test/test_main.cpp
index 93e4d2860e..9968380a17 100644
--- a/modules/video/test/test_main.cpp
+++ b/modules/video/test/test_main.cpp
@@ -7,4 +7,19 @@
     #include <hpx/hpx_main.hpp>
 #endif
 
-CV_TEST_MAIN("cv")
+static
+void initTests()
+{
+    const char* extraTestDataPath =
+#ifdef WINRT
+        NULL;
+#else
+        getenv("OPENCV_DNN_TEST_DATA_PATH");
+#endif
+    if (extraTestDataPath)
+        cvtest::addDataSearchPath(extraTestDataPath);
+
+    cvtest::addDataSearchSubDirectory("");  // override the "cv" prefix below to allow access without "../dnn" path hacks
+}
+
+CV_TEST_MAIN("cv", initTests())
diff --git a/modules/video/test/test_trackers.cpp b/modules/video/test/test_trackers.cpp
new file mode 100644
index 0000000000..7fd0470181
--- /dev/null
+++ b/modules/video/test/test_trackers.cpp
@@ -0,0 +1,97 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "test_precomp.hpp"
+
+//#define DEBUG_TEST
+#ifdef DEBUG_TEST
+#include <opencv2/highgui.hpp>
+#endif
+
+namespace opencv_test { namespace {
+//using namespace cv::tracking;
+
+#define TESTSET_NAMES testing::Values("david", "dudek", "faceocc2")
+
+const string TRACKING_DIR = "tracking";
+const string FOLDER_IMG = "data";
+const string FOLDER_OMIT_INIT = "initOmit";
+
+#include "test_trackers.impl.hpp"
+
+//[TESTDATA]
+PARAM_TEST_CASE(DistanceAndOverlap, string)
+{
+    string dataset;
+    virtual void SetUp()
+    {
+        dataset = GET_PARAM(0);
+    }
+};
+
+TEST_P(DistanceAndOverlap, MIL)
+{
+    TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .65f, NoTransform);
+    test.run();
+}
+
+TEST_P(DistanceAndOverlap, Shifted_Data_MIL)
+{
+    TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .6f, CenterShiftLeft);
+    test.run();
+}
+
+/***************************************************************************************/
+//Tests with scaled initial window
+
+TEST_P(DistanceAndOverlap, Scaled_Data_MIL)
+{
+    TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .7f, Scale_1_1);
+    test.run();
+}
+
+TEST_P(DistanceAndOverlap, GOTURN)
+{
+    std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
+    std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
+    cv::TrackerGOTURN::Params params;
+    params.modelTxt = model;
+    params.modelBin = weights;
+    TrackerTest<Tracker, Rect> test(TrackerGOTURN::create(params), dataset, 35, .35f, NoTransform);
+    test.run();
+}
+
+INSTANTIATE_TEST_CASE_P(Tracking, DistanceAndOverlap, TESTSET_NAMES);
+
+TEST(GOTURN, memory_usage)
+{
+    cv::Rect roi(145, 70, 85, 85);
+
+    std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
+    std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
+    cv::TrackerGOTURN::Params params;
+    params.modelTxt = model;
+    params.modelBin = weights;
+    cv::Ptr<Tracker> tracker = TrackerGOTURN::create(params);
+
+    string inputVideo = cvtest::findDataFile("tracking/david/data/david.webm");
+    cv::VideoCapture video(inputVideo);
+    ASSERT_TRUE(video.isOpened()) << inputVideo;
+
+    cv::Mat frame;
+    video >> frame;
+    ASSERT_FALSE(frame.empty()) << inputVideo;
+    tracker->init(frame, roi);
+    string ground_truth_bb;
+    for (int nframes = 0; nframes < 15; ++nframes)
+    {
+        std::cout << "Frame: " << nframes << std::endl;
+        video >> frame;
+        bool res = tracker->update(frame, roi);
+        ASSERT_TRUE(res);
+        std::cout << "Predicted ROI: " << roi << std::endl;
+    }
+}
+
+}}  // namespace opencv_test::<anonymous>
diff --git a/modules/video/test/test_trackers.impl.hpp b/modules/video/test/test_trackers.impl.hpp
new file mode 100644
index 0000000000..7fce94e748
--- /dev/null
+++ b/modules/video/test/test_trackers.impl.hpp
@@ -0,0 +1,368 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+/*
+ * The Evaluation Methodologies are partially based on:
+ * ====================================================================================================================
+ *  [OTB] Y. Wu, J. Lim, and M.-H. Yang, "Online object tracking: A benchmark," in Computer Vision and Pattern Recognition (CVPR), 2013
+ *
+ */
+
+enum BBTransformations
+{
+    NoTransform = 0,
+    CenterShiftLeft = 1,
+    CenterShiftRight = 2,
+    CenterShiftUp = 3,
+    CenterShiftDown = 4,
+    CornerShiftTopLeft = 5,
+    CornerShiftTopRight = 6,
+    CornerShiftBottomLeft = 7,
+    CornerShiftBottomRight = 8,
+    Scale_0_8 = 9,
+    Scale_0_9 = 10,
+    Scale_1_1 = 11,
+    Scale_1_2 = 12
+};
+
+namespace {
+
+std::vector<std::string> splitString(const std::string& s_, const std::string& delimiter)
+{
+    std::string s = s_;
+    std::vector<string> token;
+    size_t pos = 0;
+    while ((pos = s.find(delimiter)) != std::string::npos)
+    {
+        token.push_back(s.substr(0, pos));
+        s.erase(0, pos + delimiter.length());
+    }
+    token.push_back(s);
+    return token;
+}
+
+float calcDistance(const Rect& a, const Rect& b)
+{
+    Point2f p_a((float)(a.x + a.width / 2), (float)(a.y + a.height / 2));
+    Point2f p_b((float)(b.x + b.width / 2), (float)(b.y + b.height / 2));
+    return sqrt(pow(p_a.x - p_b.x, 2) + pow(p_a.y - p_b.y, 2));
+}
+
+float calcOverlap(const Rect& a, const Rect& b)
+{
+    float rectIntersectionArea = (float)(a & b).area();
+    return rectIntersectionArea / (a.area() + b.area() - rectIntersectionArea);
+}
+
+}  // namespace
+
+template <typename Tracker, typename ROI_t = Rect2d>
+class TrackerTest
+{
+public:
+    TrackerTest(const Ptr<Tracker>& tracker, const string& video, float distanceThreshold,
+            float overlapThreshold, int shift = NoTransform, int segmentIdx = 1, int numSegments = 10);
+    ~TrackerTest() {}
+    void run();
+
+protected:
+    void checkDataTest();
+
+    void distanceAndOverlapTest();
+
+    Ptr<Tracker> tracker;
+    string video;
+    std::vector<Rect> bbs;
+    int startFrame;
+    string suffix;
+    string prefix;
+    float overlapThreshold;
+    float distanceThreshold;
+    int segmentIdx;
+    int shift;
+    int numSegments;
+
+    int gtStartFrame;
+    int endFrame;
+    vector<int> validSequence;
+
+private:
+    Rect applyShift(const Rect& bb);
+};
+
+template <typename Tracker, typename ROI_t>
+TrackerTest<Tracker, ROI_t>::TrackerTest(const Ptr<Tracker>& _tracker, const string& _video, float _distanceThreshold,
+        float _overlapThreshold, int _shift, int _segmentIdx, int _numSegments)
+    : tracker(_tracker)
+    , video(_video)
+    , overlapThreshold(_overlapThreshold)
+    , distanceThreshold(_distanceThreshold)
+    , segmentIdx(_segmentIdx)
+    , shift(_shift)
+    , numSegments(_numSegments)
+{
+    // nothing
+}
+
+template <typename Tracker, typename ROI_t>
+Rect TrackerTest<Tracker, ROI_t>::applyShift(const Rect& bb_)
+{
+    Rect bb = bb_;
+    Point center(bb.x + (bb.width / 2), bb.y + (bb.height / 2));
+
+    int xLimit = bb.x + bb.width - 1;
+    int yLimit = bb.y + bb.height - 1;
+
+    int h = 0;
+    int w = 0;
+    float ratio = 1.0f;
+
+    switch (shift)
+    {
+    case CenterShiftLeft:
+        bb.x = bb.x - (int)ceil(0.1 * bb.width);
+        break;
+    case CenterShiftRight:
+        bb.x = bb.x + (int)ceil(0.1 * bb.width);
+        break;
+    case CenterShiftUp:
+        bb.y = bb.y - (int)ceil(0.1 * bb.height);
+        break;
+    case CenterShiftDown:
+        bb.y = bb.y + (int)ceil(0.1 * bb.height);
+        break;
+    case CornerShiftTopLeft:
+        bb.x = (int)cvRound(bb.x - 0.1 * bb.width);
+        bb.y = (int)cvRound(bb.y - 0.1 * bb.height);
+
+        bb.width = xLimit - bb.x + 1;
+        bb.height = yLimit - bb.y + 1;
+        break;
+    case CornerShiftTopRight:
+        xLimit = (int)cvRound(xLimit + 0.1 * bb.width);
+
+        bb.y = (int)cvRound(bb.y - 0.1 * bb.height);
+        bb.width = xLimit - bb.x + 1;
+        bb.height = yLimit - bb.y + 1;
+        break;
+    case CornerShiftBottomLeft:
+        bb.x = (int)cvRound(bb.x - 0.1 * bb.width);
+        yLimit = (int)cvRound(yLimit + 0.1 * bb.height);
+
+        bb.width = xLimit - bb.x + 1;
+        bb.height = yLimit - bb.y + 1;
+        break;
+    case CornerShiftBottomRight:
+        xLimit = (int)cvRound(xLimit + 0.1 * bb.width);
+        yLimit = (int)cvRound(yLimit + 0.1 * bb.height);
+
+        bb.width = xLimit - bb.x + 1;
+        bb.height = yLimit - bb.y + 1;
+        break;
+    case Scale_0_8:
+        ratio = 0.8f;
+        w = (int)(ratio * bb.width);
+        h = (int)(ratio * bb.height);
+
+        bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
+        break;
+    case Scale_0_9:
+        ratio = 0.9f;
+        w = (int)(ratio * bb.width);
+        h = (int)(ratio * bb.height);
+
+        bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
+        break;
+    case Scale_1_1:
+        //scale 1.1
+        ratio = 1.1f;
+        w = (int)(ratio * bb.width);
+        h = (int)(ratio * bb.height);
+
+        bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
+        break;
+    case Scale_1_2:
+        //scale 1.2
+        ratio = 1.2f;
+        w = (int)(ratio * bb.width);
+        h = (int)(ratio * bb.height);
+
+        bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
+        break;
+    default:
+        break;
+    }
+
+    return bb;
+}
+
+template <typename Tracker, typename ROI_t>
+void TrackerTest<Tracker, ROI_t>::distanceAndOverlapTest()
+{
+    bool initialized = false;
+
+    int fc = (startFrame - gtStartFrame);
+
+    bbs.at(fc) = applyShift(bbs.at(fc));
+    Rect currentBBi = bbs.at(fc);
+    ROI_t currentBB(currentBBi);
+    float sumDistance = 0;
+    float sumOverlap = 0;
+
+    string folder = cvtest::TS::ptr()->get_data_path() + "/" + TRACKING_DIR + "/" + video + "/" + FOLDER_IMG;
+    string videoPath = folder + "/" + video + ".webm";
+
+    VideoCapture c;
+    c.open(videoPath);
+    if (!c.isOpened())
+        throw SkipTestException("Can't open video file");
+#if 0
+    c.set(CAP_PROP_POS_FRAMES, startFrame);
+#else
+    if (startFrame)
+        std::cout << "startFrame = " << startFrame << std::endl;
+    for (int i = 0; i < startFrame; i++)
+    {
+        Mat dummy_frame;
+        c >> dummy_frame;
+        ASSERT_FALSE(dummy_frame.empty()) << i << ": " << videoPath;
+    }
+#endif
+
+    for (int frameCounter = startFrame; frameCounter < endFrame; frameCounter++)
+    {
+        Mat frame;
+        c >> frame;
+
+        ASSERT_FALSE(frame.empty()) << "frameCounter=" << frameCounter << " video=" << videoPath;
+        if (!initialized)
+        {
+            tracker->init(frame, currentBB);
+            std::cout << "frame size = " << frame.size() << std::endl;
+            initialized = true;
+        }
+        else
+        {
+            if (frameCounter >= (int)bbs.size())
+                break;
+            tracker->update(frame, currentBB);
+        }
+        float curDistance = calcDistance(currentBB, bbs.at(fc));
+        float curOverlap = calcOverlap(currentBB, bbs.at(fc));
+
+#ifdef DEBUG_TEST
+        Mat result;
+        repeat(frame, 1, 2, result);
+        rectangle(result, currentBB, Scalar(0, 255, 0), 1);
+        Rect roi2(frame.cols, 0, frame.cols, frame.rows);
+        rectangle(result(roi2), bbs.at(fc), Scalar(0, 0, 255), 1);
+        imshow("result", result);
+        waitKey(1);
+#endif
+
+        sumDistance += curDistance;
+        sumOverlap += curOverlap;
+        fc++;
+    }
+
+    float meanDistance = sumDistance / (endFrame - startFrame);
+    float meanOverlap = sumOverlap / (endFrame - startFrame);
+
+    EXPECT_LE(meanDistance, distanceThreshold);
+    EXPECT_GE(meanOverlap, overlapThreshold);
+}
+
+template <typename Tracker, typename ROI_t>
+void TrackerTest<Tracker, ROI_t>::checkDataTest()
+{
+
+    FileStorage fs;
+    fs.open(cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/" + video + ".yml", FileStorage::READ);
+    fs["start"] >> startFrame;
+    fs["prefix"] >> prefix;
+    fs["suffix"] >> suffix;
+    fs.release();
+
+    string gtFile = cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/gt.txt";
+    std::ifstream gt;
+    //open the ground truth
+    gt.open(gtFile.c_str());
+    ASSERT_TRUE(gt.is_open()) << gtFile;
+    string line;
+    int bbCounter = 0;
+    while (getline(gt, line))
+    {
+        bbCounter++;
+    }
+    gt.close();
+
+    int seqLength = bbCounter;
+    for (int i = startFrame; i < seqLength; i++)
+    {
+        validSequence.push_back(i);
+    }
+
+    //exclude from the image sequence the frames where the target is occluded or out of view
+    string omitFile = cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/" + FOLDER_OMIT_INIT + "/" + video + ".txt";
+    std::ifstream omit;
+    omit.open(omitFile.c_str());
+    if (omit.is_open())
+    {
+        string omitLine;
+        while (getline(omit, omitLine))
+        {
+            vector<string> tokens = splitString(omitLine, " ");
+            int s_start = atoi(tokens.at(0).c_str());
+            int s_end = atoi(tokens.at(1).c_str());
+            for (int k = s_start; k <= s_end; k++)
+            {
+                std::vector<int>::iterator position = std::find(validSequence.begin(), validSequence.end(), k);
+                if (position != validSequence.end())
+                    validSequence.erase(position);
+            }
+        }
+    }
+    omit.close();
+    gtStartFrame = startFrame;
+    //compute the start and the end of each segment
+    int numFrame = (int)(validSequence.size() / numSegments);
+    startFrame += (segmentIdx - 1) * numFrame;
+    endFrame = startFrame + numFrame;
+
+    std::ifstream gt2;
+    //open the ground truth
+    gt2.open(gtFile.c_str());
+    ASSERT_TRUE(gt2.is_open()) << gtFile;
+    string line2;
+    int bbCounter2 = 0;
+    while (getline(gt2, line2))
+    {
+        vector<string> tokens = splitString(line2, ",");
+        ASSERT_EQ((size_t)4, tokens.size()) << "Incorrect ground truth file " << gtFile;
+        Rect bb(atoi(tokens.at(0).c_str()), atoi(tokens.at(1).c_str()), atoi(tokens.at(2).c_str()), atoi(tokens.at(3).c_str()));
+
+        bbs.push_back(bb);
+        bbCounter2++;
+    }
+    gt2.close();
+
+    if (segmentIdx == numSegments)
+        endFrame = (int)bbs.size();
+}
+
+template <typename Tracker, typename ROI_t>
+void TrackerTest<Tracker, ROI_t>::run()
+{
+    srand(1);  // FIXIT remove that, ensure that there is no "rand()" in implementation
+
+    ASSERT_TRUE(tracker);
+
+    checkDataTest();
+
+    //check for failure
+    if (::testing::Test::HasFatalFailure())
+        return;
+
+    distanceAndOverlapTest();
+}
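
The overlap criterion above is the usual intersection-over-union. A quick worked example of the same formula that `calcOverlap` implements, with values chosen for illustration:

```cpp
// IoU = |A∩B| / (|A| + |B| - |A∩B|); here 50 / (100 + 100 - 50) = 1/3.
#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    cv::Rect a(0, 0, 10, 10);
    cv::Rect b(5, 0, 10, 10);
    float inter = (float)(a & b).area();                 // 5 * 10 = 50
    float iou = inter / (a.area() + b.area() - inter);   // 50 / 150
    std::cout << "IoU = " << iou << std::endl;           // prints ~0.333333
    return 0;
}
```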
diff --git a/samples/python/tracker.py b/samples/python/tracker.py
new file mode 100644
index 0000000000..f67499cd15
--- /dev/null
+++ b/samples/python/tracker.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+'''
+Tracker demo
+
+USAGE:
+    tracker.py [<video_source>]
+'''
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import sys
+
+import numpy as np
+import cv2 as cv
+
+from video import create_capture, presets
+
+class App(object):
+
+    def initializeTracker(self, image):
+        while True:
+            print('==> Select object ROI for tracker ...')
+            bbox = cv.selectROI('tracking', image)
+            print('ROI: {}'.format(bbox))
+
+            tracker = cv.TrackerMIL_create()
+            try:
+                tracker.init(image, bbox)
+            except Exception as e:
+                print('Unable to initialize the tracker with the requested bounding box. Is there any object?')
+                print(e)
+                print('Try again ...')
+                continue
+
+            return tracker
+
+    def run(self):
+        videoPath = sys.argv[1] if len(sys.argv) >= 2 else 'vtest.avi'
+        camera = create_capture(videoPath, presets['cube'])
+        if not camera.isOpened():
+            sys.exit("Can't open video stream: {}".format(videoPath))
+
+        ok, image = camera.read()
+        if not ok:
+            sys.exit("Can't read first frame")
+        assert image is not None
+
+        cv.namedWindow('tracking')
+        tracker = self.initializeTracker(image)
+
+        print("==> Tracking started. Press 'SPACE' to re-initialize the tracker or 'ESC' to exit...")
+
+        while camera.isOpened():
+            ok, image = camera.read()
+            if not ok:
+                print("Can't read frame")
+                break
+
+            ok, newbox = tracker.update(image)
+            #print(ok, newbox)
+
+            if ok:
+                cv.rectangle(image, newbox, (200,0,0))
+
+            cv.imshow("tracking", image)
+            k = cv.waitKey(1)
+            if k == 32:  # SPACE
+                tracker = self.initializeTracker(image)
+            if k == 27:  # ESC
+                break
+
+        print('Done')
+
+
+if __name__ == '__main__':
+    print(__doc__)
+    App().run()
+    cv.destroyAllWindows()

From 2687a2b6dc0ef58e146b903fd1be2c6ed5f058c5 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 16 Nov 2020 22:31:15 +0000
Subject: [PATCH 119/152] pre: OpenCV 4.5.1 (version++)

---
 .../cross_referencing/tutorial_cross_referencing.markdown     | 4 ++--
 modules/core/include/opencv2/core/version.hpp                 | 4 ++--
 modules/python/package/setup.py                               | 2 +-
 platforms/maven/opencv-it/pom.xml                             | 2 +-
 platforms/maven/opencv/pom.xml                                | 2 +-
 platforms/maven/pom.xml                                       | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown b/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown
index 3c492aaaea..e2948beee9 100644
--- a/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown
+++ b/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown
@@ -46,14 +46,14 @@ Open your Doxyfile using your favorite text editor and search for the key
 `TAGFILES`. Change it as follows:
 
 @code
-TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.5.0
+TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.5.1
 @endcode
 
 If you had other definitions already, you can append the line using a `\`:
 
 @code
 TAGFILES = ./docs/doxygen-tags/libstdc++.tag=https://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen \
-           ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.5.0
+           ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.5.1
 @endcode
 
 Doxygen can now use the information from the tag file to link to the OpenCV
diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp
index 32fc2229c6..436190f8bf 100644
--- a/modules/core/include/opencv2/core/version.hpp
+++ b/modules/core/include/opencv2/core/version.hpp
@@ -7,8 +7,8 @@
 
 #define CV_VERSION_MAJOR    4
 #define CV_VERSION_MINOR    5
-#define CV_VERSION_REVISION 0
-#define CV_VERSION_STATUS   "-dev"
+#define CV_VERSION_REVISION 1
+#define CV_VERSION_STATUS   "-pre"
 
 #define CVAUX_STR_EXP(__A)  #__A
 #define CVAUX_STR(__A)      CVAUX_STR_EXP(__A)
diff --git a/modules/python/package/setup.py b/modules/python/package/setup.py
index f00f683a45..0a50e4e51c 100644
--- a/modules/python/package/setup.py
+++ b/modules/python/package/setup.py
@@ -9,7 +9,7 @@ def main():
     os.chdir(SCRIPT_DIR)
 
     package_name = 'opencv'
-    package_version = os.environ.get('OPENCV_VERSION', '4.5.0')  # TODO
+    package_version = os.environ.get('OPENCV_VERSION', '4.5.1')  # TODO
 
     long_description = 'Open Source Computer Vision Library Python bindings'  # TODO
 
diff --git a/platforms/maven/opencv-it/pom.xml b/platforms/maven/opencv-it/pom.xml
index ee689af104..1849259c36 100644
--- a/platforms/maven/opencv-it/pom.xml
+++ b/platforms/maven/opencv-it/pom.xml
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opencv</groupId>
         <artifactId>opencv-parent</artifactId>
-        <version>4.5.0</version>
+        <version>4.5.1</version>
     </parent>
     <groupId>org.opencv</groupId>
     <artifactId>opencv-it</artifactId>
diff --git a/platforms/maven/opencv/pom.xml b/platforms/maven/opencv/pom.xml
index d9c63ac1a8..d6c630dddb 100644
--- a/platforms/maven/opencv/pom.xml
+++ b/platforms/maven/opencv/pom.xml
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opencv</groupId>
         <artifactId>opencv-parent</artifactId>
-        <version>4.5.0</version>
+        <version>4.5.1</version>
     </parent>
     <groupId>org.opencv</groupId>
     <artifactId>opencv</artifactId>
diff --git a/platforms/maven/pom.xml b/platforms/maven/pom.xml
index 322de22c38..934634536a 100644
--- a/platforms/maven/pom.xml
+++ b/platforms/maven/pom.xml
@@ -3,7 +3,7 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>org.opencv</groupId>
     <artifactId>opencv-parent</artifactId>
-    <version>4.5.0</version>
+    <version>4.5.1</version>
     <packaging>pom</packaging>
     <name>OpenCV Parent POM</name>
     <licenses>

From 3c25fd1ba59504c563cb00134f8785cf4d10811a Mon Sep 17 00:00:00 2001
From: Christos Malliaridis <c.malliaridis@gmail.com>
Date: Tue, 14 Jul 2020 18:44:12 +0200
Subject: [PATCH 120/152] Update and expand erosion / dilation tutorial

- Add python explanation for erosion and dilation
- Add java explanation for erosion and dilation
- Restructure and reword specific sections
---
 .../erosion_dilatation.markdown               | 216 +++++++++++++++---
 .../tutorial_code/ImgProc/Morphology_1.cpp    |   2 +
 .../erosion_dilatation/MorphologyDemo1.java   |  14 ++
 .../erosion_dilatation/morphology_1.py        |  99 ++++----
 4 files changed, 252 insertions(+), 79 deletions(-)

diff --git a/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.markdown b/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.markdown
index ddb7d9e8f5..adcedb2539 100644
--- a/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.markdown
+++ b/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.markdown
@@ -84,57 +84,198 @@ This tutorial's code is shown below. You can also download it
 Explanation
 -----------
 
--#  Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
-    previous sections). Let's check the general structure of the C++ program:
+@add_toggle_cpp
+Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
+previous sections). Let's check the general structure of the C++ program:
+
+@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp main
+
+-#  Load an image (can be BGR or grayscale)
+-#  Create two windows (one for dilation output, the other for erosion)
+-#  Create a set of two Trackbars for each operation:
+    -   The first trackbar "Element" returns either **erosion_elem** or **dilation_elem**
+    -   The second trackbar "Kernel size" returns **erosion_size** or **dilation_size** for the
+        corresponding operation.
+-#  Call erosion and dilation once to show the initial image.
+
+
+Every time we move any slider, the user's function **Erosion** or **Dilation** will be
+called and it will update the output image based on the current trackbar values.
+
+Let's analyze these two functions:
+
+#### The erosion function
+
+@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
+
+The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
+receives three arguments:
+-   *src*: The source image
+-   *erosion_dst*: The output image
+-   *element*: This is the kernel we will use to perform the operation. If we do not
+    specify one, the default is a simple `3x3` matrix. Otherwise, we can specify its
+    shape. For this, we need to use the function cv::getStructuringElement :
+    @snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp kernel
+
+    We can choose any of three shapes for our kernel:
+
+    -   Rectangular box: MORPH_RECT
+    -   Cross: MORPH_CROSS
+    -   Ellipse: MORPH_ELLIPSE
+
+    Then, we just have to specify the size of our kernel and the *anchor point*. If not
+    specified, it is assumed to be in the center.
+
+That is all. We are ready to perform the erosion of our image.
+
+#### The dilation function
+
+The code is below. As you can see, it is very similar to the snippet of code for **erosion**.
+Here we also have the option of defining our kernel, its anchor point and the size of the operator
+to be used.
+@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp dilation
+@end_toggle
+
+@add_toggle_java
+Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
+previous sections). Let's check, however, the general structure of the Java class. There are four main
+parts in the Java class:
+
+- the class constructor, which sets up the window that will be filled with window components
+- the `addComponentsToPane` method, which fills out the window
+- the `update` method, which determines what happens when the user changes any value
+- the `main` method, which is the entry point of the program
+
+In this tutorial we will focus on the `addComponentsToPane` and `update` methods. However, for completeness, the
+steps followed in the constructor are:
+
+-#  Load an image (can be BGR or grayscale)
+-#  Create a window
+-#  Add various control components with `addComponentsToPane`
+-#  Show the window
+
+The components were added by the following method:
+
+@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java components
+
+In short, we
+
+-#  create a panel for the sliders
+-#  create a combo box for the element types
+-#  create a slider for the kernel size
+-#  create a combo box for the morphology function to use (erosion or dilation)
+
+The action and state-change listeners that were added all end up calling the `update` method, which updates
+the image based on the current slider values. So every time we move any slider, the `update` method is triggered.
 
-    -   Load an image (can be BGR or grayscale)
-    -   Create two windows (one for dilation output, the other for erosion)
-    -   Create a set of two Trackbars for each operation:
-        -   The first trackbar "Element" returns either **erosion_elem** or **dilation_elem**
-        -   The second trackbar "Kernel size" return **erosion_size** or **dilation_size** for the
-            corresponding operation.
-    -   Every time we move any slider, the user's function **Erosion** or **Dilation** will be
-        called and it will update the output image based on the current trackbar values.
+#### Updating the image
 
-    Let's analyze these two functions:
+To update the image, we use the following implementation:
 
--#  **erosion:**
-    @snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
+@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java update
 
-    -   The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
-        receives three arguments:
-        -   *src*: The source image
-        -   *erosion_dst*: The output image
-        -   *element*: This is the kernel we will use to perform the operation. If we do not
-            specify, the default is a simple `3x3` matrix. Otherwise, we can specify its
-            shape. For this, we need to use the function cv::getStructuringElement :
-            @snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp kernel
+In other words, we
 
-            We can choose any of three shapes for our kernel:
+-# get the structuring element the user chose
+-# execute the **erosion** or **dilation** function based on `doErosion`
+-# reload the image with the morphology applied
+-# repaint the frame
 
-            -   Rectangular box: MORPH_RECT
-            -   Cross: MORPH_CROSS
-            -   Ellipse: MORPH_ELLIPSE
+Let's analyze the `erode` and `dilate` methods:
 
-            Then, we just have to specify the size of our kernel and the *anchor point*. If not
-            specified, it is assumed to be in the center.
+#### The erosion method
 
-    -   That is all. We are ready to perform the erosion of our image.
-@note Additionally, there is another parameter that allows you to perform multiple erosions
-(iterations) at once. However, We haven't used it in this simple tutorial. You can check out the
-reference for more details.
+@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java erosion
 
--#  **dilation:**
+The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
+receives three arguments:
+-   *src*: The source image
+-   *erosion_dst*: The output image
+-   *element*: This is the kernel we will use to perform the operation. To specify the shape, we need to use
+    the function cv::getStructuringElement :
+    @snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java kernel
 
-    The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
-    Here we also have the option of defining our kernel, its anchor point and the size of the operator
-    to be used.
-    @snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp dilation
+    We can choose any of three shapes for our kernel:
+
+    -   Rectangular box: CV_SHAPE_RECT
+    -   Cross: CV_SHAPE_CROSS
+    -   Ellipse: CV_SHAPE_ELLIPSE
+
+    Together with the shape we specify the size of our kernel and the *anchor point*. If the anchor point is not
+    specified, it is assumed to be in the center.
+
+That is all. We are ready to perform the erosion of our image.
+
+#### The dilation method
+
+The code is below. As you can see, it is very similar to the snippet of code for **erosion**.
+Here we also have the option of defining our kernel, its anchor point and the size of the operator
+to be used.
+@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java dilation
+@end_toggle
+
+@add_toggle_python
+Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
+previous sections). Let's check the general structure of the Python script:
+
+@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py main
+
+-#  Load an image (can be BGR or grayscale)
+-#  Create two windows (one for erosion output, the other for dilation) with a set of trackbars each
+    -   The first trackbar "Element" returns the value for the morphological type that will be mapped
+        (0 = rectangle, 1 = cross, 2 = ellipse)
+    -   The second trackbar "Kernel size" returns the size of the element for the
+        corresponding operation
+-#  Call erosion and dilation once to show the initial image
+
+Every time we move any slider, the user's function **erosion** or **dilation** will be
+called and it will update the output image based on the current trackbar values.
+
+Let's analyze these two functions:
+
+#### The erosion function
+
+@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py erosion
+
+The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
+receives two arguments and returns the processed image:
+-   *src*: The source image
+-   *element*: The kernel we will use to perform the operation. We can specify its
+    shape by using the function cv::getStructuringElement :
+    @snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py kernel
+
+    We can choose any of three shapes for our kernel:
+
+    -   Rectangular box: MORPH_RECT
+    -   Cross: MORPH_CROSS
+    -   Ellipse: MORPH_ELLIPSE
+
+Then, we just have to specify the size of our kernel and the *anchor point*. If the anchor point is
+not specified, it is assumed to be in the center.
+
+That is all. We are ready to perform the erosion of our image.
+
+#### The dilation function
+
+The code is below. As you can see, it is very similar to the snippet of code for **erosion**.
+Here we also have the option of defining our kernel, its anchor point and the size of the operator
+to be used.
+
+@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py dilation
+@end_toggle
+
+@note Additionally, there are further parameters that allow you to perform multiple erosions/dilations
+(iterations) at once and also set the border type and value. However, we haven't used those
+in this simple tutorial. You can check out the reference for more details.
 
 Results
 -------
 
-Compile the code above and execute it with an image as argument. For instance, using this image:
+Compile the code above and execute it (or run the script if using Python) with an image as an argument.
+If you do not provide an image as an argument, the default sample image
+([LinuxLogo.jpg](https://github.com/opencv/opencv/tree/master/samples/data/LinuxLogo.jpg)) will be used.
+
+For instance, using this image:
 
 ![](images/Morphology_1_Tutorial_Original_Image.jpg)
 
@@ -143,3 +284,4 @@ naturally. Try them out! You can even try to add a third Trackbar to control the
 iterations.
 
 ![](images/Morphology_1_Result.jpg)
+(Depending on the programming language, the output might vary a little or be shown in only one window.)
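
For readers who want the operations without the GUI scaffolding, here is a minimal batch-mode sketch of the same calls the tutorial explains; file names are placeholders:

```cpp
// Build a structuring element and apply erosion and dilation, exactly as the
// tutorial's trackbar callbacks do, but without windows or trackbars.
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat src = cv::imread("LinuxLogo.jpg");  // placeholder input image
    if (src.empty())
        return 1;

    int n = 3;  // trackbar value; the actual kernel is (2n+1) x (2n+1)
    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT,
            cv::Size(2 * n + 1, 2 * n + 1),
            cv::Point(n, n));  // anchor at the center

    cv::Mat eroded, dilated;
    cv::erode(src, eroded, element);
    cv::dilate(src, dilated, element);

    cv::imwrite("eroded.jpg", eroded);
    cv::imwrite("dilated.jpg", dilated);
    return 0;
}
```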
diff --git a/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp b/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
index 48b0c2e6e3..33e006269d 100644
--- a/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
+++ b/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
@@ -25,6 +25,7 @@ int const max_kernel_size = 21;
 void Erosion( int, void* );
 void Dilation( int, void* );
 
+//![main]
 /**
  * @function main
  */
@@ -70,6 +71,7 @@ int main( int argc, char** argv )
   waitKey(0);
   return 0;
 }
+//![main]
 
 //![erosion]
 /**
diff --git a/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java b/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
index 7a5f60f065..e71400f737 100644
--- a/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
+++ b/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
@@ -34,6 +34,7 @@ public class MorphologyDemo1 {
     private JFrame frame;
     private JLabel imgLabel;
 
+    //! [constructor]
     public MorphologyDemo1(String[] args) {
         String imagePath = args.length > 0 ? args[0] : "../data/LinuxLogo.jpg";
         matImgSrc = Imgcodecs.imread(imagePath);
@@ -54,7 +55,9 @@ public class MorphologyDemo1 {
         frame.pack();
         frame.setVisible(true);
     }
+    //! [constructor]
 
+    //! [components]
     private void addComponentsToPane(Container pane, Image img) {
         if (!(pane.getLayout() instanceof BorderLayout)) {
             pane.add(new JLabel("Container doesn't use BorderLayout!"));
@@ -114,21 +117,31 @@ public class MorphologyDemo1 {
         imgLabel = new JLabel(new ImageIcon(img));
         pane.add(imgLabel, BorderLayout.CENTER);
     }
+    //! [components]
 
+    //! [update]
     private void update() {
+        //! [kernel]
         Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1),
                 new Point(kernelSize, kernelSize));
+        //! [kernel]
 
         if (doErosion) {
+            //! [erosion]
             Imgproc.erode(matImgSrc, matImgDst, element);
+            //! [erosion]
         } else {
+            //! [dilation]
             Imgproc.dilate(matImgSrc, matImgDst, element);
+            //! [dilation]
         }
         Image img = HighGui.toBufferedImage(matImgDst);
         imgLabel.setIcon(new ImageIcon(img));
         frame.repaint();
     }
+    //! [update]
 
+    //! [main]
     public static void main(String[] args) {
         // Load the native OpenCV library
         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
@@ -142,4 +155,5 @@ public class MorphologyDemo1 {
             }
         });
     }
+    //! [main]
 }
diff --git a/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py b/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py
index 502457b471..3645eab3d1 100644
--- a/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py
+++ b/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py
@@ -3,61 +3,76 @@ import cv2 as cv
 import numpy as np
 import argparse
 
+src = None
 erosion_size = 0
 max_elem = 2
 max_kernel_size = 21
-title_trackbar_element_type = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
+title_trackbar_element_shape = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
 title_trackbar_kernel_size = 'Kernel size:\n 2n +1'
 title_erosion_window = 'Erosion Demo'
-title_dilatation_window = 'Dilation Demo'
+title_dilation_window = 'Dilation Demo'
 
+
+## [main]
+def main(image):
+    global src
+    src = cv.imread(cv.samples.findFile(image))
+    if src is None:
+        print('Could not open or find the image: ', image)
+        exit(0)
+
+    cv.namedWindow(title_erosion_window)
+    cv.createTrackbar(title_trackbar_element_shape, title_erosion_window, 0, max_elem, erosion)
+    cv.createTrackbar(title_trackbar_kernel_size, title_erosion_window, 0, max_kernel_size, erosion)
+
+    cv.namedWindow(title_dilation_window)
+    cv.createTrackbar(title_trackbar_element_shape, title_dilation_window, 0, max_elem, dilatation)
+    cv.createTrackbar(title_trackbar_kernel_size, title_dilation_window, 0, max_kernel_size, dilatation)
+
+    erosion(0)
+    dilatation(0)
+    cv.waitKey()
+## [main]
+
+# optional mapping of values with morphological shapes
+def morph_shape(val):
+    if val == 0:
+        return cv.MORPH_RECT
+    elif val == 1:
+        return cv.MORPH_CROSS
+    elif val == 2:
+        return cv.MORPH_ELLIPSE
+
+
+## [erosion]
 def erosion(val):
     erosion_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window)
-    erosion_type = 0
-    val_type = cv.getTrackbarPos(title_trackbar_element_type, title_erosion_window)
-    if val_type == 0:
-        erosion_type = cv.MORPH_RECT
-    elif val_type == 1:
-        erosion_type = cv.MORPH_CROSS
-    elif val_type == 2:
-        erosion_type = cv.MORPH_ELLIPSE
-
-    element = cv.getStructuringElement(erosion_type, (2*erosion_size + 1, 2*erosion_size+1), (erosion_size, erosion_size))
+    erosion_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_erosion_window))
+
+    ## [kernel]
+    element = cv.getStructuringElement(erosion_shape, (2 * erosion_size + 1, 2 * erosion_size + 1),
+                                       (erosion_size, erosion_size))
+    ## [kernel]
     erosion_dst = cv.erode(src, element)
     cv.imshow(title_erosion_window, erosion_dst)
+## [erosion]
 
-def dilatation(val):
-    dilatation_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilatation_window)
-    dilatation_type = 0
-    val_type = cv.getTrackbarPos(title_trackbar_element_type, title_dilatation_window)
-    if val_type == 0:
-        dilatation_type = cv.MORPH_RECT
-    elif val_type == 1:
-        dilatation_type = cv.MORPH_CROSS
-    elif val_type == 2:
-        dilatation_type = cv.MORPH_ELLIPSE
-
-    element = cv.getStructuringElement(dilatation_type, (2*dilatation_size + 1, 2*dilatation_size+1), (dilatation_size, dilatation_size))
-    dilatation_dst = cv.dilate(src, element)
-    cv.imshow(title_dilatation_window, dilatation_dst)
 
-parser = argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.')
-parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg')
-args = parser.parse_args()
+## [dilation]
+def dilatation(val):
+    dilatation_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilation_window)
+    dilation_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_dilation_window))
 
-src = cv.imread(cv.samples.findFile(args.input))
-if src is None:
-    print('Could not open or find the image: ', args.input)
-    exit(0)
+    element = cv.getStructuringElement(dilation_shape, (2 * dilatation_size + 1, 2 * dilatation_size + 1),
+                                       (dilatation_size, dilatation_size))
+    dilatation_dst = cv.dilate(src, element)
+    cv.imshow(title_dilation_window, dilatation_dst)
+## [dilation]
 
-cv.namedWindow(title_erosion_window)
-cv.createTrackbar(title_trackbar_element_type, title_erosion_window , 0, max_elem, erosion)
-cv.createTrackbar(title_trackbar_kernel_size, title_erosion_window , 0, max_kernel_size, erosion)
 
-cv.namedWindow(title_dilatation_window)
-cv.createTrackbar(title_trackbar_element_type, title_dilatation_window , 0, max_elem, dilatation)
-cv.createTrackbar(title_trackbar_kernel_size, title_dilatation_window , 0, max_kernel_size, dilatation)
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.')
+    parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg')
+    args = parser.parse_args()
 
-erosion(0)
-dilatation(0)
-cv.waitKey()
+    main(args.input)

From cc7f17f011083060979b980360e2744607b5972e Mon Sep 17 00:00:00 2001
From: Suleyman TURKMEN <sturkmen@hotmail.com>
Date: Wed, 28 Oct 2020 04:51:12 +0300
Subject: [PATCH 121/152] update documentation

---
 modules/core/include/opencv2/core.hpp         |  9 ++++++
 modules/highgui/include/opencv2/highgui.hpp   | 29 +++++++++++++------
 .../imgcodecs/include/opencv2/imgcodecs.hpp   |  6 ++++
 3 files changed, 35 insertions(+), 9 deletions(-)

diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp
index 6eb519e8a2..fc2432dcdf 100644
--- a/modules/core/include/opencv2/core.hpp
+++ b/modules/core/include/opencv2/core.hpp
@@ -202,6 +202,9 @@ enum CovarFlags {
     COVAR_COLS      = 16
 };
 
+//! @addtogroup core_cluster
+//! @{
+
 //! k-Means flags
 enum KmeansFlags {
     /** Select random initial centers in each attempt.*/
@@ -215,6 +218,8 @@ enum KmeansFlags {
     KMEANS_USE_INITIAL_LABELS = 1
 };
 
+//! @} core_cluster
+
 //! type of line
 enum LineTypes {
     FILLED  = -1,
@@ -236,12 +241,16 @@ enum HersheyFonts {
     FONT_ITALIC                 = 16 //!< flag for italic font
 };
 
+//! @addtogroup core_array
+//! @{
+
 enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix.
                    REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix.
                    REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix.
                    REDUCE_MIN = 3  //!< the output is the minimum (column/row-wise) of all rows/columns of the matrix.
                  };
 
+//! @} core_array
 
 /** @brief Swaps two matrices
 */
diff --git a/modules/highgui/include/opencv2/highgui.hpp b/modules/highgui/include/opencv2/highgui.hpp
index 628b7fa9ce..6a8c598720 100644
--- a/modules/highgui/include/opencv2/highgui.hpp
+++ b/modules/highgui/include/opencv2/highgui.hpp
@@ -66,6 +66,7 @@ It provides easy interface to:
 -   Add trackbars to the windows, handle simple mouse events as well as keyboard commands.
 
 @{
+    @defgroup highgui_window_flags Flags related to creating and manipulating HighGUI windows and mouse events
     @defgroup highgui_opengl OpenGL support
     @defgroup highgui_qt Qt New Functions
 
@@ -93,7 +94,7 @@ It provides easy interface to:
 
 
             namedWindow("main1",WINDOW_NORMAL);
-            namedWindow("main2",WINDOW_AUTOSIZE | CV_GUI_NORMAL);
+            namedWindow("main2",WINDOW_AUTOSIZE | WINDOW_GUI_NORMAL);
             createTrackbar( "track1", "main1", &value, 255,  NULL);
 
             String nameb1 = "button1";
@@ -178,6 +179,9 @@ namespace cv
 //! @addtogroup highgui
 //! @{
 
+//! @addtogroup highgui_window_flags
+//! @{
+
 //! Flags for cv::namedWindow
 enum WindowFlags {
        WINDOW_NORMAL     = 0x00000000, //!< the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size.
@@ -227,6 +231,11 @@ enum MouseEventFlags {
        EVENT_FLAG_ALTKEY    = 32 //!< indicates that ALT Key is pressed.
      };
 
+//! @} highgui_window_flags
+
+//! @addtogroup highgui_qt
+//! @{
+
 //! Qt font weight
 enum QtFontWeights {
         QT_FONT_LIGHT           = 25, //!< Weight of 25
@@ -251,6 +260,8 @@ enum QtButtonTypes {
        QT_NEW_BUTTONBAR = 1024  //!< Button should create a new buttonbar
      };
 
+//! @} highgui_qt
+
 /** @brief Callback function for mouse events. see cv::setMouseCallback
 @param event one of the cv::MouseEventTypes constants.
 @param x The x-coordinate of the mouse event.
@@ -389,7 +400,7 @@ videos, it will display the video frame-by-frame)
  */
 CV_EXPORTS_W void imshow(const String& winname, InputArray mat);
 
-/** @brief Resizes window to the specified size
+/** @brief Resizes the window to the specified size
 
 @note
 
@@ -408,7 +419,7 @@ CV_EXPORTS_W void resizeWindow(const String& winname, int width, int height);
 */
 CV_EXPORTS_W void resizeWindow(const String& winname, const cv::Size& size);
 
-/** @brief Moves window to the specified position
+/** @brief Moves the window to the specified position
 
 @param winname Name of the window.
 @param x The new x-coordinate of the window.
@@ -476,8 +487,6 @@ For cv::EVENT_MOUSEWHEEL positive and negative values mean forward and backward
 respectively. For cv::EVENT_MOUSEHWHEEL, where available, positive and negative values mean right and
 left scrolling, respectively.
 
-With the C API, the macro CV_GET_WHEEL_DELTA(flags) can be used alternatively.
-
 @note
 
 Mouse-wheel events are currently supported only on Windows.
@@ -486,8 +495,9 @@ Mouse-wheel events are currently supported only on Windows.
  */
 CV_EXPORTS int getMouseWheelDelta(int flags);
 
-/** @brief Selects ROI on the given image.
-Function creates a window and allows user to select a ROI using mouse.
+/** @brief Allows users to select a ROI on the given image.
+
+The function creates a window and allows users to select a ROI using the mouse.
 Controls: use `space` or `enter` to finish selection, use key `c` to cancel selection (function will return the zero cv::Rect).
 
 @param windowName name of the window where selection process will be shown.
@@ -506,8 +516,9 @@ CV_EXPORTS_W Rect selectROI(const String& windowName, InputArray img, bool showC
  */
 CV_EXPORTS_W Rect selectROI(InputArray img, bool showCrosshair = true, bool fromCenter = false);
 
-/** @brief Selects ROIs on the given image.
-Function creates a window and allows user to select a ROIs using mouse.
+/** @brief Allows users to select multiple ROIs on the given image.
+
+The function creates a window and allows users to select multiple ROIs using the mouse.
 Controls: use `space` or `enter` to finish current selection and start a new one,
 use `esc` to terminate multiple ROI selection process.
 
diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp
index e2636e19f7..7f6b24f0ff 100644
--- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp
+++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp
@@ -49,6 +49,7 @@
   @defgroup imgcodecs Image file reading and writing
   @{
     @defgroup imgcodecs_c C API
+    @defgroup imgcodecs_flags Flags used for image file reading and writing
     @defgroup imgcodecs_ios iOS glue
   @}
 */
@@ -60,6 +61,9 @@ namespace cv
 //! @addtogroup imgcodecs
 //! @{
 
+//! @addtogroup imgcodecs_flags
+//! @{
+
 //! Imread flags
 enum ImreadModes {
        IMREAD_UNCHANGED            = -1, //!< If set, return the loaded image as is (with alpha channel, otherwise it gets cropped). Ignore EXIF orientation.
@@ -130,6 +134,8 @@ enum ImwritePAMFlags {
        IMWRITE_PAM_FORMAT_RGB_ALPHA = 5,
      };
 
+//! @} imgcodecs_flags
+
 /** @brief Loads an image from a file.
 
 @anchor imread
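
The pattern applied throughout this patch: `@defgroup` declares a documentation group once, and `@addtogroup ... @{ ... @}` wraps the declarations that should appear on that group's page. A sketch of the same pattern, with a hypothetical group and enum invented for illustration:

```cpp
// Hypothetical example of the Doxygen grouping convention used in this patch.
/** @defgroup mymodule_flags Flags used by MyModule (hypothetical group) */

//! @addtogroup mymodule_flags
//! @{

//! Example flags that would be documented under mymodule_flags
enum MyModuleFlags {
    MYMODULE_FLAG_A = 0,
    MYMODULE_FLAG_B = 1
};

//! @} mymodule_flags
```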

From fef23768fe16525d30716f77e440d082ad5683d4 Mon Sep 17 00:00:00 2001
From: Ian Maquignaz <9im14@queensu.ca>
Date: Tue, 17 Nov 2020 23:13:57 -0500
Subject: [PATCH 122/152] Fixed issue with Epipolar Geometry Tutorial

---
 .../py_epipolar_geometry/py_epipolar_geometry.markdown        | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown
index 3ed072c04d..6b8d90882a 100644
--- a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown
+++ b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown
@@ -79,7 +79,7 @@ from matplotlib import pyplot as plt
 img1 = cv.imread('myleft.jpg',0)  #queryimage # left image
 img2 = cv.imread('myright.jpg',0) #trainimage # right image
 
-sift = cv.SIFT()
+sift = cv.SIFT_create()
 
 # find the keypoints and descriptors with SIFT
 kp1, des1 = sift.detectAndCompute(img1,None)
@@ -93,14 +93,12 @@ search_params = dict(checks=50)
 flann = cv.FlannBasedMatcher(index_params,search_params)
 matches = flann.knnMatch(des1,des2,k=2)
 
-good = []
 pts1 = []
 pts2 = []
 
 # ratio test as per Lowe's paper
 for i,(m,n) in enumerate(matches):
     if m.distance < 0.8*n.distance:
-        good.append(m)
         pts2.append(kp2[m.trainIdx].pt)
         pts1.append(kp1[m.queryIdx].pt)
 @endcode

From b86f129393e46b2680dcc01b16c151a8f10609a0 Mon Sep 17 00:00:00 2001
From: Liubov Batanina <piccione-mail@yandex.ru>
Date: Thu, 19 Nov 2020 12:11:52 +0300
Subject: [PATCH 123/152] Fixed Test_Model.DetectionOutput

---
 modules/dnn/src/model.cpp       | 2 +-
 modules/dnn/test/test_model.cpp | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/dnn/src/model.cpp b/modules/dnn/src/model.cpp
index acee29e680..aefeaa42b3 100644
--- a/modules/dnn/src/model.cpp
+++ b/modules/dnn/src/model.cpp
@@ -100,7 +100,7 @@ public:
         // Faster-RCNN or R-FCN
         if (net.getLayer(0)->outputNameToIndex("im_info") != -1)
         {
-            Mat imInfo(Matx31f(size.height, size.width, 1.6f));
+            Mat imInfo(Matx13f(size.height, size.width, 1.6f));
             net.setInput(imInfo, "im_info");
         }
         net.forward(outs, outNames);
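The one-character change matters because `Matx31f` and `Matx13f` are transposes of each other, and the Faster-RCNN/R-FCN `im_info` input is a single row of `(height, width, scale)`. A quick sketch (not part of the patch) of the shape difference:

```cpp
#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    cv::Mat rowInfo(cv::Matx13f(480.f, 640.f, 1.6f)); // 1x3, the shape after the fix
    cv::Mat colInfo(cv::Matx31f(480.f, 640.f, 1.6f)); // 3x1, the shape before the fix
    assert(rowInfo.rows == 1 && rowInfo.cols == 3);
    assert(colInfo.rows == 3 && colInfo.cols == 1);
    return 0;
}
```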
diff --git a/modules/dnn/test/test_model.cpp b/modules/dnn/test/test_model.cpp
index ddec6e79e4..5766684c41 100644
--- a/modules/dnn/test/test_model.cpp
+++ b/modules/dnn/test/test_model.cpp
@@ -206,6 +206,8 @@ TEST_P(Test_Model, DetectionOutput)
     {
         if (backend == DNN_BACKEND_OPENCV)
             scoreDiff = 4e-3;
+        else
+            scoreDiff = 2e-2;
         iouDiff = 1.8e-1;
     }
 

From f8c7862f699dc4da72b828d4e7831da88733ca33 Mon Sep 17 00:00:00 2001
From: Igor Murzov <igor.murzov@xperience.ai>
Date: Mon, 9 Nov 2020 19:08:09 +0300
Subject: [PATCH 124/152] Add tutorial on how to use Orbbec Astra 3D cameras

---
 doc/tutorials/videoio/intelperc.markdown      |   2 +-
 doc/tutorials/videoio/kinect_openni.markdown  |   2 +-
 .../orbbec-astra/images/astra_color.jpg       | Bin 0 -> 138539 bytes
 .../orbbec-astra/images/astra_depth.png       | Bin 0 -> 29766 bytes
 .../orbbec-astra/orbbec_astra.markdown        | 150 ++++++++++++++
 .../videoio/table_of_content_videoio.markdown |   6 +-
 .../videoio/orbbec_astra/orbbec_astra.cpp     | 195 ++++++++++++++++++
 samples/cpp/videocapture_openni.cpp           |   2 +-
 8 files changed, 353 insertions(+), 4 deletions(-)
 create mode 100644 doc/tutorials/videoio/orbbec-astra/images/astra_color.jpg
 create mode 100644 doc/tutorials/videoio/orbbec-astra/images/astra_depth.png
 create mode 100644 doc/tutorials/videoio/orbbec-astra/orbbec_astra.markdown
 create mode 100644 samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp

diff --git a/doc/tutorials/videoio/intelperc.markdown b/doc/tutorials/videoio/intelperc.markdown
index e27f70c7ed..6a6a5e5c9a 100644
--- a/doc/tutorials/videoio/intelperc.markdown
+++ b/doc/tutorials/videoio/intelperc.markdown
@@ -1,7 +1,7 @@
 Using Creative Senz3D and other Intel RealSense SDK compatible depth sensors {#tutorial_intelperc}
 =======================================================================================
 
-@prev_tutorial{tutorial_kinect_openni}
+@prev_tutorial{tutorial_orbbec_astra}
 
 **Note**: This tutorial is partially obsolete since PerC SDK has been replaced with RealSense SDK
 
diff --git a/doc/tutorials/videoio/kinect_openni.markdown b/doc/tutorials/videoio/kinect_openni.markdown
index dc1ee6eeaa..aadaec5e44 100644
--- a/doc/tutorials/videoio/kinect_openni.markdown
+++ b/doc/tutorials/videoio/kinect_openni.markdown
@@ -2,7 +2,7 @@ Using Kinect and other OpenNI compatible depth sensors {#tutorial_kinect_openni}
 ======================================================
 
 @prev_tutorial{tutorial_video_write}
-@next_tutorial{tutorial_intelperc}
+@next_tutorial{tutorial_orbbec_astra}
 
 
 Depth sensors compatible with OpenNI (Kinect, XtionPRO, ...) are supported through VideoCapture
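As a rough sketch of the capture pattern these videoio tutorials share (backend constants assumed enabled in the build; the new Astra tutorial targets the dedicated `CAP_OPENNI2_ASTRA` backend where the Kinect one uses `CAP_OPENNI2`):

```cpp
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::VideoCapture capture(cv::CAP_OPENNI2);  // or cv::CAP_OPENNI2_ASTRA
    if (!capture.isOpened())
        return 1;
    cv::Mat depthMap;
    for (;;)
    {
        if (!capture.grab())
            break;
        // Depth arrives as CV_16UC1 in millimeters.
        capture.retrieve(depthMap, cv::CAP_OPENNI_DEPTH_MAP);
        cv::imshow("depth", depthMap * 8);      // scale up for visibility
        if (cv::waitKey(30) >= 0)
            break;
    }
    return 0;
}
```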
diff --git a/doc/tutorials/videoio/orbbec-astra/images/astra_color.jpg b/doc/tutorials/videoio/orbbec-astra/images/astra_color.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d37e2803df20c6fd88b7c1c54ed2e4513d14d4bc
GIT binary patch
literal 138539
[base85-encoded JPEG payload for astra_color.jpg omitted: 138539 bytes of binary image data, nothing human-readable]
zsO1N)qhDoV<Vd20OqS)0E@5g6D>qKQb;D)Laiv|P7gMzrT_GC}AF}ZEzIFE2A}i(9
zf3KBrF#N~YlB}dL@V#R(98x~6hQKDo0(GUghTW19PIdqlwx-wpXreC!9W!Ru)oz`E
zV@7s85MHE-uN0%WH@#~}>TJrvz6SJhscszjay^fkO}<nb_nKa7o`QobvasuV65qYK
z@HN0zjQIwsURbiqzUt{h`jOe*_f{S?HZq<c%KreBEE2_;Me-QHyMGm8aSt32smvHE
z6lNa}&WXy=uS-^j<Fi1<{sQ%uSe_UAlCD!g5;J&Knx2-c%Bz_4Yac%v<O82wx*z3C
zpyph;a?x20)4iF9@u4-iunjXtZ>Nn0zG%+QDL(cc^83G|SV`w)e9;&ngmoaD{giPy
zrp-1n_*uLL!{=%qg>ei`!dsXhFNJ*~fH{ebWut!{8lR0}nj-utp|zY?hApqiEeCxg
zCcu7mo9}B~?4q~LP2p-n9HSH$Yw3GyPYz(~_m+(C!qZ0^7|uon-`RW9JXmRg&3#iZ
z<4pv?qGlSSTJ#<_F|g_lM+CqoG$pRu2KUwT7=Zr(2oz!r4r8e!Q$kJ?ppX=~LM{y`
z!-(CyGiyXe4i&Gu%I9B(v>=-g!iXD~@S&I=ZuM=Cf$Ys|4<3{;rya!vfUOE;-@xBm
ziB?k{cFDH23NJC1GUT$@=xx|F)2&AuUVNjbpE?=FMf;%k*%#l?Rw0eVk*;y!ZotEE
zYI&mloXKl_*Pxf&T=rq3S+cG=K_8-HVW<aAyVDFr)6SO}N}e6Q8_-G6i}zonc5T?0
z%zld6t`_-N`%MTUQ-+A#w>G*On-D!3f~z0S{{Vpa(kT1B9g*Sh=;tI5d~9v_RGet!
zl~<H1K1$@n$sdty-!f_9DaiVTq8BnZK&@&(2`wSLi)O3VZk<J4fnR8*xwXcCSJxwm
zNx;!_4GfK<{wAatmd}{#@d3^3rTRUlNidQ#Nf<r#+kcN~JKCU_&BTmGubVF@(xq%n
za-37RSi&(Jw*LUVVhk8yfnp_(Ru=IzPVS1j6wUdHifN$`A<#ytl(DfFx4xBc+_s%p
zPtu|->vV1CWR0Y_??yEOv+#}z8(rqbx`qD$mYf0|bp(2?=U9vzwW7tX<9mAGZT)ys
zVdZ1!3jjXt&a{F%F;G31@vF;>lywUc$Opo%W|CIF_lGt9G_px$PPvV>tixCFw%xi_
ztM5kIGMX&A8fJodAYyefIa}4w(m31sVpx83P01&#8<SZhLhbva^aBjU`nIVt82KGw
zUr~8#-$r25==hiW_p?ZUuYF<UnpT$+ETEjnQEhEYi%W2r^IO&Mpd0;i5__)NsPZ+M
z6Nim2Z%EqJzt!WPaVY%gCMG<$?z)?+d+5YuCOHWrGqvmL_-rsQ$hkp1_UZE!V=aeS
z3D7Gs*0C)3$rea5pyC52_tNC)wFbfQrNpUR%FUzr+rpVyO0t_UZNcrZ1(@ukb6i|t
z7>rRGLwu^Atrp3aC0yiIB1QypflDa;-{)Ic5!o7&cy7T?zYm>Ctq^;3GzZnyPwxK!
z+LCz`Gl7~TmgF7urNs;7QKX#>VWMULAuLr#l^t)`vdLq^=2a?9+L+=dYd1?3tjxZj
z7v0V~sr>6daSX>n<QA2gS1G-UoAx6OBv#BSNuDqn#q<^x7>kxpZMU|SzNf~Nxv|@*
z)}p?LF23H#u(AHqcM-3?on@1VWNd=s;Cls3Cp-)|vE?S?lvfK4#~4ga<$*B^V{yLp
zT5qKBR_PMVP!Dq2jzD}#Cgj$;_Tt`iE@14b60-$53urgiq~VDYNLUqC8iP?qBxV`q
zjI^J0jffuOQnndli0lYc@zk0VO^B9RWz#XcupUC0uXi?9WBujtWAdwJS7trd>r!wg
zpAUcda+LflG}6mC{{YRApYiOpNY9s_lx=Rc=;PsCdHl_Ci=P2p+`e0y<<b03rnbD7
z@3yC~in;8!Y>VyeS#dGUxLxd)%=Gc#(p)X|DdX|hpB;=AC@<`w%s6nbav{^q`O=vb
ztV7l(L*-adz;W|Gze?dwto*qh(|TB(e+%?#&cAza8oW4Wy|rB0ip1fPEUrc31+UIF
z{=DhdOkD9~)fEk`P$n8rt;qJoq_3gS?$y8rbL8S_(!QyNRfCAR*bSNdJ{700u0{v?
z=~K$gtV#6+yIw(KFz#qgy!4~SW+#2EOB*Mo#A?@HSEXjL%4BX)@Su5Zay0GDBW7<J
z(?&EUGjOsCL?O@*jT~a|z|4PWY|MTY!<Gz=sd1IOwA*^N;OZ?$m`18Kb~XZ*DDf#5
zT#;eg_Wmo=i6qH~hnb~izpzw~neulRpOow2(y;Pmys{Q+9-7+!0P>1+!yt(*wb#PC
zxm<KLsoI%iZs@taG>%I#pgvS0*76$;wl##7Kt~-|d%^t=WBnrE+^ZxP^`s)#_iI=P
zlwUb{GOpx-Us<s+z{-&cHNIWR_tLa?o_s8(UO}i*Ij!(hTt(RLYV>C`jlY#I@9)`?
zJ7%@@Hqx;8Ts|45$*huswgj8rjG4GZ_pC2-QZr*!5(O`;XKxMY8pAAk%$afko+7Z~
zNQ7(&eigwg4~@O`%H%>xbKz=d=yvVR6w)Rt+`JY!zEr$i6bm)3^3T`ID61QA`f%4G
z)`}ESYSNZC!D$;n1!1uUUMZ1^OJ%S_)3sTY=WSoh!nEv?<9e47h`E9m3y^xvy=E%$
zHM`nY857b&#zZXZskye5G&zZa2FYvg!+IG|7Cicnl#tBsmel24D+?ewxmi)XcKv8W
z#391!!llAW%1ifT1G?P}GO#Hkn>zR!R`rI*ttTA;Bm3n}VN7tGqlv|49vc2+R6;<~
z&dF!tFLiovPl^gSy4VxtU@6(Ad0I_a+fZwZGBlyAR`XrH1m2~SDOp2x+u=h%50xHd
zRUKT~xMFzHG3vsmnVqu57dc`oX3ox_<P6QH!lPik_hD~4+w!TboZ*i!t6J5qWZ0gy
zn3{=u%#U)#d%}=IA0szU=Tf_s6Ucxob-tA8DQPZ*0DVtoD)ChXi)jvLC$6`m?D9z6
z1NeI>5#o~-i(*{l=zpO=*hU|`7jV@CZb~YyQr6V-9jg>oGPn$GH#tx8u3l^FuJm#f
zmvMU7&>pW^T?^~7YH2Z3#1-6VkJ6>Ww;@YQ44-t5<y=fuxp2zXv|Z=%_6kVikAoU%
z82hy}#>m(><%ukJd*#2zpnysc$A?8@(f6!AAo3Mqbsc<ZWcoHx#!`6rR9;xmik-dH
zm<0p(ji`z2s=asCjGP%be|sslMlkDnrR86JI81uKF2gi=kB0R!P98Th9I>5piw$aA
zBgw^zXwJKlOqb7>OWN0|lMWO~CepU0J(SMwUE5O4xvmBM74L4%?c+dUV{RG#`^Wfc
z(=DbQ;CuJ;q@Kl;KQef&wWMr6q=}a@loi80J2^$^4j&IA$~7|2C<6XUqw}mpKS<$=
zLptSjzG3aXuTZuR5SYc3<w$?M+V}9co|JLFOnnC~II~(a#EL{)kGY%MLGjJ?JSk$#
zkw$&WGTx^+d;MI<+$;Ut>DaS$RX3<145I2Cjjx%A)}tBSW6X3tX=io^66Elpw%s!t
z8T+GBd}uK;PJC28QShR4Xjy>v3c|&qT&z?6QTb32_r-}Ofwv5Q<=560Krv@V{pu=g
zq-aH4*Af;>@Qgg`dkf$36kcX*OwHBJ>Q=W2?gfugT8~0gU2S7Y7+uDc<1(zy`}NlK
zE_rjxSCzadDKSPq_Dsalc5?b@m|DF$8L>O^^zxu>kT&+Ej^@hE&2JNWkuvgl_1d@c
z)|8F*HEUPtQokEC#5X@%+ofUO7JU++DaDX=*QT|Gh+!v71{RQN_qNvdP>w>-&utP^
zIe@2+g>klSAp9v^wGnHS4g9GZd0z{g)XE3O)C6Y0TYtu|#Mz&OtCw-7T42!V+d9)i
z$0<hcw)UaT?xtP5E@<2wri%?7hLj^**_(1?y$IqylND$|@VzW)q)E$|_13(-apC7*
zN2y&W#5C-`x;gFmRD^Yf2L<%D^&C<1D(-rkvI}@nmS~$U{M@G0tsTfsS1n~cYYvQy
zVkvVtgZI2BLlN0-%-w&h)!>z!sPw3_m0x(SX}+w7bi<;Oede(O<+4c2snpn?)3#E$
zJXR&4QzNoWi5O3Kzru?FVbD^|iYHqYhTN^~r*95u)243<d1Yeq$k~~XQ$n8&a!P{H
zTct|JeF6A(3yKLCyRpin&ZB|hNo9k*H>^*ejbh#$Yer1%WKoqe`$n}MOib^UgNWKg
zU@g<*Sj=Y-;qiD^Y|ZKw$_}IBT90_GuU^`OG%X~Efi7OdL2A4qB5PYUvk~mQ79_i#
zr&4GaaHpsyuNoNP=v$R7?XE5#AB2fLWo$|hhP8ttXr;>uvqvJgt>s)aok?@6DZQzZ
z768X&=B%<OH{Z2Gi&6VxQU3slb@z+>D#|kx(waF%<wMNz$z~sw3`}Q@0-fX<uebH0
z@sNyD#+~r^tk{b#M!4oT+wrHw<FIZcMbwoMk~~1D%Bv{h*t3fkG`^NH*==J*IC9?B
z4m73CbQ|AVzwFd#4qfSTNtpG$9LD(_mfkuq>q^ED%0<Z=(a?zBP)O_-6}`nIWwj>P
zsB(f(Fj1{VK5||)<=0O-mk}p3#{-~z+h|Dn(-u_%OE#O)m1g1R+0<UZb)fkqZ#nzT
zK;F)}3w{-Y(D7dmjdSO%Im>(4<_&7&0d7#ccy;W!sc`cjM>6Qg)p50DS1Xj=vh|}Z
z@4<i``+ijH)=1&jF#c!xSEMZe0C!y}$QNae?SExUBWhuQBHzWe`~_iHrQ9O|<Jv7;
zSx=+;+SIt&B8>!W#pSixK&8Z$wh}MdVVhoG-ti`vg{%^KL2ZRe!;0Akt$S7*4aBjL
z!j@yVhSkN76l`>Or5Vq-)J+t#^D!-ZTkxz?U3ek=8+eK`;Lytyi<!|iD%#K`N{`+a
zu09(arxU*g*O%FPh2@ZO%awQ<D{%h+#n+`IvPY6RNmOf%=)M;hHXXX~q$2eaG0{IB
z*gYQ?IAqD=%7044dOi?fjgtQWx(DJ7dpcFAXT|zC%VmZ)UZ<^M!;46kxwm^t4zoR`
zyM|qexf+k~(ktb81^I>R>hL_H3yO?N`Hy+S5PvU?V<vfwG2!xy<-N)Ng<3zQRoHsG
z5`JQXaYGL(<z>z1L=K&5MdCf&TbHc@OL$v)Hj3ukkzLSzwQw?@-J%+pVW*o64*V&Z
zFYq)N<K>hycV@HxG$EEK>>NI5^yo+DK$*A$@A=S-v^a*!Y|-^KG!qtl>y`5QqQ0u=
zvSfFNn|KlpBw8pp{3#3fSmYP=jV3ELM@7qEDQ1+OAWXNv!u7<;In*58Xv&%a%X-zg
z%sp#q?50y-2D@!omXi6n{{ZxgMz^~5sM6fQuX;Uz_)|j)#=7%_*lAeEW)e>E$i-CX
z6l-slb3i4Kivbz7QS7Wrl$O%4<YAWgsVt{3OQ>HEKRUq-%c8d~U4LlRd)@DI<|`yb
zvJ0PmM-!>yucOWTqiRRsnMKNq#Yp&W9PpGPTmJYK`TdCHgyk*D*>|oR%4FezreZ>x
zmG8b?FSTki-(YJpayjHJQ{>b(SN5GnKr{MS=Ca?`T&Dj3fT^ye!W!iX>uQey+!D9e
zBv5oCWgjEi6f-vtAr~{|wTPxnCN1LBT%a7lZBU7jL5&ZVS=RT{Yin+zu<;o!8r&_<
zGZEr_)VO@>ZRB0)ZHj<lq-^FpUcim%qn{4)5TJ>n&y<hw)$q>(Lxp_n^|$5l_nqoo
zM5VLEtH}V!zdzEG=_iCF5U={Q6LbFniMFgbmf853^cM9^9Yr^85^SNwIiw(;D}Ho7
zk;5ma!mX!=*5Uo{J0HfBrlgwzz3OkS;g8p{rwTV)a~t1(bzc)?;W3jV*yc8*#zu%@
z1ZNjFKz25$R(aYMjwTi<b!PW_O$=PDsmKUi+|AeYq-KMK!MISZn)m+zwKQSR44ae(
zPk&}?-tP(*?^l;+O4{9))DkImq6230ti3AzZBKpgOC&NBbZo+ztg+3Tn_a8v8Kq#(
zvdNY!?{{mO2&>Ev=YNelVx1YInTje7oj(f9*mt$8s)qNhO{5teMrF!$@~GA_QXIN|
zRf~+OFiE|8-*4qoHzrZs3r<geY_&XWFB=Tr{ixh(b+D|DhAf^Hh>dTRBw#P$+KGU|
zAWy$CW&Z${{{WpnB1umbyTwLdkf-9xs_&v)+M<$2kQnwcupMvIQsyd)zHoCNg+m%n
z(s3QVTU)#O)Qc3I-cumgO3M$U9<y~c+<U$jUj4OYKX_1h<TJu`R~lE;AzR5VN0VKY
z9K+vn_5(LwY_&A`a`9Zfg*y#7e>%)uU3tc1Sz>EI2R56UnD-7vE9a@G<Yner!1EUd
z{{VoeFRBLk>8ImZg3Fgk)DHoXe_FLN2^cHxx6si(H7uDJM=K1i(@Wg{0A3Um4&pZT
znZ|#WWRnvlkq-NX$~N%UpB43wtKlNLaby-6ZSAPe7C);XP^GWT-gcyi0PV$)@~VY5
z^VX9Ij>f3M2{Sx^SV(=;;|-e-jx3EC<+rw);?hTER0Dfxenz~jUe=$Z4i#|?h4vzv
zX|Rx_#948%Mg_gT9@=2+a(BaypTd2$#F}0#kd0;VsbZHK68&NsBT&JPq+6h+i|DVV
zaNkJHYr0#8KQr>8XOjhlev8cC5(Rx<6~ivc#8VU9SYF?i5=Hddw5wcJwY3dYp9&gY
zwbG5(-fN{=^rcPwYfb65Q{X6yG@9twR<&X!o!TN?<dH_)$N5*FicDLc^s4$3?KLJR
zg%k|d84z4oI;dM)-%6ZL3n{*JTbjtR$u?axh0xY75Yr<jO_u#AYrdkmuVUUEDP#vw
zZ*7GPkCI|Jp<}PQrk+-8sn5Q`r<&P#j$Ym!tH+)<X7L1Cf~$s7ZJBC6qtaO!#rYWz
zHs^h){ahrF$XT+>dq#Y%OFJ0ic8@mt0Lwr$zgG#wb4o^zuiW#kJELLSpAR&UG2bDz
z>7;hMHs-hN$#4nrG*d0jfE(7E3e2E9jUG;8<}1@mN|XCE(P>S~23+g_sowm^Q!W1h
z$5^RK=36bt_|}>xIdJlhy(<+n44I{4l={5@_*%2FkXyPs)J9ejLN0SLwa<X17-!8U
zQqqKLfO>oBGGXJM4(kGJ-F7`UHHgQkXGoEx;a^dd6K|CX=CpTqCAwO;L9K&@&j|+2
z`{u7$t{BR^FHI%8slBSy7y!}Vo+jC<96}qw$CtnwX%P?AOocDeTYR<0;LE_iUM@qH
z{@%+D;N9s)fJQ4Nn44dbx>F7;h%NU)sUea)x8~wQyZKYa3ui1WBhSn|wLEx>hlx=@
zJ{G^{L4f96j>LQG(PC$W_=ykU){_x3Mz0(~A4W7flYgBqwysluKd>2hq;mP)yn8kI
z*44@>mNvFgeN@qi#&7qMuj=i39|}9|QOV$F6p7^5Wm>zzlwVF^`qh~%eYc{xr^3l3
zhcH=k+<P-rxR`;K1C>k#w%=9osT^V2C05JKZ&-xI4@b-mwYa9?W&@^e)OLmhs||=d
zR|;vGM899L8Im@gvud&%<?{xjF{Ps~mwj(*{#3HZ0TH6*i#Gc1O<rO9DO0=Ed@G3|
z^mZTwI)i$UNyVReIErlBZAyHv_5!H6G#9^h&B`lC-1%CPIk2!Fq}n!99|xau0VduS
zHMzBhhfRLNVVrtPa!KXAs$F@mrB~T{`nsFTi7MK3P}lrwMH{C#>r!HI7!;f71<@`}
zM*ZS8(xEY_nrw$p;n(x5ZZ33@3NlB9jOlkiKi5jvQgtWBo*54#i+k%62mr@A<&YKM
zdS>pX-a@RepUev4I2rAkRP2NAH|bc26NXYpSlNK)H#fQXeEV_uL>nq%Q1~!lr~d$O
z_)=kDW#yR7HIA8x3J1VbE3X+Fl&!gZsy8o;o2y1;P^aLxsHMQhBEy!;!v^3Q({rT~
znl<gMFBrV3w2@Tu!p*}%LR@qnG@L$XZFbg^=`I*pa||GEtzdQheih-GtT#0zX4!=R
zXqwu_v={BrU(Si^*fl<2)$L>8qe^F6d5<dMqLmF;h+aF1HaFPO%rgso=wGcs>@%Xs
zrI*BCYIVb4*9vcS4%|<`3iQ~wQGH{J(UI)&p)xskBIWI_l@vL6@0b>Rmf?=Nnh@o+
z&i?8G24Kg(klKcPMrZ}@mgb=fOlG?0)ofSGvwKm$VG)blUdXPTL6~xyV-{HeI-)h$
zR*prE)GHV=j_RM8sMN0y5Hxtz8<_iRgBlTvp7~1alRyu?t%_!gBwSJw+g`)-rcNWa
zUuDHy4kCE1TGTUhWkz-L@%(EsiH{BQ+#ghq?MC5Ls6EJy?tCv&G~<VJnn&K$CNg6(
z_yDH=00pe`Ld_EG5p#C-)@J%C0qFwb<-h*wcly*{M=?H*31qr^Gqqp3e+tB64$GG3
z*bN!V@Yf)WM}2^$lc3yDRoLncVOU3!WL8!;-&(<82dG+U1^Vb0<$sN9tyoaCTQMHW
z&c{8Clp4v0ds}v2TCq%!Wpb)X&3jqY{#~#%X2pX?@gMf0&b(<)iTr96&8K@WO$F*V
z78r%lkG(<D_GU}6{nKv5i|dI5#ORVn+WYpX21O4vxpS_n-r6YGm}4OKc$WBzkgT4W
z`C+l^_r27S--@_vwz_81TFC{%uc}S$R|M%m?r&Z`+E&ty&b6Vpwt!*p?6B+7n_E%r
zAg%A`R@dEJ%yj5Y3qzESv_&RGI&WcHQ8%}W_)}$4M&~G^6|}Ki8ZLJ~r9|EgDrlJS
z?W#6>pqfCk1^`{`K~Y!3fMSU|`E?_yG$cs@Y@v!}*d3_y>N;Cq%>`X=a&#h}l(O5F
z<Ks_}Nn@sh_N?eiw)7Tzae7N@d}_wNk+z3|EPdvY*S<k3eSE)KUM-|(<1(?!=b-qX
zm172e{9+urEo~3$LF{;uj(p9s)NO#lyDRslBr1<)VlP&g3j(*^Z=_-l$NcrhC00Nf
z2I2#j{(WlvPBLzCn}bJ%^mZ6XG~!EkC*`FwoEOp4+*>Yx%Jq}#;|CVmFuD)<g<I=R
zm@>StJb8*A@Y7-s(inVlx!n&k`&UCu_8Hq-k$Y1~B+9---I}FH*s4moSa|lT*qI~M
zZPZW^b4>d4dbH0YW_I0)-n8piVa?-uxanF@oq0{{MS1OCR>Vs1K{&L9S2mu?0M@mF
z*G?#_4pHOQrJ4*xxOoGRGW4WOQH~6H>8~nEJV$}@se<yM=KQ-YOC0!_rHMPSyW9#?
zcMw5)yZA4coh~Q+HgE;6!ob5H)(mQVb+Emt!;67&SWs*ygk+N5HrUi3N@3?1h=5c@
zTawCKe{Igy!{NAd>f|8SOA(VxdDhnQt!?b=KMFhT<3Lu{t@}FGZ9T?@w8FsO#;i5+
ztL;M7uA+l0+LWtmJS)=<Z@xFLz}9aHc-UWKddVN$)Zc0>sy!$=mbRNzXLT@$+e?i<
z?rD}yGH4yGPaIJ+$h)#eTK>yXKUKw`Cc``p_}YYG>6xy4w5v@e*T7dU5uEs!9lSDZ
z-mQHgHas8Ji2neF1Y_JataD|)ZW`aiz50s8Fb64-mxkwaVe+5~WlrNI@26dA1vh*$
z9DoPDl(S*2z?)paY<w!@U#088hV`c@uSf=O5NWc=N}$=fzZxeZp&dtGjd3#MjDL7W
zpU#LAKNJ2TT|O25tiu|5q$ooVjcP1<f;AnQ+uvS@J4`krO$W-eNp`<_i-Ba@*;lJ?
z?4tQIt1eP6YabeGa`@IoHvrzhbXN{=UT`#c(Srl+tE&eeKEWUyxPD{ib_3^J${I%V
z&{E=~449`T#P=Ulq44(8M=BWPLzSa#!K`SS?AT<LQd8@Im-$wmn$lt;2+p?|Wfv8k
zj`8oEtBN1eU%NpJkM&k=A&6VO(t{N(cDYwIiXdgQnNPS@m3_@?R?Vl4S$Vu_^>PAv
z)3xoF=UIo&wc5OithUgd=&CD8VVSMrM%#Aj_ASzlGyeeP-(JX3n<t?}xm)xVAEe+d
z_O_n-jn+lY52ngNHx$@;C3vK^g;=dRJd$T#_oO=4GZwD-%ja4P(aNOw=~p7cfgA67
z`ojA0vOn=@`cvOxZawt@wVrZkC&QxrfcVwK;$eK*{HyA)OnHt`1gWS8!Mch}m_Mu?
zj!8g$JehzsJT1hIRAlrr=tjH#HQ%*n+zz6IDvZr+;0?bTfpHcY?!be8Di=n|!>Ft4
zYP3hujnM3AvPm2^=_Q3_j(D%WXzlq_-$;_2SoD-Hu0Ho>r`#*N-x^TpPHwlM!kdha
zFSRh~<6OGlh;3_$>XT}<s>6B~J(pm8#?(Hpp$UAe<a~ZqZ;F`T_sF4>9f^k8@?so*
zbo1i4xXUOUN8PpR-pb(M;NXXaqA6=EN_;$bJ)|z)G@;n4-o)=}C|EjX%r3s)*1nHl
zU9&SD$rO>qO;ER5cRH40#QFA`6i0_;?RvGrt;OhO8e63<xtGGVu1N^e#Dhu>$5C#=
zJi7+JV<Q`1Y5}#7E>rQMiZ0K}J*JvC;*M;(WRt?3LE$yK#C4!DD7KeY%m}qN6B1*W
z)H3ORVXQovgTAU*cu?B5(D=|)+<K}tppha1S2Crn4HkR|C<dbDg*;)(Oy`sy>8k~_
zqzI8g>U1AE;3A$BMI%x^=&-3)B3MHvM=PKvvs;XjHf|ZVbkg@<Dp1Ojt6M5OdBM`5
zAp}bJb!?jQ^Em75tviF(>0M3yI@00_G03c~4s4l$6pf4ezZLAu#<p0&-&0b5qo#&z
z(ukwktXuM<6C-i8^tq}=&12wc1+CKH(ukErgT0?$!ldCWatQ%eR|eKJv2vrf!uA!1
zV1%^RVdZggMnT;@-(_jtQEK1<Mbx+y(dRE}En8CbxwQ}t0@Fe~warHmUL}Max!P6V
z*?ZQKW*@?6<s$TAniDdeL5SF?@uQTxsP&sl(?jD!F~7Ers`bKKo`&6=_uETW^@ivo
zk1oB0Q25o()wSzD2ePhCm3!9J=SnkX_fWout!vgr1Rnuf=tX5B<9a#|@b;YhQrzFj
z3LD?Tf|HX^3ySn^S+A(IyA`Jh@#WlWub8M6M=HL5^6U9fQQhwwXnr)ABnCahUiaRV
zs{LN#$0@qK781;#z{BvVV;9N@HuivjI=1zC&e|g>Rv>Fgp~x%X)}+=kA?a{$?5+#m
zIW2W3V?l>~4sV5E<b?A)@U&;oFtrRcQisRtWRl;rxY!RG`kJ0}0z>B3vHYtF40-v0
z<44|JmOfOeCTSo?B{J$pwKVGap|Y%Z)07X^^~IOCMbT>ui-vDVvMr}J%5*1hbqO&r
z!#0-U;mxm8Kl&a=`pS9<w0m4=d__jMgfV11%Gwd8pOtzkj4k{7Q$r?ObStQ}M>4@P
z!gUU@lD`^j9qW3%LxP?1WANUJEp44Z<^3u)!{=CY<Z#CJ@a)4&&}Pl-%4%5{U0C<J
zRpgDrxFR;TgwQRyuVrY)-rBykeX7L^A8wVvSmKTpKeK>jT0%}kzE#?|N9Eai%hRs3
zLhowK-Q)74&V@6sWxr8C^z626Ya@#=pf)>F`Os2Lx_0xey|v;h*Q=V$R@S4?3N>+|
z@~&;@F|qg7x==rbYgZMX-EB#GdC*b=C0LJutKQ;@ba83}DRyOb08zs(xS1Bm{{Sxc
z`O-%b2Zk;0ro$wKUO+}i*b~$mgiJtfn6}O<7neo44}@wc+dwKjL$bD_@p*e`S{NS+
zEvP2-mh0Cok1Y-!7I`<cagF@-6&%>U969^Ac7IE68(P(n+Wi>T8yLD`FpeX2w$|AG
z6~&O|W^C+f+G0Uyn=kQPepjh+D$>Mah@A0$?ahq<D^Yri2-tmQXAz6^ucN&r(m+3N
z6F(=Nyf0J5g2O`$#X_MPb@8k_VmNS=ZXBi)nKZAYaT)nbff%;vG#?>FOngv+#O0^U
zS@4*NFz2X4%6+S~AjVF!7|qhrD%;>IYSzQsPRd9NV{yN<RH=%^OZ8`4v(4#A_|?9T
z#-F58*EfqMUw>MY=?Rpx$(3VXo8R!RDstf`HUMm<qJmdb4j;I&#uHtyrF$us>NLMG
zwHs;YN}yejO39cp8`_<e5)-_@ZA@_Zd1Q<nts<eyr_9k^iknr&_1)Hut5y_|^l#A9
zWQG=qk#})&e1Yj$7qKtQ<oj&H<wSHMh$57E&^&xHU++pYf0ug9#1gvG<<UmfZnQ}8
zuPTd(NSzP?=Uk@at!YN%N_`lh+^D?{UcMU9t7f$@zgn=?ts>pWjd@k+t1jmI*4Fk`
zWj^st#$-@+vl>|*7lmZibH+e#jR#JLYQp2;vqa-EoBWM5u5MYgRJLm|Oy2(h=+<Q@
zY`K7weL9K*rK`%e836F8Qsajtfl3k1r=X`RERx88opvUet(bxj7A*m+pI^m<`+{}?
ziyN0axi>wB>j2tY0)F?O&ZgItEyu>L4UIk-;IxFVr3Z0)?e6R~hm}X9;wTL(o2j_>
zR}Y3=E^p=iX+k_qqBfE5VXjf&YG&IM^O@R3_mU~yf#<inu990#O6f%Q`ZK|Zcd;&n
zsQ7P35;1XoAv%j%NDfi#-&*=N=^~1lSD0a^2r}s1*7(uF;MtcAv%cI<FJ%bTxxJk$
z4!LcTP=!2#nvyvmFQYuIwZ^(qIn|zxYo$Zz;Z73e86(SoZ;cKUWOiFQ9YsWv7FgM=
zO7BMn0;)n6DNXnBr@#i{m2M_|fw&)~NX{%~CQA*0xxHJqQTxWQ@4cK<7-1%9BVHqq
z-q-c50pZ6cL35_nmiuYhDSaf%$Zb{>@mlVEZwk<DS&o|8yt^%ZDA!S5bkT_N8)?>o
zZTfa=y&K=%P|mSgPh89T)y;>Uc-8wEk92M;C>In85a@IvtI^;vD1Px$)x^X}o%(rp
z94i|w?tt_(>URQzuS%PoB!qVlZxN+Xy+us&5Wt+<Qz4S-2L5#SH<Dd8ww^VwXu?P%
zlM8=YIg{9LQkaV}I+1r@5Bq(KQn5TmUT*D$>nyI*%cFwtuDcIq1UVF2_M6j*n_VtP
zThvJt7a4V8EpzrHEhh&H84DxzC%{rB9(0XVSz`4?M-xP{v5{kG6QL&6H_@odU|hUR
zqppMxbzg*EUzIE%#|_VP_*N9kH468(%yhLX1qMdfDW$^US2j}YtK=yhY-`G)!^dpW
zwp*T*@I{7avsg*8dv8m{oh8K-%o()y<*Z&VPH``(exqua5F+&N-&wDej-LzBcM&e6
zS#M$Np~YOewmvoUt2B!e^=7QmF=gsVq<dx%kt18mvo2+KabWF!wRN@lm&)5)`n5Xu
zZ>_s@*4LuV-ZgE3-nG`%;YlNU`&yJ+b5<Z<e$A@c$Q~7QX$ICCiYE3IzbLL#nReQ^
zTt?yj-j$Vxm9}g4YfWwKrDoQ|bfKo_uh<dFjSe7pWlfKfr!8hBZKZG6I}Z$z(?eMf
zKou+;f^Ob)c%%XjxilB7>~*e4UH<@u39A?-7Thu#vERe6!h7+OM!S#u(>|8{-rgP^
z5zbq&le(h5tt5)h$_Wa2ZSSKE$JZUu1HVyg(g1E7r55^3L?p}Tz&?}RLGcnk9|gNf
zZuheE8^>P4$6zl}z+=qh@g;37oZIlwd#W{yFEQqDE$$WdxqTiTWX~ABvcF2Z9%&r0
z_|?WEfCAVIfZTiPOv-JWC}GyHk<59b53i4Q--U6LSxCRKgvIjZjG#GlYHj@JqA5Vy
z<h8Qgn!4Oo$9l0XGP_+@AW_~r-`Ucn#!62X{HjiDqrg%Lc4p;lx(c&^L~qUJDYC;D
zYa3)hwTDLLIdf^#r{z&iD9IeBC_V?ytrUdY{t8^xn7oYIWjmVskc}|tP27!4r_9z8
z%gObK!9E<qr8|;!CvRm9H(rLhd5-m^Dw}EJ*x9wUF$3LM^D3{MBw|d~uR7HBjs4Wu
zHqEWKt6Y03!pEkS<E<O1YyR=lwI@xe70}m}cvYyY_dg2o9S7NUIYqX;S#R&`^6W`6
zGsk}M78J3oWmY%8kCjN9g=3q}y(!M!0b4Bv)gwAv!j+7I>8G?*kl7wJ=1?`YG|>{u
z7UfCXO5^=zCyqY)*oGcHRl^x%9w2t+!WGW^r5$qz6^_MyCmUg>n=b{^OIyn4OIna%
zU|53hlSX6E*A`<Erk61-{{YQ@%COBp4(OK#H1YHIRvJbjmT0p+zCG{Gm|?Vv!<04h
zp2ttTEk&AA*~jd-ZTQ-j_-w#*Vn^jl^L=BKSyWGDUtq~xkxs|N&Hn)7R!j_S{{Wb7
zTjQ31;4vO<e@aH12gJ#s<~w_h6-*3En|;syb;QHq<BCgLFhl<U-fQa;;GmljJ$5F!
zzKp_r>;C`_!u#T`U!wkxZ*CkP`h{Qp7xZZV0Q%s6*Fq(R7~<b5Y%4HGyzhN!qyR2$
z(0o5SaLsj!Lm12838>>Ono!`{-rv@gWo1H6pxTVL3zK6+g7Th+TT&*gna153^y4^K
z&dS@+(`MXN$7X81`$}Jyi+I-6y}h-e2HqW(y&X3d()4rQ&EZF_t3zHo??ZmY8&`z|
zRV>F~DCb<3@!pkk1}D~U8vTM2H`21|3DjDyXipRpw0E|3jFqg;SRXFY?WKYc$i>$}
zJA(BfmH1*iWw6b9Xyi5rVxs20vo040jfEd0J1H!`#G$_`^xs3m@BaXmF9>`x&_7jz
z1sY_FF&{I%aS)hf8eAGE7&de&!n7ftWjV7R?f0((;1;Ye@a$NuoH7Y!VRF98&CSC^
zy@}hcMZ$8;ZbsLdkrW4r^BkMs&YmTia~%O?(P=)1IXGBJWL6_~Iosu_rNqYvpd_el
zz+TkM_-6>awZXS!W2NqEul4S&t?vF+ufo;cKjp1!L0d>a!+NsD#D4wj_Et5jpOtr5
zdj)sDuZ?Seg>7$jZfay{nkI^MY{{b#RO#^*FCIyegK`mWVdJ$o)+6*;$}3+j=yHK^
zslDlLWE$*)dN&~j_UbAT-aKgRe)lzJmfYGZipgTC-gRcRX<1E2IvmPQQa<Ugci8q(
zzP+9TveXI%i30l7s?x8<h^>uyc3kP$YWKID4nBN5nscBm55m0;H--NI$I`hmpIC0W
zV;~QIS{Y>9p%t~fuSEu&H*at{)VVvP>^7@*t|UaWs@$&;ZBg>~ubB8#7eF4|+_%!1
z*9*?Ou~8mxa*b(R#EHTjw!IvswKw*fWXek&?^zY2k-c_7pz`+9fgCbx%#xsMk8LhQ
z5jk#nGipDT69+tA8SLK8XoD26u~I!PuTN;8j%-4!%8Bf(QY4QYGwl|SakiamJzge8
zx?c9*&Wx-%Gi6p$(vKQ$qy}vU^^3&7FC3B<9(92c4s1SX+*<Z*#Qp}IO68Hj1Z5x^
zo)}P=_>GZz<=j%Y4F;Np*Hh(Ne7}`zTME?_`B;9He37s4f%;c<eiS(n`-kaT#~;8S
z`fHI@eUXpSx>kebTh^xj(|X$e>giImGOFa%v9R1xl{6aIp0=zoMD@gt@VDn#F!0<B
zOI!d4zPWLUhVTYF-!OkFe2Tb{AFR8dai}v$Jp-ea@a6`%j@(O=kW(rqOKF?JlZZ5u
z`A@RnuWP0G9@iA3cDG>czGJTIY92Msd-xvBDQ~V_J1$PO<yxEN^-o$`5gq+dD%w`H
ztsJ%Wyc`?}^yLYm_f(}scy2fCaPnmX;Y=cs@j7UAt-f?OzqYvvu<@&BAzwpowf?P%
zc5%8&05sBHI38y7N)W@xHA@o65<t0-H(DYd8>~sQ(lP>|ca?_qBT38VJTF;7=3?4d
z(a&|is=ClJjJEYxb4`Q8kscfyTx*)Lqba_uJOyXhew3`F-+Hzv77bvtD%!+&)VGai
z+SeoHT1EAUC9o3u$!pTc{*rYEZK~xCP{!GXOFjxghk!1V@WD;B7PhoLwC#$r<_-nb
zb@Cce*2+3tdS-;YdAg~uuS<-cI~@+>`;L?nSH$75GOl%ULZcJoVR5w^#xvVeA2kg=
zRldEqzsvbn{{Zs;04n}fdlT$aTYIa!^RDfGD(mp=u{IW?D$3FBs(arm7-OB04YTDT
zeY7J%VcU{L;z##eQCy>C%Dz;bEaezj<*X=jd(CX#>;7M17z;MWtxQP0tM6N-XfkWq
zZZAY;wwJ4ELz=Y~G(0GsH1Mm~x3JoPdui9LZ&&R%k(oLz^r5xwdRrTwv~RybMe#=}
zgRtJXajr<b1rO@fEX8CV2WoZ17GQmGsi7k($O>Istt_z>mzQg51qYSBunn%Z8~9V1
zREwTaItq442o}+8dMlDY@GYv`+)G><hYJ&wmDfC^bOdct$D<3~t>7u~aad)xVMelY
zlG@qE)n|zr4&YR9Lnqaukl2ZJ%j~349#}RS^HsQvs>`A)el#IitrhOsj<gksQkq-F
zxVWE2gT0&Zq*HvysI0|(HT$G=t`u4Yx~996*07J4I%P2%c@g^3hFh4%T;ys+Y%9D!
zuYGTS>)T(>wXyftLH;l2SInN~)A6q#>)Bc>TiN`pufn{mKo7C4SIha)6}=NpY?y=t
zK2UO+j#WIYHoo3fd+FhCv3|=-3{wx^OV|D#2=*5r^-(eLaL1<&^Dn`#_|@W_CLRo>
z8}p(WGLg{HHs4D2_8Z&XKn?4diny(7da-RPz>(oo@k^#5MbHh8jq87fX>)#+&!?4l
zZ|7QeC<}c61lKul%B^dG)_}3N7rk)MSrc|1wL2qaZH4O64r&B-tzRj(ty%E03-yV%
zH5%k)Wzc{&6x<a7n^v_nY=-yCeAb|`<;~Yy+L<#VdNO&pG`~q9&JqR#Km0?W`QD7r
zdF69*PSrLmD3d72I(vUANu&Lf4@jfOp`bRml^$(NQ(7|FZ+x@^s!JUDu3ls8u0qi;
zHg~pb4}D1*GJRvDt>f;dhF%*;nlMA5)A`hLaN&nISr;_g_>PpRh>RkGn<9b+ZyIB-
zE>aEZLWeZ6?Pl(aP~nW_P4=h?9ImqML2k`%ro27%xizZ~c|WZiFs%sqR)qXW{0&<@
zhw+_%=B@L4g>1L}5!s_#<6oV}>sqJ2yuH1N{0v1)6zvf~Vhm!(S_0l@+u65gKz+1V
zr9O2e`nIh*Y<8@K2T!=-<KbDbt@UX~s()sGO5tJ<qx@>?-M40!Rr55Nb`@$i*jB@{
z&aO$ni8ZF8g}1EDASbPs)F}2Yc=?8x6{}oOL|c~DkRWfY&Y69J{{YIjwfYK=m@Jnp
zJ@f-6MvP@N1zYcU*L7OBsBh7hzhh(5ASRl2$`GAG*PpVpE(q*Jl|?~>hXFXk!L{SI
zX7Mx?@VL1x(l`yN6)>_AH(ZAGJYx$hq>-}NM07U&YySWX$A9$gL;nEM@uN@p4MQi<
z*uj4FplXI;hQ>&WZ@`u|UX;=wqOmAgWy%nrezu{T4T;VB$UqeyKM{v?^o7|>N2s+7
zv9MqM8&(}5!plBaEPT0q>B9XTi!6FrW&CR6=&5b{*>(BTmcYvs>6BQ0w1Ga2mpFT}
zjJ_V)Sh1K_!!rx0Yi;g5n_v3&R`%`N<=OoI0G(~wwP{=Sslw*^#><pk(@TwpXq0nW
zVPfq^maSH+^!poDgR^eVm66x3o-6F3pPXO4<3uR9=~k`p+pTN2Wv_2-ZC+GpkIS^i
zHNMnX$(6r`{{Wp~outw}@FQVOA}JA$RTi%9Tk_I{vbyc1anO22(Y=+e9VtU^(w6Y>
zn%8cZsQ@If*o~-`j>-GoXu=N)H;~z~-%n+9t%tj_r)K?|*N<kxwylM9_twi@E7kZ`
z%kZoqQ>WGc0P8)Q>E-_bZrj$j_FlA5Kd)t6Eq6!6{<W>YuYGve_4-!zZoQahadGKC
z{{S|vfF4vRcIrECe%~*!$s*kk%8YDtFRI^oQPSO@U)^3^n^o;v(544Q9(At$n(tVR
zuk}{7{{W3CTG9Ui=?b$$hhgB*Dg3KUHVk<TDt?uJt6?wW^>we=!S9NWXfW^mQ3GH2
zeh2>mrdt01!>~vH0FWyG0O;%={*_qaFywd@u3RP=eTaWr{fuAbu1HZmnZH`<YTDkn
zHQxF5cdKwKe%|$W>%Cjn_1>=aca34Q9q{)T{x`2bWq&%?@$Bnbu^;08b){`@VE$Ft
z;ab;^zP1Omf1MFuSdeqb22gqm)9h$1?yt+YdedIZ_G{Wm@2@f+%7&UBun&&igTk!3
zR=d!+)6Sc0=gPTi%|P|5f(CfXet(r^{OfAkwyvhXpJ(%}yLM}NSFKxo>$~%<2hP>k
z<5tW605yL)>;7ulz0F#K@vXjf{Oe=>YS{k(n!RuHSFQeP*}u<QH~H(j-};q@ZNJrr
zTl(!^t#m&s{&i}-zBSYjD)o=`?d;mR{A+)mcz+jW>}w(YnAQE2b6WOZuU4UbR;_R=
zPQr4K2U@*l?Y3=uIZvH1$VlGxt!de7=ql7Y*GkY<wWnu~W)K~Vf6{0NEpH=g{j>i7
z>R0Wg{{S%kEB&TV^RMY&?GOI|sK2Ft-e2=C>0j*<kMS?*U+q#~_?ExgALU=|E`AlJ
zCB8<#+F#%+{i09#e^2*3IR60lqTl+DTXt{TrEDu*J2$m&L$#T^&#$_#(!2+UX1%_3
z&GW7A?5*oB6NU!53)6unb+&F{*{+ptmF6nd=So{`LAT0}X0^9xhi884?4@C?>(9ch
zepCS&ybpz1u!Qw{d_f(WR^Qm){nH2k0P(;70Hap*q50LT`PW+8`uKKgY<;!gALssc
zy=(Y?D%QNI6ne(hW!tdPJ(;bTcpCh>JSyAP)va?e^4Ch-`%NFR-Fby;R`9PX8j-DQ
zVh>S6T`K*GZLN4!^sPHEz#VwW{{XwOhx_-@(|_?H{1|?Hn``IxKU&hq%pd;%8t@<2
zm2PWu{d?-iZ)IuaTYPD6>T^}QJga`pxw?Ie*7bU~ZicnfwVUvw-3N_akCkmk`<fcw
zwHo$qS6_wbmyd`%FKRh?e2-^^B%M)%4~<-EJZM{g?K&TIcUHDjU$akczpZx@=la)m
zes$bmjcm2D*2`Vg`PZrNt(4bvR?A)0_}6u7OoY+e@JO<Yf0X=>%DjKD-|WtM44?H^
zoo`m`^YEg3JSx?u{ksa&v&-yU{pz*7_2*7k)#f#NI{av#uxmxDcHuw;xqmZO;l2L=
zgwXVD@T`C~anmhotUcA&vud@E!n>#8R|v=0D`W7lTl}ZDU$MXTULA!SRU*VuL%8kU
zw;h{`-n{<KAbBRg=Aq}=z3TO1NUIB)zQ@d0N(ZHBu&)Z&`v_CU=de_ly{*%;zzWy1
zr(m|!yoYPwv(B5wp*9`eMQujH!?QpIbnVx(P(Ad1-rw2pTZ-PZskF(roe*?Ao<G?-
z-@c!R{rh*Tb}1bVavpBAlvkH$m-ZwM>I+q_ZQ0>nZ{tS0p?fc2D($6Q8&ZM`Rl7B;
zht+Pu{`_z5qt>i+t?%}4TlRA6U1|wBX0i4kv8e4ee!ZGkm)N)1c?SCrZFrhD-j8PW
zuC;G|lnr=_0F5ceppB@4EGsb5{hrE^d_X^E&-)I)+j#w(5nEf?R|nuNT#iSu){^{9
zSR4E+zaPBUem`*)%`o|XwaqfuG|~S6l-D$jeS*7={?@f}K!E!%MQx$f8nqv`FZMRK
z*&x9vjdnR}Mz3ve*{9Bh#*}$)?xfe<SG5f&>AeA%>*rRl&$BhP>Gr&9PqKRbokhEL
zt*dI=`+dK#)Aob9YhC+v?bKXWgD==DI#UzbVL&eCzVYkX%?h`=xp<NL8sfCY5`5wR
z0P?LPNr#hKuk}d{wX)g23g(JrZw8Fk`uWwbLi%5B%}owqC=B-_YgLyF`s{6`UPc4m
zR~pveW6SOD_B8t&f4%XSe^_|-eqUo+x7Z+cq4qgkk9`c*Z^pId_SMqBAG`KBSdU?=
z7ACIIL{Eu{YUd9Ndt(N_*zmvl3Hn$29v9sL56XsIJ{?br!l3>XIamCn`wjm9vD&)!
zZQHl@J67Ghe{1Jl&F-yw)zsGYwQu%3e#W(@ZvC1cbzie;@a*yIAMR%3vt#x*{?~fn
z`whO%f~{U+yeiB+;=9%1Ti&&;>fx(;{Qm&4U+iuhvwyX?t?PQ;xBEN)0J-hi*1G+T
zyF4p@v&^(|^A9RFTID1kI<Z^(D{rx_ZT7Smtq}Xgb7@+w&z))h<8~l<p54CQ^}pHa
z+qZ7t?%iup4SL@i(nj`Ie>eDRxIM<SjjNY8yjI^T?rGy))&BrBa})-@YZ&($)^Ts$
zTGsFFu2I{!X4kF1v+4bY-J5-lzR!UBd;b7uzxEpswc}e?cJ;Mw{f39Le$DHDvA5aV
z>}~dU`#v?bfB(b)ClCPu0s;X90s;d80RaF2000315g{=_QDJd`k)g5C!Qt@n5Fr2B
z00;pA00BQC4;4Y<$@TXfUc1FdHoan1Az%kPI_iFONutXiF(Rhh_V*29cf3bF86ehp
z6Gdtr40}^bzlfAo>0H+tF1kYNV*_i}R=Kr?B9N_sR<M>|TrgEBUg&~ia^c%oUXX2s
zn*xiJc~6K@6=>%b<Aq6Gd87iSqT&x8a_e6T7u3<cfPvAPJsqL9w59(5DuwLSy)udg
z<#fx4<6OVM7&9UY`*C6OF$}5<+w^_Qe!i3d@vp=KdY+~y50MG#MS}ifhfPGh{QU}@
zRZZmxr?hIz)o!9KhMHJeUGei76+wKp0H;`H0q&OtXJ&Iz);%1Z0|qPNA)jf0y6>7m
zjA2rjwX3Od1XFDB6Zyc@sq~RWOe<hsL47L^K46CzR+C&VO15b+-JFIDS!$e2DO!Od
zR6NRj#<sUZ{{SqDZgLi*ig>6=Xu=oCex-vT1Lr?**pubSch;PK1`$DCTk63}8_Rw{
z@e8S7MzYbCHwwhCdIO&#`--m5B5Xbsra7@Tv`QQFym%Mp0DK98xK2u$XGr1&MQaHI
z#zbJe)x|ZnB_E|qh~`*m%GDro!^zejg0fhAArxx(%c-X$7KhBc%pg`1VMCmjC>eK&
z9ZbEj0ZB4J@ze@5<{3j$?ca{f%;R)0Io4)l*3>;ptms~SiAonlvh-i5^zuhn^(}+e
z>H=1;wZPCvVF;k<f;j=IL(i;Gs#x||b)DT}VWKKyHTJ`p!3ZtJJdukduK?ULamx#D
z4)p~KO=D}U&*WC|ZVxwcH3n#I<+da1hf8F-Eg_}^?lFiOt1IYcQz!n*m~Q^jF^u&6
zN|!3*VR_sY#W|wm_=r}>?wDmzIL47XNGN|`PRyp!T}sf>6S$21%p8<%^F&Sm00@L`
zW{%UH;VFQ?7L?@G7=*&o(^chpn87#}tB}vRn1f*9YkpW}d%9L&kSBDIk8Wh=MXzyf
zGXqo*_}qJEvtxJW8}`HEx9$7KeYnD$HLp;w$gE&wJGq$Uy(xHHz{6GEq1IVh(qd);
zXrI7J15pM1E6jYr;MV-&0b^j~))6<ZUK<MO7UB*XZP=8EU@|rT04N|+3Vy-EIGbRr
z;>(v%^-MDKoiF!0j=H`jLg8Y!@>aR1#OAhgy&fi}erkZF>OLmcfiI<@iuKa3XdJ9z
zy`v1KGLrL~uVB;~XV{9o%f@#A3PX{qm`1T1_l^?UQ=(DSC0a@`%U3p$B7YKxjkrp>
zG?+32h3akC#fm&#!6pQ`;g^@Y13P6eMrtjAbT;~h<e*;1!vVtbm3r%Pj5_&QD-_Xo
zY5oaYvX@=tZs$RBSb;WUn0ijmX=rlxmMm9bloWTgY~Fa#DSmp#HC6qCVExK~=6f8x
zIAWZZHeBX&y)Y{bQt^bnikrwj)h;#~EO_i{5QN?j*Re3il|jHANMP>PUmd&t=Dl7t
zQwAS`S8G-W09wiw2D)}0E)+H>X4S{0R!?FpI6t{^JxdVOdq-J#gX6A=db{Vk4^b|}
zF`9~5zu^I=E0{)mk-17Aoc?BZ2wWR+-)VX%#(=k1l!=i88sn*Uo}<HM=CG>fTs*XD
zzMQ*-g4=mBd*mfBmx5HcYcXUP08)X4<|t8Y8mUxTtHN3JT=tK!k$DGMk@tP+4z+oh
z`cBS+?>?fw+_5j46j4U{w2!tf{ED#O@+J;}w7q{2g~|?VI&16VbijN;kC?2gTh{iJ
z7+_%A&0=Mkb-2@9$|8cztDHD7b=EHkFFJlIF@iPSv=YL!(GGjhTEq(8Uht^RK8I%#
znh|A?4iMZ_fV5=(kQ6QOXsCxcv5ofLBhFbLv~4!ZPHJ63(O)RoZKJcKyDNhO%X*d?
z6v$mgYFe_xsJ<#|X;R1OmjVr*a6i!qr?5m=YrXwJ#(64VyosbtmBW%~?4(znr64Xd
zkct`{rmkFrP^~Ocozq|LBXPwvDZ3lE&Hn%*1#I?+;I*-!n*K;>Qy3wAhvI6r0D{8G
zw}@G318+rkjZBDloJaEw6q`_bW$!IT;~0v8#ka9B{!^$t)kH`l5ql5lLkXM4uB&g%
z&@r6U&Vx1DIXb79mzOsdEr9y_n(qc)k2;u|BG?;0-Q}Q@CPQoLBhk^2`Z9ckyanHO
z42a(ffV*!~j<Ra{i)`*OvQ_WM*7@9{t80QFyW(Il$Jurm>mCS&_CHb`%~RQ{uW>(w
z52!6G{!GqLLEB!ZC&^0odQ9&(BAh=6@>FUL3nqs2@wTJY%YV)}h}(BFN?C>31{XmZ
zB~)5Y@j@z7F+;R#KqbDu&NJE}x-8(o>+dLfoqH&+shQRTcCj0FusnkMNwRWU51JCH
zGhEsqh`N<874aw>uB01-y<pc9XQGS@6g|QrE~#5T#5`k`<(MwB(j0NO_?Js=P%f~_
zrQQTwv@e&4ptB%O^=LVTj|~a8;uuPIULuXMs41#oYDyRerc7%{^)N2o5Xuy^$62Ho
z7BvJ9ORQxy;#wY6Nwc&%W&qtVUzt>E5}a+D5~`Ib=GH$@9euB?C_b=VsOl|#;>xg{
z9p7kPC3kDru_@<%!56nk-<==|zMn22R#JV+(;AC{fnJkj^P^D%i74>!<`xdvW^^6I
zg>=Dd`(5I7keX%d;xONs;RpW!FHp^zhqNsQ({3R}-)UfRa5UPhc$TFC+BF1zlZ~`&
zf(0%Z{6xSyft%w%z|%t!NZjadx=WA5rOpJ%7Z$iih20Sf%558U{7kBDDZtadV;r%T
z#UCypINJ$E!s0y-gb@{4DeEmp0nF%wo??U})HU9i5~n3wP94Wmy5(1;8RQtP*b0zx
z9CY}K=g<Nnr%M_lPSd*IO7#|%RfLA{;I>@YlmhlVM+{<Dl|G36*@uoMnx{&l{Y?Qa
zf5}qf*q3=8mnik`586O^Kj4&m!y2u-^@uvr-Z5id$y1EwZ(^rTORD1e_x!^hhlS>^
zs)e{NMR3mjM`047`RZeyH~l&M&EgtURea-#Y?4}A)-%i+!rjv0fzBBE9>DY$5u=>0
zt5u8TQoJUM<^KRm^)iCd&tj^AUM35|)%YOl6s-#!HRV%?@`9Z4Pk6UMS{F;)T2|+v
zc3F{=mJjimHXVPK=NSuD>!EF1%*v$5sqm=xlG!Q*qWpmiUDOM5eZ{1BazF`)_nIJ>
z7YI793K?(xxYRWjUegq(e$H862n!EtQQLxVmE61PH9#qS%+P@<4{?eg3OtQUDTvYn
za@Up_C6ZkJj~bMev68uy9Am_<0c)vTb$v=zd$2C!;zhxo!l)}++*IFAjrPQj7fB1V
zp{nKs$;=26>o{rBJ2cpPIfja67bG>&znB?(cA&eass5n$Piih!NK-;s*VdU3mxw%^
z_lqtEEXxO5`<9ubRsd~p{Zh9@=Ig)Q!!($`AZ~8^@HWua@!)y==b6N=3MU&`JZbSS
zrJ>}jHP9l=0#oAu01@jV>v==La>kX_FeO~fMb%uL^jW<I*fp;b%<YTELARKh#aB!1
zJbcV43J-u1?+r^cd*;ITDv7tcXxQO6;uUO$CC?qDQJ``Z9XrY;*Tu4)?J>JAXz%%y
zJcHHRNBhhwHooIr${!YnDGd8|8Rz~n6WUZ;fowIiZ{%70PsFl-YVL>8{K0tF=#2KN
zm&qcU%dc@DWx@ye$7n#8Vh)%K6z?IBjxFqxC|9{qfu|~$ftblP9Y<8Wd(_;eT8!*#
zjl&R#QB?;z<^v20r(U0koJ5hQg7#~0lMMUhE&NWF@=SI^QU2rx1KbKGL6kYe#I{Oo
zh_3O<(*{3r`Ax7d2NBwD%_;!E^y_E$1Coj*_I{-`OU}saSbW1qCuR$)`9xn44u#pM
zqPBX2@hlk)7#T0p0=o(hM;|hPS(YE9t;*8c-}W3D3n|ePC}dr3C`muX4cw)wYhy2N
z(Se|sf5Z5VRAojLHTN)38!rP=oY2Lra`K&|Qde`7u9Svni@%w-YG`weN@(x_8B@0m
zY)fcheK++FfT1+3=Hr0GgJQE^Ql_jdbRMn()->n|>f>Z4uo$*7{a+AaqR<aUo16^c
zcW*g~>KWRErG2odRz$u#F~Y7VH(thORK))P4rnyh;t;SG!<z>D!xZB57fpw(Nya6&
z*_DOXQo7}HGPo*SXRN7B+e0_^6baN;DSsV^(hmSC+=5}mr93xhH!Cba5IY6CuL1Wi
zKyJ>0c00^2kXqMV=~TcNS%XV3*SEQf@@0f8+M|~Y1+mcSF4e8;#eXvbp`%jZ&nNCX
zDtViIeZGnAIgew030BC^ASeNcG-dStO!d{ivW~(){{R+Ha9C7$(MN!meYe?0f?O2Y
zrX^`a;03wtm5W2ADR3QY^}wg`E-i?5EY;o&+=&?-y?BhjFM|$8xikRB@&PdT>6I6i
z#&CELrc)tw!8d;wA^fW_Ex1egORaA*H}XkdmSnH8>ICMyALTNY8)ZWar73w8#5^6k
zg};s#GHC2eZoaWC;V!y$xH#Ly2bHuK?nM*rxnE_|9Ogl?E74C2fQr#9z3a@yVAt@N
zI1(_Gi!(6dn%fxEc8eG|ea!$Gu0N<D=i?i2h}@n%p|eavS#G}AD_UKS75R^%r$Dk0
zQo;(cU*L>~ILgg@%S4Nj6^ul~7VQrl#H82Z!EP0LB{3I!yQ=g<Pr0|z4%MhuGS4r<
z>fBe?5XMMOI)WQUzLKheN5H?8MqsN43@lg#EHt)bT*VMM+AFXQkvsPLtw7YnKx;+m
zGc|ZT4H`aSu;tSu!!KUY5QSkHa9*5ha@$a~YvPswPD#JW&R|yoDl#l8`DLVhXv5l^
zoxTQl3DKWlv~2w<-E$BTJgW1_GtDUy0d_TZ0)r!r#agkmetT-A$qn2`s{oK-d@xRF
zw=J)!lp|<MuX4U9!1;Mu9B3P~xq<0lbQy|)S*FgC(jj_5mw&{?sb&IIQ>O~Uq!cKz
zuMsl^*GyYVL>pxzT$&ASKR<DR%A9U26>{3xQ|*cdJn5i+oX!je@pwJ@#%|PqgfkH)
z7D~K0Q&V+yC!_N$V**}|i(2}a!962R+dRsuhOt><#4yE~8Fr7?sN{ZW6$&pw*o32f
zuw72hNrMc%ddzYs6>I0>CIkR@toR9^?w6*_Qo|NADYkLmddp^ShH(zV1JMJlqKxRY
z{{Wt1!U1frR<K55I1RqL#yEIy5?%XZQ{O{B)FE;x8n@CYe_tJ0^1{63eBgL3tOka3
ziKiFy^(?6W0HD5RIcTSYAoZDUG73DIsESwL`j7q43;H;^?Jd?)KP4P^onBRb=2->a
zEY6H#{-8vQlMddkODD{Aum{cVHaHQBs{Y{UYNtnf^_rss2=PZ~s_-qo>YbT+m#=AL
zT3oo(6%u3pX{p1UT%aYk!rFYRiIC`^Z<Cj~v^K(`tk63^*Jv1WsXkm(yR}d_T7Lpi
zmv}z)QZ&5lZSDL_art{-u>8XWeLDO901O9LN~kpU=$V@Xf*LfhaKzUAgP-9}BMP>9
zdSN`ih3E@esICiW7UlF@9UDZUqJ@U4qpl|Jm@=T`{{Y7bcho)C074T|jwv@g`-_!n
zmhWNbt(<r58$AtG(J&EB`9YPJ*5<O-yOGi9mtM@}L_5z@&LScfOTzyEoF1j8TSK=H
zht%vShp{Xj6Ur9sx#A7w6QjB=tF#E1tl-Aop@TpfhzH_c*7{tUd113s#aB?ZF!jP`
zo74?WoZJ^z39f>zMtGK3F%<F4vBzt3(ui%xPz`6i?a*m2G~;kW%F@eU$LL2%S{PO1
zh#Q~;#=60#;5*q_^9Z$I@5HWHX>%hSPTWD->=hjU021+n*JHn3%uz2=gS*wjw`H6V
zh-dw9Rd=ON=2}XaYcd4{8eHd&B|w=V7dh1Qzk*j5l>|3<1TyaZuZenDY#pVS-%6wV
z_?||FueI?si-dY`^{x_Ecsesu`Hh0Mnr0Qk*Kb8jv_+&Vllhb?fyC<QQ5QOo0H((9
zX?n|iAiY`ud1~h)7FkNAe2IGUeGy}^c-Kr<NmL<&w=P`3?<Q^zo^f9{5F@2rd(zKS
z0BJ$pdz%6)v&K=wghx~;xr%O)#B^Xpo38Q(@kiUb*-GLV^(mKQnP*{^Cacu~>#pY0
zQu}gLYTE4KS0q<?Li|CgU6!f2OqiPmtA5#ZL`u$w_?EH#IKmF`Dyz_i)==$$!C0zc
zL~aaOuF>%wZWQTDiz@NtP!u#`DvnX%VXsWYTcS68SwyP9@4TS$G6!I0WUv)fwSi8k
zQKHz#beX6N>3-e5A*!0*&&lcejYQ*Jm`ja!<t+G%cY4(o`(^N=_Hp+s#2aSAf5hl+
zR52=WT5@jnq@b#G_sq$|q|5u1%B$4y?*u(GlsB9DW;3Gg!rsohjO7c36O6-p8^q^z
zm^X!2oVte+(5F>(?@**PD$&R8F@0H4_85<F3VH5WAOwz-ene2pTj_e6z(dG7`-!LX
z2pe1RLwJM@BIqdJU#W;8s%fHQE9jYZKWTiSeW3F(s*tiWs#-hNUs;m5oyg6h15=x`
zq4zC|xf<J;wQz3QkDLdMKd_o5PrOHP9eF>c#kGiQu&UNG>BKE?sLo%3ihZ<P8tbSu
zaK&s{X>Sd2FBE{d*UUh>*UpTNtNDf1Mk|r)Fj@G>n9vMbo~ek17LF%-0=iBcM)H(C
z7zbHd6;~(o3Z|K&@Ubf`uoW$h$QJ3gRxz|+0dAnc$1L^BpTQMk{Ev9E@YLs`v?$t~
z8*~!&gYew~j*tU}&Dma=fbEbkK619{#CNPCp{*N%L=fBEi-g&o*%bl9ZHSO9M$fM#
zSx0RTat#PW<MgHvY<93e;z4f^wSgA=Li0^Mwl^n4q^KF728KtMF?-B7(yCDRsHbDs
z#6aW3!leTbgfiOHTcEaMD0vSnIf&5am&_IWm!TIF4flM+4sFL5v8&!(>8jCt5p455
z0x%M_g2cxXfp1K`;{Hgig`RhbR0qMy^D6aOFC&NslEY3^6Oj$z7Xl7(uF#Mq8Utlr
zXQWVr2Hz{r<r=*P=r>b@k|Cy?Ldz$GeDs_J1EVUQ?$ZAN1}Hb_o6W~9Zc^u)WB5S%
zEsIWD&X!dAirSTIRQ^uVvG-Fa<(Um_Hog#0S>5`UkL<S2mho_uIST{z0fmk@3^%3f
zJO2P+7|nOoLse@m>8sNV3QND!>ON9c?l@9P46$!1gN++u)Y`q6k7|@t?-O_y{{YYa
zB4k?nw<w}CZCRDtbb@^cpyF%LotTyZx(4-BJg*iZG}*>#4|ci%zN`{dRmhh3KZv~y
zJx%godijNfMP|{c+vzdP#01@?K<N`_kSwloRb<kLXCSscYHTx}tAMh&UU-X`VOMju
zY6~#QCe(Xpge4hNfW=I5*0M+3upze5{^kodP$_M--VTmi<gTe3aRV^7!-pdAEz(P^
z1!wprBZ0-0SjO4X2Z?}Qrzm4oT;uy`iSD;Z0sJwor(Li@{{V~}Sz|cKL30FJgIlH+
z@nD3o(#E2_4jl4k08B`#)f1w?O&+xyh1t_JEEK|sv8j~4U8lwA1Z24C$Zplo%x)oY
zlwN97uM<;tgOwf`ukrbbIAij7m#cuUCSr|=Izj0Hp7XU{^G#z}Mc26PrtS!HKb#U)
zaNmbXt5P}aTuX|hAy*cgf#YM1bV|!~r>I?FryGtpKv5UFKWJgnr9Tm*x?b*cD}RBn
z#Lx=I+*2Zb=LeJmEoDB)A?!)J-^@9NcglYg{5Xa*z$I@v3*O^$MPl*#Fgsdl)xUqt
zpq|~8);HQk-^J>1^j1%#sC_g{frC1iW}=I?Y#U-3?xXO<utCObtj9&%Lm%JN64u$$
zU(fKCEBT7m&l%j!)trzzdXoIB)FUigHD~=r;e4($c%WhXsuQ(Z-PLq~S20U#ePEd$
zh(j;}py~$b;FTuJIgHR2MvFS=^WItsi`Xu)+5!9VXt@W5{$?krTQg$&SJc$0Lnj6Q
z0GBTfUh>9YgYIH(a^%S6%s|DY;0m;{;yn5{enJ~E7S=<+%zA-L+g&d}jn4Z^0dRmV
zL#kVOIbva`(iD=KO8W@I?H>l8)8bkXgAaFKHTS?7%NC{G;`EX0hG2^j;K-}Yw1o5=
zB>|C6v9xv)6#ycR4jv(=YHx947J>AGxJP3Oj(c>Db$*P6>YJ{A><#(E_Ia3P5{1+Z
z7F-w!PgRqHt$mV^TuFs6l`d{VmO7x!^FGJsSap)^Uc>?v4WO%`t&=OT!Y~mnS5xl7
zT!CEFvRP@49EF#S{{ZE{60yK}fOTyx{^>5Dh0Hu*uL*%ua)5|*86xk^j|_*ps2N+#
zM*jdZ%MZD~5ZWjJOci(Bm=88Cs#v2pj__?Bq1(*JTb*uKg?vL)QWd5~tGdfo!91_~
zl%IHUXX^*VZ<|B-nV*YRzrWN~yq4AFlrJV>ZZ$SytwS-zpUx(*1O2-}R^nNouT`JB
z!0sV~xpd6E6Q;VKOP6vxB_z75YpRH74Cl%s0E9q$zu~8bGoW}yvG70HQJygt@|YaZ
zKIIKQsTE^mlBjOGLLFf-chhkK3lqOu&H0x_iU)=Uh&MBZmDglXQ&WGK74a@AEqi)O
zpPf_mA}sQkL&hVy(uZ`__=@3>b9^^U9c=sDbD}3}mfc+&AbDaLp;SV#71|Z?WM0(D
z*?MWi?K2j>DcAtYu?jb*su((+qSV2RRUipYmGK0FflCDbz-I^4V6rR%6dg>)YuX+q
zX{6Hbx!RSg27KaN9*s4T>6WO4?F&IvWG1*n>JqPU@F%)k*91BZkzu}`W2EZt`MT6Y
zSxYmYZ-~@vB`XU~j5z&a2Q+aJA7c->joiCGRydR@jPHET@y9+T?Hn9<UOvd-C0z2q
zAq)P3?S!fhj$$@&Q;fn(BA9ToYK2$)l<CcQqEHjD35PCbd`ruj@8);gF3=W-MJL2V
zFS%Uva5g-B3;j(A`Z@TRVnG-eEY>?kbpcC4po}oqDkSBkIxgI$!KH=DZo6D*0sWz8
zYUVm6p}&LzLg5c9bn6xFT?Ue=0r>&=oJeRWygTP8wmjA)b?us6!BauZ-y94e3Oc!X
z^BN&rEmzVS+-OV&RATW`#Z{PAyGyE$Tv~wa25P$(J2k|@cw>Q{ge^QZJ6YVo_nA}O
z^p;dp<_;q&7q?<8Q=+Dx8f6ieCoq6k(<_A#MMI{GW0f5xBSU4YHEXzoYp4>|Kjz^z
zsIPzzY+_u|3R+pKbHqX(5_ozufyw!nejgIqoMoVW7QsPi1r0!SAP=Bf;XT#DD@P`q
zDb_5v0RuA<li!irCx~431U1_UYw$5}`Q~NaF4J7XwXS2YpL-`56wy+mKqMc5+8-PS
z$$BxlY`s|nb%p?HqO;-|WIXKYu6jkzgBU%tmKQSOuDjou-NGw}Mz2I{jJjLm7>8%s
zPlzKjOkv(PUYeN}LkUuPn#>G_P+}*g>T@xTd1SvyivrXkU))_2gaj$9?iMQ4w!2Dp
zmIw}*p<8~TwJT-I?;YI6fpGVEW`oSRj1WwFxsl94D$xqsEe8{U^KgNo4q2N+^EE3X
zr6Ywm%tIKl7d9pV%ACR{k5cJ6&gbf2U&}AkaRag9zJ#br5rUi@shH3ZXATdAjOAI?
zV%5_<QRP{k>>lQ=z<Mt<@fBQsT)uO5bp>E<rtI%5({O|NB6leX6<s{L=_oEwQe}FO
ztf$uC@tJVyS;%|LS+V~BR6<MQv3|+tSUo`C?HHbcS4?wE4`)~)>|=03#sy5tjM(lw
z+G$kPCOBD!JztYHp=Qt%CZ|rsUhwlo)R-tol}fKxxDRFt>MVtuiZ1B!Dc`KJaqRN+
zi*=^_9tHf78XW@B6s4T(idfD&8G4E-So?~3VHsapU^n)t4xrdo*o9ckvX`4M!?-ZJ
zp`83dtth~yCA9~5WZB~7Ewz_dGcA20S~SZ2o#Q*HBhSOJ9MhQp0H+V4BJP>8#CKVO
zD`|j5G4m{sP>L9f@po&`Nl`RuAGM8pVaCj(QO)rgtgY#9;M^4i2A_y8%wk7jX?j&g
zX(4urTnfny5d2gB00%JU3Eoa~WFEu797;%8gasW1r!Fp5$xVlV9k&MZfsoP_m$<E2
ziU$ce%-fjS9+X>7fe!3p`7LGj3F@Fd+$_95C@8g;<}3jM+HK}^7XV;^!MuJ5DiXla
z)?x&}U`GL35()JFC8_LDRu|&zl%*@Vr8k|A)XPVMh~2}QnED8}fS_-830q<UA32v*
z2+4Uk{JTpA&>hVxI!0))LYeQTCAm)0)WTDQ$vb4?lo*>Yh2Zo*nO6=%tK`kipuD<{
z6Nt<d)T7Dk>Qanl;tIE6aH_vWvG5g^8$1;P6)DIOD7Cir)=^6UT7Xc^SzVG)Q`6r;
zf$=WMPqqcj=8mx92JMbpxCiJhhC_qM%mCw~dbH|0fGifD@feH_()A+fYtN3;02>wO
z90BTOgSc^g1M-02I=-HG?Gk~`lBb5m78}p!485a$r7UjJ87&C;$^A=Iiwa{ts(n0r
zm6<<J)4?n%N+ScTOYPH&=?fz~PO7{z>}HG1xMLf{!WD4dG!K>-`tuN_R8itC!@WZ)
zs~NX-upCG`tLYBxV86d7FqvZ9CiDX9tg152Xiy)xYy->{n@>ctm0l0<^(OqLKv3U5
zaLU6A^CupM{e<?qsJSkw-*D$9aoQ#AVjPBap5~Ia4fuA=##gX)>K5-N38mXKQn=4_
zF#6$DY7Tgu%AyJvS2PnRYZo`EPPPYiT}6zc6RKQt@iPxNXZ?t7(Jyu5%sL7S0mcYQ
zj6`E>qbWUR#O_lZvc!y-N8@q5zH0efDu)$Ev}1GpIutU@eVPiqwEC5dHv(8$lXCb0
zw}!%{ON*VtyZAsfnPGu;_slb+a{j59(RaiGCFN_ZOeBW>W&qi=8Sq)NFksBxCrhla
z!|@mb>D+7}F<L{=K1^c3C;FX;xsLS~HocDlFCc2TH*&rMDZaG1SqKC!rLdgmtgVL<
zj;k?nCH`Z$urUJ3zI1ej@CJ5zTQw<J-$C$N#gtYCq0_lW?lK%>?gkESq-dJMTk8t1
zVCK^OXb5Lmz+KP*e-nsi2E0II$gBR3FwpT8s+pquV`DWaHgZw>mwkX0H^PT<#JA33
zFHWq=UEHAQt9cV1NHXPFn{9dE>G2JfQmfASm8!Bz<lLbLMB^yqK%X(Sz7SwR_71EM
zf`$u_hC=~-*(r-GT~_H39L3Oj*A&A(+kMj1<Oc#=pY~-%H7S2D(#r$NVJ9S2WfMt<
zxa&aBocxwtP!?-7T`Kx7ag_7|(HmGY-Y<zm!XTPB&cPD-)XVZFWm|Xzx?KbYm>G3a
zMg_9ikC$bx@hFKfP@Y#XKY_Jk${9phh`l>FMlrodLbPG+g_dupN}XXdeW5{vQItIN
zW0`@C4MCa-SnCOLl~|*?z3r&oGRHQbKX5>Y4CrNhwgzuTfW!J`ytV-4z}DtK`LtRq
zumHN^3$V%skF}aTpFg}!xUl52Cy1<Gs}8ncdl+gtKXV1z-CfZW90dgWNSz@nx3EXM
zW|qxXj%5{gcpLRb=aANj=>3ydHddENWMyu#4`=l&4HXtCZ=AOo<D;VM>vHxwcP9e{
z^_`l9iJuBBkHS0-9-4X7t@jO3l=U)J>jYLeDl8Ed21W1qgAW*!s=Ce^#IenoXux&u
zF-Wl?=DN&j+it1VW)a2qHGL&tFs-mTIwRLyXn0x62Te`2_C;_-9gse6)I-qfW_e2O
zD(FFe;jdQ38WG-AQk_zR81BJUWLc;j2D&u{Hpc74GUIKbZbeO66j^IGp{aC|)&<=v
z4DYngdIiTecb!(_&MP+v2k#%awR*EaFCXkhg`j*IQ|A%&;lG|u``1Z*kuDc&{{XVu
zc}ZQB!{wBmDmiiI(fE}eEB=e38pOy<Z&JFc&TVfxjBJe{S4y3AnQEri$jXFLXaj|H
zI@y{Vhhg(*){LbD--Afqx008%Amts^iuE%?fnABlU{=7rJB}|}f%&K|`f=)|R0ZFH
za9JI!{DC|`_bUR&M|Dn>vNL^VV0EIzq4L8+U8AJ_(?C{ac1Gjr?Tpc8yiGW!;6^KS
zs~sO%jWNq>fVavsCpnuKEYNc<1{KlB)!Ec{FYU|JMY?HzVip}`e~N^vvqaI{T|C1W
z@<r?uE|D2Viu1UJ*8_4UTKSbOeWxK_rF19h5~^5q9<uMKjcc@fK87@hnDPwH-RBaZ
z<GHQJ>mK2or~@}|nRdlr37SD{9a&nvlt3Q|EvZJIH}fgFX5y@MIp3t_-qPz~bIs8C
zj!XI53RC^k2+|~dgsX)$MUQmVcb2VH9Z*dkM7y?VrBx1Lt8)JU03I$s$8B&3ej>d8
z0N~A0rEN)>nck%lEec#we7W%rW0hd)9fydI(EX_>&DCaX?2g+p#lLd|CYMwhj)}R7
zwm6dL<wQ`qw<Nb6It(y;ss|<a9e9*Wx0~Cg?*9NN2Pig?dyW^{43<;bWraT^3$8db
za^WGdRP>I&0-vN`dY8rdxgFTBt)&>G=bNbb1k=pw$5{OVzvAXnrlF%$wIm4oB;=1l
zRI`w?J%3WXPYJx%W)lKDQDg`P&;iRRK4Oe-S8H7&Z>1{T(s9`fliWfNzLJkXty=aE
zA4z+(1QEL#^g~FCh8wW$D@2jr*1OC?4Mcycc>;1*F<q?|ckHm&ud(woIO?9L{vM+=
zm<VpFEj$!XukH}(EY2am;`2Czs&gGsG=)Q$bO0=!JI<(M*(hc!*dQ)ph%?|9+_;RB
z)D!AqYobXF(9a1G9jsVtwCL?LBHA8E!8C9|Hu`2d*@ub2ENo61(fkng1_y|@V4A7y
zRG7E(h=5^%7-qJ1nevhfJ-VL6-^2v~m3Y}tnaF4NGXhR<Rkllx)N|=?`1h4V;VY>4
zx8^xJun^ZwKu5_m3>VMtUh1@S<lBMj9ISCUQtP#mMzNfBaowm)UEBekx~`zY4c?@4
zHfW32o~y;yEu8Z$A~l;wuWWqH_83sv11;~|M_9$TY|4;=h%U?SGJVF>EJW?la~36_
z)(c7-8tWZC$H+#ay3VE8qi}VJ$bwqF=O}%M96?C6W<af9+cLq*`;0A9QKNOETK6gi
z?_rIixh|r?*o0{cFOd%n;VIauUU?%zD^+!zIx7WY@|jAb?H+7V&-0`fVmS+Jc3L>K
z{Ywdt1fs3iK8P}U!8&!+8Wp2S{l?%uswNm^>iC^O+0vq<!KhbO1^b+JaWAsJaEy-F
zls4Xub6d`i5~5Ef4Ce85YzroKh#!l%vip`)tLj^k1_J<};yUdn7h%uDa1|n<SD$r>
zW&Edg*>=pMR8p5NkwDN_msbcbKS;NySWj(wi~K^;UEEjwB1ElN0z7NUqMOgMECOGt
zdDl6Y$2!e~2+u$(h;8up5tpO`NU2?&SN%k^0o)YW@%oi!@7CZusx3xVSeiH2wCh<*
zcyQMniKJJ>T^jqS02Yd9viQfSvSAZ7T*@HIWuILL8w_`hX7Md<68VYO232Ap(d`5-
zspgVZ5{APnUzt;B#Fny!cEO6K-+}<t)i|Qh+<O%_>den^4AKG}2crRF`iT>1=$Ybi
z64<2ySTgn&IUb?aDP-v~i1HS8y%*@H29T=+#|BXf56lRy4^6QG*NiBHtYWY*$(`33
zFtGWAd43D>4s5aDnecpS@2h@Kd0d*Kvr%<*)6`xWDKe?(JMyx%yOjVj%Q#BIV`@^t
z()h`ke6bwcVEiLpk;f~Um!NXWUYrmU0n3ZsiD8B@DpvY0SxyTK17#1+E?bP0A&#c0
zGHz{7;4Q|B?E@lnd~22Aj$eBdRb!Se?st-)jz#CF+ZD@rup9$%uGQlA8<iY266UM1
z3({1Uu~T9WHR1^6cxrS9#2MtC9bnU0xs2->mmo3fy_Bc9ZZ4cp84mP8lRg^pj%yz<
zXo+dY`?C-PySD<ycwaC@P+r$I8GXuQ)I3VCo7@ar0O?*HID+Joz@keJlsysygU^0o
zXGpL)pI8tl<sUvSz#&DPG|zW%F9d5=!SO9s#9kA)`lY>(2Y7!f%4G%)gm0<?iFSLl
z;vHxYAQoO?E58JEG|c54(TTMd{rHN?h9GtyFbg<UxASTOid%$jQN84g*5er07z*1k
zwJ6vmJcrb~{{UPpc$LcHX$?<{(^|~O?v_E<)C!cU=RfriiJYUz!Kz}spFUtzG_S<^
zmGE`YB7#EX9?oCpr4SxKL?{bxS9pvBI?hlpsF4lVRI4WP)CQS{hE3)50Y3~icBJ)J
z%tqR~f`9?aZ~>Wa*hNoOsN2>Ety@4_YSs(P!LPg~3r$yfSMbPS<D=AczUc-B$oh?#
z<&q-!GV|74SNKeDqQ5oqDM0ZHPmMtLO}AeRp<Y#1E3A7{EQldV_3JXxG$7MCFgdrR
zx~<%nEd^7`2zp-CA4Wj6TF59oJ6K2tgTZ;0^KWSEh2H}*;8pQsEK-0~YG*`{th<~q
zn1Ivea<*C1WQy9`u?6gVL#(*5(p<%yn7GDd3BwO#4Q?{}cZejR=q`%GUP*ED&1}n!
zcS?=!_BDC_2~sGw0Oeh*Eka`T(+wBeT@_Vj?plpuuYk@Lzo}JAQw|w^E<EkN4r>~X
z(QV^|;=}Bnkm4s~TwpicOtqzN*bk{l-cxTrBeyYif}2@AGSDYR7)2X>LC9#_6;uez
z0Gw9g#)nRxkul-lrG!%JmMW^C7j@RAxk=d}e3?MkF~>;7+ML8B>sNAzT1t2&Fuc`7
zz(xwN>6@x07#IRIDTS0|zo}jsT77)`h`Ip?{{Uif1R=g(!~_6$H-gUjW|U}lSEuZW
zwRC_#014EyCP!MB!KZ5!Bh9~1c{n2YrZHmT-H>@sM+RID;s>KaYB!k3Oo@NT>LgZD
z1iXXG$3ih}nwjqIfnmwT{lKULq;5=QRnLdKa9Hu18m~C1R>mu*_%cC_N0PK-Qm*Aq
zrR^QO!oF8WG{O3FhQFlE&ED^9+;Ab{4Tct~F5Dqm_KcH<F_QaK$_7DS0A^(n<xdJw
zd5$DLDlnIcK%6e_HTe{V%`ki{@f?g_Fu?(n%mxnOl4RcxNafwc3^+xaE#la&({3Lp
z^B$StBmT#xZSB)CZ42;)ps`7*&x&7sOQTpRhaPxS`pySx=m+5nU8(^_22ZH0YMP~F
zXQLH1OyK8NS4ML`5$Gu}^D-Q~J%%QD+`yT*%`@P@^>J2PSq10ZuqNi1loj5$D98%`
z071{X;}O~uT~;CVz01&E4k*Cu+EAWS3c&u^ig#i|Y?WMUVl%F%O{HNms+YAYQ|e{I
zU1Fl#)DT`J@)iOKi_^1Q#Q{$3%*ck(dfwt*<r*TeK8s-93YNED5MF*@Si{b~L$+Wo
z#^~bTwfdMH61;=t9}EG&AXBn0On`^9oWF#reNdK}9HhA-JkK~*Y0tFEwW}PR+{tmC
z!%%gRa#EOY7*UtXnMXkddNN1r3f1nv#R&fZJF5#nA4o5K%(GYKT}emjKh4oe^IK@!
z#gCXS;n49w>7sQ=9B+Q%#9?;M#m+(BNsVAO$9Y~@1IoCLbrcInM-REE=$Dh|ka5v$
z+%0r9ac(D&uEV8*;`5?ROGxAZ>c{3KtzAO{L2BEZXELKBEp47IVB@5yIf(GOH3Wfa
zgf%GCvTpo<ihwJ`hrwW~uKrU-KwH)@6Bu}hI?oWm*+Ov)SirqZqIPJhe#RvR)DS%f
z<`hinJM?MVSn$sL7M`K{&FxVf0amLR%r(hryKGf^U?U}=*xk%R+5}6k(0CeUhK1I6
zF&B}QL@30vC|H<fDx*Mc3T7_J>`4@Cg*tP?<&--yEp@K9X~FXvF|4h*!m<~ZIs@wK
zxMf_Z^Av_OEHDk7ui_sFOF%ell3U?cW%`Y0M_%LLwop<47oHRoa2a<peAKMAz`g~)
zOk7$EGzVc#VB#z?$y9hp?g(924&5Ajf<?^y#?<<Td&30x7jtESk>9ja+-4~(E&RdD
zM66m*cC58JLCFo_^d@yQiMU;N0wHal@)k~_k)|FH)B+LoOHPn%JnG+=%nyh!tiHpO
z8hswoW8DKPZ^|KKDxIdAtoOf?EO2h_9`V$1s{mu_!X;I=u)FJm<;M_Q<ncDRgq-*B
zaYXoB!~it{tp>~v82z;}Z^=)&NK`Al5%k(6XlPBg;FP(qGTdwv#md|WP%V*0Ds^R>
zakLz-?-8!GMo2hrF77jtD2;oH32-r25S#%rxbZ6{>>MK{is7uZRk8dyW>^&FV=X<|
zlJV#_6I_II<^fMS3xYB1iqyISVPc>A9D#txV1E#<_ZgfTuHf1^F6_ijpvebzSo=Uz
zW3uTknq8s1ugst+*>U})2*sNn&ATx=*1*znIG(9SYnPczl9Bog9DK%iana*4lchHk
zC5=sy!8}AnP^zG{_<g2rs5rn6Vf>&ewp?>}t?-s~1y+c+7YJE%-^>fdWPO*ezSZcM
z0szc+cda;vus4ui9DPTo%pVbf=?XcC?9H~Md}I&8#HiS$QF0g0F(!!t*}*^%go#Uz
z)1<p!k=`j-_Rfz`aS+xrOD@{Ez{M3k8|^C0RBrk2f2fsNZ7PgYd<oN^!3;9jF;>#A
z;ykwW*7=P|ivpfwmz}UFT4rF)>6&ou5q%Lq3~IP5cBmu;mYND|&`fqli!528`CcZG
z6Dy3P<s;z>T4w8wcvY!N_7?(JIa&zHnyiin_FRcv%i`}j*jMyo9NRvIDBu=QsO4XX
zNo!kK?kbf?1W>&5aX1Z}KI3f-8YHe|<d#pvHdv3b+6c(64MRx=xnQcv>C#icG}7t`
zb7CCjhcjbGm?Z`b_?B!x9;P?>9J=h!&WP#P2PEOF%2EBWi#3>l29~N%PnmspDFx+;
zfNA4^ONSNeu>NCbqbT|ezCbW)??6Vz!L)XnWUT2sdWblRwffCRC-<3_oI>#JxxUhb
zKgPRC+uz3s&<Ymt8h!qu1PID2K1{?AvKY?g<4hx(5I}kl7t&jf(e^hSp5p*o(d?G8
zwKUV-q9cIQ(O{Jo4mZr)I))0@SJZd{&U>enSak~N`#>mJ5%Lq0L*l1zQk;kwNKWSx
z;NGZ$^4#AQ9OkVXW_O#Dm@Bl_p&@dGE4joKqkr7C1{F$xU(u8mlSzZUK&nEjQw3^|
zJWp%{SxYV4TCl$-C)8j8EC@mGd_)ehsH%BZAhqNAC(SD0nK!5y$RcuVeFvSHmIv2M
zOvL5|`~5~+D9A~WJl%7|#L6tSocYvkdr)wI{fyiPJB7)9No?58;CF~KdUBWKHL0-!
zFKh^vXhIhheXa@zh7z^8dWaf!Cxa%M6@QtJMtH4}qWns^b<!06b~h>C7>eA>OT$sb
zBM5Ibw~nz0n4;2RqpTO74_y4iD6tSObXAWrv?~I7NJEC6qorpaAVRjw)q9*=^>;DP
zpcbO}jczg$3+O@%Z9GJlgFxwzG#e_5Kf8R)WtEDrYnhxKnJY(&QB$suRUPwiU3;g!
z-lXZoCcR50wkrs9%*s(kZkPg^;SPq|#alIEh=#9MGtG2Dt*c7nVPpX9T4NWO0kRe>
zmNSrZ@jcLY*9BGFy~E6P**MR<An_ngWDGi&p2T%VVWEs@;qwM3!u>}oJB^aO%9y=m
zXP(&9a-cN|h%VU{{Hauf2cBiWWUIof1|b5?%u{d@k2l-m5{8}RTk_&J;AIH>G16`Z
zf$(7zl?^JPiZix%GEJIDrBiN52D%hV>2(mJ9o`4Kp~!fSwHy^7EIp?nj<U<6^DJwO
zouzy?Bx~+IUB$|6mA-S*Hf|2lhB6$>P%OCVTlh+BuXvy^80`)4JErP9xthUnJ01>E
zLT(qf8AYMfGWoc`SoIJc)*@Ba=NXulV%dm}v21OKV~DxgabO)GF8n}=(A}^2myl&A
zB?rqN3c3b^+~`jPy^399in!^i4K_VOKT$HT$&6qzsO@sYcZFZvTWS<-W4r7!h?7$0
zDc0g(vWHny#rUtdW;v`9^dZn{tIfv2y8BRkh@pDH&}gjI;+4?QGe&ckUeqSH!0>f_
zK+ptO8)Ls1ZeECW)Q2x=VwTLfB}$4p`bCnXD+x{>&bXT~ifUMckF7@(Ocw#r*o3R`
zEivvM;OYty9c)$+g19&t@VU@F3c?F;K=h{rZu-h3N|N8qEgq790y)-q8jvE7WM*ee
z=WKk~0Nrf(x6~cz^TfD1HyaK}F<p@l+93gcq8!ajDE=bYqzdVeG|>)YD+Bo<XYiPe
z#bEBo9by+asspu{VJi1oVzTWv%&t=?M#9$+UCP+7PS?vV1xsWM_mv`X%LJwQsk@c3
zWJo|8tkg~TyyjI@nrtHqr@Y2g9T=8QUFnO(Vm+99nnx&cW21rMB3WjN%F2SKs=CVO
z7Kj5{Dyc6_3ST85JQXobODywo-wt9k&XMFwlG3}5(pL2195`W`c&y4}X;0cHx2AC)
zi@ia^lTb6s*NY#q7lmexxU7ScBKTxGT4m=`a=<LyrRjj)79htpmAQ*Ts};gQ#jTSZ
zMlwr3lnRv9dkJz7=QCU0BPDi<P4c6BBUt-S6DTQ<Ynl85)*AV#dnCUA7#Uxv0_S+4
z<Z}fTcPneCxw)Np9gc@5H4)v6Ek}+slWHQi2ft6asQR%;@DU$mg-Q<An(q}Rq0pz1
zZh`?vcQS8Pe5sqDmwW5RtL2G_JKct%c#!#e3DylOu<-=n&M5|&N>Od-mpD+mb|vn_
zRbiF_jw{5v+l%WBwO)N(KbY7Np|`k=i7^AopHL{G$r|IU5W-H5G16@HWdH)|5RLc4
zo7BlnF4`5Gx6H(1vqwip!D0pLFPQ3`aWSfpl;yQp_?Fo|qzh~c!>sL5%nd!6L5W~i
zGVKFnEQ>=3rj#plx*-XDubs<_tOnwhmGUPA<-`j!JjE@Gh!)lLa7@AKJH<KkjKgRQ
zRbJ3k726bd<|&j2TdZ}Z#8wFXC2YgQL=db?dnlsl)qdsE!JBiLTEVk;nfRPEW6>xP
z)zx`|skFYbfM`xdF8&5^hAWDLUBh2-D^^D<ZvIwbVrtN=HBv6K0;~Hb<o+d~lp5Fc
zP*BXP>j+BcwzJ^HVu4M*re-!ag;IJSFzTrQP&%8QlEv3eIW`zQ!DbKj8GZSle_G;L
zt_IZq0EE_R#xGyf>k?*MRMF|(r@4Tj0=hv)rw*eJS{z;C<)-eMLRF3^tc6hksfCK|
zdyn{sU|6B_W4y-OQ!n60%mN1Pj_xKw6Z(R$i^x68Uda9w*>Rkb#>Cj|Gg$R>MtT__
zZz1`d70_PnwUt+F&Ny1NWd*CApqB+E182x#TQjMLlgB5xkLjZD$;qrkYrBg&qcmE$
z^9I7b9L5EN11ua)7R(+`P!RUkLgOi@Dtw5}G@FVHqdHXGsAOQxs{Bq6z6p&!xhE3l
zm8+&d5{E>F`@+8pxR*i{UNAi}{{Y1n$-bbs#*)F<1O5gv0vTcWgq#jt-NHOvuntXL
zBR$#G3GWy$0-;N);en2GqnNm3UM16U`xGrBJ<N7aF-ZXwVr7nns|1%qyQ$-#zv2f3
zw-WU&zS6*8V}MYy`Ib{~S5xsD09JZ&9HI{2rUoih59KMH@hIR^P9ToEya@UhXF=WG
z_YwLj*4X@_NBAfZ1oS3)Oamt>CCcnoM18Y0&Dm`d__7x=pebYtT960mY9wpL0==kX
z=FpSd2b0t~PVyQC1l4jSQ!+P82lED%ypKePEw)<99;TA&Ag1dRRBJ(D+S%Pgje#Y!
z2EP%Pt)tr+sBa6OWJC_myvo8iKQGVJyv?DJpRV2Yfs8qhN_EXgYu@oahEWj!1+PW+
zGPF})IFqn9;&ZW@{ezFr70l)smdd)A;C7+U$@4Krs}o9?b72#f4mw<5Wim?=0|Bkg
zcXd0<Oy%t<tNlZb!)#n>Og7Zn+PBQA3q3HPCiH&bi9pzD9ju&&qqgAooB>~+B?F`D
zc>ZCIdz)f`;9n5Lpz**MSBj~Q+CY2FRdB7IkXB4u-X`nj)&bg4<FQ}NSDf7MtgXR8
z%<z~k&8bwyG}K(v3s!q2gf^Cq+cErWN{)nD&G(LqRYc6a3UjotQd0J3g2%9$B^kn8
z3fi)mm*zw`jm6Bq#g)pHxCX#=%RHM=S6B;Z=4WtHzGa^3wgJ#Mh?TCAnM!NSea>(z
zmXCeah+b5*x0*25OM;wyGm`32(z<O<)h;6O(z=+0F?@6+bRKgghhcX!i(|aYy6aO0
z8l1~DoY|;-j8HWZd09|_f0!FpO~G(g>$EnF6<8SyxaqlRZo@8ij>)|%E3m9E*NbUX
zRu7m!IKSA{#J7I2ZQf~uHkd8N_L|N2iU=ps$BD#>9-)(Nb;NX{*1!n*MW=e0!R;`*
zRc>2)lyo_FDryZcd?(y?-4Ax+`vk2Pcay9@zl1ZXMi6qo0%aQ*U@?~!_Xb92m9@b1
zs=o5n1Yt!=Cz3hDLCDL&9tn8wGA3@y^UP%^wFsXHrEGzF>x4`7V8><_7>U-_7sg=I
zL(Gt3=4wWa_TSycFCV9J>Y??sq9qG~j&p(qCu^AYwG55Q1Exn1+C=dhVzgw}d6@1<
zxm0{ua;o7*+G8h9ilF$oY*Dbo1^`Wyv%D;+a2h124P|c7t)SdAICz!-?ykB@x2NI|
z^v0r#6;$|uV!H=wscf09@U*#gyGd<}-pzsI%)$QvOFm$%HiCwgik!0X6>0S;Uq#@%
z;#;dJfJ!pdrz1?QYIeEz8rr^M8818VOu$8gqv8Nsx8T&g>+S&<Ym<`wm{iGD@Zwz<
z%idHJc%SL*X|Gc4fc9d?lJs#7bf+Dxt|>m@D4-^;?p|vV2AK!F?}$detWb2cWm=hf
zz(p=dd-78xM(zg2dI%Wwp$+sg%q3l}6e^+Q+$pxkytZX=sW{S`=*(2g6yZjGwKrv&
z>^1z7<B_zt^dKy&3wpIiazNR&Ur8zvi5RChG)t<YZ-=gTIvMytBn3cmS-~xE?U-jZ
z#6=>DHJ1L!{;aMGSkPY(F`C^%s?^=do5;L$I2wK?4HcOus^(wOBa|H<GdD9*((j>)
zf_*d>(OfMcW(KMan)MAY?hBzXI&(OJ6LiZ<Zf7r8uJY7bRX|jZ=vmO(@rngbw8vEG
z!Ri`~rFf0*xI#+zrNA=QSL-nnyI6)v#HOUoZLJ(sXr91oJs_+$l<VF$cLMV+mEgf_
z6S<eD-J6+Kyq(cgR^rVN?v`COGY%ZgpJ=XRAS?{Wre$a0G;e*PR5F%6T}#Ltn!))%
zrPP&#yYCjS+omb^i20#pOuIq9Ud`b*JOsqAhmY=1QS-~&^N5ct6o|s=t2vgMi)0bw
zGRKb-a}?YhCP6IvNF*Z=6^4%m%ftXTWYBC~rA7wgrprLSyg(;b`~@vrovIgBBS4}H
z@*hy+EU-NR=~eS60Z}nUqvB#AmD=UBPlixwXgPgaOb{7@v&pZRh6A;$@JM+t?>nV_
znB^bmd2u+_r$bH3p+y>c_xi`ls;t+Sr3V2DEyl1k`y#j<Lf?p&3DSQt4ZKLDI>r1K
zK6&#zJ))qFSs)oTu<}^G;yibQCj7FkYHoB_(vK5iMdQAsCYG4x%%=sKKOw|I;jJox
zQh>vPv$xzsaj<-JNG93pKrd(^zA@Gr8xAHU3S7XdI$=f2Nb4}O;ZpkuB56*jPe!ju
ztN_X04#I_14Bwjt-ezRb;-c#TvsCdJWtVv7B`_RqE!{1Zhe9i5o?&$ej#TS3GC`IE
zAlva*?o^NB{B$J9aFAB}%M0a>J*UybWE|aPv4l{lttp%e)TwE}&Don7Xeq1Aq8pE)
zwO`b}_O)w!termP^xgNtU1f~XwOwJ3nS!(JG##kg@XLjj3*23Q%9Teds8>X(l<f|h
z#HOl%s=Okd*_$=>3$*2{j2CRk&NBeTxK|KG1$C<z6s6pO+}8F)0YTdP89L`u@En0U
z;aN=M1xJB;8ofh}eD0MrJIJ1<tl0y~nU!-cK}{gM5Vy<LVB>ycb`!L>{BbX!LyA@S
zgKEEsi!V$ppgjllsbdpi1^6l=+q$qT_=Md3lP{@$ah)>$CbZDNEQje7AUz+bihW1w
zcl%((J&>xYg!dM%-w{Vm*#+kr$XqLwDi%;6LBsPCQaH-NWt<m)ZC~h`dABeZ2Wji0
zU+hj)jd*~VLb(-oFf9}p!zfP1D{LC$Jz*NDut6BPf-dn_Fl(a*W#!7j*;A<1i??Q8
z(>GL0D#c*o{{V~}!OXL`7KVB;69A3_0LvdJ4?<i=@=!Ds-J|SfqebZb2}_#S-`D9Y
zRz-{nmbl!>@1m7<DE|PuM<#c{=;PhN$f$I@{w9DI)tgu_H^a@>n9h{IHKr!axE1s1
z{6Pd2d%nZjj69)Sg_K&cDuV;t2gEvOwhB4Pk7U+j{S%YLVPuNtWoQ_>{Ut36cca;(
zF^2-4#*FbV53rrtILpLtRtg0>8yB533jY9x9TkU?rbb30s2iXz?j0RDNb1CTS1N(4
z_gC&Gnk*uVuAuvmjUxklrd3b5=u-Nq%{(YYvpG1rl_+Od7;bXyN9s}=9t0xVN2wzS
zC1@X2AW+$!gZN@MM^Ca!*pC4|;alva{7oi@gZhB!c|WAmzZ#aJ+YlPd%>MvMg=f-X
z62bLF;oPl|zG;&%vfU%r*gfxwjl`<<rqmbE+Y}rg6n2EwP$i`YL|t)lL6xdqQIAkA
zVFJAI+NvWw&O<4P8^Z5r;R<G1(hPYAsir0tN%t$-u56q+o37!kU1JJ~cMA-v6bopb
z#-$yOVV0C+a6n+lnTR=7?Pk%U#)*ukxfRoIy+Bta$F<G@)=;fjzPW`4cBkS;ozA0>
z&vS<QKs3HLotITH8-Ymjn6RULQH#l!LGy`Wg8Bp#JnK(VzqrHX^$!dp4B%y8dKNxO
zW)E|hMr+K*)x-vv68p?~TgO}Wsam%JgT=DD(lKE6`G=GX4ygJ!GkAllz5f7vj~G<7
zd>e@=V!*b!mWEKKQSDJ(Yy!hxePCef>>7Dpt|Au8Cw9h_`h&e3E`{(F8`EOxbj-}m
z&`t!Xt@^lEWzc3Bpje3n$SYJbfqfmJO$@2st%5PE2ExFPMj)s(5p&>AWY`hhfB|7~
z2)mt2u0O#!UyDIVKG?U98@KfhVD(?AGM4LO`jndg03M>^{jM#47X3n#B+lQciCC{c
z)V0%_{{V9_9c-OqGad!sWAhheQ)@e*o(DC{2A#N<BTu-9&#;A3%wwVd044Yt>5_|l
z?lMzJS}F>uPcr1FfWdr_Y&n|HFs`6ypuM30Uq>+sgCoqo$z63iv#r)=EEDcsQ>Ge)
zC9&KKcDm&Z2#OVnlQ4SRDT(O*P_XEES-RgymgNxYaRbDK*dYgNr|vdc+l;o0L5L@e
zw95MQf{k@kvS8u^RnSY;9n`=^=YVUIk_<_}!Snnvik4AFW89-_7nGAvGn%Df`X(IX
zvGI087M5a)J#in0K9X1%PyLti<`<OwsN>aphysazEvxxS*c^A4qjtGs>%_s7%Au6k
zlZ4*UVkSuCp4dBh?dA-vf^~~<^!!IO*Gn#ydVEJ=2)23P^1)ODr{o@YiD(x@;e0?i
zfIEw?s6D`N%B=}P$~BBsgs}C!wp-10n4v@3rF>d<VS@(U7FquO(e6Ey`HrYA>k#Fk
zTNxi{s=3awpNRGEH-SwmdBgNg*Pe!PqoPHR9b;dlwYH85CqCV3EDniF9hU|zr5l0`
zGK(qSkt#*2j<=bU7+NMrS;!Y%V~z)jgFkRr3<X~iB^2}cU_!{l3Qv*rfSkGNlEd>E
z4v5n$S*%qDS1`!K8pa#pFe2E5R4Ks%w$v7Rb(tNu;&1?NTQk}nV6ft!v=6A@mK<MH
zH9Rh;tG7*|-}5D`>Cv5KH3}&?i-}B5oCFU`z(YGEr;RKiuqz=%1SNcjukrIW5+iJ$
zs7=6(IpzzM#^36UB%4ZfdQOyiC3=sLwwf3@S&LoY8%}RLoCan?!WWi|?1D>2KK-BK
z3JJh)T71Ltv7BN4JVm2&WtUDurnn;|8z-cv$7I=9wlmsMS8U>k336GiEFAirHbq%e
zDk$BjLByh)IgN$Dsn!`$tQ;~eZM$OFrKqXSNNha?OHQ{gr$`MeL{31}G$VQQQo@q%
ze~CvPLT~7WiWj?2<~2M|+=ezENA4V`{Xa3Z4<<?5H?}@i!cfR)DEnEJ{{Ta@T#v8n
zBad+><RdLA<0JNnFfRBD^h&Op7}`{KXIOFr4mrtR+#M#L(G9{}Sil(-WX+}+Cl=ui
zdo+sbm${lW%K&@MW+O|i$BfIUTZ?U6+WoO9$yaj!0GFsxt;wU@O`XWe1zNg5VK1-&
zS+mY%$D?b+5o#|p<5nQ)MRk;04`_fF2T>P_USj~*@~$PffXU5ENXdXP*pve=P`YYZ
zs9Vu$5Q%6k@2oX~7rU5Y6e}|L;+{62F=>vjL(H*}0dtj@@lid#3(F9q!!r}DZwLjn
zn`;p0)@_cw#)h&iM}L_8^iD7R_cGUAsIHrdImYzNDm8HqN{FHOm5-PP2cvZy?6zLK
zuDavY9@U*)Rw^F)Z2d5J-G%;%fHssZd`1qUg4A@z=BTL^G?8g+V2z1u6E(?T$xfPn
zh?lH4XX0sB<SSsS9K3Xcp`{<4bAAVM@kclax^IL-wXr#0cWEvRD{H0X<dhhO0bSAf
z<1@XE@TaZxBPI*&{7kL1I2iYNYqA^oS#|zq!so>DE6?gJW$I-tP}n{H0PO8Al4p3a
zzQ%*a&S)D_mAs}P3Nx3Qt*~YmnuYbQCQMZRA!W#f#t%@1TRzYh)NpK3weu}3>c5FX
z?Hvf<nbV1>V7H{GteeCI;LKK;h@g2ULXEX~l<d{&a1*hEo(r|aP%5m;HFu!bXf=XS
zgoQ{}&D`YA#L1qBtwI2XD9;34k<i9`>HrSw@N?~gGF4AG#G%r=Epr<bQi89l9){z(
zYs?^uQBaRn5}h*DN}B?;iF8Yg1q<lz2;C%%lZS|*zm&DYMfO2g4V+xDT&{0+4_lYY
z-L7T6$wL&(3M!ta%i6isv|Lmx>-0)3R50C9N($GsQ=*t|tl7we?O~@r9eu?c_Pk55
zEqfO!fv+HD4A|<M_^6mknLs~3&Qsy`m>aIu7BTW5v?JZAad`6LYO|>BXWd1_6N7{Z
zwTKW)2JGnz7GlgnM>l?BgOQbFddu4_klj2X2l$F)V4gJ}k#4UmsLC;7>*zm;&>&U}
z#?q402<m2UaZ-;uf*GUIrVWkvmrV*VR9XR+f(&d*c}kSBIfbr^d1Zi$Y8P}68c*zn
zhf_UXXJeeNxFjq17zU!~8}bs$Y_5v>%sgG}Gw$Zie3#%L-;I7FQy?)~{m$SV*Tusz
zouzz5RG@#(VhXa|uflkptW-8s=!-R6&9LgC<{RnSI~aP0@`6+`eGp`%`HDD4%(q$Y
zqI;*rsxjbsb1b{~%Nj?4xmo=pT~5$h+BytQJrf4U@O?pE1H<<dir1I({{VrFvT$GS
z3huPl;Oj=Yv4x5a=Q9~*et)T2>DQC_n;%<O+$R7vVJMbY#XtfhJP;*Xu4<qymjSo;
zh$a@kA-6G`*DEUp-S5Q2Qo=Cfh<Y^@zmgY13adw2mcDh&anwL=w;l!86R06z;o@6S
ze~4S`U%0EP2NLe{ikZFdhE}@xf?(0O!MpbW!u*L#E?mOhuTEw6so`?G0vN0qVFsq3
zh`A34NiCN=VJHQ;x?}njKZt9%^~5mc=H(`Rj4QzFvYbSL9_q}+35`SB9dkI?d&O@N
zpbOk5OrJm8-oLnX2=D&@Gp8^8Y73K2k`~JU0D3?<JVBYK{`P?2z9D1i?+&#2m$_4I
zeFAb+b^ic}-Ebgq+;{$vsqvrECHX(%W?R*hUj0j{#}xjd7?%G4on^MPR&>YQQrUdm
z76P1L{-aWp{k@@lF+FMmx;w>68~cAzReulZ1)H7M?i7$0XZFNXRsQWiln{O|=>GLC
z(BqB+1a6dZ;ap4_Rydd3X7vEcnAPYK>0k?hu>&wTj^`jHeqdm_mbN?b62;$nbj-Cu
z(H{%u;=UB<-?&>}9vPY<!NN-4DD^*by3{amO9v$47{@I^v1qdxvDJp1-qPz-vI@ES
zi9<OTY`WhwbL4wO?N`os;soejSwAGFCw*QlX!2i~7i+CWgc03mo?NZq9#`=<V()M2
ziHlpU{;})M@~3tkX_Yqj8Cl=FSki6@{{YeMW6Zwa`0|51rT2_f(`de5$pCJh?h5eg
zAE<?%ShgB{M7VE~a(tBj<vkYT{K86QY~X7j@*}*CkNlK2kBNk^!3oDL`VelaOD+x(
zVca7_aH7QZC|j5X);61kb##<#G<8T*0@F+G7QF#TnaK^=_>X*eg^(Y-F6g4ZW6-gc
zY1rMf6FIB=ca#HHV=mhMAaO~Zj_W2SGUkuDnBv-MJz~}6mKzRE5rwsdsev$Xfo6;A
z8g^U-Rg)Qp^&boQN!0k1Z4;7JGwm+M{mlVdo<WE_62>L?xuU^)lHuZ8dwZE5___Z8
z&s8oICEqgmi#VH?E?mQ({8=svO<Sj}S@|UhM0sjBvMBY&MT*Vl0K1My!dE$#@;}5T
zjlNnT#@SS(5Gs+r$LbdJ{{Xzlv6r@DoewP%>MTc0{^g4Va-1xbc$FN|=3Hxh%7A7$
zoczYlDyJ}z@=81Ipey{qHOtH<K<#Fj2W(7t=1Y}eh-y7dAx9&KgN}J_CACY;$<>c|
zdE?JX!+PdcD<`AEe=*bfOUg5o5qZ3zT)<f73^+>mh?KTH%5Yo81`nvLkWrUvxL0AV
zWz;(I-W<#sWS6dc?h~Tv(85z+dDJsf>D)6ML7(PfeQfO%=`doU_ba{onN51eU(D3^
z6#oF>mlqIa%l`n|sItEnYU?(&$Q=o%7s2{ws=r@};STctnCPm$xqpZzb^dmieTx?q
z>)0&Z(f<H?nX~VJ1q(cf`DG+%873FtnTuJ>T~ddKIzN~;6@AH&vksCCW4CGKNr2Mq
zm4me8vR-LE=7luoJ)m(=qJV!9+WX=z=ta%;f;*!1Qqz%_j2+-3cwo1$SUAaLKrvun
zbgUjPsus=5bvQvV?oy5pre&dLa#gr5S1c95>lFy4X#ncry$@VRQht}YslD<}F{E>G
zX5g?t#1v6-4oC|QTubmL5RBB<7Rzq5#4oRTuTk+`qB-ZZ%Qq4M&CC6X;2w|eBhRS*
z<xkj6<<r<8b<c<MFEvTcm}{$hta=I3m-(I#sIb{_-2J1*4{zKEr{qe?`BpTl?{fyv
zXqJ<@%a!7n_Zrm#wygeE45Nrj4~7(URw@;%eq&uoLaXSSWAOb;MUm=Ft2kBeTWAG7
zZTKM<kSLJY3?pawfn--$2>L*j9qKd-YoyzI5CBUjJ?V(ku+%8lYf<33AO|jHD>FIn
z3sO~Vq_E3zdJn??01-0+2O7D=Da>;8fwbM-l7@_8kXQF_FiLgOI^6F!Zu;EMfE4eR
zU7*%kS~nXQHW>+ut(8FY#nc{H=z?*$-RX|0g{5QaHeVv;7heb)0JApB*oFHL_&seh
zLd~mD>tB09GB0QG9>h+hS1d-Z<t{FzgKc`0Rz4+~&(sF0D#Uw6m}+!pTQKnPDY`Bj
z2(g4oNL1MHl>TGgdc8<DLk}kdE_C+?u`q&v;Weo@-X2))i*>(q0}%t5c0W>;tROke
z6(gLMwK|EC%icA@J)>A%v_n~AU0GU-?h^rReI*OF$fz3{A_pduEzhL0qot0?xpG#b
zL~LA<r0G*GDkuPn{ZM{cOD0|=OcH_f5mxxxE17s^y2TqnYHBz&xazN00(X7I;<1f_
zIa|!MZ_t%5U3<(sloer;1j|6{GbqM&3C$cEl-M6I=5G?6J`rUzT`n&U)tD%=fR#%X
zt)XWO#2osl-j1^}>aIF!2CsABmh|e?%<;r^nQUq_b2>_eJ@%Aczz$+M&!oFLLBfU7
zp+bB|q#+bEk#U^rC}N!EH_=_9w-SWE7DV>ky}!8Sa5(E4y2is=r?>W$L%QOpA9+)F
zbc>3ujUs2nu~gWSy9jgEAai6VL9qgN&4r99ywKj|KIKlRmi;G1yvM0>$cB*IQ!aC>
zn8X&P?}?AT$y;{pZUF{yFcgOa5C*l{Sw;pbU@)GruR^|OXEp7rnba)JJj30)l{Chh
zA{3=c?Jd!}!7AK`1X9evO`kHum=F9Vfp({8+W7AqMfd6!e=%FNS$wX%%R;5!_E>(U
z>aj11T9)@cX0bf(UGM(@g@Y{2x5ud1;FrE4H=oR|!2aNS;;kq=4Bf>IzGd)sCS4w5
zz2@fJZ>(ba%zsEaz=ZE{YRTej!0V}9@bW^K3!Tdb^X59wnRcu?D*R3?e8*Uh->M~#
z#-+u=V!N2+#JybvLWyP?zJK#uX8t4Ra`YHi-H-Q(tak1MxG9PCnMC0004T-Ow1-qw
zFI97%cQim1cPu8(dt6BP0i3ftD7&eeG}<QP#!E92hN;stFV-dZF5cTZL>j@?Szj@0
zGS!p9E21S!A&xK4{#+e=$7R%ISyH;?+8|%dDFYg&R7^_K#N$!_0Ae+Ih<K@-@FVJ(
z971ka8Yo_3D+945?q&4&f`DIp^)>+U0ra>E{{S+A`-k$mmxHEH*W6ZadRqHoo(kyy
z00^`#d#|Xehw@BV{ir2;ynGPL@5{d8F>Ql}2HP@?kma15^kR@2zcJ<rxj*yx#U*SO
zxB%`%n6f*Uo-S1u@zznZzR+;Ra}$->s`IEM%Jk+8*z(NPossl`s0R^vV$Q|{ltv4N
zmP=#Z2-=s9kxSC8zK}T^4ctnKT~mj+4V`AXz!-JJ0oT-fVbMb>x5`ui(-3%$)uP6Q
zOV(H`A&&9)>Rzo+a~Qn)l(bd%m{oq#?!fmh+J6zs;_O59DR4J!1KE@=z9NNH)N#|)
zBzX4bRUoZ=F~dwVyh=Jw9C0+VyF`Q1A&w(%px;uRV~Y>CLWdBoPKljCgWdBVL~vuD
z@*d^PFvHpnUPco(m@=<39wod*LnY25lR#-4%fX*g$61Ox%&sEonS?P?#m||Hl0{o!
zxWJ|6T9)wXH$T-3v~9Wz;A3A!!Snh{_s9>#Mt501krN#dHT4w&_7w%7xYhGAUnF3h
z#2Wza&DkB>4pueE6;&y1JGUs6trmY#yuIrQRut<Eq57C=&r<MIK8b_Y9Z_DJor{Bt
z$9cl8Ua+nPj1t>E=em?|i+p;V{-q{4Q3F>VAV5<|<_FZJ{{Z5>qtm&6Q<S}GBgEmI
z$Xu`(k9dO~d(0EGN^=}_m5VX%D-aJoqmKvkqs@f^;IWO6t1h}k)K|=^4Rno8_rwd9
zam2m7CZzE39WAeEnPWOk{6gW&*Kq#;T@`OLP~BN|(sg1kwe=i#@XoK|I8Fjmkqyc)
z+Z((*gYuHHo2*yK76WvYj0wf3#3Zyc=0eaLBiaHA%E7|wQyj6w{{RNirqxRE^8v~f
z!x?Fp+9l-T8^o&fHsO5AaPW#av$Vo(8r=>ezq!2HHzNnEUH<@3haF;Fab2bMuJF^p
z7cH(mCVL{J$*jewA#1F`i^desKlyRj{tD}fRWngi1+-bB*xQs{979FRV;3+1PJ{?$
z1P`?6652LIQF)AVGZtX)R6XS++<DWaSz&PMQ?8~)?r+fwCs}iBx!}SgBNdT#okzRc
zP;;E(P~BMO8ep8oW(`EX^3=n@?&9kUTk$K*rjB(qySTLPEOT`lv-J#%mS;Z_o#3)k
z<A3=O^)c5K?iA$i5}euU0`#6>lw={2vY9tt=gJ{*@{CzFb5mECClMRmZQ4F&&5slP
z7~RY6P^`*Uu24L4E8)b_=6U#PZ>g5)0i8y^BDPSjrt!I|v${hZv{PdN)hswFp0deS
zcj;vh!2@i0mKyqj=sU1%4R(T6n&^OQm%MtE>I2SMXK8EZ0p@2!=p_rCnVTHbD8BHr
zg{gcOME1R<EFUngwdM*CR|hc;vd2^?ZVck3XZHZ<G_fGx2U$l#%J`QsP$dx3S2FBx
zJ)(4rb~iFVg~2Z6J$+9M#dbt-O~&1yQl5Vie11RiS?URmdCa#9?1;U>h2N>c6d32s
zNbObkW{<&{^m&z|GXqklkGYg#3pbb-A}PdANc0sf8~c`yO4*5*7vUL~0xfAWF}Cyu
zlUPSEg`M=4@XGqHS%uxWmoR-0{%q;}%G#=?1jy^8y@Tpr^;2N@lOjJsG5-Jwa-H2(
z&eD3F4NERsf^%<HVl=&nUlZFG?oAI!2}S7;kfujEO79>=nutf~EmR4;Na#HvEr1e`
zrh=jekkAr(FTo%PA|!@R5P^UYA=171-S@up7tCk&%wB8u^Q`Ba@gpYPY`ycPxO4M=
zKV&Y4lpns%yS}<_F<f232{2*9suzP#QZt{pJZ$B5^m_Mjt<zGSI!pTY>Zm*MsBA;3
z_2}Tn=B#7(<t804hxG^8QtB(i+J-}m+c7ut+r71em{Jv5jM!17odOSY6jJ#@AaM9;
zYTusPnv{S0A4O|Hgywo{s1>OeSkl@qqWiBzt4bORi(~X0Cj}CGXx#F+b*!P&>e)Zt
z+m1CNzb^;wK-tfo#bP^gkK(>%Ot+ivCOxlpxjUG0TxLwVm^fkUe7Ip<z`5MITfQE7
zoBBO(LTw%bLnBjO*=7Iw=g)re^amFV{=x20ULj`Zi1pXn6@*99x58ur{^8q+cZ9fB
zc5m9xzN4c>y^#Si_-`DSJcSJlF0d*MWA6{17|ZW=RC4s$qEzmuVGlR!Po!gq-;IB>
z)Wm*PUaEP&+F}0V50f`}Akx_qeIgb!%Zk!@__#j$*`1p;x%}3S@*h2uM<wJ%SAIM@
zxD{2BrF2DbY@&v9#F8}4<~U~m^hTVe%O^G8Vf2mZs+ynmQSa4eM?dRixqeuR8{V-0
z&AGm(r7Y?>$t%{@II)r6fw<(gO<rhnK77t0F~KP`cn!vd$eB=2*Z$85*1~F-NKM^l
zqo>Am)6i36xj8{h(%<#pY<}eHxZJqt&6w?@g=MR-%XEue6$#c(&xxKsqJ1CaDg;X6
z_?Bd~`+`SoS8cL{Lqe`L+wTi`gk$V!N5Tu&;}Eyd19h=yu9Emw*$yCi?uwtjZk*2!
z`{5XJMrz?^HSD{))5qUTlDf{@vK|M<vO1WR!a6ROwA2wn*5B6~tz3+=${+!zVdj{9
z+(Wy0n-5RlyOGD92iRP+>*}0bC^I5Q*1AB|;p^9c4pw~~&oQz8+12I8x2Q}74&Ktm
z$VoI$(k}k$QQJU*1tcqsf6cKB)Pu@gG??`5+vG#&SolLtI}!ioyjoH`<%kx4Zhw-<
zLW%lWlfL^E#6xg6X9}Zv8`4V&8@gU@aK!2X^Q|#_@TN-j48AYG+^dX>w^P9g{aF-<
z9sxKly;Z-JK~xm&1lJc_oyJ=5e+gqIHZ1;>Z0${1gD#jU+S7kNSJ)>%W%d?(u-#Cy
zBmPz<^v7<kx<}N&Ef)1zh<a{y{g_^0;ngAM4)z+Z!z`|U`0Pd`SdLG+T6+=<tPt}!
zfRDr1@1zs$9jo%&vb;`$ZP=*H1o?w5o|DBC^yAK_|C$Rmq%L^2t`W6mhD75`7Wh(c
zi~M|XS*CPgCU@(hc8-CX(A+~dh5z6mR3z89a{1mDU%f9-EYjrt&bT(~+w0Z3f1lH|
z7L`qo7jfb_rpPBcPoA*bUHSHJ$jHdXT!uh-d?r6b&nve<>RikK`Pojm`$N!s-FnmN
z-D+h2>urMP4D|Z}blfI!uyamTobSE%r;Vq1pVhLp>_%jk?`P^7|Ci=T$b9MdT}|@m
zE35Px2(uLUAGWu}D5lQ%&zZBIO~ynq{|4BMiH`cZ19q3{m%j835yTmnW4^aHequ&@
zW=-GuAz)>vx|Djj*pm(8HwH2tvCS{i2bW?xn%?p?IM{ydF}}%>n5e~pG+d6<KZ$L=
zmEIJ|KW--RpcR_k{-00x4{Ohw+mMIZCQPl>YRW>ll!)g0%2u4N6~9gs4$2J6{$+5m
zI{mFJ^{+IK@!aQ%5_*mUd<xnfsZtV#tSrg1G`1YPCVh;{1)Hd|_MEZ>I!t-xI@5_S
z+ci!8sh$8N%Uz{B-PiXp@Dncu8e`0FKlA9f)zzEr$Uha*G`$yjUGnE|NA=PV(^cEr
zp;z?`VTF%>m=AcpcRlNpiHm4$S=;`zL)W=3+jqE-<1bB*w2%e8R*Af=>gQSXaK_fO
zS~EgzQ24!h%WB@4cQoxw4nfmt+YpSrIy{r<UH$$<FpRNo`q|xu;^a%AkeB{gUGPuK
zn;^9M_3&B6mr*dm4^5b^?4zQeF$-bfW&50(z9^R)VX%$mV2$&pe`z1+SO2Af>>sA8
zVn2*rw1f#id?{`;?H@>0xsnR+VZ563$7%mb*}*BUC^kf<4dVRNHm%&9#J27qYQW1H
z<hIV7W%AO&=dy($C#o2?6gpET)E8)${ovBklNLp%U+m+rAh&6G^m{KfQ1ipd;_>a`
ze*svglB)yF)G)tb9lhZ|p!?YRl59cs=v~6E7E#VQ#pnRlVjoMS#pn0Mu}7N{!Z%iB
zN6$y(A~{_eq@BO3Z2U@0)jUrx`Y~cN%duwQRG%emrSdQ<;Vb8+{b=p4RgKshrv3T+
z8EXcdO(oZ}e=Aw<=nq9K+|}RFJ70550b9uZd|73yEwj}pp0$9xj~`f05c>sl?WJ<+
zQ&&pbil9=mV4u)|L6O#P9yc{VCULU#$c@0Gt<1G<c@FBZ%|FZ)S$xUxr_E`(e};)}
zp#3Dq@ayW8LSU-qfU@t07){2teZ)(`F`V%*CGO>#!UjEO=REjn%$F;k#4C!W;XLkb
z1JxyhUIjV4w&stPi5qczF^T{6Anw2PNhhOL<~};8hfJHrP?GPuKdMDG_xSAyRvU>F
zeYrbhL;S@v@DESub-*><852_S!`sTNy-W?$3GISY1EI>dgZ@*^cHiN~Dm3e+`H^z3
z4c)mTT&kmT@$}VsL;o|O9U<t8cr+jAynUq3vn7<}>sSRok*{(36|4DmW`|oHFc(wV
zl4(O^%u^GrG;esozQ>lA%s;vv=iEQM!_u(L<11;*ZwGk(i>Ov>ZW2A(Dk4OlEHVEJ
z8}!ICqZOcZyub5puk&iaqLhEi-BU_Z-u3%h-GRTAes&IjNcfViIZnZ1KFXfjqq#1N
z=3(`1qN8nY0h}bE8h#IYyYROoKO95#w3cX|pYkB%gSz+svr|l%GA_8bTPPm5kg=$9
z%2qf(BKyAFbE&B$Q#h_VsO#qB^Rg8wlds?BuOJ}K!B0Oi#LCkbJhr|-9(5*rn1rR}
zvTZ7}MQXSD-wB+4WVPazO}X-*1E(|juk#1d=Zv4ahOe~kvim2(RdgM11R%e!nX;}(
zyH}_@5PIMfl|$uL+~A_(D*nHVhL(Yrk@o-RR&f3gTtS6ah$;S$Uh#jy6;!f?9w&94
z4{HHE|MKXjrO+3hOGO!hO0|z8zYdI*15S%v+}{0>6_h<;ABw+spyBf!+GOM8OYoly
zuLr%?MjJR+>_A+x!l^1Gj+-`7e1-9eTJb}Id0LV&u33$b-0IN5l560w)YaetbX%3`
za{AyDmSV2>U5j93XxeNl6cAr_^X+nToRB%HJ@cvitF`&!k(_+iFRn7!l}Bvad0)+1
ziuRP3ihfn(R60f%G#18mk)!4kZ?R06NFFYyRpVZ(ffu-QVRT!&$KQ`Lpih1_mA9Y7
zvzkKkmfH$PKNk$i0U?_Zw}LUDT9kdMs{a-sN?JLl*t0ckAeSu6g3XfiVChL3wO4mo
z$EZRs(mE^ceI-o{$aL$n-7oJvKjUAGWQgM2IARt$;Hj2vCn8qy34DW*55K;x4svv+
z(C@&*wbZu!wnDTCfNHK(@JQ-7KSko#_1C2;)ylkG;o@UCYI6`(TMElNw%&RO<<_v5
z&b*ZN@+vUWOOTiyF9sAxZe@g2tQ=liE_*$N7mui2yzlW)`e8glaBHqa0IG)5(ObSF
zaG!Thi_Q90vcjImD#Re1_ZT`knDE4luYNu5FAXvayD?c?C(AzGY4J%&7K~+iGvN%n
zgEkO<Di-7%7q)glOUzb**bkd+61EF&drf(qCzXoFj2^ST-KL$x&Hu2`>8<nc09FI#
za$~mKAwY)gg6cFG!kR-q5=N-?&Uqywo;RbxQHbeaDibuirF;#elX!Jw-QDb^S{5md
zJdNhrDVG8A(<6hNe}t`TANH*n#Nv8uYN2%dN%5c-9w9nSUTsU{PKYb+6!N*OZe)$M
z29|qTFG$dFd&o*!@^K;PmVr(5vW|S>@_6FJ?E!OxYBN4a@;b%}43Mr63idYB#U_ed
zun>4;Gyq!Q@=JfzP31j%?W)KkUH#jL1^T+jhEHLgut9;s4uQzx=#BjC!}>nenbF7i
zc#f$W`07*MsX1zt>cRKc$h<~{ci;cgi1T-`7hm418h{ykr};^YE;OK-<858Asr-Z@
zo~sH7d`y*CyS)g46z3&^{#K3bwJ4ao2M5`rMD@Ld_E?sYM@ZgHSsN4D-FC{b8v3Q>
z=U(Q8bsh2#DzB^JZk(AlE<B*pttBwbV4L%(xZJbHS$&Bfr2{=T5H=@v-|pC05~4@&
zowq{`VqTE)!o+C25+{i3x<_!b+qT&-*7{RqG0|!}qJIN19ph>rpB!H<+&6I`az|d2
z*A?yj?5L+$SM?*x1l0ekAd`hV4o?e@vGL03`?1%`2~(d8+LY~n_EbYnNmfKGj&gLD
zy5=aTu9M$S?5NTBAIv<#=Kx4K;kC>D28T|BkO8~!gE)t>t&rRKJMtxC9;|<9{s4w$
z!D)XA5~M+Ygi5}j&^C7WuUy`#hrF5(t^&+yV?~W4Jm0~joYds6gUiN;WL}F8^9-7f
z9n0D~o`R3$Q)zo5%9QEL=!BXp3+8OxM^WPY3sCk6LCk5=E}`k#>k-sYhq5L#g5jc{
zf;~mB^Dj5WOzIP9!ptQog3R5FSf{&~s+x_rSg~mjJ4jz3boVWO?O46PLBJgh;@RuE
znJ+04)OoMer$h~UZp!OXsA#QEjt&DUXQ)7Ml~?PCr_!dWfLTB@NK0x}h_c?qv`uEZ
z6_;Mb@-H?#mIx&Xg_PD<qfGP<efKk<A-l_f{VNeUG5TbX@d1;)Hu5KIMt`H}_OeLX
z&(5(X&ef=qZb=|Ub|4>3DhVxBkL!V6mGPx%+B1O47}p`2m=8kx(K8{_N!kV5I1%xB
z9veB1g%A=RuOWZon;-0fiwgseCm2xdDyECXDouBejaweg<8|ajofZPyofcSz+}1Yp
zk;nQ!?yf_{Vgo{ac~PWn_XE~%$#aqT+upSeTZ4=#Q1HfCg9gNO8;oS)R%v<k(YY<^
zH#S3L9yR(KIA>aOszAImwkAGkwT{=Dz8VE$u!;?Sqd$O{WxPUh4ikMO`kkG_IT5cj
z9CA<d`F^wSZx(;CEy(V^y{J*kIgOP={-f*3&Dwuygc)b$1wff2)u0{pEg;&PSywEt
zKS}}H=oK3okXo>s`m%qonVScZVolZ{b>V*_>RHBmtrCVQKz=h(n^%VM$#G`U14p8E
zRa|BhoE5d|uK9u9gGcG1UM_@jU9ZxeYXi4tujQzWLO@mnll`l*_*{sKT)1BVD`qPF
z?fj^<TJQrKOJ-?Q4`=0g8qEyfpzU!{0`8^U<C17XWZ#eZ%w_(2CtM@tEN9{~%#@*)
zAI+48irwD`*Aox&GVu=#X=fj94h}oISwNJGW!{Ph|A73cblg|H(3gIqX5-cCrl4?`
z(bG3L|ITVUO{LlaD1Ud(t{~45r8~dUP1z(?WgQn$42XHWJv<wA!DK$7o|-+&jS%_u
zeX>C*`>Nts%FgmHc!k6kh36z^V+YxV{AKv{b+^P%`@@WaE!J5M2Zh3jiuJIGn^C*f
zqwzZ)8`U$apJC^#cW5Hzd6;y%1@zcIjZf^<v*HJmD7raJqMuhffnMFE;`=0!{Vh4o
z5fKI170$C9sBU_{J0uo)SoIo|4vTYClO?#C$fJ4SOEv?oI^_!~0f3P|h6ge|$(7?L
z+Udm?d@wzPtFxHT+m~LC{?c4a9;CP#uL}_B*Paku<V0RfSh%dJ<)%UsJr!K?4w8g>
z1G9}qKK`W{b(1U%z0{N8<fUj6<wW1g_jlgApiEYNKXcz!lMEF*$aT*w_=IEgSrucr
zloqufp8y9pt1xP}AgVm1Y>%^%GEKNd{PwlH*WgjmX}Nl$kR;pcj7SF`mLy%t9Gw&k
z%A_B^Fm#DaKf)ZQcDQdlmTE>l4UB~ms1<l!ZszOU7ec5<T;@KO(z@pu?JUG-bwNdT
zIj|>(ve4peEWUV%1T98+q`xsaC~mRFa0TVHysg4K=4ZNs=TMsogQD!pn_xNq=rpew
z6q8r+(*>lZZ`NZ7)>pj6H%%P3PBqRBy`R*_I2%ICz~3_5YJe@#4$NnLb0b(TObgeX
zTb-wNNGeZHDs<d>7jZkCo)-NL9*$3xpxD3*fu&-Agux}u%Ti!&9jiSH46fY<WdkAA
z_4>bG8ZLUeywnL0Q?~-ukxn9tPyWgGWMT}|b&PCUfuK`kqrKGhJS*+*m1}sZk;37`
zb}I@~81zD}p-w0-@S`Xu0?E8pQ=Qv3u7;$=T-}<I6CAZLQ3T1c*l?*s^2$`0w9{^D
z7e&Z5Yw_e%&q3VWkDn{bT2rG(x{3^Gw^&L!Zz7J9H=6PMJ{K_UN^$@*^D9A>1_yMJ
zcR|j!{241dK|84epQE8rbEDFz+d-hFPp*UcBuM{u8!72bLzq@i08HY+df+d7_}6$T
zey_N0b`-I@pkh_D9mDl<nTTGSMb?ByOqHG(yB6aQ+42Jr7hodEe&FXle(Om3zNYK<
z!~_64G%@C+$IcA#pBIH~t-u`Vt8w}MIQ<UUHQ`ak(bZaPqpxhA-!HMLz<K7MdqVpL
zUjcizqkI`$X(1WHVZq@F(h2P%Y9@xl2Eqlymk(q4(!RR#FG<x(9^ITVDh-aZ<Qtsy
zvA^Rw1cxmaQX)vhr0bORaaw$6{f&*6C;TB&pQD(A?R0sRx)--V1RF^<tOB?EXlY??
ziofoC?t^A+{Vk3mTO)o`p*^fW=iIfL11$}hSlQDo3-)J3QdTw1J?J^9w3tp*M(o%G
zwoS7taH5jeoWOZCwf<`J=&AX=+u6FviwfbsnX$T!?>g`)s=Or~i{}fz3a8FhiMl#a
z!;*Nr9d#c0Dvi~>?6yFkd(byEiUCmX-n&Wl5D=_EaF}<m!T6Cfws;9I+(B6tC@=l!
zY>9lp`I`p&TvcaizvT^p2<sa@&!0tp&N!nVytY{VwwT@>*U`b~e$kSu5cPHBqckS2
zXayak58<ufdaWY-e1~(&+dX8_t&1LJ&gA{^8&h}NdD^7HOS1_SSw+<}-(99Tf844a
zvJcl*MtHHQUQ)Ra*+peO$Db;#iORTzHOxv*WUgX5MFXc$5D$$(P7aer)dK^KCP+=Y
z^`ms8n|l~xRgqpjqIuIb)g{#iOq>(D7ta}A>uOGwV*I@mzos3Q!d?T-#oIv&b2q}_
z29`mYi&2Jio0x!{E9Q&vGr+el8)OzLHs}j}I*NZ~_v_=6ya{iPoKjL1A8izy1n%Nh
zqc1yu*UXm_xv4E5moNnaeE|rSN_%~G#d9ljg6+~+NBWbThFI9>-CCrlqqM##`n;OG
zg#wJqKvqhB*`QznSWJ;5C7k$XAxH=*ww{(IQSfFA-|I@T{vga+V?ff9BGqP%$w`kR
zrorUX#TV4o-FS0#`^8kz#@ZCaIx`Rg4V#qQv<K#*P4vu-{o>Q(QGdMhkiyS<P0$Po
zf~n6{pC-gIS6^akA|`QUMMl>}Oo)Ia&>q1Bv^2_F(PF`W#E)+1aaWI8<FvxalRJ_T
z8A?&B!@B*^-j!}|r?FZ^D>zR{KvSIcxp)T3&*&Cx@as4~ER9}X*h54vTs*v~Zf58Z
z^h->!be&^O$-Q!LqZI;}Vc4nZd%Q&(;777$+Y!@&Ys}5K6PaYZ)uFm`iS7yZ|EwHP
z71RAzCY~yyjN7<B0o2qe3&)7f6_x?mk5EJP^0NB6pQgNm#dM03$cfCK&tG(a{~+Uo
z8CwsuLc{Q~5vd_<)+hxb`euzP4BP#`G^Ja`){q0Ih$i<tEqZ_Sj$e}bDHq%;`c8jj
z2r?LwZK-hNKcS>LKyR4oLP6Z~pn;Hf*;2;OQP|mcU|-CPjuRB84!xCJla0D`tJz6!
zAXvrq`CDlx2NCZo5&2+Lq{=C$9)y41Nf@U^NE4*og-JC~f919`%e`F^rjp!pbHTff
z(o3Jzt(NeUgUFn4Pu)+4GPId4_DjRb{peRtlsh`Tn3AC_mMi?5WSV2G8%LdLyF_^p
zjsAP0e0cB|%-HjMKtWtOu*6XSd`C?6&bWf>f@e4aPvK+RB?-jlD2aO(6=Cy(`Dn^3
z;m1fVT)@m$$yi3<$zbo0BnGi<2k?+CN=5#qX>C)PX4=tqz`dT)E#hRGGf2j3DQeN|
zCi$877V0>qYdrQ&e)1{uhJPEG!65y#+>+(XoLpSm$&(*$UQW(*61lZ>JMfWMk9C39
zH+*c=r$*)T%ti_YH0H1*jvj>UW0W(L)nm?<OTN-V`;zXfQ>zvORPEbWuvIOxUg&1i
zhnWit=T4aJUmEGW$(7_2L2`f8ck)q=A$wVs&xe@=$+L~r8X0VZ5J{Dl#8r#cLqnVZ
z$RPGQ*!4z9BhPq9ueF9h<}qs&!{`!SxQ0kb*HJ|@icKVSM@S#-j?pRIcyR)cw|TX2
z#0J$O(;YtV&U_x}XEe!K>vp~Tj<lr{AKy<@smJDid1`_oG=QSD3nSV0RGx@t(rR_L
z#<EjP$94NgTa>M2HCs^k%cFGb&*8DMA*m!*u_4yS1*g=c)&6`I*p3)v5&m<W)zwK&
zVI+8lXK|bbVe#IV&h|Ae`H#vfuqaNv0D%=O`R?YS(|jW5H+Zk$nWaFg;6ez}ld1`V
zAi}buHm!R&@P6}--|k`SnV+?AdJHefOC_kB*vDrAn$jy)*9^>OIe7_^?AQGit>8d7
z%*4>cVPgJ~AH|n=VEiW5E^_may^j>gh{URXp!@v>ht$tGJ06lcYCZhjPNk!tmqvN<
z(DR>B@y}#@Jh0QEvl?tgcKc2DfdVJW+D2#El#$~^F74zbWtzwrC(N@rV4<nh`LY;d
zOZ+d*<9smFqIvMQDmGo2S3LpYga4984?r|aPlG2%bz&uHV(G>5C3?A6!f5k$5&?=$
zy{0`L&=Q(h#{TET%_0ufVIq2v4^n`b20a0Mz&p$GAaqsbmln{?F@gGS8@1TSrefON
z#mjeFLQ#?V)H3Jfih+3_fa9A4V1e-|{C@hM!E3wG*%Fuo$5`3eYf+zDDo6Lt3YO8>
zCu$@y{IsTpwb>oWdA25jy-*1Vh9^|tDYB=f!tU4?T=5K^S4oX#W1M*iVYudZ4mXnr
zBFbFZ@rt#0=0CIfu|jZnJmdIaKgOl0%zH55Z6lYTq+)1LhJAdh44<x3VNPZ;TgK!#
z#Pc^ccOYQ8Fven?&O;{C4Y4;p`nX<!uwG=}qD9?Q#&Vt{`dO3->?UK^qf;jMFFuit
zxG`jU_9&Y)B^;a13oR;mJ6+EyTH-)$+TA~SHKG-+bLKme2iEJ3{~=t?MXUK8mG8B6
z#g2ok0yMg^q@wV6_)=I_CQ7t&!F2z>)3Lm<M|Hhi6KJi(T--LN*L3W-ZX`s>_uOM=
zqwX3&L)5PR!d%X+cz|EK<AinAYZ%4%jsIq1D-drgl#e(?7Ob`^436Q#Tj-08%X@9k
zh;P4K8vGvimyZhHZDh4Hz4SS2dIInaXPvmd(q}@`)=$iv6qZPSLFD(U5=KlzBKZ#A
z;u>ICf=yj=NARdmKZXZd2bGV6(?m+vwfD2M`%HAoXj7Zd9dpwI3-XSepu{)@RqlQa
zN{nd1hkc;A+ft~f_fnuH{=sg=Wz_@Lf#AhKQ~g9#<$DfNDe%y-qg1VK%QH5LbekSN
z)toUgp=l_azW8##gU<^p<m2{K2A;p}%fZpVAO4pnkpN`1#8L(I0Epf>E4>Ob+T0xx
zCx6xT9TVg5adB#Ku@x&?IDdYhez&pMCP>spFasKUCCoV_x9_Vjab=EXqT)bGYLtF%
zPCfq)e4PcVyNQy&rz3vAnqEYV#nZ;|u}KN>>SkSzUl_}C$2<tO6}73#;*r-uMwg}O
z?SOqzxf8wXY+=hdENaKohD$405p$@pD6=jYXc;JdUa-A=d+hxfx<VM89=w=C$4Go3
zQ$)9;o5I{VFPU)<mf(B<moHNwzJmHuZ0by96}%@oXKb5)d6sU^*eSMft%D&^jN7Kg
zZgqV^JQl--9D}}+uA}#u476j}ru9~m2h@ypQXV_-`tHE}qD&{4*i+|GqEr@~JT!`9
zEGVahb)$MrzH+n*BIlHpC>>7#H3RMoV;^IL=kyfq-Y%4W!@#;gBFKXCBZ>LTs^jhf
zc-8$#<p6~(8ECvu0P9DTC^Z~tj}Yk<wM1ERCj^_HI`kT)d<Ih8rI{Z#gxt)YZhYiv
zW}nC#_;mnSjU>u*38i+_x<{?TYYghn^VBK3<Bdp7p_|q4Q-@XMNRnJc{+l7pDc8bd
z+Q3o3Wfdw$nq%-jdvK>)b`Wm-m8&AiGfw}+|NMHcmI7<&iNou}mW#hMJp`Z6_Ri8L
zLnpsCc4VnvvsSZf9D+aJ9N5wk@L?HjBO&jIG8z`IZCD(`U#sCZjV}qi5#{;(a0m_-
z%Ntl}4HFFGkH{^#TwD=5ab5dbbooa}*)+rZ-|<z<T3lc95;s%B;0E&z`h`)i$&on2
z5*r_*OdCt~G$7OMGk*Tu*Z>u1JS&6px;TE>dRVx0U(08mkl=PkkSI{)!-(U6O?}sq
z4yAu--chlNDFfVz^Em4&>U}R4M8giA6$x6N@DQvEcVfyZ_HL3NyUv-VG2Lh@cOApE
zf4+@6`ZKiObuGfUS_1CxG0=WM7<YT%Zk#dP6PscGs9w~mhZ(>;CN{l?GB)9XoUp<I
z>DSYkw!bueO+#XNMA+cxs3XQ#YdVF;lCb7nf<-Biz}WX_QC@WUem(#+H3i{NrYG=W
zGf=Ee;PPZkkNz{ju&bb3W8#6gS=xOlB#I>!zRx+{l|F^m<_m$Jqq4~-WlnTs=Zon=
zJ3O=LP=Rk!S?<tt<}HDC6rcQ1tkA!cg3cjQOU1|II+900?~OEhYq@TZntF-K=O}G8
z_sY4*#s=Ey{b}}<IQ*V~BWT0y;lKFpT$fVe0Q<(GK9r3BEnFD91Uf?o(f_5H%t!Pi
zDcp90Pf@PcalM&tzV{DW;=82kHC;Y${-xo0o0lam?{Yik?3@(`kPvJ>2?{6kkdMN!
zFG@pY(u;;^z{sz_6+PKX@nd~8G;X(MJIv$!rjn2^bzF~XfzHC1P|Y**yICqTY9s*S
zbTE09Ivt}?n6fvZah<wnZZrk~FEl{Z0{&x|(obs4@~=NPYJtJCtKD^%JeGKY^7WD9
z?SIz)(hOG$n3`{n2V)Bpy<;r4&DErRrQ(y~T3v8jSO?<?q->-@#_t?J8c|xQHElu{
zDN3N`mE-gR_fb{I%4S<ov(a(pnZj@Owyd6#E-HIscNsCm)9k8LHpjW2MzJX)xE1;r
zq-@o_Pdp<eh|55nMbK+^Hc~!|O(seg<XLF3!=%na8ubq3Sqk$w16oo+>*3^~q2nw3
zOWJw0aDiP)TC9Yo8K|(bxYuAyY+o#<B~5$-&01-kH+qqA#&U>^QUjBXcFV|mStwaq
zB<$G3>%}O96f$E^GOFLe{3MJ?>?N4s)iQtj?ibMhQ8kV>S{u|4@l+p;E84c^e{pk;
z2d*Y3H9CMiQM<H=E7AK)16(Jd&hIM5(voH?(Bh2Qy9K1=wjWlYQR5b5k8CWQA6sb>
zr$BB#5nJ_Gl?DvIZ`+umcG^9czo;=$!GX*#`}BU(huy<}*`4<^9mI(m<_(JCOyQ@k
ztBHftufrHq>r5q?jkbZ=LBsOq%C}k+P`*BKpH2d#f`t@<s;QF@oTic!s(KE--cB4R
zp!;9WySVHMY_Ii<I&|&+3QTbEvI$b&tQz3A!10e2G3s0bNEB6%zchGe5H_!G>MspW
z!U1F~90U8kvpdBLlGU0>P@wlGB6bfI@Zw=t&Vs_iraxmQVxNFXAvpIi15P%&`{ofy
zer0Q_^lF%RQ3&B%o0h<g)ptay#VToz@AvYl6`n&hC(ljU3h!;oQM*}2Hw!)Nw@dyp
z#JN-B%CVVVCH@3BN-bASXz~1SMBPyiWIOgiS~kJDM^<D!L8;O9)JX{*ox|(@m!?;d
z-%QuUQE=FeNx}&I-%M<9u>d04hBp}EG0HrWtKGUTe0NjtqEOUSqgYqYp;`sm1)U<=
z&RNtEY*4jwp^<MZgfF?G4bDVj!Q`uGvQ<|s^BNyzC~_Sfjlal^Pda1STi(92&bpI0
z38cye6=;IisD`R!KE%eOzR;P(%yQ($*hORl)ET+&6rid5*~D|A)-)66R!$=iagb0W
zIX%`Q82AZIWhVusG3cNIcx<R^f>++rmii4LZW}0U%%f~1WM4U-0n<CluGAAdWLZSm
zPhtm2h?q70EqMb5Xa6AW7vrJ>+2ptItAI#q2HMED4oMujqscZ{KJ@t~O;^f8A7{<6
znwuD^^iXH@)x_q5Jl~^|Yv^<8R;v`!CV~!FyM~T;xH)P^Zyw03*5QxgV;Ln|)(#qL
z(1$i*{`{rc@~0U344y8(_=4BM(iZDUYeNC*Lp$R6<|0^tp&q}AyC&aC6WJJBQ>NMO
zI;oOoFlgwn8Dj{NO|aO*ebU9|=07+0IpJRqX^ps~z5e~koAjz#JM3zV%O7#rk>q}7
zWP#injoR)M=S7L6=5r1s0U_V_wX!UsgjHm$y9VbxvFS8vF}XYcFU<nv3^cm}x$G`2
z6Yv(`YE`%7zkCF27A3sW01vuhpz1|C3B+1ro{^5%rc|Ek1wMhy33D{O=pX~TB8GT`
z_VbSV9w^9B8KDxY<8lOBCfVbjX3nR@%U;%;d`%XI`JH^*YH>&yR#zNE0ETqOUe$vM
zIq&9k4I$&qdi8B3xA5W~*AY!Diy4zAAd7Oo97#=Uy9KRq-0>$@lnOJyk(`OHA;3E*
z4}Zd~!A#-v??<lSdi(9+8S(Ppb@dax^da&q&QE<#TcHYM`?VR;8twW`BDx*{5*K0&
z7zCuBZp3Bo{2~XCbh$gas!lqUWYV#T8JnmnFJSP0da8Qf5B!8--w;M1zdf}gs+)2o
zk75gMsDk$eg2=y#5=E!|RMRVX$bWp%E3Vvqli%4XKw&O6T!X_<PS2gAm?`%e@42T<
z@C@xnU%}}m-EMwyDmH5F&bnBb9RVbFB3^I|y9{q+t<o#EpJKgF>H>?Idbr8!NS;X5
ztlP*51+V*sCKX1fb@s=;SShT45#c&>;2Pa`iEvlTpN28wzCwD}d*nr3g$uOCnQE!y
zki45(>S}r=)7VH-BYd+y6w8MT;v)diU+Lo7m`Xj#%eh%e#2tdkZLE;foS|u5l~g76
zfK9e>bPIoO*sR?YWFWlQc6L6iY$od&36r@*h3)v^W(PonAn!c0+U7JK{xE$gc))kd
zZ|)*^oc5<tC64V<2WfVmq9%4Aq9zf<M@-stS(f*b7)Q8=mO~j#9J@~ZDYqdBuATRE
zxnJeV*n+%{(F1L+6>VN;F+_dyMpU1S?Q&3eP;uF1b8z8GbD0~0SJfB3QN7V2xC>)Z
z2&N!I{ei7~I5Df>@!pi<H!;osgb{;S<eT_T_4B<F1|8#?$ifs;BWdmDxZzl>w2U+L
zl#{0$Z>wO(f{-8mpm9A3o~BD>zGkARfQ>h|ylOtu7w44oT8g$PmU=ptz6^JR7`9{%
z0p2qW6)`k2e<e7`?_H!x1X;6wEsRi&b7vFf1`Vrxno#mS>S7>O@skZB)1f!VbtTO!
z#Pa1_B@>ELi<G~TIdfe{=#B@#U~Jv}bLz~*w`0Yur-Y}46<PtiS;UZbOiWt0gxjDW
zfdurTHV@~y|0$CyNyF7gU?^8UL3uXLDU?q19<{XJFdrQn*Ap{m%r^DN;0C(1#I+u6
zAkxm6&&jH>AX0hN+8we(?KtyJ+G986-jAH(PLMZI8rY+N>0I4@d2FTiMNYwqB(b*!
z9A$*HzL)mDAo}rb^)MjlqFQ(G*z1hVC+z6}hf=P^{X_4H*7074N(=P+p}v8&Z_pcB
zp*_%Zel>z2<E6N5U!V+!p;R~0F|t_{yQC;gkdD<1(3VN=6=tY7%g$h|g+Cf}`Xzhz
z`~<X}P#;O6vmjy-EO%r^nw=EH&KL*Cv#^4GTm)hkjWB~Lpm_54)wHzN6DO=vz-gMQ
zG(E*3$<AU&Al0w9e;8~l8{%HvQy&*Op(St*;l?n+il*Ys>Z&5G6+W|RVc)Pzs@y$t
zw``=K+F#lH&dofETury{llt!STGS-0o-PP}?@smw@IFMe_*w+y{H4iF$E&V76unO{
z?_2C+c4+wgm&R+z>9CJ#!;t=eHVmpok&bG@pr!eLHVjT%kI?;s<bmncgV#;}*Mvd$
zKNCj0c=1T3LZ0~8^_oT|j%;CC;#JZ6mkoP`Ii+eui4M+t+oW36_&=!2{>%MZ{$DkU
z12j>Rx+!K6g&qDg7p&iWwVO1=#>>4UWtq7ji#m8jIgxLxh7N_L5Hvm<BY&r5kO^5*
ztQcuXtdeuC7Ci}0>>afEFeV{POinLaO+!}g4ezl1H0cjE*5lN0zQ?bs?bS@*P%%5I
zx8JHWvSo27pupDp@vCA{7Diz9T=D|OM1Xp^QxtgV)04LgF-MCpr_gx6Kc3N*Iwrk$
z1r;klE;Jb^`f5sIY+~$pVr9wu+@hv`Zh41GjW?6-4#s<@Dw{9+;(3$GAe4<kNY()l
zudRD5OYp|8>@tOAxxBmTQgK2y%3s3>yJ{`xyk?8JQaXfu@l@H0irc-Z{HuvF`=+7i
zn%UB(6n_4Q&0<5^sX*SlXW5ftB9Qjfb&s;<)u{DoF_ENj@VZ2ALs#)TQgRmu-;8zV
zJ;_kNK_|f8l|kRxm8Jd|UR;T+&I@C2QOj76F@m%-Kog0Yh=-{Kr&yz$-6@n}j!9b^
z3-{y6gRaW{ZN;#4JUsBH#Oa&${Wi?yKL-pN@yxD~{{*HU1gxsNJ4HC6Xw&ir%G<U#
zipAO>4_PWOo65j{63o}V4%B}*ZAFWzDpBszJe6gtPtN}efYOb*72e#bmredzUanv`
zNKMp8>opW@X650(5MspJOCa^$=JUwswiJq(2IPJE>c5fRP&x?0h|2ENce_q9*^FiL
za$kHfMTxd-Lxf-uSBIWJe!TE5)_m_{H?k%5Oa}XNX+7FxIP6Olf6&lVl`;(tR=g<l
zj(|maC6mjchfPZYW~<%rdy|+>3YmMxdL0MpT8$!wU1Z4zau64L8D5nuCb9y9<H!tb
zg`qjo_l8FiHT|vy)*AA~>&@lx3ProH1MDFl2pno!kkNMV-`=Z_Zq`wDh6<lG?ZJ(>
z4K2np0e2fnJ<uEBXjg3aLjI3NC{B3QD;#g}_IWUo@kvWao*}5BywiRzmIXWIo-@lA
zCNCe90<HQZ8-oaaS;7T34g*Z|n(;5Du<jmE+;M$0%XingV_3!tpEgad<h9ywy%@ze
zbd3V<yo>@5i_8`!M~x&qLr^tKO8bcV+#Gt`QT|A2a<E8;;{4;+*?{I$K82Se3@eFY
zR>RLL$ceN;2NS(KLWy;PJL5F`!jF2o{CyVGRJAS7cYCN$08r2?`A?uth=Sw__D=$-
z<tR>k(|*$<s2!!FC2EGnUvP~VZ{3{r+I>~onj<LyEg8z8-Fe$ZKH)mJI`>_7EIGP7
ziQ_NLp3xw1U3jyIIncT$hO|v!`BsX{;gb`!OvsMv_K<ySs*~}2vn8i5WqjL|?FC&T
zQ0u6qqBU(^yXi^bxio#32flIICrRB)+HlQf$eZ(wCVk2NK<uQ{-h}kIaV%?^P^HHL
zqnC(M?B#o(><sm5J}oau`hFxWu{0Ui^%1Z)t7j;N7eH<mR~0WQn;5I4+0~73<S2`U
zhjNUsk_zHoBFE7d&>W#`3llG%qSlO7<$z(t<0j!?*;yE?Ij`Cl(CSFv1Q-M+v1a~G
zX@eV$W%?GYHUBWy=@9|}J$t0K2gWZ|-RKKe{r=L32{DiB)iDQF+m|Q?OX}dO>v!t^
z()hXVxgBxU(Ucf>j%HZ=Z0a-&c@CDjab|rvHW;^Ykx2hWdyY|1_dh*m6?2m9palVz
zCCm-ucaZlAm30o@wm$;w&K?RQRB&kSSx;4oGOaFx>73<m6G|q%TrTgkB0;AlBzT8g
zup(JeO9ZYXLC^e$Czbz^8$#5$JIGYf?OJ<~X}pKLV+B4>l{SkzvRl+ip!vKjPEx_n
zH|D@SXbO+fG2e^Cs@90&E}oZHEaG88Byqh5z4GLi+y^^zdb_E)$s<#wl3Sy;g8%_9
z%AsLsas49_{l<s6<GJ6=i(Y4_5k$P{8>fo|jv>NJ3jt`hsa?4}+Y#-~fr8}JE@^iR
zuS4pwpkuLYJJ2oE7GRZIz$6SsGiyAHu~(0klh`)ZfOk`v0sy2B%(!_~m8JSVxq^vq
zpv=m1QF|*_y5pH%#fzjv$P<wQjq$JX?L`miuP_Eg|M29D85HUMfr@A0Jw~1gqW9a%
z;V&Z!Ffc=~fVMkkIgs0)%ucG$6#El8ItKOkkZGa4Q|>xkW?^wHdDLL2`9u#F{D5YM
z`5>LxySL~hO;q+zOUKsuQ$t|0)+}SC$ezWE_`>n1sTr=Ok`7*CwY!X*ro_l~EpD?)
zlGTiDt@*mxF3iXu>@iSyH{YrFbMZu=On0wDV2tyC=F@|`2TEBA#pY#CHt*MFX&KcN
zA^d&cDRa3t92xQ`(H%=J8g>bvsh?f>b$@*u#2P&0al2BHa=MDVBIBin+C+Ej$A<LY
zSjY6Fc75Q*@EHy$-toRewn=YMy?}SVyN#k#l<f4fI~=&3ax`Aag(|25$03)D$YZZn
z(z=h=>>o?<-p~SNF0nD<c$6<8@dygBRM0gIRwjPq7RH8m;H&tmOvbkwI)zNr2j?CD
zetOWKVpDrWPm-a$p>)9+;j|B)xBF%+3mj&|u)l%lwI;-v@34?*h?W_bKUrCIWnYh%
zNU9^`%=c6<!0Rz1Yg0A9te5HNz8$jyh)uEyHhZ`ungcvQ+_{w2eGUi;>XA&a*O<U8
zo9x@GqvIPUIggkBO|40-84}5{5fH6aynfu}RnQToAA{5TN(A|wdkbi7yD1s1609Zr
z(z_k#a}J$S@Qagg|5W{E2!$UORWZ}4d%-fl7W}0N<DYtTJn75~*jzcnb7<Z1(2R|5
zD8X%m@7QFq^X&doN7)dHtxe*RZ>CA$T$x1$z7|^0r`crL6;9&16xOA~4mSoQHj>&?
zp2|h1oPkOey?=6-dkWp=R8;%uq~^(9`CDRd5h<y?pzY&*=zj2I70{l?lHfGZ!Ny!A
z(2oaMo5EI$TgepCBs|Z)r)}bAJ`rF>k#;xAEO0f@bt-U{ah<f}HetGZeJmjikYiAU
zDv)Nbt#o-UN&H+KK;t3t<4@pcIcYZQ&wUoX=myxIn6zNbpBCZJlo|1Ovm=mz!Z^^r
zN`Zxc8nR*)-+R-P_i2rSPE-GG<0DIEjt#V{p{`;_T2VvC^008yP)wN&u0$P7@g^%(
z*#|J|@^1257qJ)-kS_YGst{h8QJ>r{LkWgVVRYngBrGrRi)v~(B2(b<axGWK>v+5{
z@f*!ZCh;!G@M`ymefLS{SQRaCBQu$zW1JojxiCL#3Po>WKzKu^i%12JUPC`5%?)IW
z#qx`--`F#`wJ|X29^u44+Swn*=wy#add0SI#NdzRI>5~S5w7}zMowiSp@m@}^Pw_r
zGV8i%KEhSYf6aClc_=A5p@|Y5r#OaKcQ*gd5j|0SDL0>vPqGLNGAh(Iv&$SNx>Q8r
z6>*(`%kPJJf|${5O^4i*YT9{bT0X>%x3#Q~6KSpqZ|==M;`RCY{WO;a(7veCb}{mj
z_%nTfx5s#IomJ~eA3`XreA4mIv+Ieyd&){)=yin`?NM%-ZVIIqr8x73sHG>nWGcl)
zUfKTE+<XBGCRHFBz9JXb`t~5X-CBKJcadrI7x+)%FV-~m@6-MPmC|}DHs-U<XAu!S
zyWay<i38bqE6q$|Hm@0P!qjwS(_fnJ94E`vgsBYX#HmY3;o*V&rr732$df(C)bS<u
z0;w2NlGx|u;03V>S0_7kHt0RIxfF8Kw72H)XGI`9iDYfzDg}qKyrKWf53vFyy~j(~
zPESA{F1{$@<evc5t6b!RqQsH8X6%f$N_jW4@4sJ)NB9aiB0dkggmjVsPV%aIh}SdF
zH%EvU8j^#H`_E%(rweu@>iW$@@123n14a$M?Ur!v2(l1NP5cP&R9kfJ_Zae9_VNrB
zix&RqPJ;duGOic3C!tuWmRwa|rl4?h?iwEb7=d&Mk@&9B!8%bi7l%ntCN<)!pX$*7
zvN-k=#6kqF)1op3>Ua~p=Ydi=RhCuz_N*#rno)`e%uKONG^tr8Iyu)$xlokt_|7el
zGwifZ2`fZ{O;LlE%ve>>!S8-_No^iH2IprSUL%<C>gX4y{AQ5j+Ls%fc!8Ws`9T}|
zG^};8sPiCU3Mt~FZQU*TZg;Ex{V%&5U&NO_4;|a2nSEnG3*{t&*o`3v?%j-Pg=!Ee
zJt=L?b_*4EnywOg3T-v@SXiz}Jo^21hEAIsO5YeHqEI>TFZ3@BW^sr5qj~575w5C6
zbP?Cb@3V@HdeP^2Kg`EekC#XJ!AtFis0U$*YivK^KHf(HG!eVlj!5x&jW`A!#s-%7
zyF1bpY5F@qlegWhPNh_j+N3$Psy?KxAL-q?*^}Rc>0G3uDIXUM@poJomg^L+*PxC1
zm&6MECs9b0yME19M~1z2eCV*vi^ue#%5eS2@L!s5RRoVbMN#WsgF=@K8>}GxAL>hG
zcR8c(E1DpdRMg_lV0Cbqbf|W5X|%F2`X)Y%!Mo5tUTSzUu&5pb)RlZ(?*x>ZQ=&fB
zndvg1P?&dC!<nh;sE9p6hH5>jMV6xuk^8Cci?NJ~%4VZLa~Ce)u<x8T1ZTjHd8P>#
zDX8$kc)jL~IgwGGUa|jy;lJoe4a$I)dCq+b8yHLs_z=618TmrXr+%5lCG*gf86@-Z
z+$nlsq-yIg4Rs?ZtZ+mKiCyGmj?e|ihRsZQXxK0yMiBY^kZMe$UGqi`{tk&*j-$C9
z`D1W1NauwP%;`}8K1di9WA89RL?d%kN?JC-qSXmXE>2lc+gyourm4if1>l}jQ`s6l
z_9}U4ErD}5O8lTjxpVBe(5T+4vO_Ku4^r5B2o3#)mU&mGs_t{a{3dGi?W<?*#J60f
z>|4NZwaSnGF)fujW4tJ$&I+5ZoJa{%?FI&lE!U0cIU}|oeIfXxvQ`!LIX_TfvgFSd
ztlZf9#Ai_x<02$5&Nw}L8Z!mgCDx{i$eqp6y%i--6T4+rLh>U8^a_k_-^qdoOuN8D
zDzqyh9OX^MC1Hw2{e=g`l8lQ487WHeI%vL7P2?>HiSu{!Wti`nuMZ-P_Sr5bU(&}^
z-Fz0TeHMXNJ$A8qV7WZRXV-V892B5X1HsWrTBjWnZ|{U`39Mqp=~)#Z<!ih>sdB;=
zZek?;jm7q5R#&XNb@se46})jYnZPo+tDUtsWv<S$f*(#5l3`9%%8mNX0PyNA_r5Ut
zQM>BlF(P=3(~2Lb3sY}bJPH*E8NyzBdmu9s$9gL_A7#GxIeFhANKzALlJ60N@HSyD
z_uP27sxF`YX=Agd_AFssH@)wD{@XgUUkLWz8>jN-(LBk;GO9|Acp*-<d+=Ndb}T*4
zX6d<9olRj~bAo(a=MTrRLP(;y=zJ9ImdKOu<{)GeuYA1Hg<F%4_f{I;!<!QA4kBJ`
zb{!B6oNwhqjEzkRW<n#rq=);nuaDP9cc<-gZ4F?)7>A>N(H`xbp3k7hE#3#<FL_$b
zXUPl1PS!Z-fBtt&EtehlG0wgF`40Pz!hKV&3Jzy`l>Jr&K1A=AL%=bhqJ(^f5|{Or
z*{<lFTz(D5XKnA8)VVJ|E%uCEa65(KNbTkrw3{Ty*$uM%dus@@Ubht7yQ)c%@8~$;
zWMfb$%Cv3`b;1^968WXm-StKkzk)!I1QDg_aHq;}Vd{`{L6u$T!{}hjt672t$4GJn
z$#(wH*HZpub>x2zA<83E<sl6aywG%%9Yssb;xETl)@q4}v>{Ee=Y5KIbeu}#=OgBu
z(MFTMRGxACRQy%5Gm*+UL>|Yw?w%<V%@nc4zyI7IV6IiBEn<y*{c-GNO35SNr>-9o
zR+2@KoA-g*Z55{9hqC)6AQnIe&Nn(77Qs_QZ~QkV2gbReUhHl2qTr*13HL&EW<hSM
zkcesAIajvf`C{KyR4Lp{b|vo+=!<l$4aoF(ij?*x{Y$rC8b@Xgd^k23Edp^PR+SB*
zJ3Gb(W$a+!S}zWp?QzYarQ*uKPY_X^<X70jh1(hon@ut1E?PP~uF6v$HWt&3Cp#P~
zs*hWfA~5XZeD3Oyd{Ngz?Ll02Mih9@_-WMRN21F7Qj8z))I{`gq20zbb@Ml0vEV^<
zNf&d-pZsF^;tDcNUX63xhSL0lZ#en$=NrgEEwON!O4D~HCz9lVZ{#_BRT=D|D?OVY
zj|m?xOAgS~YC6ce!!riA@zS}k==zDx#iljCTw#{{?RgM_mMRPc!U&=TRd4gDagh!Q
z7Cs?~-~wf14L-6R$bCq>D5D{-!C?zLR<v^cjz-A2n<-ualgG_ukeHOQIst1S65dbX
zl6ta8UXknL^t;_nLY$jes*;%?5Mi(rAqjq)p*)tsVbU+C{8Q~PwT5nE;zZt(2W>L3
ziaoREXckJYUs*B{C7eiapB-mdJ04xR10X060ID}8hsFvv9T%W7<1ywF4uN9_qVxgZ
z;JOqQPF3%{GPvFNLrMBko3DN&Ma=3ytVJ*Z^w~^De!wQ|Nw(|4^*u$!6Mplaq|?6N
zfLru7fjK5O0~=SslqLTF{x83g21t?e81vss?<{^K)>N4T3z$?+?OprJt?j#EaDb3e
zp-k~Y+w;sS#%es{jD-2N+x<mp)?bfxB3$pXr>WqWyp;D#5o!4|;hSjJgUG@VP7?%I
zb)lED`o%&7sfYoT{t*}7t=JxWlWfV+v;at?=in66NIJFrkmR^FJf~yT&eoJ>i%Kk@
zBf2SHzbp2o>{-A4VkQIdfH6{hz{N*=#xn5COr+20)}RyAM-;*^RJ^3tR#qVw{$4@|
zt<d5@)Fb#xw%^WwBs2nb&r<reV%&PBgMaZsfy(SqnC)w*%$Vg+{o#uahOFpI2pgSC
z4~I5`Nz5tYYe2hs^E2ZJ6v>p_wkiXbhN%Cgq1f)Gkve4e<;y(?lOF9vmj8@urbI1P
zeL)W%=h?Hv6cOLX(jsX?HPUu)z|rAwtdS=qVj&u<uIPYG^sZD?e;NI~OJg)K;Lgmz
zerXs+iNR^E_?OB|>ic-B7q#OCxUeJ6?m%JGnsAExs8AT|w9qR3P_P}$Bi?NllrQz5
zo$r7<2(mR}kS*2zRtdEd-FK80LzRiD{?f=reLepan)n&*SnBQKpkn^-@&{hdpKsdJ
z-_Sq9e-z-$LN^UA4luMrRLAFA^3u^lk=Rq_Bo-MG2Se->x6wu`nPDfsYp}|Xwc!((
z!$8k>B?K&9?cpg>u1Es?=`{MqJ)ON?q~o!~rj#Re7DnDugKyw-B;LNNx<_WC-)(9<
z!`-3cSWVkUbMqlEEsxzg9vSCA-KN|L7Ln;RH493y=V2nX6A_3~W__o(enB1@<uu!w
zOV||B7#^w|_<U{*Z4vf<isBz>{9uG(;=1Va@zC5B-111K*kRn<eMa|kF67}l&PBjf
z$aiYvaU#X;)l^GoJs2~xShY78QSL*2>{_&rVmJN6L=q`BpPAGP?r@Kcaz_Y_9ari_
zAFwhYPUOd=%M|r+^cX9T(f<RPKxV&AomfMX`I!v+YxiizN_%1KgO=VAc?*r}b;2%J
zwu;LipZEE610?u+{{YCZIdG01?tkvvVEYpS#c~AXDLh#ANp9xB?a|BFrxzp-FWESA
zIH|I{!jpH{`TSi^P~dhG#C)a5ch3um`7QuK$k{0C%gjJwDZ{X?E<kx;;JgYPO}N`f
z2tp!dxPZw#f%0YD!i2*A0MaKv-;sPy`tsKs>{;w~LGNSaxC^F8QF<8=f^lfofF5nL
zMD|91zdgHU57+!h%E^WGh^BXIwnI)|1^mkSeP;J9ry+W>L;E)j0LC}cR(5+j6V;~?
zvN`e|=2~_=FCi1ax1HoC*)YzZV7#$+$<N|apy_DX6OF)!^}qG@?Td8blKE+&80jGl
zvU1tml+CHD8}T68Gus0;9|A#<<D@+^{iHmE<6zqpf=P*L>=&`@YZsDwAP*;;*bv2!
zWVFbC)A=jq`jQ!IB}iA>x`cn_E|x#n^B_A9{Qm&r926w-2*1ivXR=QrPL>WA^2@g*
zAA}1BiUG))-GtfMxt@MG?_i`H7hccdoo6l><st2-uPl`p=@;Byp*QOyF1B$2^$59Z
z(|pWh)=j$vF9bhqqsA!FeUyEBDWN}PxFng}v*91atD%1BdAo0U9rU@>uZ!m_J(BK!
zix7-|wkUc30LasF^NAO-AGkLiw!1p=2iZp}o8o;WJRiTm`?rvnk_li)^j`#Hyi1L<
zp^!{x9yfeo;I>>Yyz*vt5$R~sGgQ5W@eT*FcP1hj>ul;2&4&ux%m~?+?H}~X_Z!SR
zWyjb@0^P0Qke!0Px?y;whD0pQU`t)jNsHNZWbr0_f+QZ#&4OCOc;BMX#uRh1Jj{$e
z*3up2oY9}cG-Ktf{ImW@b&mMR-k*oQ#e&RE7LJhbaucZ|k8R7B7svkqw~ubV#XTj?
z5ObC~PH!(5kozOVcs3raSg^(qZ*+&@j^M&R%+q;bua{wM;}N}bL)8{LOpHW>azn=>
zvNq@7OJTo;HJ9Ti`3O50$9Vzl1^ZbHTECEkZMiUflYEP@Y_|)M`PHJ(v5Ftt_(M|g
zHoCt*?o4d&)V#~kf%5(WL$RZhJc(xAAkJU2bpd9_++!X3S@Aj{m>uQcwqGt&<k?Tr
z2=Y`i-;AlkO`qb!>etoufBxB~7p79u!S+1UP3t>uULYTA9AMtI2!!-yvE;9l=wCrH
zDYnmSet9Q|^_BD{2sZkV%`c>5VX6HQZUmTt+|foD$$65Z?c`~v%MX%0S=;UV{{V&)
zyJ+Ubc|GARl6D`cM5ER|hAZ3rx56LU`jS4mb9`m%ZiyMl22&0a@3T3+UnJj<6R-y3
zGosUazfWUcLCFrf$en_{f;@n@CBJe!7ak`;yq@{^zomrb*Jlr5A0%M?h)3#8o1Y<<
zB(_A%?duy|q-!Ok?33al+f>b#c1GI}JdU&30~heQv}|}wQ%ip>9<Xkoh4c%<aN%$p
zNv+Tc1N$js#y*DI1@=ri2kCA`mJcnKFotwMB`5GlsX62#f8LONgYb+PUiky|52gj4
z?z-~*Q7^V!VZ|a_Y8{c}5eQR~m><KFNZ0TUlO{9mq{G$csD3xlh2uX_JNjLP?V-Zz
z{E_bAsg`|~-?EB--b>Ak<RhPqC4j3bB@38d!g3+xyoBM{3Clt3Ln~BJM91SMI~X6V
z5IsDL4^!5x6fa|O;LG=HpX;PD!fE+fFCfQn;t$n;l{Dlp%#s7re*^l*?QYmPIrjb`
zw&Sgl<j%h&$q$gw?URgR#!2L%=>r#`Z-kGBe+l|;@UbM#$s$eAM;mgae20l@En1le
zfnktx+4GQI*gcRKU7RJSJ{RpGE%UcNPtk2M%wj;){{S-_PD|nf_VkYaBv;98r~d${
za-X=exXAA;*a<_EkCwqbSl&m%&Onp9Op0<D)_<8Ek^Q)S14|K*$VoNQFMVW_l;ep9
zMB^AvWK|9gkFEVl;Ry$S9QdU2(0BdYPS{>V5GcJS!Lag9m#No~7iBpY2@!`FrSeBp
zW77za`^Hamy=*_c{29k#@LvS@HwBlL`ouJQKgIMJ0e-LIG<wFE%Rg4<wij*?cw<k;
zzgvGL^_E26WWE!}W796WCHx_MEIJmxY`#psTX~Yc7bi%^SAXU5*gY|vXS{-k^|V3u
zaJV@=aztw{8Q1>VIEOhDfW0Jhv%V`@s_e!)7))S3#QR|4(_!uMWVJaR%le$Ykxx)&
z5QDy0$0SzYM)<HokJ}$4rb>(xf8UcR!^y%+4<kN8-HxobZy-%1A1oZ1{Yk7RJwx_C
z3mg0<in%!M?6a&djf#`mU)(E<6V@^e2*V5SO`d?2*-#!bh$qqn@?7@Ej03-4`$;te
z<c8k#fjJ(TLyUilFy`D&+Ddfn&$3JeYtqxLxozd}LQ~&A{{H~t!R%vaQUTZC0l^vS
z%9H&`K2K%4B71aQ+t2=5ctSdr1Jc35>+Q9|K`Z6vwBlKGSrVR3k1jlz#!Sn7Sp2$O
zdnO<b3H>Fq`I5=gNx|9clcSI)mMqiOapNP156Hvh<GBl`@NnQ-Y4bn*Py9i-w&W#F
zkZ+J2Oqta8<)h(bWRr)tD_k>76WC0XJZxn@SN{Mlhgkr555<EE`jK>fhC@l#f;G5#
zW$dN2FnPcBm!-(}W`W>AO}OCR&4F_LS(yU)H`q|Z9vC^c4E!Tce-FX@3iT)HhlP=x
zv+me5mr6)rX%g59<@cR-^7i@?zcvfA`a|@Gm{@rP$Ol$#{{Y_K;U@`4xA=+n?Dhac
zF#bV|LD>B*6Kx%2jUl+tkq<Lu73bTKTg-=qxYs^hIS1_1ko`fKKFfVT5+2$<z|;Nt
zDq|dE31)~Bun9BByo{dqeuO*7A4YA*fBc5*4qA<+<?vv5Y`FGE*?e!=D}9R*jCgMR
zSnU4SUO;-v?19YKA5E7#KKx1Wh45q}z%SD+d~YG=_#ytmy&;~!yCO%#CG3~5%r-@v
zqS7wg^ew$uPgdSWl6YF+>+())YPI4Bk4RW;px;{_7O#`~$_;&q!w=P%YX|jn<Cgx}
zJXzsnJdiJ8{gB3c8t6gMzC2pKN<4<$F@$*8-jXLK9b5Eo_}y9dO~1I~jCk1JvIdce
zQ`tVO0#L#6G3}09<pt_pwz4<3U*Tx!9qP&ZW*nEW^dOJ73}=+JAF&H%Gn_(l&$|f-
z?SSGQ8{5TWFN`_=0CWEUQ@@|@`||ms?ZN*5CBsXnNU6V<9A`ugmbK&`w>u;nnJstO
z#?G9wy|5W=dwHm0^ON3@nb>bH&1CVl(%Ri*<gn~xus$Il3wX<1kU4kX!GBjXr)`gK
z3pn=QOzX)sn0hui3$ICc<Y}@K*ER|EK^r+BWN&+CJ7+l~)Q}U6{Vo)}Fg;jXV8{o^
zYBu2@>tnTeLiFUDuAt##vv0Fbn-9tPO=7Sbc>=-z0L_D|@E=1i?#slP0`#5Y4@4YD
z^(V!%sYjOHz&>0Ow)pbEu{&V$++x6;zBxE~I`TpC->Ig%8E46NrGXY5G5kAr`^ui!
z%i9kL6XZUpdGdGCNy%enG6%ElkBameLk)|WAT1vw=vnZRILwdNbm=3F!;G8O!g3sY
zBjX)!uF?8P&4l{_yp9}(`yuV2;VzsYS=vFwkEi1u7fdX2J{E%(U8t+8V)RNN(hTg0
z&tRO4^n>Cpys|KR2lC!-NIZj<{ZHu{^64S|$>K_RET0$j$_Fm4paUcaV&31P1%TWA
zU3HT9{txnOC%n7z`1-J9^-X)947763%SbL{j)*?w;iN~C^tc%zfcqjkTB)nHnEoNo
zAsxH+Th7>K1nbLZZ9T{7e+v&u&q!Xd^94K!WANNPUHc|%YlKndvAFI;pAuf>lR{Hq
zb7ssXVNOE*PNak##^LHQQ`?`Z&#+%D4mUj#{-dmf$T^md674tHJG*L6mQP`MjWu5*
zvO^xcjp~&-1nULekeWui&RlIDzzVn|r?;tZLmyF*)0T6so$0W<!!33W_hYL_?~p^;
z*S2ZPxWkUvF|hjy?1Dqz$15Va;!*5gjr42=kT-9L?A)xMX%}`&%jUS0@(+@C3uNVT
ze%<}KlP89;>i{*j@(^SU*&AD#d{)nFZizmvI!pLUd1!QxOFte=_7S;1q+XX>BxI!9
zXT)&R`82w~b1l9$j7vtaeh}YJ*%%8Jg~z_gd;A-Go!AVvHz(sGEeltLvG&)-8zIIg
z<<yMe9^B_-&nNF33tou_Pi4m}#}K?dlD&iCeB_6b17--1+Q}MgY=<GlB<QmeV~ww$
zY;#{FaX>Y@=`O}aVGJNxkazhScKZ82q@ypRaAI8c7t7hC4BWkNf+Nt6W!ol=FV>eP
zVzcsFrr0)l5Vf8{YkojFG0}4B35?b|8a8?QC68?p_no$3wEJ;vXqPtV9@)4zA$)9H
zNJ2e2<TDKQqUXLLz2*PJ03{Fs0RRF50s;a80|5X4000015da}EK~Z6GfsvuH!O`LH
z@&DQY2mt{A0Y4BSPn{*SH6hkEz-|f(1*47Nr9pq`RNQj)fEE#2S_D#2gPh%&U6+=t
z#vkVq0Ja9Sn+VSEgO-~C9XnJX88^J2jC`-o1Bist6lt8HR=L2yQ4$wrJ6G|XA_V9j
zZ`KnwptPg}j}sV^S|K#*Kbdm7KnehK04S-GuVlt+Xr}vNk<beE(l3HjA&fxi2Aa~}
zywYgEn)2ORt;;uN?vK7O!42rT0<Kj9L6nwh;DL7B)ID!W(o${Q%{=;dc4<5Y`o;ty
z8U}Wt78=2`4z|(I7K!VO7pcV+hBJU4h8rbE9!wix77`yW2Rz!11WAdg<S7j@Ew-1d
zfgpz|B9b`sfmlXqGJ{4#Lg{e;(<>$VQ9%wF8&1eHMFCrz#J(&xtcLVg=gvyE!s`Jc
z7TY!tIlHj#K``w?7z8U$lv(k-c!?Vaesz&u&+lxa0R`>NZOn;8I?+_oYu*P2l_;N9
zkL7vEi!Z=XerWdOpeHB26Hd?0azs5BjxZ=rfjmXZWlcpzB%OiFznrF8YY_ml`F`AF
z(jal&@zNdOMRgp2NCdDiyx7>f7m$FjS6Ctq)Ls;5ck;nPoAWFLy8ymdS?5T6S}Sl0
z9rK#7M-yo!MBpDdM&yOL!hsOxc`y_1{ATtQa(`HJV=z9w%EZ-J7NQUSEJCD{oBGE_
zXa$R8yty1aEo}#I66ZKB1TyBw07p2F#4h^PuLifX4~!6}L>a5bAS7Txg(eaP(aV$e
zs@r@LKV~gIIJDQ0hMr~<prdVj>j_=NGxeJV#73VUF;rP@RS*g^YsA2yVTia_f%wlV
zLdPCv`R5873NTHPOFdj_kro8u<l_LwL`t*qn*JTVlYfI#49kRs&#<uAbDYKo^jiwI
z>#R>J7X=tTpj>F^sSCz>=&u+T8?34gI`4+O;{oD}Kxp^0sndY9+yT!!esTHm)hjuf
zKr~)~VOHYwVxi@Wx+3;CSFPg(jtR2^>ZjHhfZ_$&w+PU%cdv|z6gIo_Xv5n5G$Obu
z{21FaA$H_KN2rqr8zEMKrC5O;^MNv3Uiko90`-Sv!4xq;*;X|aq$oqd<i<i8jq`>Y
zV?lX8Qy=V1+=T5#x(|6t+lu>09-`>G#x|p3;<T|na@IX!8(2k7;06zjo`P@>q79_q
znXGgU&q%<ojSaX=NH+`A{ioJjcSE*Kfa&3`RxV9r0Pz7j928jz+2l?h?jQsQrm8X+
z8h7EvuuwF9*l<4#HetXL4!Ty3j4%8qNRY8U++4gGJB9L698QA=MRf{%aOqV>U=pK4
zT`)92B=0ZYSR;#n?>I><ptp0|gLa{E2O_WeWsg@tAaPh2cs${D07)ZyS;MMkgzPoh
z04KGAxy;eBNi+paV6zX{^@N#O{a&$XL&<gkq<G(0kO3LNX79bOBmg{tFY02^MZ~Ll
zvFDc)^y7{Xy`&y4Da{E0ErL@-`S9dNB%+NDjtP*cv_M>EoVCr+9<cmSVYl>SQKZ@G
zuPP!Q_1-@qZnka-$=;3dWsvF~qy(WWy7P?8Y9JS})iEdxi>$rA!=j3-G>8pv4lg4M
zx)o?J4euCo%BY1!OZ5yq)=RFH0<cM^fKrwq(7#0c$MGm9HgjN~!-d<ijNtO1>0WUV
zv^vPxBYV}wVU@^A;NGg=7)@@z!xU(U<sK7w9{~tP%WYPCV6GlkGo#Zo7i1JE%*jI?
zG}bbJl{6R2j?usXW|2MXzH65Z(;E;?L0`O{KnOw_K_2|&A~2a-NRG<Xl3~HJ4A@nu
zJ4(zp+Zm%l({l5viI!6Eyg>BKHRi)f@(c84tqwUU2S?n+dhY@98{dwwlO37{qdDK^
z#VQ}H0-<D9`^Q>=3R=w3Z1PMBjWX<^+Xx+(#yN)lr50fuVqqM|ls#+I3hK82@uGli
zG@w`K92-T5V0#om3*Q)k5yA8xCb8B4b|M0y&rj9~0`R?Jd!i%E2c-CMa?p!~DZGKo
zdJJfkGvMhUHS;o5O$8{b#odc17dcNEAzgRS_vqoWfZ&H#0UGE7EQZjbII#$mjW`z=
zK^o)P!8n%?;n0CVv~1z`i=m@C05d`++J3P5vsVaIe2NLy0Fl7qXY+?yV1yd3j21j%
z7Bz6#@}wrU(TfNPxf~-zhP0ckYtD`)1?*FIj&T_?Xf{@b+N+O@0RXgiG7AT1IkRdd
z1X@c$PD)_mLSvy&TM5%fHk~ePk?<<*I`NTQVI)-SBO<*TnIwViDR;xg$!s=Wli4Q+
ztlmJgA|kV5qwf<q!#a^pwg=uOR8*8j6D9>WNOOGe7Npq=rTO+{><~yjzg{u~sMwHG
z00x~ddrAB7S!9na0~Z6UR`V4q<@m`a+dj{%Pr?wI2Dih&Qcyg34-8fG=7vLgU@CO1
zOj9fdsE-SOUh+RM=m4qy8DzTD!BkxrcR1qZL?u0LTj7{Y@U;0uQgGyphq6g+MyC1#
zavR<R!MYSFW8HV-6^9q21O?wM=%y$wlc+F>{`n6hE75qw6-Rz?*!Zxm90vaY83N{4
zJG8&HA!j&9DtG(H1F(>Ls5TSESOJH1I^p9P>ZnTQ=}S7rN<nQpi%D--3~g}XaFYjs
zfxCeMx=qx|H={s!z}x!_l}^dE+nhT*OCa(C3Y(d@D5%jv1!8WQ6=VkB1I_+0wX-Nn
zZ{GfJxua|#xEl2~?^tOuag-ENbJIOzR(eH-s&m`Xk)I1@fy)=P=Pghh17$*dI8Q4Q
zRS*OTtPPU9)a9U2DBcq-Wgr3E-~DkFMXd_gRhXj(=+HuG4cn}f^PyHJv;#;u2xJg|
zIHY-=J>*Yj@Kris0}>`xd4`_|W-J@?iZw#2k3Z`aqlp$n`%F8Io9*W*&g&b1JZWb1
znr?Tt4;|v7b0ecwdDC(00g+jN?IEr4crq#wg#he<rZTe?ag%eojJ}j+vU<emMmuk*
zgmpx4g;EXdgO1$aaJgeyIn=tr0Q%cx7gvyP#x^82(TdJc35$?|P&p<YtP^tqKy-=U
zS)lT1vq);nsVW#D@|Xp4l@wra<8q=~paaga;D_4U&Y(2e19;7rf_Fh8>2<<p!`mPj
z@i>*&ShSD}0lRXcZ#^)zpBtE(B&(yG9O|GzN|FFX9{I;GbM_#<*r|*CKQ~37UGdz+
zUIMJNnpzsfuwCO>(sOZ<qYyh4SAF9C91T7Ke>i1aX-}E&tTI-?BBQQ1oqdvlzw~5>
zPlixWn!E5}mZsU>{{Th(WI>c`KA$6Qm^6595~&Y|0Lw+)0lJb#{9qV4mx>MUkE|n8
zZNhPDdN{=ZhL?0wO_e?18*y0jMm*p&ico$Bh4+s((lV58@^n|pl<0xL2fmw5c+JQ)
zxm{w3;KQPDMKXHuX$CM{Nk%To{#gYE`gN2vw=ZT|?imiG7_*WLB@{@R$qCml-dh_6
z5Xc_>F~+yX9+3WS5DF)JP-!F2dAZxVS`iHoXL)_MM@gX#hnLn-h7v9JuCM`7@$WAK
zx+8eQ5BQK*G#xg0*LeFPL2ltk{5coIV9{<<v1GXD8uAUHS7ja?64s}U0TeW#iM%fL
zQebMqSx6e?%TDNV8sY26@8>rSvs4iU2MPn@5L!#zqJiGSddlY8U7V$>jB0i{vh``5
zqKtFE6&mdTZ?AY`L>K@o72ora_f<*k-Je)mE+B}C)Jw4r95SS9atIDu)>%b)C>|R@
zPh*4&bkLqVQED?AR06{Xc10PlmGODEcOK@>v`Jx=CkHF3XLMOh6s9Y65>Pz8z2Lvd
zM!HR&?x~W~)FJ$CYx!pm@g<5SrPxVbhZ9&Ii0tL%0GjiN{jU@Yd<KuVA+RM#@xKZq
zlR*dpt?OZR<;Ly6xST|bPS_%MB|t@8JS^i6-~j-DHB|&-Shd_6Y2p^U!;<C`VxIWY
zV&(q;Gr7<SwL8H8yU+wFT6)=nrBCK1r+Oc}I}&w663}Rww37Ixu+ZT!hsEa{(iG_D
zAZSSEH(Ucp-Zu{eL9+!Q{9+dY?#6~mbWJ91{05Ib+BL!65kikk+g!I50i*&@--x;7
zag=~tbev|!A6Nv__v2rzsIYqgFqWzTaZH+=NTPk;c`JGl;{O2V5Wjm54Jd4!VuYhs
zf$hN9B2?ocr5q7%Qot--3_z6tJaov1a*XW<jrmN0)-=Nqg@~=?+dY*Nzi1Bra%4tS
z1;rYQ7v}(2W;%l_0MdCgc;9KXjgTo6^NOl8qG+XTBw&{%1l)EJ=wQQIFbE~@KLmCV
z<9Q~+4dhZ9SkY+iVjpg~D3ez<U2$<#>KbGSJJ=T#aKl$bI7_%*oFQSua1>MDv9lO3
z2Ln=3*BqdR@aeg@L#Zuw>k%t0;DHBQ+n?4WmKiE<fN}C+L<MpdjcC9p<Bvg*-&m~u
z(D}w2fRhl~96bHvY9|`{Il=-U0C#eF8_V_+>hgfl2R&q<sU^u26M)y399q>8lu|lT
zqtgqD`86<mHuu&vq=<`Q$!__ncaSZpvh3D{IoX^h0Nv=j6PH-Gu|PoDg}0r|RieFb
zD<$=ls$6k^tW(VMyfFya6sgHpyLj=7p(E*7g?aO<)@#0$f;<I^?@S*!90e$`9`KcD
z&?yS_rR$7;m5f9>Icy!!^MF*B2^&T5aqZR>)5tEYa(+iBf>){sv4vvjW_^e$=YvPq
zEC@|VY`aFchnmRfFt-fbk~WMMe0-IZz_ZEDA0rU36rd#|357>skv@HOg-Jts!os4r
zPaC34ic$`s0f(Mm#$WcjiO5%X_+-F<;1D48AYtf%fKVWzQSu%#oMP_^AVi~~5yZke
ziI#u@#PYG5mcwN&w?HqI?+~<bX%Z9VI>p(+{!*(*ajAhWDqdJV(0bdNoyh>5D>+<&
zih)|_Dg+4y-!~Y;wrjUMk5W?;^rUwIdeU>k;DJmbVd6<6V6DO$a0JmoJW@Y+_8RPD
z>S(7goH~tM0s#l3W^U#TF37yJPeX@-<`7LwT3~!QJhcN-5pQ9{VzyzQ^g$nk;~G-G
zJ5&AVq7#$0V^~L_71`KFQ*#8=&G6WI)*HPgA<uY*f!yc<tom^=sB1tI$V{=f0#l4a
zx<42b03qo^k=R!GF{|}{>emHdo97aUY^GH<&fi$i?vNN>4L&%<tmIL}^(ppa%{5R$
z!zWmCo5__t;3}L<CjLGF=#qnYFo00bmO(klf*hFK2o;h7f#Js29`f40NgadX#veEf
zK@;Qyw|Ja!W(Q7?@dsFeryJuGGSk3eQflI9@Bzf{ArzogHqKnf<-)2h(2WA5n!3a&
z#V{TN0KW`IG2V&@jm_qa5Jk^4uGVvgmSt>3K|K@RSf|Q*iE~nJjXK0%0iQxCEfavt
zfP$iKtPvz<TZb5sk`__+Mok?)Gi_+23@UBiPBLqWic;~rmatKLp~ho!*TL@<i&jWh
z%SF&EH;a}il9{jvl=9&6B98zR8qN900C-dam?){E&LsPR0z+QfByq+DqCyA)g`paW
z&NbXK0;Hi{LyQ%5)wKbej2vp@+mHi*5mGOHrc=hz*a$AGvU9xA=!vp~VX$<b9MB9v
zv8Kdy+lEism<JBtSGZsZINll!AQbT7=ap-cRTa;R8`R4aK0<PPG~x1?K8m;DUz~Ub
zJAw*0@av3FglrWhyan}!V#r(+O1~&AVg5}Wk_2<3-#7)##NkZ>zdmrpUV@RYBf+g^
z`Y@7|ihv~dhj1|ft5ItTzPrP;0CZhAgaVX%!nkw>yA1%0Nad=FwY7Uf#d(Hv$Xm}E
z-jbh8Tj6*pLplobc*anYA*+PpY{-6H_VGgKa3sKJ6wvCkP(u(Z?a~601p=35raGna
z32OibP$lz?w^z-Ogq*L1kNE-6AJH@hIB|-Aj!R{5B<|#QRbf^N!WPQnVlK&2A~~?*
z&Il_ZtPSXeCCVkqBzX`3?F8kxjezt@Lt{Ywumds3aK7zMK1?9Jrvre%u@`)nS^jX2
zuoSAUkGqgm0BHw%`Z{KdEK0PCtPte$t!3=d?q!%!l?_1NwJ|9q%BrC?;$mh8O#rs~
zhu5r-rg*V;kYiO#YaLgHt1wa2rL12i(DBKMUEvWd_7mvFUJ2$J?6sk(y77fZA{L^!
zQ*8%4<EEN{)UlJ-#sqE_1fZZ7xyE-HPcI@G1-C}=!IYsS2rSow=M5zEID86o{9tgH
zJp)J;riSWsjy$mN*(#xS4VV`6N_mop<TAF`K!GVL7tO?Q6mmj{^Un3W2E(1_Kw+}E
z8=f-NK`K`w5f0BLLe{a-I1#=%_l2=~y_b*IIHhqRXecEoTTH(4LckZm4VuM#1S5be
z32<^|<VEYY!E~?Ng?fWmiRTsDlb-o25;TgO@4<jIohzjQ13~8vR3M4AKo1<bFq;xY
z6n1$A@x;{0bOck(t@y@m1B8N!x|O%@EN}=UmWN8{=Qst%Eopn&(emSsNGhMD+!w1B
zf~f*)`7-BL3xK;q#CSW#!qU=lm)g|i%KOw8Z!`HgxYBvbh|!ppa0bYz9PCc%44-Ry
z5~UJS%5vlt_Z%?h+$eD|%NU0Gh`kB;!V^RQLGcR9G<;-kYZGa^<w*L)bDYX*4=sZA
zG2~SRPz59ooKOQ-jNpqDsq=(C3%M(N#o`JMT^z)^(s|tbz=>gI@Iq+b*80LEY19EA
zgH3IoM9}aO66|*&lbnS@Lu10i10S3Q`O<a(Dxv`R%MfItrU2Exv1vrL5J(D54hmwe
zBdpwUZ@Gbh`CY^SyaDuLw=T6{Ab|~6IZKiY$xD}$A5;hmsBSN5lNzP+j=DAnoRL(@
z9heTH^KUs7@fhi-@Q$N9$IT%j4n$_D;|L%~A1W(#=f?4nBU&#o-W5s)sV&sstZ-or
ztB~PFFlVew!_OvPmq$Qh3dg<4eldATI6Br5kh&fNuFCy5t`Hf5i-M`ZCT(#6G<F^J
zm2WNqTVF0)<2AqBRuZ%uOs}57@O2)p3_hSBQ^eb>nugq<s?I1+6@X3f@Ozt0{a}_;
zt*Nh<M?ILe0jAdc3uecanm|;sLZF+-fcwF|a*Yu?;H%s)x)JC=T{}kFbDgwW=LR<l
z-Z9dcR3S{}dQ7<Z7b*af(A6qU;iCxBy#aP3&Km`1+w%OG9C41g6$gMi2*~F*ca0xq
z4qk9T05!1Cx6izJX#F9i_5lESF`a2z=nAld7t<;$hqdPe8@(|*JGA3mn4BsB+?{_o
z8fZ`>uG-(Z)(}>M+}lgepT;T&;JX@e3U#M<S$gZ1Z#*Nb9X)3eXdM<&$kr1Cb1)n)
z4n)W47>)qig6Gn4m(Zexyi&K$Dy+zj0ylaY%?y&c0WUq41n+KFbxIti7>9hg+>(lg
z1XpcD-D9GTpxdSkA`)dX5|y^o%8!KBK#Sx?Mw;IyYs65*Y2fB~cYp{#qYENXZ6$o<
zCSP$m1WP$}l@!2ek8bR~y1<Vue8K^;cPoel4h$NNrn_fY8Fh9H<lyCRTrx(WU^HTG
z>Bcn?Ary9*XkEGtCgxwD@<`}*We*`5X*Q0Z%M%y+zjZ8VP7ud^ss=QZO$f2poUhPj
zqr9Y7wk#WHazP~nN@>Y+%AS63G#_MlLNyY+JmPA~%W9AyHe2(J42(^C0Zlx!W9Wtr
z;;G_M^N}G<7+nT~$rwcYb*9pYd7<`ZszQ`IB16C7$AV9dt=JNd!eB;6?(&4tuVZ}X
z@f-Al1GQRlcglk#`6DJ9V}vc$8cGU)TLCAW29Rn?n4p;FiIAyHI&O%PgE-061@QSH
z$FOxZfEw(fr~G13b{$}LZM%8HQVM_V<EvAtd-@9BStEeVq9>ubzZeXJVin8d2m%lQ
zPjY<p7)NhoPUr^DjAeul_Y&(E1uqW9CG8gYPv?w0ri+zz_J24klC#2kQnuoH$wWF6
z37)=LdB;LTO;l1qZ2QJsvW2RfK#ZkQ@s=V1f_54JV};y}lUA((*khUY4hX7)K#RGG
z&RP%&y(rm;5veY57(o28?&Me$)ujhw4i+K1ArU7)1>cR~RGi8tT1?}5HI`^{;cY?>
ziQad1qMCsbU#t?9>_{~SD7x1k@f$F~5*7$KSiG_#BAy0Dv^Yy-(VQ%DQkYO=zW`XC
z8}W-is*Fh<1t+%@nbxmvXXEvNV9G*lgi3j41rTM3>1gc3AKpA!cE#+WtS(qGOOb#!
zRJU0%TX$rDvJ!^KTqAtqCqqcZUF(m*$Z`C6bGeBDQ9Yx1QEyWtd}Ih94$ZDNTb@$4
zfwrC!7b8WJ$5x?y_GYt^C|ZQQZ*hR|^VL!_!K3deSm?T;jpiUdVuhHx;PgE?j#l$N
zLV+H3dT^;~8NeYT6I~0OD{N{z1fasS^Mw491Lqrw#`1xo6x7QVhMR6c7*TcJnpP&O
zl|jr-atY{bhdDcu12pS=mm9;WiJhw5;#@5V?WEQXqcW_ckKgftR<llu^HZ_h_mAX4
zN?vFexux}iA}^qbynzjGOe1e8UFy(tz8-PCv36cjEwGCm_`7j%315sO)I^r>p-P82
zDOesP2@h7m#lykDT60OMWF{9R3)c;*Z+H;&T8oIJ9?V3Jyys=xH%(O_&?@JSFtF02
zNR$dU>lILFL(z0<d|lkrLRb<`Be(M83>-K#gQA+nFmH!|o;iLpT9BBaqC|$POgsnI
z7+?`nybm`mz&OqTNW($522;ciF>EL&WiyeD1m7ngcw;L?H|pYQXNB3qmcN{r;K<So
zw9zlt8|KItFE2+vvzDg95H!yD-^N!dNAXx~j`+OHZ~((54x3G@vw5^Qj2cMX8V#9s
zhR_8TV@=&yyyBKd5R>fkV;P-c0+hQLyO=^M)|Ow|IkhsET$l6=F(GJ!0!^K)=OqBb
zmLKCGJ!&!{FCmA^jC6`@I%*U};d;UVA|e@XQP5qpYvFX^0049*jba1~2Eq)QG0RS{
z^>lO1gqKFSz)!6Sq5-!($(x@}#c(OlM~p&IS-NXnkC8rcEdullMA4}1!m@OxN;v^(
zgQ<xE#9KQ->sUt{_JF7k_jcuqt#Wxyw440n=(6%SG2I}>03s`0Px|5*V|2r_9UYmb
zcQpe$f*dq)#!cM;qDb1n`M9xdI3kS#RNmHPF93mOKIzDHo6!9pQ(nVcyx@EFatZ}R
z5)<!Oa&$oO6}MjB7|mO>0l51LcwlXe7!E=v_<O@gk#)mlo5`n0s3|w(OsLAh;T*@{
z#j7F66kxuDVcn!f-PTo9Qd&_PRN6e_c0kl3R4Lr-aKv6*f{9d62fg5Eqm<1+@N6Yx
z^=Q*dfqt6DD2T#wgHwB`!@&@<J^65gJMSW)oAZ=45?$@{i&AJUdB=fQfN+hzoY~Zj
z2E()Se>f(9I)a-T-j8A9C<?lCqC}gF(K-)6=pv_e$TtwUV~?R}ryt1Ppq4rIldCYu
zH73%ajvE{eZYO_QJ*segZxlVxwBUj}2D!nWD<yHGLe5tn?In;!i3cZu@Zm+|4;?FL
zAJ@EDQ_Z&XL%|(ooHdG~&@YXhVEOc?I@gJ;9tebjiLzesA;PX5!k~_+vgLo3WKX2;
z2tEVl3i#uLHp2|MBtZN&B6lV?f~vdY7X6jtTr>gCPmH>%*&57yN~ALYj2nJP66cH7
z4<e1_ZpcUX<2ujHhHfVdM)j7DmQgS~6o;VAs=Cmq>8%^<z2w1w`*?Nq_`n=G6w1(4
z6e@2JC&?7j2??-<@)d7NZlSa_r#RIlL|(0^d_6gR{m&r_zC#6<Xo7U>78FiqG$6tO
zNR2RM$P$N*{p$tJy_0gGDk2yfb)P82Aw+n&qv?WUrXFAoU17Dd>@i${2IMfFkhijE
zPL|s>h==SHQhGavF`^P4@Du>+yNZlpFjk;<Y|)IZZ5IH`c{^*KczFio5CtG^_Tn5^
zN(2GMgs|mtBz<^5EaCqEOp0A-T?yxouoU7j4uThdqbj!jl6yR5QJ}W4u6~y;oT~x_
z`NO0rggtYCcFr#0qqb~!jVAd-D}rw2)*u^Hp#W<HMYoW}V<cgyfYK;BO>>N58(jv=
z%b;((bPBD9f=KMPnOI&ZtlCHIanfE`6=>1&VbW<(EJN_R#!V$43h3{*dBl0=U7+x8
z<3kD)BGb@%%U+ODRN{Pu$!Ha>#qow{H4-`&iK@o<)=zf<aBc`XR@v5YBeITb0pNpf
zI%<Lj(-J5;;|rIP?ZVV3SuE!nAwtqXd_#a*(6C}OL_88<c~l-)!p75D$t4KBUCc)S
zk_gzP^^@!xr3%s>fyqM_w0VYE6vz;+V6j)Rzc~K@{BJ73ZQ5^E;6fLvc=8Fb@4S17
zQY3qHKn=R%1A|K<tFw(9^@mq*Kn;5X?*iK(A~<Y7n(3RK#B7v7!JHfz*cDT7j}Taz
zyT!GOZ0ZsK3aazGcxjVRA&CxJpy|q3>~2eK@?}j#u<r&VM|*u>1<{g@UWMi@G<%$D
zjCm96<TLEd9D{TSH?1EdfcIhs!rcIzAW5pkRC>0I@vIXtM|M_@vDBSl;Nl7e6D^<{
zOhT`fYH6WeQ%<?W!%o-^B}d~O0mw0XPPoR_kdz5k)Mas|V{Swpjt6bcuw<S3>Owlg
zT9dt1j8_O9><PU!UOV%IM>gnt3eoA1wpHWKup?h{F@qSPI`#U&<qdHu=WbgYg)V-^
zm;+{1&`1U1<t8B&*Nd_lqo6kV9`TBCUK3}++)l6s5hHJDaA6G)H3vZqqoly0xo9Nf
zAY10R3MnT>{Z>DWa|N7MfNW^kK~C__m_88b4bSk+5i9r!0|0fbu*Z`XochMxso}Mo
zFN&^GMAXw}x2zaQ8x;<=qSNP$wfdU762ED=i@Hu&M}>H2N(PHS-@U%`Ra7_=epzL`
zJj5Cg4WyXa6d?-4G$tRMGCm<G5gs7cD3l<Bib12dj~S=vlG{qWx-lM96AJJ+8NFZ{
zB@s|Bi-<BR04k9;=PXL76~qR0S-&_)U1SI+b@4F?(@N44;kRD!kDSsX3U{x|5Nlh2
zaDA7~43yD>`O2UqwMzKb0|f(n#&~`vhJ(@YFmY{@MkVv7CMi>B)m}0o@HBT_Y?{i8
zmO?ksH^#&7I_tJZ3c-WmGzHi7jPhU(UI*=Qt)oLv9`n4I3BH4U+*JLx(HwMHE^T4F
zm0Ab3R68rD0CetLxQYaNSWh`_0+L9SCoH#!vnXg5*e3d8+Ur`kx_IH=ER60E8_cb|
zIj$oDC9N|xU@7IomDXr_D!?Dt80H|uG9_aKweML-c!Zb;<Pd%^#X$CgZI*rvAAs*>
zzj&{*8?1rADma{V?;vV7yVsl?LqN2sbQ1W;lBTrU*(WmI4%TL+IreN!#8+s@g79()
z`NJ5d;Geso@mS*xJ_mndPL05m0c+g0Kx&N&vTub2yP><H1NV>O7Pj?7>&?xIwRulu
z*Q_{%Z~=kFG`dYPy}7v`0+I*%!GM5Uw`H6!IF7ggw3LKTa~fYMtnJVRAoH51EPwzA
z35n;)gt9wzF%jL6-_BWb_~RS_OqI4IoeKSB8yB^L55UDAs$2R1+N&l&gCZO)C&ViA
z;|myd5*_4#f(G}h-#p)0Nw`a0xL!W-R4OX@z}~Iea0jxTW3GtMb}{O9VF#1}U0`wA
zO%Eb`Byl_T6K-zbcYg8S+_iC6y}z7oJ;Gdy5pPCqP~{k+Pb2bh1vJsxt6ll7b6T6*
zkMW!?2FCB>3c}UOv-OXHBSIB4pgQAN2TMK&%g)o@GJrDl4X;K%uD7v>rs?AsK?@C|
zgareQDTHI1!jY(j9uYC<niVRto$K((7PqFY8P`l9A)ho0e5_ivh(rPdF0#{>P?jH6
z8msc;K>%nS{{XDsaB;3D9p@<vxE=A21-Ng7ds**TM}eX=LKbW_jE2H%!O;(9lJSba
zRc55vqyX`wJA5FmJyQ-NF@ogRY>nkInF*D<79-v~k&U3b<ZG|SDMk4olZQ!9-S1d^
zgykDKn${TvsBzQ7s}r6RH9X}_x&k<CYkJ6#2V@gK;wgP)F=j*>LjvRk!wxteIMsFr
z!K{0eG#i71W7@s06U5Da6l+zwY3^l?1h?k5nB8>ujC-0$>b=Q>m&m<KujKt?z(5_b
z1zElC1q&t%RX0|f;c^6KtoIGA>P+4-P;Ah!5j8r+^0kE^pu09lT{w@qJlg>O003e=
z2bdFEkvH!aN+JYAR2=|z>lSF@%oiI3)6N&54g(31gk?}T$K_K_7;Ii(+H;z>aX0+c
z?;_Qe<DOsT_Gaa)wJvDzY^OWJb?h+I{xAnlr7#GHAlyP4RBa`XXJ^JE4?K$KhdgtE
z3JJqXC>0x*#*#LWN#|=NR`J(c1|8Dxg&9it%BUCd?*#_DnjM_F6ue`IFl~4WxZWP(
zL#7kx1M%+!jw8`U8x4cub4%)KmJNVqt!aXEu~wTvsRAQV#h&r76w-wiAk@Q6JL8RJ
zs|svg{xL8Pxk1y)6!WGH!nixwRsk1v)+NLh=~E%QZbQ5{bu|<sd}W-FZFKd;44#OZ
zO;N6xfSnjV?n7|dUGd40CqhBjV&yE$*>vmifJBwsg>7192o!cl{ovCXq0$4@0hMH4
zB6TU`ec-{QAyp_hNqz4C2*wnhaG$)6bS6Oqwhw1l857nCL6gadgbu|OQdxv=ROALG
ziZx!c*Xv+F8?s%Eie=IRp{JEUKb)`vE=vll88tu{hdik>i_z012obP!jbcoP&f07z
z{qG)vWSRi@pnY8LXW#?j#beJVJ;yY*Pk=Z705f4yrcTAXJdHhK>U0zYmWj%b2Y50=
z#9L(qPX%XKwB$fH1Ppfg%_|0_E6?&{B>;q*3>pfs8aK?wB0nG#qlX!(&=eor0;1Z(
zQgRfkR7u`Yl{5lshqbHd&64il%on$dh8pcrv3oHYVRj||0InT3R9yrGU7wshv7wdG
zj8Tl~%L2+5$AR3|P0Yn!037D70|am)*Ci2mkY{zIr4^(Gthq9Vc(e#H=YYUShLgmy
zn1lyg#+oQiBmvNB_FzU~*#7{*9Bj3CsMYuQ&Km_1wy<#o`WoZ7p`yTdqz_XDYKM*@
z*oKh{cyRM*&UyPn(|z%Yy`#rnKon`f=GOpIl*U0c;wxlH;9EYhUMo;v<C8a~#Ha##
z%2g)xMC&4EF<^o%0_N`&+NK)iH@$H>-ZPHLa_sUUBeyExOGQwtgQLzE@gRay((7E}
zs$-%ZDeU!#pa~^;qLSJ)LhA?T0atmw290^aFeq;~$yhksopo~r5Req0n#7(-uq{+0
zoJJ&Th_G!d*_s1w<4LImqyxEtb{#{!C=>CQU|4W&;w7pqo}IPDxJ$xtQi1f2;i0n~
z2`&hRp|oxDh*z{-5(ru!SlAZz!H)6TX}6D9Acyc`8PpEHED8@p*OLn1jUc$4Zv~B=
zD-XOlP1sjCQU(PUTds<Q+)>tyY9?7<+l49Rx>2Q=eXrbDP#P;2Srms`E7Fh7P{N&P
zhpCFmU^uX~06{jGOF*n{^beR$w}GHjKou6m6o@;?Cgv)Z#}BMqT`mb)hTw2d9q{94
zV8tU0#93I1?q${)atfRhZdW1bprzTDz8e!TB~%Hgxbkf4tl)(FVtA6BJoSkf?DR0O
zIBaNEt_KsIb7izFBHaf_6Dk61qll;**!E!L2oDzWvL{zrWVU8JP%`ib1S^PPbo{>a
zSnf&cRsCVwz_oXD0lu*8nn;6M9ZZFKSChM@=f<<Nljur>b%%!-(AhAq9X#PZ9572=
zHE>CI2y>Ongaepdq<WzneMw_op{xm-aB@<Ok4MXmL8q{(yc>|hms^%Ww(I2c!-@q$
zCW42lQ(@;U&BECL9;ZEqQh$0GK(;F%N1Qe#Z2gV_fUIwLG<rm_BB^Zv*@D!~0D?@4
zAjHl@BOawZD}Nt2j6`q>oN#QxH9OGPM{F;wP~X2PNgrytxdK$M8?f=q)()l6$PJ3y
zqZTSVW=@I^<S>B70IdG7g^)^zFRonUxddyBcqouwcaf-p%~!af@^gtSdP!(i-Y(lz
zR)Ca1*_t~D1_{JN_Bd>oc|bN7+m6F)KGQfpaf$GgnC;K!tW7viUV~+5bKYI>EWY2g
zcMn)VF&gbH2LvI!b<MLHZ4Dvfl%1Hes+}2THPsu)BLI*sPjk=KY0tE}p;U<OnvOsJ
z08M`J1bz=p>=F?Oya-ZD^NTZ>%5K0&MMEPBR|IMS(Fa!UW8ACi4yhX_B5M=jH2vWr
zL^PV#4~vOu9)e(PqX}Mp;<_J_VGho~U3L|jT#CJS8WYiA9T%K5Zl*aMJyLb2HYV{D
zUVsU2L5pMXKs6fQTzSCVDK><0gu%;m&KS3FUEgpC99M_T#z##vVsaZg<z8I4(ym7h
z*ae<2f>f9bwv7PImBJ-h*w1|;cxE+$7l8g=aiPp{a9Ck!+Gir7au`;SOao#z9~#K(
zY?Cr+L~m~VF|*hB1NZXjvBqJkS4%~qO&XZx4wthz%O;On#1F0GuQXi-*KfXga>}C#
zMipm2#~`if*)|fZefHqHP*A}gn}{B9#v4g$yw^tWJH>!>!M^_hd0K6aEF9{Gp~7Pz
z3utQuitl{m4MTW9E;s`fCN77|v0}<fD@S+_E}TX{iM$ccFC{Urysx#yP?X5k)&!d@
zdUA+YMCW`=UBt}+V6@yOA67^`TA-@wcvHMiDv$>!MMb_JIkF^a5C*N9>7j#pW8Jv7
zn{)ulE{e+mhNwqIj8c*r7NOX?^VI_xavJLoCfWKi@wT2Co;HkH;$GvC;73uMLn4FZ
zaVfbAJ4ARP<<3sl2-oq>Xhn^y^iQ;7M~@E^$|18|=9(1Hd7!L5jBYGFzCX?y3RpT$
zzgbIxz5|g`!KMwvP!}tXkIpGzs45TqW2)rZnC~PG_D((AKy#r~Gg$sqr)Gq&;ZIYX
zI03Y0H&(t+6A&px!pJ$|4a>%Bu1aCu?Pq)(0lW}9GHFTz-C3u2&&!)G5YreJ#!X#f
zmX0kHH0;eX0vchYPJ^>!`NcvQ$_9jc5qh|vM(=4BH(t$S9{`jRZ-F@ovMn=EZbh(M
zoUnDPzCyX!ZS(`I8t@??aMSb{9&yHo*%l)ufWKSC8h0{OOmNU76BeJN1OSSv602>+
z?xzEBHCrb2$&+Q~O|W919Q(zNWGD%xG-T7~2~l_w*Z?v7IXbWhk*K5JesEdguxUgR
zyO>2vI^Lh1;efCd2!RnFOv(U_;?j^S51nB#Pz!yec*sxzz>1~Nch?gu=f<eZN?oct
zU`U-zR7OQOZU`VjikgIP67!og#3Vc$Pk_RAM%|Nl^<8g1^3BquvLLm^&^cJkt)U#+
zhjL82b%-&g)T8eWbS*4NHxit;a7Dvd4)_oF!b*&)LImgUBbTCsR<ALdm`emH2v{{I
zKfEUx7n83(44j1Sc|lO6IJiMmrELY|RL6T~G>SZ%)*tLTAHeNn;PHkAvqnk^AyM|?
zS7?R;wD6Z&SRB2e0%>f?)<85gn~y2s-YpJS5KkR+V-wmsgnrQd;Oh=nf8KGmw}b#K
zyL8cCSkhUHEI{}GEE>gd0XTvsgip>{f>F}D5ul~p!-nnzr-oSn0Nr8%Yi5vkl#O9)
z`JUWVbf|XLoQ&TI)d|o%0$>x{pn*<OzVhV~yMK6_2Tfh;_me!*DsGFPW{Jq<hM*V#
zK56%HlMJ=uDjv~$Sk>Zyss8a&*#?<-o;t#C1+{YL`@&z60rp)7w-4AFj15XnA<?HI
zfC*dM;}(@KUVRv|!m}x*@QdyBgtiBixZnzXCyo8!7@H*o4gUatjHFC_o8wC~IYE##
z>%50@52zgBaOk2S#>ExpVvFr1(e6SGbmt+jhy;DN-YJ4590CtxUU!<U+#{`TCvG-m
zfF-FR-7OpR<Y{Zjj%zwGh%yu#T_Am3o#1&BBZBbOL>Og7D-jE_KR7Osg*x9Hq-PHB
zR%Ro@w3-D(&MfH)6Hr$YvD=qz6h8r>hK@cmU0Xd7R>tu-#yqTfTT#usy<=rf9Vs{8
zW+s^L<~M8W3`u0L?)8S@io@ZDmJ-r#=rK-;*?1D^;XLE(u5MrK@uK9ouP{yTYDyTR
z#M3XstR6H|TJx4*1q30r>zs9iktw&!#l?##iZ1a4ow8(~^+cBPng<t+7!7yT5f>pj
zYm7uh?HVkaQ$dm7O*T+)IzTnz<+N-%z-5AP_Tovx6N8kbn^s09bQmgfHDSD5wiyVG
zZkjL7B0)!-NNUD!ZV{%O9UY)?);a~yv?zxXkH!T`loDQ+JnQj=xjm>TsPxRS-z>E_
z734L80f%S_gy6eGxY~eadA!;ndIyY7d%7_~+=S%sTxT^ftE?5}THb2qGH`G>YRAV|
zp(BE3Z!gTo_gbpj?by`5uy~aRjVK(T?(13)x$lnnkumi;hWR&R!+G(YiR`E)pXHIV
zHWy$n$UW~C=5&6FlfaGL*kNtjtR+>ZxBFzm?7-IhK%=yniy_nk29|$@Xt9t2cTSsh
zC79M)r9`3I!S$7`My_gWmd%`EXw`9Xg-uieIah!#Ipz?v7=S+Dn9$xjFtNIGX1+0>
zEzDFE<^Xvaub@560N5hdjWd^`gLhNvNArThtxVVRkU%!T58Ht9C8%fsa98U%M(Yeg
zXasfv9b>yfRHp&}q=X5MS{Twfg_$%62y0PoZ=e9cM_z6ou=W?I(DiNSc=0#c{CLG5
zVoJbY$LWyDt@hgDK+~a#V}<B&%edRdTWEqgN(3SfRv!f%;lV3!Dyha0k}P|YZ*bNc
z9`CaezQ>igxf9CRue13XH7<8kX%5CEdBLs-7F1{}MvgC*1d(JG06e_egXnbCG2O*_
z2zvO3%Z|Bd6r>%HUk<ZI4xmH`0l{I41JIzfq3D8N7|Bu4n398c<>kkGGz0(?arR;Z
z$#qioJ0NDT1tLv5XG(uI3T*9x5e?-#c<(I#07T@w2C#>mZ$L*X5b6NB=UT_jd#IwZ
z(WA~QPOf%@d%vp~4ivk{>sBju#w4LrZB*-rSt-?fM#SHBGTyXlH}3)g%XnRC<Nl?n
zBU-UIcICE+I!6#O`rt-aPg`|MVd<7Y4Pb~ID!jh2QE+x6Vr*fYV5M+bp(T0UbBS*Y
zVX;k$a-6Wj(+alAiMLmtu!ag*Ap@iW^0;xd;Wwt$t$b$H=nAnDxWn+6F{nr?ho8Kz
zxD=qE_UuC}vbE7W+I|mtchq=ekh;Ef;1(p2D^UpRmnEkYW=q+kr;Y>T2FnWxcTYXw
z20HK+4!MBq8P^J8E_4|7=PTGk6au<UxJSzQ0SD3Rn}W7A-cYQ0=ZtY94xoY+>G{bP
zf@?IKP*Qth72t?wg(*Al0Oq%-i3_bY0FOX$zwwa9=d;xS{{SNf9^o?xt_mbdV;-b|
z`H}|ve%USqvJ6117p$Aeqsfw0;Ja{Lf(S}OO7d5uSY``6?Qr+=l;x&LZMYDlQ7y9(
z0Exg5A<e!TGUE%9Yrh8oXafoARDrX3Z9`}&N5D_2#hsRj3w}>8&OCc+ApSDqI$^Qa
zpWZk)0F;4GyY+&qu7FGtwb5V`3{yUM2&dE&V+m5i@WZ_ZcN``<;)?vkAQ7<}g0P{*
z7n0$EB-d;Usu3<kz@IykfK)Je=N2(E7kN-m+3_(`TD_g8D*brK)Y8!M5OOE15${MY
zO@b#Z<$$KAf){?c!^#lafnx37jCN?U9Dyufd@fanD#kIY6yG=@Zi6TQjvh6HP$n7`
z$)SRLyKx3y*rNmXV($pXS^xuGznqJctBflJSU|Hzap*DtS7{S>>m1Hfz%A=-UF2x#
z6*3Kxv^`^baUB{Rf=9cFDkh4p^~#^lIz_3L@fT_3n}qQtrQ7MmkbH^dsx`r*4K}+C
zc~9wrU)BwT-#oT3OQBh1Sdi7T9$4*2GQA*wI86Y61!or{4*^}roYW-*4F%(#b&LU6
z5jJ???-4bn(xI@Tz373A=D8$Ow4*Ndj4bCD=UaF$(-@kMQvOgS<M)c%9uNlfpbF5&
zZHD$vL{u!%))__X7Qsk#Q|ld`WmO1n?!L8vThhZFJ(6`r%L>k@EIVDh4;&-Ls742)
zI<n_S*?<#6f@QmdvU99hj4;uNtJ|{+==R|Wg1lgNmE{6SP=yM!v4jCFTO?X4!jeqf
z_N4&=a3EqWZ#J>chf(4(2Sb312bOyH#4wh6HKGS&esC}$k>PJ@PSj*}jFM@hKn2h(
z1{U)*vdUl=VXfoF097#nW{~LM%g)FnS^}?=RW3K?5`<xr?r#RQjQ}mXv?0gI@y2O9
z5mOPUR{lB6tsprAQ4L_&<!34YknAA5%)TTgNyH~w>hn2L*mQETWJZDDz18PhTP!eG
z1`ZOu(!os=Mvktrsu_d;Y337mmj`7o-&kN5A-p6V%}Hll=KW$@Fs50IK9`(PEYh^_
z7uIlwKsKkK4sr`I9)<|J0<SL^6GtS12ThF-X!y#<caZ=Qv@cd~7cMP}We6~YPE4Fx
zOvs1;l61qN4h%eo6z{HSGo7u}4;`$<U}ypdif1qtQWuT2n!W%}E^iJhQkBiuWY)l<
zOjU?!hT9RlTh8)BP<B#siZ_Wvwh$bg6vYCXQ)jrYK|x52=)32c`NNz1<w7{hnmpka
zC7BfoQ%?g5_t9!03Dd){0*xmk_0K-B7j|>49mwi>!v?gTB)ZxHUa)U>2na5wtLhzR
zZuw+{)}4dr9~zY@>IuVMvH%lvN~yX~^$btlY-}$z_d6e12Y0dD#{U2rBs%ttqM%Oh
z4{TDr7Cv01<0pFvq&gdVYt~UiRZDY0b>w_z+-Vj#dAD`^W2F_Yg96i3*Nl|_OUu^r
zXkdS{SRO*#sz?6-a%93@?vO)?169K~eix;In@M?a#07y(0;N1B43<04I@iXB4Xxm^
z6jJIENECb4GaN=e^h>wVxO|sY1W4B^)tbkdW1uxbZHDvaj%UwWMXnD&5WNWyBfZ8<
zZM!4w_PGp`XlDy<)xPmmY}^b6fQlIXesG<kK51S%Une-U;a!9TLt=+`pgb9|xuL!q
zb%>Fgh<SD#{7eU1$i=+e{{Suy3lOdX^Avk<_(n*#<`3%eyd29YATEX>!Qpb!2u;C;
zx(m)_I=w@@bL>%76ankDVIWDR=c}eEc4@gbCIgB!5}X29GMQr<h!;W4z2ir!26~un
z3FldBXku{&w_q$e!iuK?Yy;{HM|kC8F{qT(15>%gz8{StSJ2fCExvl*B^X6KX)en(
zu@MjlPN-plCKZ6{K5z*h6!ED3u&7=7E(NN}#4%zDqAvm1Lm1(tC~a55Hlm{Nm@HsS
zozXUqp}bCY#iV@+AzlN#b1R7nD|CnrrgiZ51Z@HWi%rP#{YQwX0)Q&G#3(B^JIhyx
zY{o9jircPKXdrgt^^t?eCjbF_VhX$)hu2wOK!E@n=&haNlE#tMKX`UibaZ?9#!5sj
zsmFL0nE-7(Vub+VL*pn|rF%bk`iOwjaoxsnA+-Z*i{$-ah0#P(?LM#_s)=2ee=ah{
z1A2$}_`t!qp@cD1-;5S~5;3SmL_qMc>cMieWEBDxW-cL!)=-GnL!95`TJe$CBAzpK
zg%fI_6JbtxInQ>~*8&_2SYdN)x4sa%wa1ekMAR07*^!pI##yZn8r@a1s^Bfy@<iGj
zdUkV-1JI7SJKeluAzyY5zXbeXMMdpUL#?2hDH0-F*rDM&&B<!;KC46=^^i#oL*Ja$
zCWhNP#BtYU=+XE9SFK|z&`6_DBSdc-#ulRUY^(Yma9hE(k%7wKOam@aq~z@~#v9&T
z0nnDAc0|6!=Lb&QKwe{$O&Q<7$FkO*gE`dp$T=vlA;!#GEnbu^gl!V<6fq*Jgm$lu
zFL-EFJR!s913@ahJm5&E1(5O0nXc4zhmptz@754g!EGQuhDj5^kN{H6gYk#~P7Dam
zrQRcSQ*^3nq61zz!4Sa&HDj$O^OtxkA{?A;KE2`gRjVNvj@Mi)cmrz?D@M7$D~dXC
z4sl-{9biGvF1W+cp<X{<j6nK9s!HUV4j6(ufG3znP)s=!FjPosq1x@nK%E+#LC`{x
z109f>hN?BL14oksDGNhNP>D(V#K6+|V1v4Vnx^JP--K8~HaG)dz$Lsc<hIfS-wYA6
z?F==bKNwE^wQ>W1y88IT+7p_kuYvinK*1g^RINAX>lcjRnPq`e2nFq!tnasNaS;bk
zSSTb`f>kxWqUR^r+)jZb)G<dD+hHJfCbF7!!LIFUTfiK7b5K=s4Nw?hb&TP*S4XBj
zbhsBs8tWElv{7zwFeuu{E+TOI#1R2Os>03q?*QQeDs4aa707~&CVKv}xpj%ROv*Y1
z7-{;>*kvLQ-_9JAN#&-@C&y-o<i$N2knvf;!-P7sN>7&v=8jKT=sz0kcz6!Aam~Uu
zTeoRv%MPG+IO0WgG@HVLVn#gAX#&iF0t=E}$VmOB528~sZ6K|-oMMyMdE^L+k210X
z8pok-m@CNjh(jXjNfCK4CNK#=g(rZ^HzC;Wz~$audR%wYyGHL$@h*Euz@9_E9?Ylq
z#Rp352xhSv@24iLA-0o72HL$vG+gn<Uf$6}f+eaO@?psl5#1pJO9*v?Tw`vkBoNq&
z^4CoZTo-NW3Yb7^dNi3N5e;vQyf&ubc>D6@2+-m;HVr?f7=FnckFjg)Tt$~qszc}l
z2~i$X9gt{7ae#cnQKEUo4v#GwUIF870K5P$p}}x1h>c27>Pb#qmbt6T1wU$>k(fl#
z3k#?e_BpLx4MYc8(bLSz6|ty%QUdRpf`D^n$V0t9W*cz@zxxp;85M#`c0NG$@sPhD
zmXLr&wVQLsHFA)JqIVQLi@X=&Z`DP41qH!1yTFwKpxfq}a&{h?$kgn#G~v@%N?Oxz
z*dS`IGO1{@OlAVCRs<lb*a$>t12lt0>;Cb9aMlXWiKJ&<>5NCHn@Ym84iKNd!EHTX
zsgT6%*G|&DE*yhyt{~7xcKH7Q$(E@{<5hgjJ(`>Xbsl{$6FBo>4@HnQ=MeOytq?@1
z9x;*)Cc6=ItyfIbg50Orfi<wEzZjX@Ru3=_&Ik;Jc|%@SfWv3REMAJ0Q;iu84YMJ?
zsf}-Gpit|V0b2#waQl9+0pv=EWC+&L-2VXFNE#R_28cZMl4_U%g;;T%$Xyek=Lrsj
z4H|d($VA4`G|w447JwmlE>39G3E~iF@-x8)XB6E{KJb76FoI6|?CTwmVcx2l!|S`-
zyZXuqH5&l28NTuH(*g2UYwg6SD_!huTX!>3ReQf@4X8>iC{Rkxyko>K6Is26#r1-D
zfH_9PfD{hR<)WL$K^7B`S+|^S6h_jJ(q3y^T$H<4mCl(-)5bYsv~o%tAB@q^wrU6y
zce9s{&%byM9R`^ujx$1d2l0@~HL6i_wwpW6CdT@P3`8!?3~Gw#B@s~o2LX4yI0&(V
z0an$0ki*ZSCIg@mcC^IplbgT;pbpG#@Mske_k*Mc?+-#Mr;j;s1T|bh1qRylJ~eR$
z5Y%%Zjfpwz&NSu;Jn=3u_n>kLv{DwN$3ub9B-aXmJQ;EWRXZ?j3Lr2r_ncuVV!eM@
z0pd3Ic9yEAQvrd=xsiTGu<5)K1V-owFdA~CdY)RkJmYF7MPVqnJ2sC{-vEr*9ssmA
ziJJ|>MC+thY)lYXp3Ox`<!%I}_zQKPo_$~*LO2uwwvf_z&2Wd#@*3Cg-dKRrmu~{Q
z!|1jEl(}WnRh8=^pN%^#bc5C!xPrhcq+Ey~J9-&X4*He{U|H>mF*b{U9IL?|@e4Y*
zF97Z$k>sXLAv4m@2;xJDv0?&f7iFa$Tg?Sz>cIEv<*AZ{Ku7J~Jo00!p_-upjpHcj
z=m-rmiUW<Aal}lDG$<N3-Y^Hf(6^(Zj)~(e;n*D--7t;icajIj+zJB%t3l_!LnVS7
z$GoTs?JVHy&O5cD=JV*l`<_~_=U5KW+n=xJI=po_!ONnHFn7Era%d?2E&zB@b$RiP
zlwv?f`@jj!z~0_n`Y|zr_5sr)gZ8-Kf`kB8r{|1Xm<Rw=RaI#u&Den33w}K1WTv<q
zHP0DBIOwNFFb1Q~8Ls>gVrT^Du5vqY6t82uYMBM%`-xsaD&yxnPHF;#4l1}wwG-F}
z8#a8*CW<1P7lw=}zB6dV=2k6vD^l^5;_wLQ3MgByO=L6>iF1|_!x8@gSi(m$L|Mjk
z2|P1+Fq2UT8ZGCX(?S&27n{FcG7_3$P1N)+#l@Z=>4hLKCl%fooUYJec1CzP#|RVw
zc$$NSi|rblNz>aBe^{b0l`YYm@rRl?9*qFdHCK#q)95n`5MiXw;m+R-P$wqGr;I{@
z1-a}Le>p%M)zW)eH;M%>X+0u}=Ib6C3`<28M8k`S`Xhf$a#w8=9Umn!U4;M@AY!Fq
zpt3V3#+|tyv<=6AX_|-57LjR!v4P{LFL`i}5}|CSxLSBKNW!R5IX2mOFi!O0PXXR7
z1~_nv_Cwo~!^M{T{{Y^xk|f9u!Y|_`9uc)@)EfH2Lf3*h)PU|+cx;gAynD?6C7rWZ
zcqxjVR4$jJ0ktSxb3nT@SO}7gsJ&br(WRc#N^F_o*VTCGCr1u2N(_^XX&sk~3}RzG
zT1(R%^@@*I$!gB4ddBT_(e*%tefctYTr$SsO|nHyHO;_@6^`|N<UpWC{{UtrLY0r(
z3tfYRi*nvlJemXJ5JOTJbO25`DV4-_9u%9CQJr^@e7Z8&7>Y-6-X-283h2Zw<>|?_
zN5J^Myg>@umDhaXwoz&aTfF5n62vIW$J3mn1M66hF2pf(0^oVG#sZbb!6sa!6l4uy
z7Q(Xdar|N_C@hSJH{1HhZ*k%!`*_6@&E9%x{9uDu!J50?MpmasxN*Da&4>iDe6`=}
z9pD{>2dn`BOCOVOtV@gOApZba%A78M-IAUh>@0wNvcsGf8-<41Ha4);1%iYp&qMc)
zgfu;iAJd9kV6+(E5L#yrDUeanGz&qxOd_P5M5qW7S6?{4JXqPA4FP<P7-uixk>Tvd
z5do(L2n;qtuBPFQ23gPM*T`{!Q*T0EiVnwkY=;;-0&>a@JBZex0Ji!t*+8sjw5KC;
zqZp7@!Teu1!I`>>r95%YB$tknsP${@m($TeVh~hcCJHlhZjWZI_M0#s)~8F<BNJDS
zGQBauBY7LzL&jJnS_Eh)w_k2oRNpxXsRc_WoJ8nIKiwMJ$`2TDFg1`B1I=;9!s-PV
z2!P3~Fv~!($q%q_rt)(63g(16)ItJKi<<XAI8>NOAkNHfycwsbbOyQy-UtU^*I=M~
z43EOybam%${kW<KgW0ncRz{HH0^+=fkQykX>o!9L+FF3#680A=C;tFvDyxpkUst(*
zShZvc5KW%9iEFDmPfq^;>xxL&+x#X$T8-Q|cv-(VqRZRJb^6B$i0IW?+%z)W4T>s=
zFOq}5cy`fpFBxDYpBKrbW0M*q&pi6WLTGfh8VV9_xRwwZYM_D2LuZB`LdV=;UvuFy
zBvnZq1qJ2b4_|piw57_S>cuO0am}EBbO>Nz9it%OP6F;@By)i?jV=1b3%H6SBlnEb
zDBggfXj6>Q9;olde^{+^hYfZ;n5OGNBJrypaUTu=rW`-_oO=LOrx{V$c1JxQIFN=6
zBJlI`?;8jWE5ycdZBeLmH@ugAnjSJH+30rS0tv>q<uJoEEsHMV;zbsO)O%kz!UnH-
z$>H8xQZS(L?Zw#wloU^9I%1B$%OkeXpYyDq^3@<aTH_}f<@Q=M`@rcsg}1|p-+Ifm
zUy^w-P3WgJVVcS8UbJCfY#>swYL9G2AmU&pFpE%7lg>pIX%8+ZPW-rT+K?SqBG)t5
zISApX=BBCFI6(MA-OKK8)-C$(a|J!$oR2RF%=jVsSBwA+Rvre>^f(I&anK&_^K~a(
zH&^~}TRnu?w%zRG<28{1r$7_X{>(P*cIay-ky8yFPnwY%X(2W@kk6S2U;z@xtB)C$
z*fp>=yqdt8fZ8LnJqWeJ3usf!+EEQT(CaqBn(yrK))$gMYwc87f+`Uw6H?ziVTM5K
zCd7dqVrP)3VFFQp4!OY1XjLd87I?^F^ew0d9|8Kn?ncB(1i^eioZwtR09R2u?6<4}
z#-tIXLZiZANx)tvE`VuL6U`*YHrf`LXc<<OqbZtecV2tE2^;Vfkl)F?44|F19^5F(
z_rUhKrKny|^??&twy`u5wY#@z{{Tz{HW65Z9vCm@IFgMtQ4`^~=W4>Xjg_K?HIOWQ
zpbsY>d3bZE<#X#P(QiBthn5VoJ7-5EdrVA%v|FHn61yojG=jyKkqv!}?8?|xFD@#y
zd64ks0~b-@{{Y;8!aPp!(*WF>{xC*l7*GQc4BhkR2Kuq=bq@6(TFG)Ui>g5TA4!T-
zuJB-BFMy#&W9<kxrAEPR3<?1NZ9FwpOj{KJTDR9(cAz~C<S-lU*Q`USFlE!<)@<=J
zbs(SK2G<y^rUIs=s7FT|v#fBX6&e&Y$;*KYyF`N$ePKfsS9naMRkbz7IJrSYJpE!a
z4jiy6y=m4rE~D;UUhwuE2AW-NxMN1Pt1%$G78`2+09YbyC3X45yepOoiiPHJ^Dt<O
z=T>i};|PGpk>r>o$2{seNZiMKOvD3M^?)+N>9U|S8`jv~(A5@%CgTDa#;gIztZ@Op
z7ca|{qX$5JyBKM{XigETA|l>$A2e0y%Q{MWj}~CM1(p3DyjS9qdidAxJBre2$DMHE
zu+a(}IS_)~Q)&MI1(gmA@YmTEUs)V@F|-qo(YFho1EJ<xiaTDq!>}kmk-(^I`8md&
z1{H~*;@N=OiOX#WJjw4G{qD`SSB|qtK#+gne({egxVthl+1Bx3#gI`&K8U#4^%p7A
z*DfD!JsJQ3{&90qN<5mWPLRIw8F|HaiXbK58^$7Bw*poa*N*TT4`xedgKpJN7*uI7
zi?qd_u?;Ux*CQysjo&){_$ag!IH)c2)+=1MRYKJ$q75-J90qIS3&0(>@rYbqJRSc4
zjJXAovMaOeE+`=C0RA!o4jv)j{qGadh$j!ubfz+OliFdX_eO5P(QNI?AXVCRmYz;6
zZ3uob0g?&sNO8mLp8!DcX?SlJDKu#N8qy_Nu1k#`NN5l19MTbJ$uVFWv<J}Pn~>@*
z-K&m#AT>Zbqa#i;fguGDtK6@~I7F&%?emE{rdu#JY#;*kW3AiGw0+kUMYKA9Ik3t$
zCwMi&L%ZuHl{ihFKX{a-g+Wi3jBR9V{{UD!lY(p82!dP{8{dp%v`A<k;m^(zu;Xa2
z9|p3)2|vmY=PwY{Q0vKYw#RP!6&g^c3kjhpYFE4-1z)=+T~jYl$Gq#a<4N{r-)Kgu
z@rop9lZ?<19bCO(9vmWG){($JId^kygr^kH@3$hTl^60L8E@LEDjH<*A3DGzv<U{&
zRs+>=o1P4XBmpklIO`(JUdTa(DA+#m(>S^zBrj*}f(Zf8P)dE6%7;V?+kY5@1t2$<
zRu=?3gvm@G14s3Pi$K%Gu-(DF1g~S81DN;*n=h$3$I4kQaHS{@7Y9OU3$`mDw>0H=
zvo8KppILbU5lUpDgtu>;l`Wv)iD^KPxXDW*umS=whQ*u)aM~Jtb{nsIOc|m|bWleo
zvVJloLeK?`RB?<T8E(yhg^g$!h=8?%vMh1mOa%5sx0@goK?#(OR)u_nW4-T;9JIB>
zF8w>pJqt-1I~#M&=N%ke(oiWr4P!{yh6e)8_!YkMaeMd~If28xT#kYu5qHXO?-&_O
zx@kkzXVzE3aN}XlB|^a@Ee`?bED=Ud=7(P!#6Wq=&l#nq*Gk1_&Hw<Yt5Mz)A4<c>
zJUX}Q6Z>V{NpxauBb39aL|;gscx@6u8(mOt8sd>ofo>>|OO`D}CNE&uSgLUD+Ie+{
z;Jm4Au3UYf#6XWY`nfPR@7@^bAYeAV;3ZA!srzPcueN7hjh_<+a)CG=zs7G$n*}VJ
z;fL=o&@w&{-}Qj?Xxmo*0FT}-gHnN3$%xz|U~ZjYAx6PZhXjH;8a~Ddf!TEd>BeH9
z19joba8CR(OCkn{eB?Tsf{J$C=d8XO$kcn_$LMl8e|g5W{;!M2?+aH-Lj8v3ayrnX
zvrvS{;;;Z0iQ^K)JvZ`xGG@?><vY0L@2aVEKU=|)BBemvsuwD;5%MCbb;-ksNZEl=
z28!>_Xn~NZ927T0SR^;t-8YIsS2V_ozP>U%5XgjS@ILV^pkvfKq#hTGjKgWc(W>%(
z@k;iFXGd2^n!um~{sm}5bS7JxnTCugQ;r`PCCDDv*H}(Ka5Z|f^@LofrqBXC-SLR{
za)C5c4?av7aqU8yrCLdIZYq%#0pM!Q7!EpA3{Ns-h{c#pw1^~)#2mGd2Z(}qbQw|#
zfcV0M@I9OkHF`Mi01cublI+d$Sb&R!Pz~I=#r<23x?57I?Z-zptcKModJ4&MZip$U
zaHU>uL7}((v`JcjrXW4N^i1Fa1(Rk9GnU0c0lqH=aejudlmdvZ-H#a}VWTBt@;<ri
z0ht5gOBpmp9&x!Nv`BzN^V@sI1Z5fviKT1El*)~8(Va*o72P+6z7tX5Dr9K8<++@h
zO`CU?lyGn-v#k%jav=+0(eOW5S}A(@W1<$<Sfd|-B!Au;dT#0X`pEh%o)3)YL~R*{
z&OC=YmZ^#@(CB-}deW5Ir+cJIt|-6~o`;{?ge3^iL#F&^aHh+TC@jCrfoe0lk47*0
zL1+fP+-9VUd+dL4-cEcN*m)xU7@jht(Lr=kjv^<u<osc@Ac_@WFO0lB2gEd-m>Syz
z#cereC5qxq)3fQ`8i}O<o?Z?4#R6!ckNaZKi?-<h0GtpmdZ>dq{Nf}qiu~j76+-Wx
zGojG#z7Ke0;OlqD%+tMs<KfS&(05NUH$PdeGPFBx9pP-+aJx7GZ8}8Pi{}_<HPP!@
zelW>m2NmP-gMk4f=M+GKExXy45kv|$ws2{@ha@#NgqHsR(<r%oC3<N-%w!t^2S1YQ
zoG_K7ig$t6&I9F)>ES)=9%JJIagK}qE(>M2ih&K#2uLoBlEJp@5#l2hKQpie5F}Nk
zxx^IoNQjJCSG?W~JxQdUe_Td1iH~+(Qy8@Z$3PJfC#5%<pzK<rg43-5=3wLWYec2U
z&JOhW$`*Kt7l0{>agb>JE=&i$s%Tu<^xYRy&sw{~&`fn$kFIab;}*C*N<94-AjW$Z
z!@U};=K}HAd&6kx3Z%SXIDn6*elUUC_db6a9f@E>1rURj_MsEhk$Scb9pQkD5Kt@3
z)VyGYV!){aFAByCe_aF#-NSk%xF3>d2FEDYs`ZT8QPC=kxn6R0cGmS0C*;Q3ER=vi
z5#r0amk@7`ly{BOsR8VZnxWCdY63(wdnPRNv^5f+a5XiBF6w3l70RHH=NOroyJB*`
zgY}RQ=`Gw{C@A?IV-9eWw4~+_74wKp8Z5UQybohkFEQ3hU_zDpWHYY9PE+@i7|E)X
zRNAzrajv@z8r8<!4QE{e08$2C%si<NWfA)(A|0p$oMN*|L(~5NGL6vMvf^Qbb!&hz
zml(G$ucm0R0=n4IN`*wT9+J7S71T<XtOd=0P<b@u&;z742a^P#I437J>SOP<zgEQs
z*9c_M;ddLP<+A=vW<~`IYaf}Ea!^QVzupKhaRAqh+EquApBbUJDvH<KIdOx06U4G&
zlrxP^S^Z$K5_h^y&zBb90+YUxVZ40<Vcx6)m&PIq*xd)8{K-rVs72P_>oh1_!hhSv
zR3VAC(X4m+IFvdx=kF3#6(%6A8wdukL-HKpDFL=~xl9<f+lIhClfPL>A*G%TuDLax
z*XcGyPsyA-!i_l#OnR6*<BFvq9d@2f1i?ph2*c+S4pNDwL|?#6u#S2qF;+uI!Gndd
zAQZ@Q0#EUl9>YK@ylmV&agcok6MzBu!Egg${e4UUgfimyu~Fjz2Vq5pUTqH;Y+HO3
z1B)SX*<DJ9Y5+jHhaBdJt3>b_F$#-N-`g)nt_WB|Z)|$ewDvNbhVL(%4`?1=r|%KV
zL-K(i$(Iq}D2g@Evz=q(FbM6)>dfRHh*arXlL!L{3`bGSCulq6xZk4<pny}-Kz5l-
z8o^~=h6p5h#Uw;Yy~DmV-Uacv3fI2$xNNE);C~nb?G)n$IuDZ*k?0q>D83CHrWFEt
z>r)P#q}(!=>Y(6q0}pK`BaA+P5Kx~qaA0a#VTdE7x;<u`7U0)jE`j%nzK0d<;YG^2
z%An{64gvXaHUJgjobutVyn#ymnFZRs+PKwwJ>^H6<%DQbq0>abQawdMDdtu%xit6d
zE<1oWjo|8Fe#FdcaW#P>&d_(x4xXHo*em1nj222E$%Rd=lrVH4$B<fnrV)^6+$}=^
z2zek$wMzL8bTSD5CqUO@I>k5hQ(QB6jC5~3VG4EtIbArk->g{Fr6>uv5ZEHM8-r@N
zou{zFe4lu={mY<eesg+W0-Jw77$ijv;!vP46MlW>=RvXGUh<BhG1*^OBGeHj{Qa3i
zWz+=Hf!7}L@r6_katGe=I(xj`9X)r82LvAb<nIn<;3ZQ+-ZU6vDoRvLK_>qI#Lc}y
z8rRl3pb#K^juYIA>3!iOU{dw#_m1-mohE^;4igaA@ng%xGh|Cd65gHeDanq+#>Rvy
zkGI2+3kpKbO`&{v-U_e}eK*b&-D+s4GlfaNIlvK@emwVaWV}w#3{dbaE#4<54;W&x
zI2R!SMxPnLj0^@+0lJZS#05D>u+k3;&p4DmXYVY7vkrQj__n5?a_bjD3y1q=(}*-K
z0KglgtlwM$G8lw;o~*;*m&gs45rgLm0X4Afedj$ylxde*T`hmcGKwWS4euR@>JltF
zAJ443t<7&O902N={FX}*Vp3mUI9QNr79HE1FAf`72QN^b0NyPK*1`Q@rP6tsvNoPW
z0|3E+PePF@0M=LG9!&@kN9B3NncA=kAY`m)bA<l@R)x(OTYJ`~5eFh@6NV?A3;};c
zY$e@_w(pF0b-NS7x0GJ8J^_LXZX*LK5TRb&3!ln|oRG=hh$;7ujZ(*MU-yDBBbM5_
z5c`-~YuO%_g$f*w2MMVykSz<Kaz(|$HI&gnBGit$$BR5b;!eR59b*D{2;)U7EjM^*
zX-uwVJX1_}hAx{4q~rR+(nU!QK<#s~P6Z)G8uu`y`f5s{*c<RUV6{ULLt<)WW%*-G
zS$iYp#YKS~17m+TDlijUW-=S7^mqEik?Itra!(t;LFD*(*G4*A&VzT4j9^wI5MR6?
z(kSOC`@+AHq!6N{IOBL)XfFvX{a~U{oI4MUTBxe~!bETt3`{h{mL017vEu`-9^FLv
z#2d0(#<$i1M?po^`FDtwA`g%&mpr#*4LjNS#^61JziZoy1d6=0xJG8fI8Gm&<_qwf
zd|VM>4J;ib`#%^!xj>Q8^F_wcxqI4p9yyqh0_>fK)-)>K9~!`;VHr8ehAE_4!?Z#!
zEy1+z=(y}LZVz<Xhd?odIR5}Rh=I4u_m8PV(XL7g2Kofszs_(QFZQ5}TO$r|!bgC3
zB0RRV7>`z|7I8WLFhIk&&y~hn7N5w7@@AAmAD}aw+>L5O+poqw=n<7_9DpCJnupTG
zz-M|k;J{BMALBF&5O*>u3^l;GO9B;mz~@V*?O=cu7w0Lu3G7d1h^2{^)UyE+?_kqc
zr!Ds5wOvSA9h;9iK_Cz)Ad-pgz|{<3X(;Q|%@EYP>jLYrWCFbWWAaZ`DGh)pJh-`B
z?W-ABL?Pt6W(%ijk=fvCy}t8QiD*^yZXS&F8!qLDk>O{1#HOO&(bvJM;;A|9*~`xr
z_PMA;hN&Y^x(~)008`M!sSS=U4J5uQD?}tip}`g)(w@!hj4BgCDo!%|J}ayfLL<N}
zU{L!iYxr!?8*Iz65!1+B90yIoj4`<i3$UGGscE%rHV3uESKD1|bOuMPQY54hT0(+t
z?SO4SMmZQBSV^oUM%gh4ocdwW%G~q<!Jt@b&0}cZQgG4azz9Qn2&+y#ZOeGDC~paC
z2+e4NiC8Lrvvcm+Dad1#q8W0&R~o{bxIxF(Ja!1ccyJJCi)nOYIx1tUCmznsTIp>Q
zObgcV72hHCFc3$z-nBoR2V{U=`*JN%B6op2q!r)D;;L8T4jbMrNlro65$fY6qzVb-
z`NH#xk0t9O(Cn{}G0~IhL=i_c?rh4b;}D8_v*#2p`ZVz2iw@_({{WXGZjM)vE0%5a
zyFzQzzs^;tl$3Zs4oXXoyiAzdEw+5)2pf~{76=9N#&XR72FfqUesUBk$+x$EIU6EX
zYk+mwC{8c{0ylJ+>7XsKoXSS=aUqIas#gSFu8a@SQm9o)0t@R9BgzO#v3Fihc*6!V
z09p}HI@y4E;Y{Q}@*I;{dki=f+6wVI91hhoigE~$?Z$DWle=Odk%5L<$epk->^w7^
zl|XY|F?7{dAZFiH;}prIqK|o|M6&aO(FS(i$gar66&z15ZZUr-=8)A&YrH@-@s8j(
zO(wkJawy71NhdLj!4|Ex_QA@<9bzDu_lq?bODf)T<hU0{lv%Tb5E<4jrkKvLY6{sR
z+j)E}-VP)Ov`SR?xgPc=&1`5p@^P246P7#De>hZ+x=6bt+V9RL+Z2+dV0Xas%b`db
zl!#YoW{22^ARPz*Q-gVEpulgrWBI@{Xt2(C*I4-LqLz8o9JCp*PvGI<B_kw?3>FD)
z2w563ATfI56i`Q(8Q;P2d}J-4kPckCsJ?ClB%44SaJ<X4!PW&x02Hp3A2_I0rYRC{
zMoCg;3mXDn87dYuAVuM0HO@HFniNz5YM+eYw3Lw`{brLgNf3EGnB0soB*n=p(W%*^
zj<8E^=#LnTEonLX#SoDM*yACsVWLydoH7C{4CS!37ITU3Q~}oU!cwKFK@Z*%8Qeg<
z{{T2SQbcd#G>||IoF(H1bQ?Xo-b6>(8tPs#4I(!*b)OU$iIc?kqrm-f_{E+85x2Yu
zpzxjjnY-G&Jn?|8!Xm>p)JjfoN9Pk8pkh#{Egm{yz{TlJ*8=!5rjV2#hYwgC60IQc
zbeMF+Vy!9-@b2dL*tja;9!13Aunx3ovL84Cq5(TPaDZshqj$U|h}(DrKIHqz#A!yO
z#k$kOfX-Mqa>-~FsnZA2-5^4Cau%;{QVG<!Km~y<CB%~&lBWx1xL<kB!$MbFgma@@
zF<m@ST$-ahW-?5NnGkXUxI2%Z7XcMo0%*YZYa5P%g1_DeH6<px>R^lnH`Z~<CWu6g
zE>mu?R+5&cp=qtF;KygtcnZ@%gx(nFM_>XfZigVeVnwY_R-Gm2$}sl_Xqy;?8si;U
zRSMsTC>Luth5);b-98g30Qql^S+n#dG+hGt>k|aK<6oS`P`MziUnKR4h85qx8rtFn
zVL(3Gn7JS-H~4&GDp8^*r_cOjK{B_L;nRrFMNv7S8_kCxBWqpo`^(C_EDyB*02m`z
zuM76rc4Y&6A|{>}{9xJvBKn-|d}RSVAS3X9Y?ClLWN*9^#wr$w&2ItMyqbpDvhKSS
z70&mYqc;868Ups3xSf9>g=@~?k5!aSb@al(1(aYayt}NzoGTWT)aUkMaB5-fM0Rfe
zOd?O1A=NZIzqUA^3<T4X?&Tl_3Y_F`o$E;`dQ%C2Nk~?Wb137@<_Bt5&%=&Lh=sc2
zE|d*}7;Baz_G6&Ysvpe3;o=~lk9fi^)HEkw;~AI0lU}oIjkMcfSFPhc;)2CrMw|eS
zpw`!qeB!Mk?1#tJR6>d=!gxI3j?{rpeluZkoh)uvbdrvlsenX!sm^zm7n`j=zt%b$
zCXnZN=xH`$=QZznr}$KnrYqOV$dUzh*VX-v$;-qA4~tCDesB#EC22VFZ{7%>8mCaP
zc`!ymt>WiOyT`cl<Q5W!%(^n6sW-M|2_=_!ZO9W|((g1P1w&9Fq}#~FqzaN~pDiz}
z4Oy#Ji4OKyxoe_jYY7v`C#+q;3vLG(8ZNgm&zTz82OWy;VJmb77fMfmyxBt6WO?MV
zj<B`24Jbmsjg0a*`Yggohyx0agQ!fh29h}thrQ8ttP=Kd3Q$fJ&bNtGx+;uGIR=GW
z&hRe~)bWS&$&h%;)16D~fdI-7`@ywEu12>GplC3DSuzwbIuF^6v<7QL=Ef{eRyIX<
[base85-encoded binary image data omitted]

diff --git a/doc/tutorials/videoio/orbbec-astra/images/astra_depth.png b/doc/tutorials/videoio/orbbec-astra/images/astra_depth.png
new file mode 100644
index 0000000000000000000000000000000000000000..6fe2c6cd381c6ef00307eeb0e39bf06ab0eedb7f
GIT binary patch
literal 29766
[base85-encoded binary data for astra_depth.png omitted]

diff --git a/doc/tutorials/videoio/orbbec-astra/orbbec_astra.markdown b/doc/tutorials/videoio/orbbec-astra/orbbec_astra.markdown
new file mode 100644
index 0000000000..664e4f6dfe
--- /dev/null
+++ b/doc/tutorials/videoio/orbbec-astra/orbbec_astra.markdown
@@ -0,0 +1,150 @@
+Using Orbbec Astra 3D cameras {#tutorial_orbbec_astra}
+======================================================
+
+@prev_tutorial{tutorial_kinect_openni}
+@next_tutorial{tutorial_intelperc}
+
+
+### Introduction
+
+This tutorial is devoted to the Astra Series of Orbbec 3D cameras (https://orbbec3d.com/product-astra-pro/).
+These cameras have a depth sensor in addition to a common color sensor. The depth sensor can be read
+using the OpenNI interface with the @ref cv::VideoCapture class. The video stream is provided through
+the regular camera interface.
+
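+For instance, a minimal sketch of how the depth sensor is opened through this interface (the
+complete sample later in this tutorial opens both the color and the depth stream):
+@code{.cpp}
+VideoCapture depth(CAP_OPENNI2_ASTRA); // OpenNI2 backend id used for the Astra depth sensor
+@endcode
+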
+### Installation Instructions
+
+In order to use a depth sensor with OpenCV you should perform the following steps:
+
+-#  Download the latest version of the Orbbec OpenNI SDK from <https://orbbec3d.com/develop/>.
+    Unzip the archive, choose the build according to your operating system, and follow the
+    installation steps provided in the Readme file. For instance, if you use 64-bit GNU/Linux, run:
+    @code{.bash}
+    $ cd Linux/OpenNI-Linux-x64-2.3.0.63/
+    $ sudo ./install.sh
+    @endcode
+    When you are done with the installation, make sure to replug your device for the udev rules to
+    take effect. The camera should now work as a general camera device. Note that your current user
+    should belong to the `video` group to have access to the camera. Also, make sure to source the
+    `OpenNIDevEnvironment` file:
+    @code{.bash}
+    $ source OpenNIDevEnvironment
+    @endcode
+
+-#  Run the following commands to verify that the OpenNI library and header files can be found. You
+    should see output similar to the following in your terminal:
+    @code{.bash}
+    $ echo $OPENNI2_INCLUDE
+    /home/user/OpenNI_2.3.0.63/Linux/OpenNI-Linux-x64-2.3.0.63/Include
+    $ echo $OPENNI2_REDIST
+    /home/user/OpenNI_2.3.0.63/Linux/OpenNI-Linux-x64-2.3.0.63/Redist
+    @endcode
+    If the above two variables are empty, then you need to source `OpenNIDevEnvironment` again. Now you can
+    configure OpenCV with OpenNI support enabled by setting the `WITH_OPENNI2` flag in CMake.
+    You may also want to enable the `BUILD_EXAMPLES` flag to get a code sample working with your Astra camera.
+    Run the following commands in the directory containing the OpenCV source code to enable OpenNI support:
+    @code{.bash}
+    $ mkdir build
+    $ cd build
+    $ cmake -DWITH_OPENNI2=ON ..
+    @endcode
+    If the OpenNI library is found, OpenCV will be built with OpenNI2 support. You can see the status of OpenNI2
+    support in the CMake log:
+    @code{.text}
+    --   Video I/O:
+    --     DC1394:                      YES (2.2.6)
+    --     FFMPEG:                      YES
+    --       avcodec:                   YES (58.91.100)
+    --       avformat:                  YES (58.45.100)
+    --       avutil:                    YES (56.51.100)
+    --       swscale:                   YES (5.7.100)
+    --       avresample:                NO
+    --     GStreamer:                   YES (1.18.1)
+    --     OpenNI2:                     YES (2.3.0)
+    --     v4l/v4l2:                    YES (linux/videodev2.h)
+    @endcode
+
+-#  Build OpenCV:
+    @code{.bash}
+    $ make
+    @endcode
+
+### Code
+
+To get both depth and color frames, two @ref cv::VideoCapture objects should be created:
+
+@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Open streams
+
+The first object will use the regular Video4Linux2 interface to access the color sensor. The second one
+uses the OpenNI2 API to retrieve depth data.
+
+Before using the created VideoCapture objects you may want to set up stream parameters by setting the
+objects' properties. The most important parameters are frame width, frame height and fps:
+
+@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Setup streams
+
+To set and get some property of a sensor data generator, use the @ref cv::VideoCapture::set and
+@ref cv::VideoCapture::get methods respectively, e.g.:
+
+@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Get properties
+
+The following properties of cameras available through the OpenNI interface are supported for the
+depth generator:
+
+-   @ref cv::CAP_PROP_FRAME_WIDTH -- Frame width in pixels.
+-   @ref cv::CAP_PROP_FRAME_HEIGHT -- Frame height in pixels.
+-   @ref cv::CAP_PROP_FPS -- Frame rate in FPS.
+-   @ref cv::CAP_PROP_OPENNI_REGISTRATION -- Flag that enables registration, remapping the depth
+    map to the image map by changing the depth generator's viewpoint (if the flag is "on"), or
+    sets this viewpoint back to its normal one (if the flag is "off"). The images produced by the
+    registration process are pixel-aligned, which means that every pixel in the image is aligned
+    to a pixel in the depth image.
+-   @ref cv::CAP_PROP_OPENNI2_MIRROR -- Flag to enable or disable mirroring for this stream. Set to 0
+    to disable mirroring.
+
+    The following properties are available for getting only (see the sketch after this list):
+
+-   @ref cv::CAP_PROP_OPENNI_FRAME_MAX_DEPTH -- A maximum supported depth of the camera in mm.
+-   @ref cv::CAP_PROP_OPENNI_BASELINE -- Baseline value in mm.
+
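+As a sketch, these read-only values can be queried like any other property (the printed numbers
+depend on the particular camera):
+@code{.cpp}
+double maxDepth = depthStream.get(CAP_PROP_OPENNI_FRAME_MAX_DEPTH); // in mm
+double baseline = depthStream.get(CAP_PROP_OPENNI_BASELINE);        // in mm
+cout << "Max depth: " << maxDepth << " mm, baseline: " << baseline << " mm" << endl;
+@endcode
+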
+After the VideoCapture objects are set up you can start reading frames from them.
+
+@note
+    OpenCV's VideoCapture provides a synchronous API, so you have to grab frames in a new thread
+    to avoid one stream blocking while another stream is being read. VideoCapture is not a
+    thread-safe class, so you need to be careful to avoid any possible deadlocks or data races.
+
+An example implementation that grabs frames from each sensor in a separate thread and stores them
+in a list along with their timestamps:
+
+@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Read streams
+
+VideoCapture can retrieve the following data (a retrieval sketch follows the list):
+
+-#  data given from the depth generator:
+    -   @ref cv::CAP_OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1)
+    -   @ref cv::CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)
+    -   @ref cv::CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)
+    -   @ref cv::CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)
+    -   @ref cv::CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded, etc.)
+        (CV_8UC1)
+
+-#  data given from the color sensor is a regular BGR image (CV_8UC3).
+
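+As an illustration, a hedged sketch of retrieving one of these outputs after a successful grab
+(the sample below uses CAP_OPENNI_DEPTH_MAP; any of the flags above can be passed instead):
+@code{.cpp}
+Mat cloud;
+if (depthStream.grab())
+    depthStream.retrieve(cloud, CAP_OPENNI_POINT_CLOUD_MAP); // CV_32FC3, XYZ in meters
+@endcode
+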
+When new data is available, a reading thread notifies the main thread and appends the frame to the
+ordered list -- the frame at the front of the list is the oldest one:
+
+@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Show color frame
+
+Depth frames can be picked the same way from the `depthFrames` list.
+
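+For display, the 16-bit depth map can be compressed to 8 bits and false-colored; a minimal sketch
+(the 2500 mm scale factor matches the sample code):
+@code{.cpp}
+Mat d8, dColor;
+depthMap.convertTo(d8, CV_8U, 255.0 / 2500); // map 0..2500 mm to 0..255
+applyColorMap(d8, dColor, COLORMAP_OCEAN);   // false-color for easier viewing
+imshow("Depth (colored)", dColor);
+@endcode
+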
+After that, you'll have two frames: one containing color information and the other containing
+depth information. In the sample images below you can see the color frame and the depth frame showing
+the same scene. Looking at the color frame it's hard to distinguish plant leaves from leaves painted
+on a wall, but the depth data makes it easy.
+
+![Color frame](images/astra_color.jpg)
+![Depth frame](images/astra_depth.png)
+
+The complete implementation can be found in
+[orbbec_astra.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp)
+in the `samples/cpp/tutorial_code/videoio` directory.
diff --git a/doc/tutorials/videoio/table_of_content_videoio.markdown b/doc/tutorials/videoio/table_of_content_videoio.markdown
index b27726bd87..393a0fc236 100644
--- a/doc/tutorials/videoio/table_of_content_videoio.markdown
+++ b/doc/tutorials/videoio/table_of_content_videoio.markdown
@@ -26,6 +26,10 @@ This section contains tutorials about how to read/save your video files.
 
     *Languages:* C++
 
+-   @subpage tutorial_orbbec_astra
+
+    *Languages:* C++
+
 -   @subpage tutorial_intelperc
 
-    *Languages:* C++
\ No newline at end of file
+    *Languages:* C++
diff --git a/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp b/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp
new file mode 100644
index 0000000000..a6dc6dd75c
--- /dev/null
+++ b/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp
@@ -0,0 +1,195 @@
+#include <opencv2/videoio/videoio.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include <list>
+#include <iostream>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <atomic>
+#include <chrono>
+
+using namespace cv;
+using std::cout;
+using std::cerr;
+using std::endl;
+
+
+// Stores frames along with their timestamps
+struct Frame
+{
+    int64 timestamp;
+    Mat frame;
+};
+
+int main()
+{
+    //! [Open streams]
+    // Open color stream
+    VideoCapture colorStream(CAP_V4L2);
+    // Open depth stream
+    VideoCapture depthStream(CAP_OPENNI2_ASTRA);
+    //! [Open streams]
+
+    // Check that the color stream has opened
+    if (!colorStream.isOpened())
+    {
+        cerr << "ERROR: Unable to open color stream" << endl;
+        return 1;
+    }
+
+    // Check that the depth stream has opened
+    if (!depthStream.isOpened())
+    {
+        cerr << "ERROR: Unable to open depth stream" << endl;
+        return 1;
+    }
+
+    //! [Setup streams]
+    // Set color and depth stream parameters
+    colorStream.set(CAP_PROP_FRAME_WIDTH,  640);
+    colorStream.set(CAP_PROP_FRAME_HEIGHT, 480);
+    depthStream.set(CAP_PROP_FRAME_WIDTH,  640);
+    depthStream.set(CAP_PROP_FRAME_HEIGHT, 480);
+    depthStream.set(CAP_PROP_OPENNI2_MIRROR, 0);
+    //! [Setup streams]
+
+    // Print color stream parameters
+    cout << "Color stream: "
+         << colorStream.get(CAP_PROP_FRAME_WIDTH) << "x" << colorStream.get(CAP_PROP_FRAME_HEIGHT)
+         << " @" << colorStream.get(CAP_PROP_FPS) << " fps" << endl;
+
+    //! [Get properties]
+    // Print depth stream parameters
+    cout << "Depth stream: "
+         << depthStream.get(CAP_PROP_FRAME_WIDTH) << "x" << depthStream.get(CAP_PROP_FRAME_HEIGHT)
+         << " @" << depthStream.get(CAP_PROP_FPS) << " fps" << endl;
+    //! [Get properties]
+
+    //! [Read streams]
+    // Create two lists to store frames
+    std::list<Frame> depthFrames, colorFrames;
+    std::mutex depthFramesMtx, colorFramesMtx;
+    const std::size_t maxFrames = 64; // cap the queues so a slow consumer does not grow them unboundedly
+
+    // Synchronization objects
+    std::mutex mtx;
+    std::condition_variable dataReady;
+    std::atomic<bool> isFinish(false);
+
+    // Start depth reading thread
+    std::thread depthReader([&]
+    {
+        while (!isFinish)
+        {
+            // Grab and decode new frame
+            if (depthStream.grab())
+            {
+                Frame f;
+                f.timestamp = cv::getTickCount();
+                depthStream.retrieve(f.frame, CAP_OPENNI_DEPTH_MAP);
+                //depthStream.retrieve(f.frame, CAP_OPENNI_DISPARITY_MAP);
+                //depthStream.retrieve(f.frame, CAP_OPENNI_IR_IMAGE);
+                if (f.frame.empty())
+                {
+                    cerr << "ERROR: Failed to decode frame from depth stream" << endl;
+                    break;
+                }
+
+                {
+                    std::lock_guard<std::mutex> lk(depthFramesMtx);
+                    if (depthFrames.size() >= maxFrames)
+                        depthFrames.pop_front();
+                    depthFrames.push_back(f);
+                }
+                dataReady.notify_one();
+            }
+        }
+    });
+
+    // Start color reading thread
+    std::thread colorReader([&]
+    {
+        while (!isFinish)
+        {
+            // Grab and decode new frame
+            if (colorStream.grab())
+            {
+                Frame f;
+                f.timestamp = cv::getTickCount();
+                colorStream.retrieve(f.frame);
+                if (f.frame.empty())
+                {
+                    cerr << "ERROR: Failed to decode frame from color stream" << endl;
+                    break;
+                }
+
+                {
+                    std::lock_guard<std::mutex> lk(colorFramesMtx);
+                    if (colorFrames.size() >= maxFrames)
+                        colorFrames.pop_front();
+                    colorFrames.push_back(f);
+                }
+                dataReady.notify_one();
+            }
+        }
+    });
+    //! [Read streams]
+
+    while (true)
+    {
+        std::unique_lock<std::mutex> lk(mtx);
+        while (depthFrames.empty() && colorFrames.empty())
+            dataReady.wait(lk);
+
+        depthFramesMtx.lock();
+        if (depthFrames.empty())
+        {
+            depthFramesMtx.unlock();
+        }
+        else
+        {
+            // Get a frame from the list
+            Mat depthMap = depthFrames.front().frame;
+            depthFrames.pop_front();
+            depthFramesMtx.unlock();
+
+            // Show depth frame
+            Mat d8, dColor;
+            depthMap.convertTo(d8, CV_8U, 255.0 / 2500);
+            applyColorMap(d8, dColor, COLORMAP_OCEAN);
+            imshow("Depth (colored)", dColor);
+        }
+
+        //! [Show color frame]
+        colorFramesMtx.lock();
+        if (colorFrames.empty())
+        {
+            colorFramesMtx.unlock();
+        }
+        else
+        {
+            // Get a frame from the list
+            Mat colorFrame = colorFrames.front().frame;
+            colorFrames.pop_front();
+            colorFramesMtx.unlock();
+
+            // Show color frame
+            imshow("Color", colorFrame);
+        }
+        //! [Show color frame]
+
+        // Exit on Esc key press
+        int key = waitKey(1);
+        if (key == 27) // ESC
+            break;
+    }
+
+    isFinish = true;
+    depthReader.join();
+    colorReader.join();
+
+    return 0;
+}
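Note: the Frame struct above carries a timestamp that this listing never consumes; a natural next step is to pair depth and color frames by closest timestamp before display. A minimal sketch of such pairing (the helper pairClosest is illustrative, not part of the sample, and assumes the caller already holds both list mutexes):

```cpp
#include <opencv2/core.hpp>
#include <cstdlib>
#include <list>

struct Frame { int64_t timestamp; cv::Mat frame; };

// Illustrative helper: pop the depth frame at the head of its queue together
// with the color frame whose timestamp is closest to it. The caller must
// already hold both list mutexes. Returns false if either queue is empty.
static bool pairClosest(std::list<Frame>& depthFrames, std::list<Frame>& colorFrames,
                        Frame& depthOut, Frame& colorOut)
{
    if (depthFrames.empty() || colorFrames.empty())
        return false;
    depthOut = depthFrames.front();
    auto best = colorFrames.begin();
    for (auto it = colorFrames.begin(); it != colorFrames.end(); ++it)
        if (std::abs(it->timestamp - depthOut.timestamp) <
            std::abs(best->timestamp - depthOut.timestamp))
            best = it;
    colorOut = *best;
    colorFrames.erase(best);
    depthFrames.pop_front();
    return true;
}
```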
diff --git a/samples/cpp/videocapture_openni.cpp b/samples/cpp/videocapture_openni.cpp
index 0b67d92f61..5b4b23f19b 100644
--- a/samples/cpp/videocapture_openni.cpp
+++ b/samples/cpp/videocapture_openni.cpp
@@ -61,7 +61,7 @@ static void printCommandLineParams()
     cout << "-fmd=      Fixed max disparity? (0 or 1; 0 by default) Ignored if disparity map is not colorized (-cd 0)." << endl;
     cout << "-mode=     image mode: resolution and fps, supported three values:  0 - CAP_OPENNI_VGA_30HZ, 1 - CAP_OPENNI_SXGA_15HZ," << endl;
     cout << "          2 - CAP_OPENNI_SXGA_30HZ (0 by default). Ignored if rgb image or gray image are not selected to show." << endl;
-    cout << "-m=        Mask to set which output images are need. It is a string of size 5. Each element of this is '0' or '1' and" << endl;
+    cout << "-m=        Mask to set which output images are need. It is a string of size 6. Each element of this is '0' or '1' and" << endl;
     cout << "          determine: is depth map, disparity map, valid pixels mask, rgb image, gray image need or not (correspondently), ir image" << endl ;
     cout << "          By default -m=010100 i.e. disparity map and rgb image will be shown." << endl ;
     cout << "-r=        Filename of .oni video file. The data will grabbed from it." << endl ;

From bb067c7ebff9e80c86a15777e33b552218ef23f0 Mon Sep 17 00:00:00 2001
From: Ian Maquignaz <ian.maquignaz@gmail.com>
Date: Thu, 19 Nov 2020 06:20:20 -0500
Subject: [PATCH 125/152] Merge pull request #18849 from
 IanMaquignaz:fix_findFundamentalMat_parameters

Minimum change to address issue #18837
---
 modules/calib3d/include/opencv2/calib3d.hpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index 7ce36c6fed..22c11cfac7 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -2153,10 +2153,10 @@ CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
 floating-point (single or double precision).
 @param points2 Array of the second image points of the same size and format as points1 .
 @param method Method for computing a fundamental matrix.
--   **CV_FM_7POINT** for a 7-point algorithm. \f$N = 7\f$
--   **CV_FM_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
--   **CV_FM_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
--   **CV_FM_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
+-   @ref FM_7POINT for a 7-point algorithm. \f$N = 7\f$
+-   @ref FM_8POINT for an 8-point algorithm. \f$N \ge 8\f$
+-   @ref FM_RANSAC for the RANSAC algorithm. \f$N \ge 8\f$
+-   @ref FM_LMEDS for the LMedS algorithm. \f$N \ge 8\f$
 @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
 line in pixels, beyond which the point is considered an outlier and is not used for computing the
 final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
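For context, a minimal call site using the renamed flag might look like the sketch below (the wrapper estimateF is illustrative; cv::findFundamentalMat, cv::FM_RANSAC and the mask output are the documented API):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

// Estimate F from point correspondences with RANSAC; on return, mask[i] != 0
// marks correspondence i as an inlier of the final model.
cv::Mat estimateF(const std::vector<cv::Point2f>& pts1,
                  const std::vector<cv::Point2f>& pts2,
                  std::vector<uchar>& mask)
{
    return cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC,
                                  3.0,   // ransacReprojThreshold, in pixels
                                  0.99,  // confidence
                                  mask);
}
```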

From 11cfa64a10a8aa791df5783dcee0a500c7bc4eb1 Mon Sep 17 00:00:00 2001
From: chargerKong <49099366+chargerKong@users.noreply.github.com>
Date: Fri, 20 Nov 2020 00:59:33 +0800
Subject: [PATCH 126/152] Merge pull request #18335 from chargerKong:master

Ordinary quaternion

* version 1.0

* add assumeUnit;
add UnitTest;
check boundary value;
fix the func using method: func(obj);
fix 4x4;
add rodrigues vector transformation;
fix mat to quat;

* fix blank and tab

* fix blank and tab
modify test;cpp to hpp

* mainly improve comment;
add rvec2Quat;fix toRodrigues;
fix throw to CV_Error

* fix bug of quatd * int;
combine hpp and cpp;
fix << overload error in win system;
modify include in test file;

* move implementation to quaternion.ini.hpp;
change some constructor to createFrom* function;
change Rodrigues vector to rotation vector;
change the matexpr to mat of 3x3 return type;
improve comments;

* try fix log function error in win

* add enums for assumeUnit;
improve docs;
add using std::cos funcs

* remove using std::* from header;
add std::* in affine.hpp,warpers_inl.hpp;

* quat: coding style

* quat: AssumeType => QuatAssumeType
---
 modules/core/include/opencv2/core/affine.hpp  |    2 +-
 .../core/include/opencv2/core/quaternion.hpp  | 1194 +++++++++++++++++
 .../include/opencv2/core/quaternion.inl.hpp   |  849 ++++++++++++
 modules/core/test/test_quaternion.cpp         |  255 ++++
 .../opencv2/stitching/detail/warpers_inl.hpp  |    6 +-
 5 files changed, 2302 insertions(+), 4 deletions(-)
 create mode 100644 modules/core/include/opencv2/core/quaternion.hpp
 create mode 100644 modules/core/include/opencv2/core/quaternion.inl.hpp
 create mode 100644 modules/core/test/test_quaternion.cpp

diff --git a/modules/core/include/opencv2/core/affine.hpp b/modules/core/include/opencv2/core/affine.hpp
index 7e2ed30785..1806382e99 100644
--- a/modules/core/include/opencv2/core/affine.hpp
+++ b/modules/core/include/opencv2/core/affine.hpp
@@ -499,7 +499,7 @@ typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
     double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
     double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
     c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
-    double theta = acos(c);
+    double theta = std::acos(c);
 
     if( s < 1e-5 )
     {
diff --git a/modules/core/include/opencv2/core/quaternion.hpp b/modules/core/include/opencv2/core/quaternion.hpp
new file mode 100644
index 0000000000..c72ee8c37f
--- /dev/null
+++ b/modules/core/include/opencv2/core/quaternion.hpp
@@ -0,0 +1,1194 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2020, Huawei Technologies Co., Ltd. All rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Author: Liangqian Kong <chargerKong@126.com>
+//         Longbu Wang <riskiest@gmail.com>
+#ifndef OPENCV_CORE_QUATERNION_HPP
+#define OPENCV_CORE_QUATERNION_HPP
+
+#include <opencv2/core.hpp>
+#include <iostream>
+namespace cv
+{
+//! @addtogroup core
+//! @{
+
+//! Unit quaternion flag
+enum QuatAssumeType
+{
+    /**
+     * This flag is specified by default.
+     * If this flag is specified, the input quaternions are not assumed to be unit quaternions.
+     * It guarantees the correctness of the calculations,
+     * although the computation will be slower than with the flag QUAT_ASSUME_UNIT.
+     */
+    QUAT_ASSUME_NOT_UNIT,
+    /**
+     * If this flag is specified, the input quaternions are assumed to be unit quaternions, which
+     * will save some computations. However, if this flag is specified for non-unit quaternions,
+     * the correctness of the result is not guaranteed.
+     */
+    QUAT_ASSUME_UNIT
+};
+
+template <typename _Tp> class Quat;
+template <typename _Tp> std::ostream& operator<<(std::ostream&, const Quat<_Tp>&);
+
+/**
+ * Quaternion is a number system that extends the complex numbers. It can be expressed as a
+ * rotation in three-dimensional space.
+ * A quaternion is generally represented in the form:
+ *      \f[q = w + x\boldsymbol{i} + y\boldsymbol{j} + z\boldsymbol{k}\f]
+ *      \f[q = [w, x, y, z]\f]
+ *      \f[q = [w, \boldsymbol{v}] \f]
+ *      \f[q = ||q||[\cos\psi, u_x\sin\psi,u_y\sin\psi,  u_z\sin\psi].\f]
+ *      \f[q = ||q||[\cos\psi, \boldsymbol{u}\sin\psi]\f]
+ * where \f$\psi = \frac{\theta}{2}\f$, \f$\theta\f$ represents rotation angle,
+ * \f$\boldsymbol{u} = [u_x, u_y, u_z]\f$ represents normalized rotation axis,
+ * and \f$||q||\f$ represents the norm of \f$q\f$.
+ *
+ * A unit quaternion usually represents a rotation, which has the form:
+ *      \f[q = [\cos\psi, u_x\sin\psi,u_y\sin\psi,  u_z\sin\psi].\f]
+ *
+ * To create a quaternion representing the rotation around the axis \f$\boldsymbol{u}\f$
+ * with angle \f$\theta\f$, you can use
+ * ```
+ * using namespace cv;
+ * double angle = CV_PI;
+ * Vec3d axis = {0, 0, 1};
+ * Quatd q = Quatd::createFromAngleAxis(angle, axis);
+ * ```
+ *
+ * You can simply use four numbers of the same type to create a quaternion
+ * ```
+ * Quatd q(1, 2, 3, 4);
+ * ```
+ * Or use a Vec4d or Vec4f vector.
+ * ```
+ * Vec4d vec{1, 2, 3, 4};
+ * Quatd q(vec);
+ * ```
+ *
+ * ```
+ * Vec4f vec{1, 2, 3, 4};
+ * Quatf q(vec);
+ * ```
+ *
+ * If you already have a 3x3 rotation matrix R, then you can use
+ * ```
+ * Quatd q = Quatd::createFromRotMat(R);
+ * ```
+ *
+ * If you already have a rotation vector rvec which has the form of `angle * axis`, then you can use
+ * ```
+ * Quatd q = Quatd::createFromRvec(rvec);
+ * ```
+ *
+ * To extract the rotation matrix from quaternion, see toRotMat3x3()
+ *
+ * To extract the Vec4d or Vec4f, see toVec()
+ *
+ * To extract the rotation vector, see toRotVec()
+ *
+ * If two quaternions \f$q_0, q_1\f$ need to be interpolated, you can use nlerp(), slerp() or spline()
+ * ```
+ * Quatd::nlerp(q0, q1, t)
+ *
+ * Quatd::slerp(q0, q1, t)
+ *
+ * Quatd::spline(q0, q0, q1, q1, t)
+ * ```
+ * spline() can smoothly connect rotations of multiple quaternions.
+ *
+ * There are three ways to get an element of a quaternion:
+ * ```
+ * Quatf q(1,2,3,4);
+ * std::cout << q.w << std::endl; // w=1, x=2, y=3, z=4
+ * std::cout << q[0] << std::endl; // q[0]=1, q[1]=2, q[2]=3, q[3]=4
+ * std::cout << q.at(0) << std::endl;
+ * ```
+ */
+template <typename _Tp>
+class Quat
+{
+    static_assert(std::is_floating_point<_Tp>::value, "Quaternion only makes sense with type of float or double");
+    using value_type = _Tp;
+
+public:
+    static constexpr _Tp CV_QUAT_EPS = (_Tp)1.e-6;
+
+    Quat();
+
+    /**
+     * @brief From Vec4d or Vec4f.
+     */
+    explicit Quat(const Vec<_Tp, 4> &coeff);
+
+    /**
+     * @brief from four numbers.
+     */
+    Quat(_Tp w, _Tp x, _Tp y, _Tp z);
+
+    /**
+     * @brief from an angle and an axis. The axis will be normalized in this function,
+     * and it generates
+     * \f[q = [\cos\psi, u_x\sin\psi,u_y\sin\psi,  u_z\sin\psi].\f]
+     * where \f$\psi = \frac{\theta}{2}\f$, \f$\theta\f$ is the rotation angle.
+     */
+    static Quat<_Tp> createFromAngleAxis(const _Tp angle, const Vec<_Tp, 3> &axis);
+
+    /**
+     * @brief from a 3x3 rotation matrix.
+     */
+    static Quat<_Tp> createFromRotMat(InputArray R);
+
+    /**
+     * @brief from a rotation vector
+     * \f$r\f$ has the form \f$\theta \cdot \boldsymbol{u}\f$, where \f$\theta\f$
+     * represents rotation angle and \f$\boldsymbol{u}\f$ represents normalized rotation axis.
+     *
+     * Angle and axis could be easily derived as:
+     * \f[
+     * \begin{equation}
+     * \begin{split}
+     * \psi &= ||r||\\
+     * \boldsymbol{u} &= \frac{r}{\theta}
+     * \end{split}
+     * \end{equation}
+     * \f]
+     * Then a quaternion can be calculated by
+     *  \f[q = [\cos\psi, \boldsymbol{u}\sin\psi]\f]
+     *  where \f$\psi = \theta / 2 \f$
+     */
+    static Quat<_Tp> createFromRvec(InputArray rvec);
+
+    /**
+     * @brief a way to get an element.
+     * @param index index in the range [0, 3].
+     *
+     * For a quaternion q:
+     *
+     * q.at(0) is equivalent to q.w,
+     *
+     * q.at(1) is equivalent to q.x,
+     *
+     * q.at(2) is equivalent to q.y,
+     *
+     * q.at(3) is equivalent to q.z.
+     */
+    _Tp at(size_t index) const;
+
+    /**
+     * @brief return the conjugate of this quaternion.
+     * \f[q.conjugate() = (w, -x, -y, -z).\f]
+     */
+    Quat<_Tp> conjugate() const;
+
+    /**
+     *
+     * @brief return the value of the exponential function.
+     * \f[\exp(q) = e^w (\cos||\boldsymbol{v}||+ \frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sin||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example:
+     * ```
+     * Quatd q{1,2,3,4};
+     * cout << exp(q) << endl;
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> exp(const Quat<T> &q);
+
+    /**
+     * @brief return the value of the exponential function.
+     * \f[\exp(q) = e^w (\cos||\boldsymbol{v}||+ \frac{v}{||\boldsymbol{v}||}\sin||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q{1,2,3,4};
+     * cout << q.exp() << endl;
+     * ```
+     */
+    Quat<_Tp> exp() const;
+
+    /**
+     * @brief return the value of the logarithm function.
+     * \f[\ln(q) = \ln||q|| + \frac{\boldsymbol{v}}{||\boldsymbol{v}||}\arccos\frac{w}{||q||}.\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, q is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd q1{1,2,3,4};
+     * cout << log(q1) << endl;
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> log(const Quat<T> &q, QuatAssumeType assumeUnit);
+
+    /**
+     * @brief return the value of the logarithm function.
+     *  \f[\ln(q) = \ln||q|| + \frac{\boldsymbol{v}}{||\boldsymbol{v}||}\arccos\frac{w}{||q||}.\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.log();
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * Quatd q1(1,2,3,4);
+     * q1.normalize().log(assumeUnit);
+     * ```
+     */
+    Quat<_Tp> log(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief return the value of the power function with index \f$x\f$.
+     * \f[q^x = ||q||^x(\cos(x\theta) + \boldsymbol{u}\sin(x\theta)).\f]
+     * @param q a quaternion.
+     * @param x index of exponentiation.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, quaternion q is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * power(q, 2);
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * double angle = CV_PI;
+     * Vec3d axis{0, 0, 1};
+     * Quatd q1 = Quatd::createFromAngleAxis(angle, axis); //generate a unit quat by axis and angle
+     * power(q1, 2, assumeUnit);//This assumeUnit means q1 is a unit quaternion.
+     * ```
+     */
+    template <typename T, typename _T>
+    friend Quat<T> power(const Quat<T> &q, _T x, QuatAssumeType assumeUnit);
+
+    /**
+     * @brief return the value of the power function with index \f$x\f$.
+     * \f[q^x = ||q||^x(\cos(x\theta) + \boldsymbol{u}\sin(x\theta)).\f]
+     * @param x index of exponentiation.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.power(2);
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * double angle = CV_PI;
+     * Vec3d axis{0, 0, 1};
+     * Quatd q1 = Quatd::createFromAngleAxis(angle, axis); //generate a unit quat by axis and angle
+     * q1.power(2, assumeUnit); //This assumeUnit means q1 is a unit quaternion
+     * ```
+     */
+    template <typename _T>
+    Quat<_Tp> power(_T x, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief return \f$\sqrt{q}\f$.
+     * @param q a quaternion.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, quaternion q is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatf q(1,2,3,4);
+     * sqrt(q);
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q = {1,0,0,0};
+     * sqrt(q, assumeUnit); //This assumeUnit means q is a unit quaternion.
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> sqrt(const Quat<T> &q, QuatAssumeType assumeUnit);
+
+    /**
+     * @brief return \f$\sqrt{q}\f$.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatf q(1,2,3,4);
+     * q.sqrt();
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q = {1,0,0,0};
+     * q.sqrt(assumeUnit); //This assumeUnit means q is a unit quaternion
+     * ```
+     */
+    Quat<_Tp> sqrt(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief return the value of power function with quaternion \f$q\f$.
+     * \f[p^q = e^{q\ln(p)}.\f]
+     * @param p base quaternion of power function.
+     * @param q index quaternion of power function.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, quaternion \f$p\f$ is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd p(1,2,3,4);
+     * Quatd q(5,6,7,8);
+     * power(p, q);
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * p = p.normalize();
+     * power(p, q, assumeUnit); //This assumeUnit means p is a unit quaternion
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> power(const Quat<T> &p, const Quat<T> &q, QuatAssumeType assumeUnit);
+
+    /**
+     * @brief return the value of power function with quaternion \f$q\f$.
+     * \f[p^q = e^{q\ln(p)}.\f]
+     * @param q index quaternion of power function.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd p(1,2,3,4);
+     * Quatd q(5,6,7,8);
+     * p.power(q);
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * p = p.normalize();
+     * p.power(q, assumeUnit); //This assumeUnit means p is a unit quaternion
+     * ```
+     */
+    Quat<_Tp> power(const Quat<_Tp> &q, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief return the crossProduct between \f$p = (a, b, c, d) = (a, \boldsymbol{u})\f$ and \f$q = (w, x, y, z) = (w, \boldsymbol{v})\f$.
+     * \f[p \times q = \frac{pq- qp}{2}\f]
+     * \f[p \times q = \boldsymbol{u} \times \boldsymbol{v}\f]
+     * \f[p \times q = (cz-dy)i + (dx-bz)j + (by-cx)k \f]
+     *
+     * For example
+     * ```
+     * Quatd q{1,2,3,4};
+     * Quatd p{5,6,7,8};
+     * crossProduct(p, q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> crossProduct(const Quat<T> &p, const Quat<T> &q);
+
+    /**
+     * @brief return the crossProduct between \f$p = (a, b, c, d) = (a, \boldsymbol{u})\f$ and \f$q = (w, x, y, z) = (w, \boldsymbol{v})\f$.
+     * \f[p \times q = \frac{pq- qp}{2}.\f]
+     * \f[p \times q = \boldsymbol{u} \times \boldsymbol{v}.\f]
+     * \f[p \times q = (cz-dy)i + (dx-bz)j + (by-cx)k. \f]
+     *
+     * For example
+     * ```
+     * Quatd q{1,2,3,4};
+     * Quatd p{5,6,7,8};
+     * p.crossProduct(q);
+     * ```
+     */
+    Quat<_Tp> crossProduct(const Quat<_Tp> &q) const;
+
+    /**
+     * @brief return the norm of quaternion.
+     * \f[||q|| = \sqrt{w^2 + x^2 + y^2 + z^2}.\f]
+     */
+    _Tp norm() const;
+
+    /**
+     * @brief return a normalized \f$p\f$.
+     * \f[p = \frac{q}{||q||}\f]
+     * where \f$p\f$ satisfies \f$(p.x)^2 + (p.y)^2 + (p.z)^2 + (p.w)^2 = 1.\f$
+     */
+    Quat<_Tp> normalize() const;
+
+    /**
+     * @brief return \f$q^{-1}\f$ which is an inverse of \f$q\f$
+     * which satisfies \f$q * q^{-1} = 1\f$.
+     * @param q a quaternion.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, quaternion q is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * inv(q);
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q = q.normalize();
+     * inv(q, assumeUnit); //This assumeUnit means q is a unit quaternion
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> inv(const Quat<T> &q, QuatAssumeType assumeUnit);
+
+    /**
+     * @brief return \f$q^{-1}\f$ which is an inverse of \f$q\f$
+     * satisfying \f$q * q^{-1} = 1\f$.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and this function will save some computations.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.inv();
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q = q.normalize();
+     * q.inv(assumeUnit);  //assumeUnit means q is a unit quaternion
+     * ```
+     */
+    Quat<_Tp> inv(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief return sinh value of quaternion q, sinh could be calculated as:
+     * \f[\sinh(p) = \sinh(w)\cos(||\boldsymbol{v}||) + \cosh(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sin||\boldsymbol{v}||\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * sinh(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> sinh(const Quat<T> &q);
+
+    /**
+     * @brief return sinh value of this quaternion, sinh could be calculated as:
+     * \f$\sinh(p) = \sinh(w)\cos(||\boldsymbol{v}||) + \cosh(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sin||\boldsymbol{v}||\f$
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.sinh();
+     * ```
+     */
+    Quat<_Tp> sinh() const;
+
+    /**
+     * @brief return cosh value of quaternion q, cosh could be calculated as:
+     * \f[\cosh(p) = \cosh(w) * \cos(||\boldsymbol{v}||) + \sinh(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sin(||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * cosh(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> cosh(const Quat<T> &q);
+
+    /**
+     * @brief return cosh value of this quaternion, cosh could be calculated as:
+     * \f[\cosh(p) = \cosh(w) * \cos(||\boldsymbol{v}||) + \sinh(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sin(||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.cosh();
+     * ```
+     */
+    Quat<_Tp> cosh() const;
+
+    /**
+     * @brief return tanh value of quaternion q, tanh could be calculated as:
+     * \f[ \tanh(q) = \frac{\sinh(q)}{\cosh(q)}.\f]
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * tanh(q);
+     * ```
+     * @sa sinh, cosh
+     */
+    template <typename T>
+    friend Quat<T> tanh(const Quat<T> &q);
+
+    /**
+     * @brief return tanh value of this quaternion, tanh could be calculated as:
+     * \f[ \tanh(q) = \frac{\sinh(q)}{\cosh(q)}.\f]
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.tanh();
+     * ```
+     * @sa sinh, cosh
+     */
+    Quat<_Tp> tanh() const;
+
+    /**
+     * @brief return sin value of quaternion q, sin could be calculated as:
+     * \f[\sin(p) = \sin(w) * \cosh(||\boldsymbol{v}||) + \cos(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sinh(||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * sin(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> sin(const Quat<T> &q);
+
+    /**
+     * @brief return sin value of this quaternion, sin could be calculated as:
+     * \f[\sin(p) = \sin(w) * \cosh(||\boldsymbol{v}||) + \cos(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sinh(||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.sin();
+     * ```
+     */
+    Quat<_Tp> sin() const;
+
+    /**
+     * @brief return cos value of quaternion q, cos could be calculated as:
+     * \f[\cos(p) = \cos(w) * \cosh(||\boldsymbol{v}||) - \sin(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sinh(||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * cos(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> cos(const Quat<T> &q);
+
+    /**
+     * @brief return cos value of this quaternion, cos could be calculated as:
+     * \f[\cos(p) = \cos(w) * \cosh(||\boldsymbol{v}||) - \sin(w)\frac{\boldsymbol{v}}{||\boldsymbol{v}||}\sinh(||\boldsymbol{v}||)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.cos();
+     * ```
+     */
+    Quat<_Tp> cos() const;
+
+    /**
+     * @brief return tan value of quaternion q, tan could be calculated as:
+     * \f[\tan(q) = \frac{\sin(q)}{\cos(q)}.\f]
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * tan(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> tan(const Quat<T> &q);
+
+    /**
+     * @brief return tan value of this quaternion, tan could be calculated as:
+     * \f[\tan(q) = \frac{\sin(q)}{\cos(q)}.\f]
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.tan();
+     * ```
+     */
+    Quat<_Tp> tan() const;
+
+    /**
+     * @brief return arcsin value of quaternion q, arcsin could be calculated as:
+     * \f[\arcsin(q) = -\frac{\boldsymbol{v}}{||\boldsymbol{v}||}arcsinh(q\frac{\boldsymbol{v}}{||\boldsymbol{v}||})\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * asin(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> asin(const Quat<T> &q);
+
+    /**
+     * @brief return arcsin value of this quaternion, arcsin could be calculated as:
+     * \f[\arcsin(q) = -\frac{\boldsymbol{v}}{||\boldsymbol{v}||}arcsinh(q\frac{\boldsymbol{v}}{||\boldsymbol{v}||})\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.asin();
+     * ```
+     */
+    Quat<_Tp> asin() const;
+
+    /**
+     * @brief return arccos value of quaternion q, arccos could be calculated as:
+     * \f[\arccos(q) = -\frac{\boldsymbol{v}}{||\boldsymbol{v}||}arccosh(q)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * acos(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> acos(const Quat<T> &q);
+
+    /**
+     * @brief return arccos value of this quaternion, arccos could be calculated as:
+     * \f[\arccos(q) = -\frac{\boldsymbol{v}}{||\boldsymbol{v}||}arccosh(q)\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.acos();
+     * ```
+     */
+    Quat<_Tp> acos() const;
+
+    /**
+     * @brief return arctan value of quaternion q, arctan could be calculated as:
+     * \f[\arctan(q) = -\frac{\boldsymbol{v}}{||\boldsymbol{v}||}arctanh(q\frac{\boldsymbol{v}}{||\boldsymbol{v}||})\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * atan(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> atan(const Quat<T> &q);
+
+    /**
+     * @brief return arctan value of this quaternion, arctan could be calculated as:
+     * \f[\arctan(q) = -\frac{\boldsymbol{v}}{||\boldsymbol{v}||}arctanh(q\frac{\boldsymbol{v}}{||\boldsymbol{v}||})\f]
+     * where \f$\boldsymbol{v} = [x, y, z].\f$
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.atan();
+     * ```
+     */
+    Quat<_Tp> atan() const;
+
+    /**
+     * @brief return arcsinh value of quaternion q, arcsinh could be calculated as:
+     * \f[arcsinh(q) = \ln(q + \sqrt{q^2 + 1})\f].
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * asinh(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> asinh(const Quat<T> &q);
+
+    /**
+     * @brief return arcsinh value of this quaternion, arcsinh could be calculated as:
+     * \f[arcsinh(q) = \ln(q + \sqrt{q^2 + 1})\f].
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.asinh();
+     * ```
+     */
+    Quat<_Tp> asinh() const;
+
+    /**
+     * @brief return arccosh value of quaternion q, arccosh could be calculated as:
+     * \f[arccosh(q) = \ln(q + \sqrt{q^2 - 1})\f].
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * acosh(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> acosh(const Quat<T> &q);
+
+    /**
+     * @brief return arccosh value of this quaternion, arccosh could be calculated as:
+     * \f[arccosh(q) = \ln(q + \sqrt{q^2 - 1})\f].
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.acosh();
+     * ```
+     */
+    Quat<_Tp> acosh() const;
+
+    /**
+     * @brief return arctanh value of quaternion q, arctanh could be calculated as:
+     * \f[arctanh(q) = \frac{\ln(q + 1) - \ln(1 - q)}{2}\f].
+     * @param q a quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * atanh(q);
+     * ```
+     */
+    template <typename T>
+    friend Quat<T> atanh(const Quat<T> &q);
+
+    /**
+     * @brief return arctanh value of this quaternion, arctanh could be calculated as:
+     * \f[arctanh(q) = \frac{\ln(q + 1) - \ln(1 - q)}{2}\f].
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.atanh();
+     * ```
+     */
+    Quat<_Tp> atanh() const;
+
+    /**
+     * @brief return true if this quaternion is a unit quaternion.
+     * @param eps tolerance scope of normalization. The eps could be defined as
+     *
+     * \f[eps = |1 - dotValue|\f] where \f[dotValue = (this.w^2 + this.x^2 + this.y^2 + this.z^2).\f]
+     * This function considers the quaternion normalized when dotValue lies in the range \f$[1-eps, 1+eps]\f$.
+     */
+    bool isNormal(_Tp eps=CV_QUAT_EPS) const;
+
+    /**
+     * @brief throw an error if this quaternion is not a unit quaternion.
+     * @param eps tolerance scope of normalization.
+     * @sa isNormal
+     */
+    void assertNormal(_Tp eps=CV_QUAT_EPS) const;
+
+    /**
+     * @brief transform a quaternion to a 3x3 rotation matrix.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and
+     * this function will save some computations. Otherwise, this function will normalize this
+     * quaternion first and then perform the transformation.
+     *
+     * @note Matrix A which is to be rotated should have the form
+     * \f[\begin{bmatrix}
+     * x_0& x_1& x_2&...&x_n\\
+     * y_0& y_1& y_2&...&y_n\\
+     * z_0& z_1& z_2&...&z_n
+     * \end{bmatrix}\f]
+     * where the same subscript represents a point. The shape of A is assumed to be [3, n].
+     * The points matrix A can be rotated by toRotMat3x3() * A.
+     * The result also has 3 rows and n columns.
+
+     * For example
+     * ```
+     * double angle = CV_PI;
+     * Vec3d axis{0,0,1};
+     * Quatd q_unit = Quatd::createFromAngleAxis(angle, axis); //the quaternion could also be obtained by interpolating two or more quaternions.
+     *
+     * //assume there are two points (1,0,0) and (1,0,1) to be rotated
+     * Mat pointsA = (Mat_<double>(2, 3) << 1,0,0,1,0,1);
+     * //change the shape
+     * pointsA = pointsA.t();
+     * // rotate 180 degrees around the z axis
+     * Mat new_point = q_unit.toRotMat3x3() * pointsA;
+     * // print two points
+     * cout << new_point << endl;
+     * ```
+     */
+    Matx<_Tp, 3, 3> toRotMat3x3(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief transform a quaternion to a 4x4 rotation matrix.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and
+     * this function will save some computations. Otherwise, this function will normalize this
+     * quaternion first and then perform the transformation.
+     *
+     * The operation is similar to toRotMat3x3()
+     * except that the points matrix should have the form
+     * \f[\begin{bmatrix}
+     * x_0& x_1& x_2&...&x_n\\
+     * y_0& y_1& y_2&...&y_n\\
+     * z_0& z_1& z_2&...&z_n\\
+     * 0&0&0&...&0
+     * \end{bmatrix}\f]
+     *
+     * @sa toRotMat3x3
+     */
+    Matx<_Tp, 4, 4> toRotMat4x4(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief transform this quaternion to a Vec<T, 4>.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.toVec();
+     * ```
+     */
+    Vec<_Tp, 4> toVec() const;
+
+    /**
+     * @brief transform this quaternion to a rotation vector.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and
+     * this function will save some computations.
+     * Rotation vector rVec is defined as:
+     * \f[ rVec = [\theta v_x, \theta v_y, \theta v_z]\f]
+     * where \f$\theta\f$ represents rotation angle, and \f$\boldsymbol{v}\f$ represents the normalized rotation axis.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.toRotVec();
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q.normalize().toRotVec(assumeUnit); //the answer is the same as q.toRotVec().
+     * ```
+     */
+    Vec<_Tp, 3> toRotVec(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief get the angle of the quaternion; it returns the rotation angle.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and
+     * this function will save some computations.
+     * \f[\theta = 2\arccos(\frac{w}{||q||})\f]
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.getAngle();
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q.normalize().getAngle(assumeUnit);//same as q.getAngle().
+     * ```
+     * @note It always returns a value in the range \f$[0, 2\pi]\f$.
+     */
+    _Tp getAngle(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief get the axis of the quaternion; it returns a vector of length 3.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, this quaternion is assumed to be a unit quaternion and
+     * this function will save some computations.
+     *
+     * the unit axis \f$\boldsymbol{u}\f$ is defined by
+     * \f[\begin{equation}
+     *    \begin{split}
+     *      \boldsymbol{v}
+     *      &= \boldsymbol{u} ||\boldsymbol{v}||\\
+     *      &= \boldsymbol{u}||q||sin(\frac{\theta}{2})
+     *    \end{split}
+     *    \end{equation}\f]
+     *  where \f$\boldsymbol{v}=[x, y, z]\f$ and \f$\theta\f$ represents the rotation angle.
+     *
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * q.getAxis();
+     *
+     * QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+     * q.normalize().getAxis(assumeUnit);//same as q.getAxis()
+     * ```
+     */
+    Vec<_Tp, 3> getAxis(QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT) const;
+
+    /**
+     * @brief return the dot product between quaternion \f$q\f$ and this quaternion.
+     *
+     * dot(p, q) is a good metric of how close the quaternions are.
+     * Indeed, consider the unit quaternion difference \f$p^{-1} * q\f$: its real part is dot(p, q).
+     * At the same time its real part is equal to \f$\cos(\beta/2)\f$ where \f$\beta\f$ is
+     * the angle of rotation between p and q. Therefore, the closer dot(p, q) is to 1,
+     * the smaller the rotation between them.
+     * \f[p \cdot q = p.w \cdot q.w + p.x \cdot q.x + p.y \cdot q.y + p.z \cdot q.z\f]
+     * @param q the other quaternion.
+     *
+     * For example
+     * ```
+     * Quatd q(1,2,3,4);
+     * Quatd p(5,6,7,8);
+     * p.dot(q);
+     * ```
+     */
+    _Tp dot(Quat<_Tp> q) const;
+
+    /**
+     * @brief To calculate the interpolation from \f$q_0\f$ to \f$q_1\f$ by Linear Interpolation (Lerp).
+     * For two quaternions, this interpolation curve can be written as:
+     * \f[Lerp(q_0, q_1, t) = (1 - t)q_0 + tq_1.\f]
+     * Obviously, lerp interpolates along a straight line if we think of \f$q_0\f$ and \f$q_1\f$ as two points
+     * in a plane. When \f$t = 0\f$, it returns \f$q_0\f$ and when \f$t = 1\f$, it returns \f$q_1\f$.
+     * \f$t\f$ should normally be in the range \f$[0, 1]\f$.
+     * @param q0 a quaternion used in linear interpolation.
+     * @param q1 a quaternion used in linear interpolation.
+     * @param t percent of vector \f$\overrightarrow{q_0q_1}\f$, in the range [0, 1].
+     * @note it returns a non-unit quaternion.
+     */
+    static Quat<_Tp> lerp(const Quat<_Tp> &q0, const Quat &q1, const _Tp t);
+
+    /**
+     * @brief To calculate the interpolation from \f$q_0\f$ to \f$q_1\f$ by Normalized Linear Interpolation (Nlerp).
+     * It returns the normalized quaternion of Linear Interpolation (Lerp):
+     * \f[ Nlerp(q_0, q_1, t) = \frac{(1 - t)q_0 + tq_1}{||(1 - t)q_0 + tq_1||}.\f]
+     * The interpolation always chooses the shortest path, but constant speed is not guaranteed.
+     * @param q0 a quaternion used in normalized linear interpolation.
+     * @param q1 a quaternion used in normalized linear interpolation.
+     * @param t percent of vector \f$\overrightarrow{q_0q_1}\f$, in the range [0, 1].
+     * @param assumeUnit if QUAT_ASSUME_UNIT, all input quaternions are assumed to be unit quaternions. Otherwise, all input
+     * quaternions will be normalized inside the function.
+     * @sa lerp
+     */
+    static Quat<_Tp> nlerp(const Quat<_Tp> &q0, const Quat &q1, const _Tp t, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+    /**
+     @brief To calculate the interpolation between \f$q_0\f$ and \f$q_1\f$ by Spherical Linear
+     Interpolation(Slerp), which can be defined as:
+    \f[ Slerp(q_0, q_1, t) = \frac{\sin((1-t)\theta)}{\sin(\theta)}q_0 + \frac{\sin(t\theta)}{\sin(\theta)}q_1\f]
+    where \f$\theta\f$ can be calculated as:
+    \f[\theta=\cos^{-1}(q_0\cdot q_1)\f]
+    which holds because both quaternions have unit norm.
+    @param q0 a quaternion used in Slerp.
+    @param q1 a quaternion used in Slerp.
+    @param t percent of the angle between \f$q_0\f$ and \f$q_1\f$, in the range [0, 1].
+    @param assumeUnit if QUAT_ASSUME_UNIT, all input quaternions are assumed to be unit quaternions. Otherwise, all input
+    quaternions will be normalized inside the function.
+    @param directChange if true, the interpolation will choose the nearest path.
+    @note If the interpolation angle is small, the error between Nlerp and Slerp is small. To improve efficiency and
+    avoid division by zero, we use Nlerp instead of Slerp in that case.
+    */
+    static Quat<_Tp> slerp(const Quat<_Tp> &q0, const Quat &q1, const _Tp t, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT, bool directChange=true);
+
+    /**
+     * @brief To calculate the interpolation between \f$q_0\f$,\f$q_1\f$,\f$q_2\f$,\f$q_3\f$ by Spherical and Quadrangle interpolation (Squad), which can be defined as:
+     * \f[Squad(q_i, s_i, s_{i+1}, q_{i+1}, t) = Slerp(Slerp(q_i, q_{i+1}, t), Slerp(s_i, s_{i+1}, t), 2t(1-t))\f]
+     * where
+     * \f[s_i = q_i\exp(-\frac{\log(q^*_iq_{i+1}) + \log(q^*_iq_{i-1})}{4})\f]
+     *
+     * The Squad expression is analogous to the \f$B\acute{e}zier\f$ curve, but involves spherical linear
+     * interpolation instead of simple linear interpolation. Each \f$s_i\f$ needs to be calculated by three
+     * quaternions.
+     *
+     * @param q0 the first quaternion.
+     * @param s0 the second quaternion.
+     * @param s1 the third quaternion.
+     * @param q1 the fourth quaternion.
+     * @param t interpolation parameter of quadratic and linear interpolation, in the range \f$[0, 1]\f$.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, all input quaternions are assumed to be unit quaternions. Otherwise, all input
+     * quaternions will be normalized inside the function.
+     * @param directChange if true, squad will find the nearest path to interpolate.
+     * @sa interPoint, spline
+     */
+    static Quat<_Tp> squad(const Quat<_Tp> &q0, const Quat<_Tp> &s0,
+                            const Quat<_Tp> &s1, const Quat<_Tp> &q1,
+                            const _Tp t, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT,
+                            bool directChange=true);
+
+    /**
+     * @brief This is part of the squad calculation.
+     * It calculates the intermediate quaternion \f$s_i\f$ from each triple of quaternions:
+     * \f[s_i = q_i\exp(-\frac{\log(q^*_iq_{i+1}) + \log(q^*_iq_{i-1})}{4}).\f]
+     * @param q0 the first quaternion.
+     * @param q1 the second quaternion.
+     * @param q2 the third quaternion.
+     * @param assumeUnit if QUAT_ASSUME_UNIT, all input quaternions are assumed to be unit quaternions. Otherwise, all input
+     * quaternions will be normalized inside the function.
+     * @sa squad
+     */
+    static Quat<_Tp> interPoint(const Quat<_Tp> &q0, const Quat<_Tp> &q1,
+                                 const Quat<_Tp> &q2, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+    /**
+     * @brief to calculate a quaternion which is the result of a \f$C^1\f$ continuous
+     * spline curve constructed by squad at the ratio t. Here, the interpolation values are
+     * between \f$q_1\f$ and \f$q_2\f$. \f$q_0\f$ and \f$q_3\f$ are used to ensure the \f$C^1\f$
+     * continuity. If t = 0, it returns \f$q_1\f$; if t = 1, it returns \f$q_2\f$.
+     * @param q0 the first input quaternion to ensure \f$C^1\f$ continuity.
+     * @param q1 the second input quaternion.
+     * @param q2 the third input quaternion.
+     * @param q3 the fourth input quaternion, which plays the same role as \f$q_0\f$.
+     * @param t ratio in the range [0, 1].
+     * @param assumeUnit if QUAT_ASSUME_UNIT, \f$q_0, q_1, q_2, q_3\f$ are assumed to be unit quaternions. Otherwise, all input
+     * quaternions will be normalized inside the function.
+     *
+     * For example:
+     *
+     * Suppose there are three double quaternions \f$v_0, v_1, v_2\f$ to be interpolated.
+     *
+     * Interpolation between \f$v_0\f$ and \f$v_1\f$ with a ratio \f$t_0\f$ could be calculated as
+     * ```
+     * Quatd::spline(v0, v0, v1, v2, t0);
+     * ```
+     * Interpolation between \f$v_1\f$ and \f$v_2\f$ with a ratio \f$t_0\f$ could be calculated as
+     * ```
+     * Quatd::spline(v0, v1, v2, v2, t0);
+     * ```
+     * @sa squad, slerp
+     */
+    static Quat<_Tp> spline(const Quat<_Tp> &q0, const Quat<_Tp> &q1,
+                            const Quat<_Tp> &q2, const Quat<_Tp> &q3,
+                            const _Tp t, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+
+    Quat<_Tp> operator-() const;
+
+    bool operator==(const Quat<_Tp>&) const;
+
+    Quat<_Tp> operator+(const Quat<_Tp>&) const;
+
+    Quat<_Tp>& operator+=(const Quat<_Tp>&);
+
+    Quat<_Tp> operator-(const Quat<_Tp>&) const;
+
+    Quat<_Tp>& operator-=(const Quat<_Tp>&);
+
+    Quat<_Tp>& operator*=(const Quat<_Tp>&);
+
+    Quat<_Tp>& operator*=(const _Tp&);
+
+    Quat<_Tp> operator*(const Quat<_Tp>&) const;
+
+    Quat<_Tp> operator/(const _Tp&) const;
+
+    Quat<_Tp> operator/(const Quat<_Tp>&) const;
+
+    Quat<_Tp>& operator/=(const _Tp&);
+
+    Quat<_Tp>& operator/=(const Quat<_Tp>&);
+
+    _Tp& operator[](std::size_t n);
+
+    const _Tp& operator[](std::size_t n) const;
+
+    template <typename S, typename T>
+    friend Quat<S> cv::operator*(const T, const Quat<S>&);
+
+    template <typename S, typename T>
+    friend Quat<S> cv::operator*(const Quat<S>&, const T);
+
+    template <typename S>
+    friend std::ostream& cv::operator<<(std::ostream&, const Quat<S>&);
+
+    _Tp w, x, y, z;
+
+};
+
+template <typename T>
+Quat<T> inv(const Quat<T> &q, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+template <typename T>
+Quat<T> sinh(const Quat<T> &q);
+
+template <typename T>
+Quat<T> cosh(const Quat<T> &q);
+
+template <typename T>
+Quat<T> tanh(const Quat<T> &q);
+
+template <typename T>
+Quat<T> sin(const Quat<T> &q);
+
+template <typename T>
+Quat<T> cos(const Quat<T> &q);
+
+template <typename T>
+Quat<T> tan(const Quat<T> &q);
+
+template <typename T>
+Quat<T> asinh(const Quat<T> &q);
+
+template <typename T>
+Quat<T> acosh(const Quat<T> &q);
+
+template <typename T>
+Quat<T> atanh(const Quat<T> &q);
+
+template <typename T>
+Quat<T> asin(const Quat<T> &q);
+
+template <typename T>
+Quat<T> acos(const Quat<T> &q);
+
+template <typename T>
+Quat<T> atan(const Quat<T> &q);
+
+template <typename T>
+Quat<T> power(const Quat<T> &q, const Quat<T> &p, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+template <typename T>
+Quat<T> exp(const Quat<T> &q);
+
+template <typename T>
+Quat<T> log(const Quat<T> &q, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+template <typename T, typename _T>
+Quat<T> power(const Quat<T>& q, _T x, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+template <typename T>
+Quat<T> crossProduct(const Quat<T> &p, const Quat<T> &q);
+
+template <typename S>
+Quat<S> sqrt(const Quat<S> &q, QuatAssumeType assumeUnit=QUAT_ASSUME_NOT_UNIT);
+
+template <typename S, typename T>
+Quat<S> operator*(const T, const Quat<S>&);
+
+template <typename S, typename T>
+Quat<S> operator*(const Quat<S>&, const T);
+
+template <typename S>
+std::ostream& operator<<(std::ostream&, const Quat<S>&);
+
+using Quatd = Quat<double>;
+using Quatf = Quat<float>;
+
+//! @} core
+}
+
+#include "opencv2/core/quaternion.inl.hpp"
+
+#endif /* OPENCV_CORE_QUATERNION_HPP */
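Putting the new class to work, a small end-to-end usage sketch (illustrative only, based on the API declared above):

```cpp
#include <opencv2/core/quaternion.hpp>
#include <iostream>

int main()
{
    using namespace cv;
    // Two rotations around the z axis: 90 and 180 degrees.
    Quatd q0 = Quatd::createFromAngleAxis(CV_PI / 2, Vec3d{0, 0, 1});
    Quatd q1 = Quatd::createFromAngleAxis(CV_PI,     Vec3d{0, 0, 1});

    // Spherical linear interpolation halfway between them (135 degrees).
    Quatd qt = Quatd::slerp(q0, q1, 0.5);

    // Convert back to other rotation representations.
    Matx33d R  = qt.toRotMat3x3();
    Vec3d rvec = qt.toRotVec();

    std::cout << qt << "\nangle: " << qt.getAngle()
              << "\nR:\n" << Mat(R) << "\nrvec: " << rvec << std::endl;
    return 0;
}
```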
diff --git a/modules/core/include/opencv2/core/quaternion.inl.hpp b/modules/core/include/opencv2/core/quaternion.inl.hpp
new file mode 100644
index 0000000000..769f53ed4b
--- /dev/null
+++ b/modules/core/include/opencv2/core/quaternion.inl.hpp
@@ -0,0 +1,849 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2020, Huawei Technologies Co., Ltd. All rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Author: Liangqian Kong <chargerKong@126.com>
+//         Longbu Wang <riskiest@gmail.com>
+
+#ifndef OPENCV_CORE_QUATERNION_INL_HPP
+#define OPENCV_CORE_QUATERNION_INL_HPP
+
+#ifndef OPENCV_CORE_QUATERNION_HPP
+#error This is not a standalone header. Include quaternion.hpp instead.
+#endif
+
+//@cond IGNORE
+///////////////////////////////////////////////////////////////////////////////////////
+//Implementation
+namespace cv {
+
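+// Note: the default constructor below initializes all four components to zero,
+// i.e. (0, 0, 0, 0), which is the zero quaternion rather than the identity (1, 0, 0, 0).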
+template <typename T>
+Quat<T>::Quat() : w(0), x(0), y(0), z(0) {}
+
+template <typename T>
+Quat<T>::Quat(const Vec<T, 4> &coeff):w(coeff[0]), x(coeff[1]), y(coeff[2]), z(coeff[3]){}
+
+template <typename T>
+Quat<T>::Quat(const T qw, const T qx, const T qy, const T qz):w(qw), x(qx), y(qy), z(qz){}
+
+template <typename T>
+Quat<T> Quat<T>::createFromAngleAxis(const T angle, const Vec<T, 3> &axis)
+{
+    T w, x, y, z;
+    T vNorm = std::sqrt(axis.dot(axis));
+    if (vNorm < CV_QUAT_EPS)
+    {
+        CV_Error(Error::StsBadArg, "this quaternion does not represent a rotation");
+    }
+    const T angle_half = angle * 0.5;
+    w = std::cos(angle_half);
+    const T sin_v = std::sin(angle_half);
+    const T sin_norm = sin_v / vNorm;
+    x = sin_norm * axis[0];
+    y = sin_norm * axis[1];
+    z = sin_norm * axis[2];
+    return Quat<T>(w, x, y, z);
+}
+
+template <typename T>
+Quat<T> Quat<T>::createFromRotMat(InputArray _R)
+{
+    CV_CheckTypeEQ(_R.type(), cv::traits::Type<T>::value, "");
+    if (_R.rows() != 3 || _R.cols() != 3)
+    {
+        CV_Error(Error::StsBadArg, "Cannot convert matrix to quaternion: rotation matrix should be a 3x3 matrix");
+    }
+    Matx<T, 3, 3> R;
+    _R.copyTo(R);
+
+    T S, w, x, y, z;
+    T trace = R(0, 0) + R(1, 1) + R(2, 2);
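+    // Shepperd's method: pick the branch whose divisor S is largest (driven by the
+    // trace or the dominant diagonal element) so the divisions stay numerically stable.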
+    if (trace > 0)
+    {
+        S = std::sqrt(trace + 1) * 2;
+        x = (R(1, 2) - R(2, 1)) / S;
+        y = (R(2, 0) - R(0, 2)) / S;
+        z = (R(0, 1) - R(1, 0)) / S;
+        w = -0.25 * S;
+    }
+    else if (R(0, 0) > R(1, 1) && R(0, 0) > R(2, 2))
+    {
+
+        S = std::sqrt(1.0 + R(0, 0) - R(1, 1) - R(2, 2)) * 2;
+        x = -0.25 * S;
+        y = -(R(1, 0) + R(0, 1)) / S;
+        z = -(R(0, 2) + R(2, 0)) / S;
+        w = (R(1, 2) - R(2, 1)) / S;
+    }
+    else if (R(1, 1) > R(2, 2))
+    {
+        S = std::sqrt(1.0 - R(0, 0) + R(1, 1) - R(2, 2)) * 2;
+        x = (R(0, 1) + R(1, 0)) / S;
+        y = 0.25 * S;
+        z = (R(1, 2) + R(2, 1)) / S;
+        w = (R(0, 2) - R(2, 0)) / S;
+    }
+    else
+    {
+        S = std::sqrt(1.0 - R(0, 0) - R(1, 1) + R(2, 2)) * 2;
+        x = (R(0, 2) + R(2, 0)) / S;
+        y = (R(1, 2) + R(2, 1)) / S;
+        z = 0.25 * S;
+        w = -(R(0, 1) - R(1, 0)) / S;
+    }
+    return Quat<T> (w, x, y, z);
+}
+
+template <typename T>
+Quat<T> Quat<T>::createFromRvec(InputArray _rvec)
+{
+    if (!((_rvec.cols() == 1 && _rvec.rows() == 3) || (_rvec.cols() == 3 && _rvec.rows() == 1))) {
+        CV_Error(Error::StsBadArg, "Cannot convert rotation vector to quaternion: The length of rotation vector should be 3");
+    }
+    Vec<T, 3> rvec;
+    _rvec.copyTo(rvec);
+    T psi = std::sqrt(rvec.dot(rvec));
+    if (std::abs(psi) < CV_QUAT_EPS) {
+        return Quat<T> (1, 0, 0, 0);
+    }
+    Vec<T, 3> axis = rvec / psi;
+    return createFromAngleAxis(psi, axis);
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::operator-() const
+{
+    return Quat<T>(-w, -x, -y, -z);
+}
+
+
+template <typename T>
+inline bool Quat<T>::operator==(const Quat<T> &q) const
+{
+    return (std::abs(w - q.w) < CV_QUAT_EPS && std::abs(x - q.x) < CV_QUAT_EPS && std::abs(y - q.y) < CV_QUAT_EPS && std::abs(z - q.z) < CV_QUAT_EPS);
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::operator+(const Quat<T> &q1) const
+{
+    return Quat<T>(w + q1.w, x + q1.x, y + q1.y, z + q1.z);
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::operator-(const Quat<T> &q1) const
+{
+    return Quat<T>(w - q1.w, x - q1.x, y - q1.y, z - q1.z);
+}
+
+template <typename T>
+inline Quat<T>& Quat<T>::operator+=(const Quat<T> &q1)
+{
+    w += q1.w;
+    x += q1.x;
+    y += q1.y;
+    z += q1.z;
+    return *this;
+}
+
+template <typename T>
+inline Quat<T>& Quat<T>::operator-=(const Quat<T> &q1)
+{
+    w -= q1.w;
+    x -= q1.x;
+    y -= q1.y;
+    z -= q1.z;
+    return *this;
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::operator*(const Quat<T> &q1) const
+{
+    Vec<T, 4> q{w, x, y, z};
+    Vec<T, 4> q2{q1.w, q1.x, q1.y, q1.z};
+    return Quat<T>(q * q2);
+}
+
+
+template <typename T, typename S>
+Quat<T> operator*(const Quat<T> &q1, const S a)
+{
+    return Quat<T>(a * q1.w, a * q1.x, a * q1.y, a * q1.z);
+}
+
+template <typename T, typename S>
+Quat<T> operator*(const S a, const Quat<T> &q1)
+{
+    return Quat<T>(a * q1.w, a * q1.x, a * q1.y, a * q1.z);
+}
+
+template <typename T>
+inline Quat<T>& Quat<T>::operator*=(const Quat<T> &q1)
+{
+    T qw, qx, qy, qz;
+    qw = w * q1.w - x * q1.x - y * q1.y - z * q1.z;
+    qx = x * q1.w + w * q1.x + y * q1.z - z * q1.y;
+    qy = y * q1.w + w * q1.y + z * q1.x - x * q1.z;
+    qz = z * q1.w + w * q1.z + x * q1.y - y * q1.x;
+    w = qw;
+    x = qx;
+    y = qy;
+    z = qz;
+    return *this;
+}
+
+template <typename T>
+inline Quat<T>& Quat<T>::operator/=(const Quat<T> &q1)
+{
+    Quat<T> q(*this * q1.inv());
+    w = q.w;
+    x = q.x;
+    y = q.y;
+    z = q.z;
+    return *this;
+}
+template <typename T>
+Quat<T>& Quat<T>::operator*=(const T &q1)
+{
+    w *= q1;
+    x *= q1;
+    y *= q1;
+    z *= q1;
+    return *this;
+}
+
+template <typename T>
+inline Quat<T>& Quat<T>::operator/=(const T &a)
+{
+    const T a_inv = 1.0 / a;
+    w *= a_inv;
+    x *= a_inv;
+    y *= a_inv;
+    z *= a_inv;
+    return *this;
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::operator/(const T &a) const
+{
+    const T a_inv = 1.0 / a;
+    return Quat<T>(w * a_inv, x * a_inv, y * a_inv, z * a_inv);
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::operator/(const Quat<T> &q) const
+{
+    return *this * q.inv();
+}
+
+template <typename T>
+inline const T& Quat<T>::operator[](std::size_t n) const
+{
+    switch (n) {
+        case 0:
+            return w;
+        case 1:
+            return x;
+        case 2:
+            return y;
+        case 3:
+            return z;
+        default:
+            CV_Error(Error::StsOutOfRange, "subscript exceeds the index range");
+    }
+}
+
+template <typename T>
+inline T& Quat<T>::operator[](std::size_t n)
+{
+    switch (n) {
+        case 0:
+            return w;
+        case 1:
+            return x;
+        case 2:
+            return y;
+        case 3:
+            return z;
+        default:
+            CV_Error(Error::StsOutOfRange, "subscript exceeds the index range");
+    }
+}
+
+template <typename T>
+std::ostream & operator<<(std::ostream &os, const Quat<T> &q)
+{
+    os << "Quat " << Vec<T, 4>{q.w, q.x, q.y, q.z};
+    return os;
+}
+
+template <typename T>
+inline T Quat<T>::at(size_t index) const
+{
+    return (*this)[index];
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::conjugate() const
+{
+    return Quat<T>(w, -x, -y, -z);
+}
+
+template <typename T>
+inline T Quat<T>::norm() const
+{
+    return std::sqrt(dot(*this));
+}
+
+template <typename T>
+Quat<T> exp(const Quat<T> &q)
+{
+    return q.exp();
+}
+
+template <typename T>
+Quat<T> Quat<T>::exp() const
+{
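+    //exp(q) = e^w * (cos||v|| + sin||v|| * v / ||v||), where v is the vector part.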
+    Vec<T, 3> v{x, y, z};
+    T normV = std::sqrt(v.dot(v));
+    T k = normV < CV_QUAT_EPS ? 1 : std::sin(normV) / normV;
+    return std::exp(w) * Quat<T>(std::cos(normV), v[0] * k, v[1] * k, v[2] * k);
+}
+
+template <typename T>
+Quat<T> log(const Quat<T> &q, QuatAssumeType assumeUnit)
+{
+    return q.log(assumeUnit);
+}
+
+template <typename T>
+Quat<T> Quat<T>::log(QuatAssumeType assumeUnit) const
+{
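+    //log(q) = (ln||q||, arccos(w / ||q||) * v / ||v||), where v is the vector part.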
+    Vec<T, 3> v{x, y, z};
+    T vNorm = std::sqrt(v.dot(v));
+    if (assumeUnit)
+    {
+        T k = vNorm < CV_QUAT_EPS ? 1 : std::acos(w) / vNorm;
+        return Quat<T>(0, v[0] * k, v[1] * k, v[2] * k);
+    }
+    T qNorm = norm();
+    if (qNorm < CV_QUAT_EPS)
+    {
+        CV_Error(Error::StsBadArg, "Cannot apply this quaternion to log function: undefined");
+    }
+    T k = vNorm < CV_QUAT_EPS ? 1 : std::acos(w / qNorm) / vNorm;
+    return Quat<T>(std::log(qNorm), v[0] * k, v[1] * k, v[2] * k);
+}
+
+template <typename T, typename _T>
+inline Quat<T> power(const Quat<T> &q1, _T alpha, QuatAssumeType assumeUnit)
+{
+    return q1.power(alpha, assumeUnit);
+}
+
+template <typename T>
+template <typename _T>
+inline Quat<T> Quat<T>::power(_T alpha, QuatAssumeType assumeUnit) const
+{
+    if (x * x + y * y + z * z > CV_QUAT_EPS)
+    {
+        T angle = getAngle(assumeUnit);
+        Vec<T, 3> axis = getAxis(assumeUnit);
+        if (assumeUnit)
+        {
+            return createFromAngleAxis(alpha * angle, axis);
+        }
+        return std::pow(norm(), alpha) * createFromAngleAxis(alpha * angle, axis);
+    }
+    else
+    {
+        return std::pow(norm(), alpha) * Quat<T>(w, x, y, z);
+    }
+}
+
+
+template <typename T>
+inline Quat<T> sqrt(const Quat<T> &q, QuatAssumeType assumeUnit)
+{
+    return q.sqrt(assumeUnit);
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::sqrt(QuatAssumeType assumeUnit) const
+{
+    return power(0.5, assumeUnit);
+}
+
+
+template <typename T>
+inline Quat<T> power(const Quat<T> &p, const Quat<T> &q, QuatAssumeType assumeUnit)
+{
+    return p.power(q, assumeUnit);
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::power(const Quat<T> &q, QuatAssumeType assumeUnit) const
+{
+    return cv::exp(q * log(assumeUnit));
+}
+
+template <typename T>
+inline T Quat<T>::dot(Quat<T> q1) const
+{
+    return w * q1.w + x * q1.x + y * q1.y + z * q1.z;
+}
+
+
+template <typename T>
+inline Quat<T> crossProduct(const Quat<T> &p, const Quat<T> &q)
+{
+    return p.crossProduct(q);
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::crossProduct(const Quat<T> &q) const
+{
+    return Quat<T> (0, y * q.z - z * q.y, z * q.x - x * q.z, x * q.y - y * q.x);
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::normalize() const
+{
+    T normVal = norm();
+    if (normVal < CV_QUAT_EPS)
+    {
+        CV_Error(Error::StsBadArg, "Cannot normalize this quaternion: the norm is too small.");
+    }
+    return Quat<T>(w / normVal, x / normVal, y / normVal, z / normVal) ;
+}
+
+template <typename T>
+inline Quat<T> inv(const Quat<T> &q, QuatAssumeType assumeUnit)
+{
+    return q.inv(assumeUnit);
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::inv(QuatAssumeType assumeUnit) const
+{
+    if (assumeUnit)
+    {
+        return conjugate();
+    }
+    T norm2 = dot(*this);
+    if (norm2 < CV_QUAT_EPS)
+    {
+        CV_Error(Error::StsBadArg, "This quaternion do not have inverse quaternion");
+    }
+    return conjugate() / norm2;
+}
+
+template <typename T>
+inline Quat<T> sinh(const Quat<T> &q)
+{
+    return q.sinh();
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::sinh() const
+{
+    Vec<T, 3> v{x, y, z};
+    T vNorm = std::sqrt(v.dot(v));
+    T k = vNorm < CV_QUAT_EPS ? 1 : std::cosh(w) * std::sin(vNorm) / vNorm;
+    return Quat<T>(std::sinh(w) * std::cos(vNorm), v[0] * k, v[1] * k, v[2] * k);
+}
+
+
+template <typename T>
+inline Quat<T> cosh(const Quat<T> &q)
+{
+    return q.cosh();
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::cosh() const
+{
+    Vec<T, 3> v{x, y, z};
+    T vNorm = std::sqrt(v.dot(v));
+    T k = vNorm < CV_QUAT_EPS ? 1 : std::sinh(w) * std::sin(vNorm) / vNorm;
+    return Quat<T>(std::cosh(w) * std::cos(vNorm), v[0] * k, v[1] * k, v[2] * k);
+}
+
+template <typename T>
+inline Quat<T> tanh(const Quat<T> &q)
+{
+    return q.tanh();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::tanh() const
+{
+    return sinh() * cosh().inv();
+}
+
+
+template <typename T>
+inline Quat<T> sin(const Quat<T> &q)
+{
+    return q.sin();
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::sin() const
+{
+    Vec<T, 3> v{x, y, z};
+    T vNorm = std::sqrt(v.dot(v));
+    T k = vNorm < CV_QUAT_EPS ? 1 : std::cos(w) * std::sinh(vNorm) / vNorm;
+    return Quat<T>(std::sin(w) * std::cosh(vNorm), v[0] * k, v[1] * k, v[2] * k);
+}
+
+template <typename T>
+inline Quat<T> cos(const Quat<T> &q)
+{
+    return q.cos();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::cos() const
+{
+    Vec<T, 3> v{x, y, z};
+    T vNorm = std::sqrt(v.dot(v));
+    T k = vNorm < CV_QUAT_EPS ? 1 : std::sin(w) * std::sinh(vNorm) / vNorm;
+    return Quat<T>(std::cos(w) * std::cosh(vNorm), -v[0] * k, -v[1] * k, -v[2] * k);
+}
+
+template <typename T>
+inline Quat<T> tan(const Quat<T> &q)
+{
+    return q.tan();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::tan() const
+{
+    return sin() * cos().inv();
+}
+
+template <typename T>
+inline Quat<T> asinh(const Quat<T> &q)
+{
+    return q.asinh();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::asinh() const
+{
+    return cv::log(*this + cv::power(*this * *this + Quat<T>(1, 0, 0, 0), 0.5));
+}
+
+template <typename T>
+inline Quat<T> acosh(const Quat<T> &q)
+{
+    return q.acosh();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::acosh() const
+{
+    return cv::log(*this + cv::power(*this * *this - Quat<T>(1,0,0,0), 0.5));
+}
+
+template <typename T>
+inline Quat<T> atanh(const Quat<T> &q)
+{
+    return q.atanh();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::atanh() const
+{
+    Quat<T> ident(1, 0, 0, 0);
+    Quat<T> c1 = (ident + *this).log();
+    Quat<T> c2 = (ident - *this).log();
+    return 0.5 * (c1 - c2);
+}
+
+template <typename T>
+inline Quat<T> asin(const Quat<T> &q)
+{
+    return q.asin();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::asin() const
+{
+    Quat<T> v(0, x, y, z);
+    T vNorm = v.norm();
+    T k = vNorm < CV_QUAT_EPS ? 1 : vNorm;
+    return -v / k * (*this * v / k).asinh();
+}
+
+template <typename T>
+inline Quat<T> acos(const Quat<T> &q)
+{
+    return q.acos();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::acos() const
+{
+    Quat<T> v(0, x, y, z);
+    T vNorm = v.norm();
+    T k = vNorm < CV_QUAT_EPS ? 1 : vNorm;
+    return -v / k * acosh();
+}
+
+template <typename T>
+inline Quat<T> atan(const Quat<T> &q)
+{
+    return q.atan();
+}
+
+template <typename T>
+inline Quat<T> Quat<T>::atan() const
+{
+    Quat<T> v(0, x, y, z);
+    T vNorm = v.norm();
+    T k = vNorm < CV_QUAT_EPS ? 1 : vNorm;
+    return -v / k * (*this * v / k).atanh();
+}
+
+template <typename T>
+inline T Quat<T>::getAngle(QuatAssumeType assumeUnit) const
+{
+    if (assumeUnit)
+    {
+        return 2 * std::acos(w);
+    }
+    if (norm() < CV_QUAT_EPS)
+    {
+        CV_Error(Error::StsBadArg, "This quaternion does not represent a rotation");
+    }
+    return 2 * std::acos(w / norm());
+}
+
+template <typename T>
+inline Vec<T, 3> Quat<T>::getAxis(QuatAssumeType assumeUnit) const
+{
+    T angle = getAngle(assumeUnit);
+    const T sin_v = std::sin(angle * 0.5);
+    if (assumeUnit)
+    {
+        return Vec<T, 3>{x, y, z} / sin_v;
+    }
+    return Vec<T, 3> {x, y, z} / (norm() * sin_v);
+}
+
+template <typename T>
+Matx<T, 4, 4> Quat<T>::toRotMat4x4(QuatAssumeType assumeUnit) const
+{
+    T a = w, b = x, c = y, d = z;
+    if (!assumeUnit)
+    {
+        Quat<T> qTemp = normalize();
+        a = qTemp.w;
+        b = qTemp.x;
+        c = qTemp.y;
+        d = qTemp.z;
+    }
+    Matx<T, 4, 4> R{
+        1 - 2 * (c * c + d * d), 2 * (b * c - a * d)    , 2 * (b * d + a * c)    , 0,
+        2 * (b * c + a * d)    , 1 - 2 * (b * b + d * d), 2 * (c * d - a * b)    , 0,
+        2 * (b * d - a * c)    , 2 * (c * d + a * b)    , 1 - 2 * (b * b + c * c), 0,
+        0                      , 0                      , 0                      , 1,
+    };
+    return R;
+}
+
+template <typename T>
+Matx<T, 3, 3> Quat<T>::toRotMat3x3(QuatAssumeType assumeUnit) const
+{
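+    //Standard conversion of a normalized quaternion w + x*i + y*j + z*k to a 3x3 rotation matrix.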
+    T a = w, b = x, c = y, d = z;
+    if (!assumeUnit)
+    {
+        Quat<T> qTemp = normalize();
+        a = qTemp.w;
+        b = qTemp.x;
+        c = qTemp.y;
+        d = qTemp.z;
+    }
+    Matx<T, 3, 3> R{
+        1 - 2 * (c * c + d * d), 2 * (b * c - a * d)    , 2 * (b * d + a * c),
+        2 * (b * c + a * d)    , 1 - 2 * (b * b + d * d), 2 * (c * d - a * b),
+        2 * (b * d - a * c)    , 2 * (c * d + a * b)    , 1 - 2 * (b * b + c * c)
+    };
+    return R;
+}
+
+template <typename T>
+Vec<T, 3> Quat<T>::toRotVec(QuatAssumeType assumeUnit) const
+{
+    T angle = getAngle(assumeUnit);
+    Vec<T, 3> axis = getAxis(assumeUnit);
+    return angle * axis;
+}
+
+template <typename T>
+Vec<T, 4> Quat<T>::toVec() const
+{
+    return Vec<T, 4>{w, x, y, z};
+}
+
+template <typename T>
+Quat<T> Quat<T>::lerp(const Quat<T> &q0, const Quat<T> &q1, const T t)
+{
+    return (1 - t) * q0 + t * q1;
+}
+
+template <typename T>
+Quat<T> Quat<T>::slerp(const Quat<T> &q0, const Quat<T> &q1, const T t, QuatAssumeType assumeUnit, bool directChange)
+{
+    Quat<T> v0(q0);
+    Quat<T> v1(q1);
+    if (!assumeUnit)
+    {
+        v0 = v0.normalize();
+        v1 = v1.normalize();
+    }
+    T cosTheta = v0.dot(v1);
+    constexpr T DOT_THRESHOLD = 0.995;
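+    //Nearly parallel quaternions make slerp ill-conditioned (sinTheta -> 0), so fall back to nlerp.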
+    if (cosTheta > DOT_THRESHOLD)
+    {
+        return nlerp(v0, v1, t, QUAT_ASSUME_UNIT);
+    }
+
+    if (directChange && cosTheta < 0)
+    {
+        v0 = -v0;
+        cosTheta = -cosTheta;
+    }
+    T sinTheta = std::sqrt(1 - cosTheta * cosTheta);
+    T angle = atan2(sinTheta, cosTheta);
+    return (std::sin((1 - t) * angle) / (sinTheta) * v0 + std::sin(t * angle) / (sinTheta) * v1).normalize();
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::nlerp(const Quat<T> &q0, const Quat<T> &q1, const T t, QuatAssumeType assumeUnit)
+{
+    Quat<T> v0(q0), v1(q1);
+    if (v1.dot(v0) < 0)
+    {
+        v0 = -v0;
+    }
+    if (assumeUnit)
+    {
+        return ((1 - t) * v0 + t * v1).normalize();
+    }
+    v0 = v0.normalize();
+    v1 = v1.normalize();
+    return ((1 - t) * v0 + t * v1).normalize();
+}
+
+
+template <typename T>
+inline bool Quat<T>::isNormal(T eps) const
+{
+    T normVar = norm();
+    return (normVar > 1 - eps) && (normVar < 1 + eps);
+}
+
+template <typename T>
+inline void Quat<T>::assertNormal(T eps) const
+{
+    if (!isNormal(eps))
+        CV_Error(Error::StsBadArg, "Quaternion should be normalized");
+}
+
+
+template <typename T>
+inline Quat<T> Quat<T>::squad(const Quat<T> &q0, const Quat<T> &q1,
+                            const Quat<T> &q2, const Quat<T> &q3,
+                            const T t, QuatAssumeType assumeUnit,
+                            bool directChange)
+{
+    Quat<T> v0(q0), v1(q1), v2(q2), v3(q3);
+    if (!assumeUnit)
+    {
+        v0 = v0.normalize();
+        v1 = v1.normalize();
+        v2 = v2.normalize();
+        v3 = v3.normalize();
+    }
+
+    Quat<T> c0 = slerp(v0, v3, t, assumeUnit, directChange);
+    Quat<T> c1 = slerp(v1, v2, t, assumeUnit, directChange);
+    return slerp(c0, c1, 2 * t * (1 - t), assumeUnit, directChange);
+}
+
+template <typename T>
+Quat<T> Quat<T>::interPoint(const Quat<T> &q0, const Quat<T> &q1,
+                            const Quat<T> &q2, QuatAssumeType assumeUnit)
+{
+    Quat<T> v0(q0), v1(q1), v2(q2);
+    if (!assumeUnit)
+    {
+        v0 = v0.normalize();
+        v1 = v1.normalize();
+        v2 = v2.normalize();
+    }
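+    //Intermediate control point for squad, built from the log-maps of the neighbouring quaternions.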
+    return v1 * cv::exp(-(cv::log(v1.conjugate() * v0, assumeUnit) + (cv::log(v1.conjugate() * v2, assumeUnit))) / 4);
+}
+
+template <typename T>
+Quat<T> Quat<T>::spline(const Quat<T> &q0, const Quat<T> &q1, const Quat<T> &q2, const Quat<T> &q3, const T t, QuatAssumeType assumeUnit)
+{
+    Quat<T> v0(q0), v1(q1), v2(q2), v3(q3);
+    if (!assumeUnit)
+    {
+        v0 = v0.normalize();
+        v1 = v1.normalize();
+        v2 = v2.normalize();
+        v3 = v3.normalize();
+    }
+    T cosTheta;
+    std::vector<Quat<T>> vec{v0, v1, v2, v3};
+    for (size_t i = 0; i < 3; ++i)
+    {
+        cosTheta = vec[i].dot(vec[i + 1]);
+        if (cosTheta < 0)
+        {
+            vec[i + 1] = -vec[i + 1];
+        }
+    }
+    Quat<T> s1 = interPoint(vec[0], vec[1], vec[2], QUAT_ASSUME_UNIT);
+    Quat<T> s2 = interPoint(vec[1], vec[2], vec[3], QUAT_ASSUME_UNIT);
+    return squad(vec[1], s1, s2, vec[2], t, assumeUnit, QUAT_ASSUME_NOT_UNIT);
+}
+
+}  // namespace
+//! @endcond
+
+#endif /*OPENCV_CORE_QUATERNION_INL_HPP*/
diff --git a/modules/core/test/test_quaternion.cpp b/modules/core/test/test_quaternion.cpp
new file mode 100644
index 0000000000..0025674ec7
--- /dev/null
+++ b/modules/core/test/test_quaternion.cpp
@@ -0,0 +1,255 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "test_precomp.hpp"
+#include <opencv2/core/quaternion.hpp>
+#include <opencv2/ts/cuda_test.hpp>
+using namespace cv;
+namespace opencv_test{ namespace {
+class QuatTest: public ::testing::Test {
+protected:
+    void SetUp() override
+    {
+        q1 = {1,2,3,4};
+        q2 = {2.5,-2,3.5,4};
+        q1Unit = {1 / sqrt(30), sqrt(2) / sqrt(15), sqrt(3) / sqrt(10), 2 * sqrt(2) / sqrt(15)};
+        q1Inv = {1.0 / 30, -1.0 / 15, -1.0 / 10, -2.0 / 15};
+    }
+    double scalar = 2.5;
+    double angle = CV_PI;
+    int qNorm2 = 2;
+    Vec<double, 3> axis{1, 1, 1};
+    Vec<double, 3> unAxis{0, 0, 0};
+    Vec<double, 3> unitAxis{1.0 / sqrt(3), 1.0 / sqrt(3), 1.0 / sqrt(3)};
+    Quatd q3 = Quatd::createFromAngleAxis(angle, axis);
+    Quatd q3UnitAxis = Quatd::createFromAngleAxis(angle, unitAxis);
+    Quat<double> q3Norm2 = q3 * qNorm2;
+
+    Quat<double> q1Inv;
+    Quat<double> q1;
+    Quat<double> q2;
+    Quat<double> q1Unit;
+
+    Quatd qNull{0, 0, 0, 0};
+    Quatd qIdentity{1, 0, 0, 0};
+    QuatAssumeType assumeUnit = QUAT_ASSUME_UNIT;
+
+};
+
+TEST_F(QuatTest, constructor){
+    Vec<double, 4> coeff{1, 2, 3, 4};
+    EXPECT_EQ(Quat<double> (coeff), q1);
+    EXPECT_EQ(q3, q3UnitAxis);
+    EXPECT_ANY_THROW(Quatd::createFromAngleAxis(angle, unAxis));
+    Matx33d R1{
+        -1.0 / 3, 2.0 / 3 , 2.0 / 3,
+        2.0 / 3 , -1.0 / 3, 2.0 / 3,
+        2.0 / 3 , 2.0 / 3 , -1.0 / 3
+    };
+    Matx33d R2{
+        -2.0 / 3, -2.0 / 3, -1.0 / 3,
+        -2.0 / 3, 1.0 / 3, 2.0 / 3,
+        -1.0 / 3, 2.0 / 3, -2.0 / 3
+    };
+    Matx33d R3{
+        0.818181818181, 0.181818181818, 0.54545455454,
+        0.545454545545, -0.54545454545, -0.6363636364,
+        0.181818181818, 0.818181818182, -0.5454545455
+    };
+    Matx33d R4{
+        0.818181818181, -0.181818181818, 0.54545455454,
+        0.545454545545, 0.54545454545, -0.6363636364,
+        -0.181818181818, 0.818181818182, 0.5454545455
+    };
+    Quatd qMat = Quatd::createFromRotMat(R1);
+    Quatd qMat2 = Quatd::createFromRotMat(R2);
+    Quatd qMat3 = Quatd::createFromRotMat(R3);
+    Quatd qMat4 = Quatd::createFromRotMat(R4);
+    EXPECT_EQ(qMat2, Quatd(0, -0.408248290463, 0.816496580927, 0.408248904638));
+    EXPECT_EQ(qMat3, Quatd(-0.426401432711,-0.852802865422, -0.213200716355, -0.2132007163));
+    EXPECT_EQ(qMat, q3);
+    EXPECT_EQ(qMat4, -Quatd(0.852802865422, 0.426401432711221, 0.2132007163556, 0.2132007163));
+
+    Vec3d rot{angle / sqrt(3),angle / sqrt(3), angle / sqrt(3)};
+    Quatd rotQuad{0, 1.0 / sqrt(3), 1. / sqrt(3), 1. / sqrt(3)};
+    Quatd qRot = Quatd::createFromRvec(rot);
+    EXPECT_EQ(qRot, rotQuad);
+    EXPECT_EQ(Quatd::createFromRvec(Vec3d(0, 0, 0)), qIdentity);
+}
+
+TEST_F(QuatTest, basicfuns){
+    Quat<double> q1Conj{1, -2, -3, -4};
+    EXPECT_EQ(q3Norm2.normalize(), q3);
+    EXPECT_EQ(q1.norm(), sqrt(30));
+    EXPECT_EQ(q1.normalize(), q1Unit);
+    EXPECT_ANY_THROW(qNull.normalize());
+    EXPECT_EQ(q1.conjugate(), q1Conj);
+    EXPECT_EQ(q1.inv(), q1Inv);
+    EXPECT_EQ(inv(q1), q1Inv);
+    EXPECT_EQ(q3.inv(assumeUnit) * q3, qIdentity);
+    EXPECT_EQ(q1.inv() * q1, qIdentity);
+    EXPECT_ANY_THROW(inv(qNull));
+    EXPECT_NO_THROW(q1.at(0));
+    EXPECT_ANY_THROW(q1.at(4));
+
+    Matx33d R{
+        -2.0 / 3, 2.0 / 15 , 11.0 / 15,
+        2.0 / 3 , -1.0 / 3 , 2.0 / 3  ,
+        1.0 / 3 , 14.0 / 15, 2.0 / 15
+    };
+    Matx33d q1RotMat = q1.toRotMat3x3();
+    EXPECT_MAT_NEAR(q1RotMat, R, 1e-6);
+    Vec3d z_axis{0,0,1};
+    Quatd q_unit1 = Quatd::createFromAngleAxis(angle, z_axis);
+    Mat pointsA = (Mat_<double>(2, 3) << 1,0,0,1,0,1);
+    pointsA = pointsA.t();
+    Mat new_point = q_unit1.toRotMat3x3() * pointsA;
+    Mat afterRo = (Mat_<double>(3, 2) << -1,-1,0,0,0,1);
+    EXPECT_MAT_NEAR(afterRo, new_point, 1e-6);
+    EXPECT_ANY_THROW(qNull.toRotVec());
+    Vec3d rodVec{CV_PI/sqrt(3), CV_PI/sqrt(3), CV_PI/sqrt(3)};
+    Vec3d q3Rod = q3.toRotVec();
+    EXPECT_NEAR(q3Rod[0], rodVec[0], 1e-6);
+    EXPECT_NEAR(q3Rod[1], rodVec[1], 1e-6);
+    EXPECT_NEAR(q3Rod[2], rodVec[2], 1e-6);
+
+    EXPECT_EQ(log(q1Unit, assumeUnit), log(q1Unit));
+    EXPECT_EQ(log(qIdentity, assumeUnit), qNull);
+    EXPECT_EQ(log(q3), Quatd(0, angle * unitAxis[0] / 2, angle * unitAxis[1] / 2, angle * unitAxis[2] / 2));
+    EXPECT_ANY_THROW(log(qNull));
+    EXPECT_EQ(log(Quatd(exp(1), 0, 0, 0)), qIdentity);
+
+    EXPECT_EQ(exp(qIdentity), Quatd(exp(1), 0, 0, 0));
+    EXPECT_EQ(exp(qNull), qIdentity);
+    EXPECT_EQ(exp(Quatd(0, angle * unitAxis[0] / 2, angle * unitAxis[1] / 2, angle * unitAxis[2] / 2)), q3);
+
+    EXPECT_EQ(power(q3, 2), Quatd::createFromAngleAxis(2*angle, axis));
+    EXPECT_EQ(power(Quatd(0.5, 0.5, 0.5, 0.5), 2.0, assumeUnit), Quatd(-0.5,0.5,0.5,0.5));
+    EXPECT_EQ(power(Quatd(0.5, 0.5, 0.5, 0.5), -2.0), Quatd(-0.5,-0.5,-0.5,-0.5));
+    EXPECT_EQ(sqrt(q1), power(q1, 0.5));
+    EXPECT_EQ(exp(q3 * log(q1)), power(q1, q3));
+    EXPECT_EQ(exp(q1 * log(q3)), power(q3, q1, assumeUnit));
+    EXPECT_EQ(crossProduct(q1, q3), (q1 * q3 - q3 * q1) / 2);
+    EXPECT_EQ(sinh(qNull), qNull);
+    EXPECT_EQ(sinh(q1), (exp(q1) - exp(-q1)) / 2);
+    EXPECT_EQ(sinh(qIdentity), Quatd(sinh(1), 0, 0, 0));
+    EXPECT_EQ(sinh(q1), Quatd(0.73233760604, -0.44820744998, -0.67231117497, -0.8964148999610843));
+    EXPECT_EQ(cosh(qNull), qIdentity);
+    EXPECT_EQ(cosh(q1), Quatd(0.961585117636, -0.34135217456, -0.51202826184, -0.682704349122));
+    EXPECT_EQ(tanh(q1), sinh(q1) * inv(cosh(q1)));
+    EXPECT_EQ(sin(qNull), qNull);
+    EXPECT_EQ(sin(q1), Quatd(91.78371578403, 21.88648685303, 32.829730279543, 43.772973706058));
+    EXPECT_EQ(cos(qNull), qIdentity);
+    EXPECT_EQ(cos(q1), Quatd(58.9336461679, -34.0861836904, -51.12927553569, -68.17236738093));
+    EXPECT_EQ(tan(q1), sin(q1)/cos(q1));
+    EXPECT_EQ(sinh(asinh(q1)), q1);
+    Quatd c1 = asinh(sinh(q1));
+    EXPECT_EQ(sinh(c1), sinh(q1));
+    EXPECT_EQ(cosh(acosh(q1)), q1);
+    c1 = acosh(cosh(q1));
+    EXPECT_EQ(cosh(c1), cosh(q1));
+    EXPECT_EQ(tanh(atanh(q1)), q1);
+    c1 = atanh(tanh(q1));
+    EXPECT_EQ(tanh(q1), tanh(c1));
+    EXPECT_EQ(asin(sin(q1)), q1);
+    EXPECT_EQ(sin(asin(q1)), q1);
+    EXPECT_EQ(acos(cos(q1)), q1);
+    EXPECT_EQ(cos(acos(q1)), q1);
+    EXPECT_EQ(atan(tan(q3)), q3);
+    EXPECT_EQ(tan(atan(q1)), q1);
+}
+
+TEST_F(QuatTest, operators){
+    Quatd minusQ{-1, -2, -3, -4};
+    Quatd qAdd{3.5, 0, 6.5, 8};
+    Quatd qMinus{-1.5, 4, -0.5, 0};
+    Quatd qMultq{-20, 1, -5, 27};
+    Quatd qMults{2.5, 5.0, 7.5, 10.0};
+    Quatd qDvss{1.0 / 2.5, 2.0 / 2.5, 3.0 / 2.5, 4.0 / 2.5};
+    Quatd qOrigin(q1);
+
+    EXPECT_EQ(-q1, minusQ);
+    EXPECT_EQ(q1 + q2, qAdd);
+    EXPECT_EQ(q1 - q2, qMinus);
+    EXPECT_EQ(q1 * q2, qMultq);
+    EXPECT_EQ(q1 * scalar, qMults);
+    EXPECT_EQ(scalar * q1, qMults);
+    EXPECT_EQ(q1 / q1, qIdentity);
+    EXPECT_EQ(q1 / scalar, qDvss);
+    q1 += q2;
+    EXPECT_EQ(q1, qAdd);
+    q1 -= q2;
+    EXPECT_EQ(q1, qOrigin);
+    q1 *= q2;
+    EXPECT_EQ(q1, qMultq);
+    q1 /= q2;
+    EXPECT_EQ(q1, qOrigin);
+    q1 *= scalar;
+    EXPECT_EQ(q1, qMults);
+    q1 /= scalar;
+    EXPECT_EQ(q1, qOrigin);
+    EXPECT_NO_THROW(q1[0]);
+    EXPECT_NO_THROW(q1.at(0));
+    EXPECT_ANY_THROW(q1[4]);
+    EXPECT_ANY_THROW(q1.at(4));
+}
+
+TEST_F(QuatTest, quatAttrs){
+    double angleQ1 = 2 * acos(1.0 / sqrt(30));
+    Vec3d axis1{0.3713906763541037, 0.557086014, 0.742781352};
+    Vec<double, 3> q1axis1 = q1.getAxis();
+
+    EXPECT_EQ(angleQ1, q1.getAngle());
+    EXPECT_EQ(angleQ1, q1Unit.getAngle());
+    EXPECT_EQ(angleQ1, q1Unit.getAngle(assumeUnit));
+    EXPECT_EQ(0, qIdentity.getAngle());
+    EXPECT_ANY_THROW(qNull.getAxis());
+    EXPECT_NEAR(axis1[0], q1axis1[0], 1e-6);
+    EXPECT_NEAR(axis1[1], q1axis1[1], 1e-6);
+    EXPECT_NEAR(axis1[2], q1axis1[2], 1e-6);
+    EXPECT_NEAR(q3Norm2.norm(), qNorm2, 1e-6);
+    EXPECT_EQ(q3Norm2.getAngle(), angle);
+    Vec<double, 3> q3Norm2Axis = q3Norm2.getAxis();
+    EXPECT_NEAR(q3Norm2Axis[0], unitAxis[0], 1e-6);
+    EXPECT_NEAR(q3Norm2Axis[1], unitAxis[1], 1e-6);
+    EXPECT_NEAR(q3Norm2Axis[2], unitAxis[2], 1e-6);
+}
+
+TEST_F(QuatTest, interpolation){
+    Quatd qNoRot = Quatd::createFromAngleAxis(0, axis);
+    Quatd qLerpInter(1.0 / 2, sqrt(3) / 6, sqrt(3) / 6, sqrt(3) / 6);
+    EXPECT_EQ(Quatd::lerp(qNoRot, q3, 0), qNoRot);
+    EXPECT_EQ(Quatd::lerp(qNoRot, q3, 1), q3);
+    EXPECT_EQ(Quatd::lerp(qNoRot, q3, 0.5), qLerpInter);
+    Quatd q3NrNn2 = qNoRot * qNorm2;
+    EXPECT_EQ(Quatd::nlerp(q3NrNn2, q3Norm2, 0), qNoRot);
+    EXPECT_EQ(Quatd::nlerp(q3NrNn2, q3Norm2, 1), q3);
+    EXPECT_EQ(Quatd::nlerp(q3NrNn2, q3Norm2, 0.5), qLerpInter.normalize());
+    EXPECT_EQ(Quatd::nlerp(qNoRot, q3, 0, assumeUnit), qNoRot);
+    EXPECT_EQ(Quatd::nlerp(qNoRot, q3, 1, assumeUnit), q3);
+    EXPECT_EQ(Quatd::nlerp(qNoRot, q3, 0.5, assumeUnit), qLerpInter.normalize());
+    Quatd q3Minus(-q3);
+    EXPECT_EQ(Quatd::nlerp(qNoRot, q3, 0.4), -Quatd::nlerp(qNoRot, q3Minus, 0.4));
+    EXPECT_EQ(Quatd::slerp(qNoRot, q3, 0, assumeUnit), qNoRot);
+    EXPECT_EQ(Quatd::slerp(qNoRot, q3, 1, assumeUnit), q3);
+    EXPECT_EQ(Quatd::slerp(qNoRot, q3, 0.5, assumeUnit), -Quatd::nlerp(qNoRot, -q3, 0.5, assumeUnit));
+    EXPECT_EQ(Quatd::slerp(qNoRot, q1, 0.5), Quatd(0.76895194, 0.2374325, 0.35614876, 0.47486501));
+    EXPECT_EQ(Quatd::slerp(-qNoRot, q1, 0.5), Quatd(0.76895194, 0.2374325, 0.35614876, 0.47486501));
+    EXPECT_EQ(Quatd::slerp(qNoRot, -q1, 0.5), -Quatd::slerp(-qNoRot, q1, 0.5));
+
+    Quat<double> tr1 = Quatd::createFromAngleAxis(0, axis);
+    Quat<double> tr2 = Quatd::createFromAngleAxis(angle / 2, axis);
+    Quat<double> tr3 = Quatd::createFromAngleAxis(angle, axis);
+    Quat<double> tr4 = Quatd::createFromAngleAxis(angle, Vec3d{-1/sqrt(2),0,1/(sqrt(2))});
+    EXPECT_ANY_THROW(Quatd::spline(qNull, tr1, tr2, tr3, 0));
+    EXPECT_EQ(Quatd::spline(tr1, tr2, tr3, tr4, 0), tr2);
+    EXPECT_EQ(Quatd::spline(tr1, tr2, tr3, tr4, 1), tr3);
+    EXPECT_EQ(Quatd::spline(tr1, tr2, tr3, tr4, 0.6, assumeUnit), Quatd::spline(tr1, tr2, tr3, tr4, 0.6));
+    EXPECT_EQ(Quatd::spline(tr1, tr2, tr3, tr3, 0.5), Quatd::spline(tr1, -tr2, tr3, tr3, 0.5));
+    EXPECT_EQ(Quatd::spline(tr1, tr2, tr3, tr3, 0.5), -Quatd::spline(-tr1, -tr2, -tr3, tr3, 0.5));
+    EXPECT_EQ(Quatd::spline(tr1, tr2, tr3, tr3, 0.5), Quatd(0.336889853392, 0.543600719487, 0.543600719487, 0.543600719487));
+}
+
+} // namespace
+
+} // namespace opencv_test
\ No newline at end of file
diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
index 5e2375621e..72b5c08672 100644
--- a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
+++ b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
@@ -363,8 +363,8 @@ void StereographicProjector::mapForward(float x, float y, float &u, float &v)
 
     float r = sinf(v_) / (1 - cosf(v_));
 
-    u = scale * r * cos(u_);
-    v = scale * r * sin(u_);
+    u = scale * r * std::cos(u_);
+    v = scale * r * std::sin(u_);
 }
 
 inline
@@ -625,7 +625,7 @@ void TransverseMercatorProjector::mapBackward(float u, float v, float &x, float
     v /= scale;
 
     float v_ = asinf( sinf(v) / coshf(u) );
-    float u_ = atan2f( sinhf(u), cos(v) );
+    float u_ = atan2f( sinhf(u), std::cos(v) );
 
     float cosv = cosf(v_);
     float x_ = cosv * sinf(u_);

From 34c4e454c501369d1a3593dc9d0955367d54d16b Mon Sep 17 00:00:00 2001
From: Maxim Pashchenkov <maxim.pashchenkov@intel.com>
Date: Thu, 19 Nov 2020 21:22:19 +0300
Subject: [PATCH 127/152] Added small cmake fix

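Without the ${} dereference, CMake assigns the literal string
"ORT_INSTALL_DIR" to ONNXRT_ROOT_DIR instead of the variable's value.

---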
---
 cmake/FindONNX.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/FindONNX.cmake b/cmake/FindONNX.cmake
index 51aa77b460..56dd6d5098 100644
--- a/cmake/FindONNX.cmake
+++ b/cmake/FindONNX.cmake
@@ -4,7 +4,7 @@ set(ONNXRT_ROOT_DIR "" CACHE PATH "ONNX Runtime install directory")
 
 # For now, check the old name ORT_INSTALL_DIR
 if(ORT_INSTALL_DIR AND NOT ONNXRT_ROOT_DIR)
-  set(ONNXRT_ROOT_DIR ORT_INSTALL_DIR)
+  set(ONNXRT_ROOT_DIR ${ORT_INSTALL_DIR})
 endif()
 
 if(ONNXRT_ROOT_DIR)

From c4c9cdd2b1dd7bd879d678f8c7d2e780989045cc Mon Sep 17 00:00:00 2001
From: Jonathan Cole <joncole04412@gmail.com>
Date: Thu, 19 Nov 2020 16:20:32 -0500
Subject: [PATCH 128/152] Merge pull request #18855 from
 Rightpoint:feature/colejd/add-apple-conversions-to-framework-builds

Expose CGImage <-> Mat conversion for iOS platforms

* Add apple_conversions to framework builds

This exposes CGImage <-> Mat conversion.

* Export Mat <-> CGImage methods on iOS targets

* Add CGImage converters to iOS objc helper class

* Add CF_RETURNS_RETAINED annotations to methods returning CGImageRef
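
A minimal usage sketch (not part of this patch; assumes an Objective-C++
translation unit): CF_RETURNS_RETAINED documents that the returned CGImageRef
is owned by the caller, who must release it.

```
cv::Mat mat(480, 640, CV_8UC4, cv::Scalar(0, 255, 0, 255));
CGImageRef image = MatToCGImage(mat);  // returned at +1 retain count
// ... hand the CGImage to CoreGraphics / UIKit ...
CGImageRelease(image);                 // caller is responsible for the release
```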
---
 modules/imgcodecs/CMakeLists.txt                 |  2 +-
 .../imgcodecs/include/opencv2/imgcodecs/ios.h    |  2 ++
 .../imgcodecs/include/opencv2/imgcodecs/macosx.h |  2 +-
 modules/imgcodecs/misc/objc/ios/Mat+Converters.h |  3 +++
 .../imgcodecs/misc/objc/ios/Mat+Converters.mm    | 16 ++++++++++++++++
 .../imgcodecs/misc/objc/macosx/Mat+Converters.h  |  2 +-
 modules/imgcodecs/src/apple_conversions.h        |  2 +-
 7 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/modules/imgcodecs/CMakeLists.txt b/modules/imgcodecs/CMakeLists.txt
index 80f7e1c248..5a8faa9d05 100644
--- a/modules/imgcodecs/CMakeLists.txt
+++ b/modules/imgcodecs/CMakeLists.txt
@@ -113,7 +113,7 @@ file(GLOB imgcodecs_ext_hdrs
      "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/legacy/*.h"
      )
 
-if(APPLE)
+if(APPLE OR APPLE_FRAMEWORK)
   list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/apple_conversions.h)
   list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/apple_conversions.mm)
 endif()
diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/ios.h b/modules/imgcodecs/include/opencv2/imgcodecs/ios.h
index 0f15820892..5f17218170 100644
--- a/modules/imgcodecs/include/opencv2/imgcodecs/ios.h
+++ b/modules/imgcodecs/include/opencv2/imgcodecs/ios.h
@@ -50,6 +50,8 @@
 //! @addtogroup imgcodecs_ios
 //! @{
 
+CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image) CF_RETURNS_RETAINED;
+CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist = false);
 CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image);
 CV_EXPORTS void UIImageToMat(const UIImage* image,
                              cv::Mat& m, bool alphaExist = false);
diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h b/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h
index f5d9c082c4..cfb0770700 100644
--- a/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h
+++ b/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h
@@ -12,7 +12,7 @@
 //! @addtogroup imgcodecs_macosx
 //! @{
 
-CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image);
+CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image) CF_RETURNS_RETAINED;
 CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist = false);
 CV_EXPORTS NSImage* MatToNSImage(const cv::Mat& image);
 CV_EXPORTS void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist = false);
diff --git a/modules/imgcodecs/misc/objc/ios/Mat+Converters.h b/modules/imgcodecs/misc/objc/ios/Mat+Converters.h
index 8c185f884a..a3ee005c18 100644
--- a/modules/imgcodecs/misc/objc/ios/Mat+Converters.h
+++ b/modules/imgcodecs/misc/objc/ios/Mat+Converters.h
@@ -20,6 +20,9 @@ NS_ASSUME_NONNULL_BEGIN
 
 CV_EXPORTS @interface Mat (Converters)
 
+-(CGImageRef)toCGImage CF_RETURNS_RETAINED;
+-(instancetype)initWithCGImage:(CGImageRef)image;
+-(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist;
 -(UIImage*)toUIImage;
 -(instancetype)initWithUIImage:(UIImage*)image;
 -(instancetype)initWithUIImage:(UIImage*)image alphaExist:(BOOL)alphaExist;
diff --git a/modules/imgcodecs/misc/objc/ios/Mat+Converters.mm b/modules/imgcodecs/misc/objc/ios/Mat+Converters.mm
index 3ea3117267..69250eb994 100644
--- a/modules/imgcodecs/misc/objc/ios/Mat+Converters.mm
+++ b/modules/imgcodecs/misc/objc/ios/Mat+Converters.mm
@@ -9,6 +9,22 @@
 
 @implementation Mat (Converters)
 
+-(CGImageRef)toCGImage {
+    return MatToCGImage(self.nativeRef);
+}
+
+-(instancetype)initWithCGImage:(CGImageRef)image {
+    return [self initWithCGImage:image alphaExist:NO];
+}
+
+-(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist {
+    self = [self init];
+    if (self) {
+        CGImageToMat(image, self.nativeRef, (bool)alphaExist);
+    }
+    return self;
+}
+
 -(UIImage*)toUIImage {
     return MatToUIImage(self.nativeRef);
 }
diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
index d87887372d..63ac476599 100644
--- a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
+++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h
@@ -20,7 +20,7 @@ NS_ASSUME_NONNULL_BEGIN
 
 CV_EXPORTS @interface Mat (Converters)
 
--(CGImageRef)toCGImage;
+-(CGImageRef)toCGImage CF_RETURNS_RETAINED;
 -(instancetype)initWithCGImage:(CGImageRef)image;
 -(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist;
 -(NSImage*)toNSImage;
diff --git a/modules/imgcodecs/src/apple_conversions.h b/modules/imgcodecs/src/apple_conversions.h
index 2762424379..27e8955bfc 100644
--- a/modules/imgcodecs/src/apple_conversions.h
+++ b/modules/imgcodecs/src/apple_conversions.h
@@ -7,5 +7,5 @@
 #import <ImageIO/ImageIO.h>
 #include "opencv2/core.hpp"
 
-CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image);
+CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image) CF_RETURNS_RETAINED;
 CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist);

From c996fd1c06aba607b2ce576e5c35bb8383e6e794 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20Gorschl=C3=BCter?=
 <felix.gorschlueter@igd.fraunhofer.de>
Date: Mon, 9 Nov 2020 14:51:32 +0100
Subject: [PATCH 129/152] Small improvements to persistence-API doc

---
 .../core/include/opencv2/core/persistence.hpp |  9 ++++---
 modules/core/test/test_io.cpp                 | 26 +++++++++++++++++++
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/modules/core/include/opencv2/core/persistence.hpp b/modules/core/include/opencv2/core/persistence.hpp
index 9e3f8f6914..276f640323 100644
--- a/modules/core/include/opencv2/core/persistence.hpp
+++ b/modules/core/include/opencv2/core/persistence.hpp
@@ -403,8 +403,8 @@ public:
 
     /**
      * @brief Simplified writing API to use with bindings.
-     * @param name Name of the written object
-     * @param val Value of the written object
+     * @param name Name of the written object. When writing to sequences (a.k.a. "arrays"), pass an empty string.
+     * @param val Value of the written object.
      */
     CV_WRAP void write(const String& name, int val);
     /// @overload
@@ -437,9 +437,10 @@ public:
     CV_WRAP void writeComment(const String& comment, bool append = false);
 
     /** @brief Starts to write a nested structure (sequence or a mapping).
-    @param name name of the structure (if it's a member of parent mapping, otherwise it should be empty
+    @param name name of the structure. When writing to sequences (a.k.a. "arrays"), pass an empty string.
     @param flags type of the structure (FileNode::MAP or FileNode::SEQ (both with optional FileNode::FLOW)).
-    @param typeName usually an empty string
+    @param typeName optional name of the type you store. The effect of setting this depends on the storage format.
+    That is, if the format supports storing type information, this parameter is used.
     */
     CV_WRAP void startWriteStruct(const String& name, int flags, const String& typeName=String());
 
diff --git a/modules/core/test/test_io.cpp b/modules/core/test/test_io.cpp
index e695300d4b..d30c485368 100644
--- a/modules/core/test/test_io.cpp
+++ b/modules/core/test/test_io.cpp
@@ -1640,6 +1640,32 @@ TEST(Core_InputOutput, FileStorage_free_file_after_exception)
     ASSERT_EQ(0, std::remove(fileName.c_str()));
 }
 
+TEST(Core_InputOutput, FileStorage_write_to_sequence)
+{
+    const std::vector<std::string> formatExts = { ".yml", ".json", ".xml" };
+    const std::string fileName = "FileStorage_write_to_sequence";
+
+    for (const auto& ext : formatExts)
+    {
+        FileStorage fs(fileName + ext, FileStorage::WRITE);
+        std::vector<int> in = { 23, 42 };
+        fs.startWriteStruct("some_sequence", cv::FileNode::SEQ);
+        for (int i : in)
+            fs.write("", i);
+        fs.endWriteStruct();
+        fs.release();
+
+        FileStorage fsIn(fileName + ext, FileStorage::READ);
+        FileNode seq = fsIn["some_sequence"];
+        FileNodeIterator it = seq.begin(), it_end = seq.end();
+        std::vector<int> out;
+        for (; it != it_end; ++it)
+            out.push_back((int)*it);
+
+        EXPECT_EQ(in, out);
+    }
+}
+
 TEST(Core_InputOutput, FileStorage_YAML_parse_multiple_documents)
 {
     const std::string filename = "FileStorage_YAML_parse_multiple_documents.yml";

From ac24a72e669a7550516dc92304792e3800b6c85c Mon Sep 17 00:00:00 2001
From: Julien <182520+JulienMaille@users.noreply.github.com>
Date: Fri, 20 Nov 2020 12:14:00 +0100
Subject: [PATCH 130/152] Merge pull request #18841 from JulienMaille:patch-2

Fixing dnn Resize layer for variable input size

* Fix onnx loading of resize/upsample layers for different opset

* Group all DynamicResize tests

* Clean up scales checks

* Simplify branching
---
 modules/dnn/src/onnx/onnx_importer.cpp  | 76 +++++++++++++------------
 modules/dnn/test/test_onnx_importer.cpp |  7 ++-
 2 files changed, 47 insertions(+), 36 deletions(-)

diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 5289feef57..f5db2033ca 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -1746,43 +1746,45 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             for (int i = 1; i < node_proto.input_size(); i++)
                 CV_Assert(layer_id.find(node_proto.input(i)) == layer_id.end());
 
-            String interp_mode;
             if (layerParams.has("coordinate_transformation_mode"))
-                interp_mode = layerParams.get<String>("coordinate_transformation_mode");
-            else
-                interp_mode = layerParams.get<String>("mode");
-            CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");
-
-            layerParams.set("align_corners", interp_mode == "align_corners");
-            Mat shapes = getBlob(node_proto, node_proto.input_size() - 1);
-            CV_CheckEQ(shapes.size[0], 4, "");
-            CV_CheckEQ(shapes.size[1], 1, "");
-            CV_CheckDepth(shapes.depth(), shapes.depth() == CV_32S || shapes.depth() == CV_32F, "");
-            if (shapes.depth() == CV_32F)
-                shapes.convertTo(shapes, CV_32S);
-            int height = shapes.at<int>(2);
-            int width  = shapes.at<int>(3);
-            if (hasDynamicShapes)
             {
-                layerParams.set("zoom_factor_x", width);
-                layerParams.set("zoom_factor_y", height);
+                String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
+                CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");
+
+                layerParams.set("align_corners", interp_mode == "align_corners");
+                if (layerParams.get<String>("mode") == "linear")
+                {
+                    layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
+                                            "opencv_linear" : "bilinear");
+                }
+            }
+            if (layerParams.get<String>("mode") == "linear" && framework_name == "pytorch")
+                layerParams.set("mode", "opencv_linear");
+
+            // input = [X, scales], [X, roi, scales] or [X, roi, scales, sizes]
+            int foundScaleId = hasDynamicShapes ? node_proto.input_size() - 1
+                                                : node_proto.input_size() > 2 ? 2 : 1;
+
+            Mat scales = getBlob(node_proto, foundScaleId);
+            if (scales.total() == 4)
+            {
+                layerParams.set("zoom_factor_y", scales.at<float>(2));
+                layerParams.set("zoom_factor_x", scales.at<float>(3));
             }
             else
             {
-                if (node_proto.input_size() == 3) {
-                    IterShape_t shapeIt = outShapes.find(node_proto.input(0));
-                    CV_Assert(shapeIt != outShapes.end());
-                    MatShape scales = shapeIt->second;
-                    height *= scales[2];
-                    width *= scales[3];
+                const std::string& inputLast = node_proto.input(node_proto.input_size() - 1);
+                if (constBlobs.find(inputLast) != constBlobs.end())
+                {
+                    Mat shapes = getBlob(inputLast);
+                    CV_CheckEQ(shapes.size[0], 4, "");
+                    CV_CheckEQ(shapes.size[1], 1, "");
+                    CV_CheckDepth(shapes.depth(), shapes.depth() == CV_32S || shapes.depth() == CV_32F, "");
+                    if (shapes.depth() == CV_32F)
+                        shapes.convertTo(shapes, CV_32S);
+                    layerParams.set("width", shapes.at<int>(3));
+                    layerParams.set("height", shapes.at<int>(2));
                 }
-                layerParams.set("width", width);
-                layerParams.set("height", height);
-            }
-
-            if (layerParams.get<String>("mode") == "linear") {
-                layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
-                                        "opencv_linear" : "bilinear");
             }
             replaceLayerParam(layerParams, "mode", "interpolation");
         }
@@ -1822,10 +1824,14 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             else
             {
                 // scales as input
-                Mat scales = getBlob(node_proto, 1);
-                CV_Assert(scales.total() == 4);
-                layerParams.set("zoom_factor_y", scales.at<float>(2));
-                layerParams.set("zoom_factor_x", scales.at<float>(3));
+                const std::string& input1 = node_proto.input(1);
+                if (constBlobs.find(input1) != constBlobs.end())
+                {
+                    Mat scales = getBlob(input1);
+                    CV_Assert(scales.total() == 4);
+                    layerParams.set("zoom_factor_y", scales.at<float>(2));
+                    layerParams.set("zoom_factor_x", scales.at<float>(3));
+                }
             }
             replaceLayerParam(layerParams, "mode", "interpolation");
         }
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 21be4fecb2..897b95ad8e 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -518,7 +518,12 @@ TEST_P(Test_ONNX_layers, Broadcast)
 
 TEST_P(Test_ONNX_layers, DynamicResize)
 {
-    testONNXModels("dynamic_resize", npy, 0, 0, false, true, 2);
+    testONNXModels("dynamic_resize_9", npy, 0, 0, false, true, 2);
+    testONNXModels("dynamic_resize_10", npy, 0, 0, false, true, 2);
+    testONNXModels("dynamic_resize_11", npy, 0, 0, false, true, 2);
+    testONNXModels("dynamic_resize_scale_9", npy, 0, 0, false, true, 2);
+    testONNXModels("dynamic_resize_scale_10", npy, 0, 0, false, true, 2);
+    testONNXModels("dynamic_resize_scale_11", npy, 0, 0, false, true, 2);
 }
 
 TEST_P(Test_ONNX_layers, Div)

From 2255973b0f38591550ae08ed71dbc9056f317b96 Mon Sep 17 00:00:00 2001
From: Nathan Godwin <nathanrgodwin@gmail.com>
Date: Fri, 20 Nov 2020 05:25:17 -0600
Subject: [PATCH 131/152] Merge pull request #18371 from
 nathanrgodwin:sqpnp_dev

Added SQPnP algorithm to SolvePnP

* Added sqpnp

* Fixed test case

* Added fix for duplicate point checking and inverse func reuse

* Changes for 3x speedup

Changed norm method (significant speed increase), changed nearest rotation computation to FOAM

* Added symmetric 3x3 inverse and unrolled loops

* Fixed error with SVD

* Fixed an error with indices

Indices were initialized negative; when the nullspace was large, the points
coplanar, and the rotation near zero, the indices were never updated.
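
A minimal calling sketch (hypothetical camera and point values, not part of
this patch), using the new flag:

```
std::vector<cv::Point3f> obj = {{0,0,0}, {1,0,0}, {0,1,0}, {1,1,0}};
std::vector<cv::Point2f> img = {{320,240}, {420,240}, {320,340}, {420,340}};
cv::Matx33d K(500,   0, 320,
                0, 500, 240,
                0,   0,   1);
cv::Mat rvec, tvec;  // SQPnP needs no initial guess and accepts >= 3 points
cv::solvePnP(obj, img, K, cv::noArray(), rvec, tvec, false, cv::SOLVEPNP_SQPNP);
```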
---
 modules/calib3d/doc/calib3d.bib               |   8 +
 modules/calib3d/include/opencv2/calib3d.hpp   |   5 +
 modules/calib3d/src/solvepnp.cpp              |  15 +-
 modules/calib3d/src/sqpnp.cpp                 | 775 ++++++++++++++++++
 modules/calib3d/src/sqpnp.hpp                 | 194 +++++
 modules/calib3d/test/test_solvepnp_ransac.cpp |   4 +
 6 files changed, 999 insertions(+), 2 deletions(-)
 create mode 100644 modules/calib3d/src/sqpnp.cpp
 create mode 100644 modules/calib3d/src/sqpnp.hpp

diff --git a/modules/calib3d/doc/calib3d.bib b/modules/calib3d/doc/calib3d.bib
index 57989b34fd..a7e5a23982 100644
--- a/modules/calib3d/doc/calib3d.bib
+++ b/modules/calib3d/doc/calib3d.bib
@@ -39,3 +39,11 @@
   year={2013},
   publisher={IEEE}
 }
+
+@inproceedings{Terzakis20,
+  author = {Terzakis, George and Lourakis, Manolis},
+  year = {2020},
+  month = {09},
+  pages = {},
+  title = {A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem}
+}
diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index 22c11cfac7..812c6be108 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -464,6 +464,7 @@ enum SolvePnPMethod {
                               //!<   - point 1: [ squareLength / 2,  squareLength / 2, 0]
                               //!<   - point 2: [ squareLength / 2, -squareLength / 2, 0]
                               //!<   - point 3: [-squareLength / 2, -squareLength / 2, 0]
+    SOLVEPNP_SQPNP       = 8, //!< SQPnP: A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem @cite Terzakis20
 #ifndef CV_DOXYGEN
     SOLVEPNP_MAX_COUNT        //!< Used for count
 #endif
@@ -835,6 +836,9 @@ It requires 4 coplanar object points defined in the following order:
   - point 1: [ squareLength / 2,  squareLength / 2, 0]
   - point 2: [ squareLength / 2, -squareLength / 2, 0]
   - point 3: [-squareLength / 2, -squareLength / 2, 0]
+-   **SOLVEPNP_SQPNP** Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the
+Perspective-n-Point Problem" by G. Terzakis and M.Lourakis (@cite Terzakis20). It requires 3 or more points.
+
 
 The function estimates the object pose given a set of object points, their corresponding image
 projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
@@ -958,6 +962,7 @@ a 3D point expressed in the world frame into the camera frame:
          - point 1: [ squareLength / 2,  squareLength / 2, 0]
          - point 2: [ squareLength / 2, -squareLength / 2, 0]
          - point 3: [-squareLength / 2, -squareLength / 2, 0]
+     -  With **SOLVEPNP_SQPNP**, the number of input points must be >= 3
  */
 CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
                             InputArray cameraMatrix, InputArray distCoeffs,
diff --git a/modules/calib3d/src/solvepnp.cpp b/modules/calib3d/src/solvepnp.cpp
index 0e3a1e8f22..cac04c4869 100644
--- a/modules/calib3d/src/solvepnp.cpp
+++ b/modules/calib3d/src/solvepnp.cpp
@@ -47,6 +47,7 @@
 #include "p3p.h"
 #include "ap3p.h"
 #include "ippe.hpp"
+#include "sqpnp.hpp"
 #include "opencv2/calib3d/calib3d_c.h"
 #include <opencv2/core/utils/logger.hpp>
 
@@ -751,7 +752,8 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
 
     Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
     int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
-    CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
+    CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess)
+                || (npoints >= 3 && flags == SOLVEPNP_SQPNP) )
                && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
 
     opoints = opoints.reshape(3, npoints);
@@ -936,6 +938,14 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
             }
         } catch (...) { }
     }
+    else if (flags == SOLVEPNP_SQPNP)
+    {
+        Mat undistortedPoints;
+        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
+
+        sqpnp::PoseSolver solver;
+        solver.solve(opoints, undistortedPoints, vec_rvecs, vec_tvecs);
+    }
     /*else if (flags == SOLVEPNP_DLS)
     {
         Mat undistortedPoints;
@@ -963,7 +973,8 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
         vec_tvecs.push_back(tvec);
     }*/
     else
-        CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
+        CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, "
+            "SOLVEPNP_EPNP, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE or SOLVEPNP_SQPNP");
 
     CV_Assert(vec_rvecs.size() == vec_tvecs.size());
 
diff --git a/modules/calib3d/src/sqpnp.cpp b/modules/calib3d/src/sqpnp.cpp
new file mode 100644
index 0000000000..10ea96c423
--- /dev/null
+++ b/modules/calib3d/src/sqpnp.cpp
@@ -0,0 +1,775 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html
+
+// This file is based on file issued with the following license:
+
+/*
+BSD 3-Clause License
+
+Copyright (c) 2020, George Terzakis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "precomp.hpp"
+#include "sqpnp.hpp"
+
+#include <opencv2/calib3d.hpp>
+
+namespace cv {
+namespace sqpnp {
+
+const double PoseSolver::RANK_TOLERANCE = 1e-7;
+const double PoseSolver::SQP_SQUARED_TOLERANCE = 1e-10;
+const double PoseSolver::SQP_DET_THRESHOLD = 1.001;
+const double PoseSolver::ORTHOGONALITY_SQUARED_ERROR_THRESHOLD = 1e-8;
+const double PoseSolver::EQUAL_VECTORS_SQUARED_DIFF = 1e-10;
+const double PoseSolver::EQUAL_SQUARED_ERRORS_DIFF = 1e-6;
+const double PoseSolver::POINT_VARIANCE_THRESHOLD = 1e-5;
+const double PoseSolver::SQRT3 = std::sqrt(3);
+const int PoseSolver::SQP_MAX_ITERATION = 15;
+
+//No overflow checking is done here; since this is not public, all call
+//instances are assumed to be valid
+template <typename tp, int snrows, int sncols,
+    int dnrows, int dncols>
+    void set(int row, int col, cv::Matx<tp, dnrows, dncols>& dest,
+        const cv::Matx<tp, snrows, sncols>& source)
+{
+    for (int y = 0; y < snrows; y++)
+    {
+        for (int x = 0; x < sncols; x++)
+        {
+            dest(row + y, col + x) = source(y, x);
+        }
+    }
+}
+
+PoseSolver::PoseSolver()
+    : num_null_vectors_(-1),
+    num_solutions_(0)
+{
+}
+
+
+void PoseSolver::solve(InputArray objectPoints, InputArray imagePoints, OutputArrayOfArrays rvecs,
+    OutputArrayOfArrays tvecs)
+{
+    //Input checking
+    int objType = objectPoints.getMat().type();
+    CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
+        "Type of objectPoints must be CV_32FC3 or CV_64FC3");
+
+    int imgType = imagePoints.getMat().type();
+    CV_CheckType(imgType, imgType == CV_32FC2 || imgType == CV_64FC2,
+        "Type of imagePoints must be CV_32FC2 or CV_64FC2");
+
+    CV_Assert(objectPoints.rows() == 1 || objectPoints.cols() == 1);
+    CV_Assert(objectPoints.rows() >= 3 || objectPoints.cols() >= 3);
+    CV_Assert(imagePoints.rows() == 1 || imagePoints.cols() == 1);
+    CV_Assert(imagePoints.rows() * imagePoints.cols() == objectPoints.rows() * objectPoints.cols());
+
+    Mat _imagePoints;
+    if (imgType == CV_32FC2)
+    {
+        imagePoints.getMat().convertTo(_imagePoints, CV_64F);
+    }
+    else
+    {
+        _imagePoints = imagePoints.getMat();
+    }
+
+    Mat _objectPoints;
+    if (objType == CV_32FC3)
+    {
+        objectPoints.getMat().convertTo(_objectPoints, CV_64F);
+    }
+    else
+    {
+        _objectPoints = objectPoints.getMat();
+    }
+
+    num_null_vectors_ = -1;
+    num_solutions_ = 0;
+
+    computeOmega(_objectPoints, _imagePoints);
+    solveInternal();
+
+    int depthRot = rvecs.fixedType() ? rvecs.depth() : CV_64F;
+    int depthTrans = tvecs.fixedType() ? tvecs.depth() : CV_64F;
+
+    rvecs.create(num_solutions_, 1, CV_MAKETYPE(depthRot, rvecs.fixedType() && rvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
+    tvecs.create(num_solutions_, 1, CV_MAKETYPE(depthTrans, tvecs.fixedType() && tvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
+
+    for (int i = 0; i < num_solutions_; i++)
+    {
+
+        Mat rvec;
+        Mat rotation = Mat(solutions_[i].r_hat).reshape(1, 3);
+        Rodrigues(rotation, rvec);
+
+        rvecs.getMatRef(i) = rvec;
+        tvecs.getMatRef(i) = Mat(solutions_[i].t);
+    }
+}
+
+void PoseSolver::computeOmega(InputArray objectPoints, InputArray imagePoints)
+{
+    omega_ = cv::Matx<double, 9, 9>::zeros();
+    cv::Matx<double, 3, 9> qa_sum = cv::Matx<double, 3, 9>::zeros();
+
+    cv::Point2d sum_img(0, 0);
+    cv::Point3d sum_obj(0, 0, 0);
+    double sq_norm_sum = 0;
+
+    Mat _imagePoints = imagePoints.getMat();
+    Mat _objectPoints = objectPoints.getMat();
+
+    int n = _objectPoints.cols * _objectPoints.rows;
+
+    for (int i = 0; i < n; i++)
+    {
+        const cv::Point2d& img_pt = _imagePoints.at<cv::Point2d>(i);
+        const cv::Point3d& obj_pt = _objectPoints.at<cv::Point3d>(i);
+
+        sum_img += img_pt;
+        sum_obj += obj_pt;
+
+        const double& x = img_pt.x, & y = img_pt.y;
+        const double& X = obj_pt.x, & Y = obj_pt.y, & Z = obj_pt.z;
+        double sq_norm = x * x + y * y;
+        sq_norm_sum += sq_norm;
+
+        double X2 = X * X,
+            XY = X * Y,
+            XZ = X * Z,
+            Y2 = Y * Y,
+            YZ = Y * Z,
+            Z2 = Z * Z;
+
+        omega_(0, 0) += X2;
+        omega_(0, 1) += XY;
+        omega_(0, 2) += XZ;
+        omega_(1, 1) += Y2;
+        omega_(1, 2) += YZ;
+        omega_(2, 2) += Z2;
+
+
+        //Populating this manually saves operations by only calculating upper triangle
+        omega_(0, 6) += -x * X2; omega_(0, 7) += -x * XY; omega_(0, 8) += -x * XZ;
+        omega_(1, 7) += -x * Y2; omega_(1, 8) += -x * YZ;
+        omega_(2, 8) += -x * Z2;
+
+        omega_(3, 6) += -y * X2; omega_(3, 7) += -y * XY; omega_(3, 8) += -y * XZ;
+        omega_(4, 7) += -y * Y2; omega_(4, 8) += -y * YZ;
+        omega_(5, 8) += -y * Z2;
+
+
+        omega_(6, 6) += sq_norm * X2; omega_(6, 7) += sq_norm * XY; omega_(6, 8) += sq_norm * XZ;
+        omega_(7, 7) += sq_norm * Y2; omega_(7, 8) += sq_norm * YZ;
+        omega_(8, 8) += sq_norm * Z2;
+
+        //Compute qa_sum
+        qa_sum(0, 0) += X; qa_sum(0, 1) += Y; qa_sum(0, 2) += Z;
+        qa_sum(1, 3) += X; qa_sum(1, 4) += Y; qa_sum(1, 5) += Z;
+
+        qa_sum(0, 6) += -x * X; qa_sum(0, 7) += -x * Y; qa_sum(0, 8) += -x * Z;
+        qa_sum(1, 6) += -y * X; qa_sum(1, 7) += -y * Y; qa_sum(1, 8) += -y * Z;
+
+        qa_sum(2, 0) += -x * X; qa_sum(2, 1) += -x * Y; qa_sum(2, 2) += -x * Z;
+        qa_sum(2, 3) += -y * X; qa_sum(2, 4) += -y * Y; qa_sum(2, 5) += -y * Z;
+
+        qa_sum(2, 6) += sq_norm * X; qa_sum(2, 7) += sq_norm * Y; qa_sum(2, 8) += sq_norm * Z;
+    }
+
+
+    omega_(1, 6) = omega_(0, 7); omega_(2, 6) = omega_(0, 8); omega_(2, 7) = omega_(1, 8);
+    omega_(4, 6) = omega_(3, 7); omega_(5, 6) = omega_(3, 8); omega_(5, 7) = omega_(4, 8);
+    omega_(7, 6) = omega_(6, 7); omega_(8, 6) = omega_(6, 8); omega_(8, 7) = omega_(7, 8);
+
+
+    omega_(3, 3) = omega_(0, 0); omega_(3, 4) = omega_(0, 1); omega_(3, 5) = omega_(0, 2);
+    omega_(4, 4) = omega_(1, 1); omega_(4, 5) = omega_(1, 2);
+    omega_(5, 5) = omega_(2, 2);
+
+    //Mirror upper triangle to lower triangle
+    for (int r = 0; r < 9; r++)
+    {
+        for (int c = 0; c < r; c++)
+        {
+            omega_(r, c) = omega_(c, r);
+        }
+    }
+
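+    //q relates the translation to the rotation (q * t = -qa_sum * r), so that
+    //t = p_ * r with p_ = -q^{-1} * qa_sum; see solveInternal().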
+    cv::Matx<double, 3, 3> q;
+    q(0, 0) = n; q(0, 1) = 0; q(0, 2) = -sum_img.x;
+    q(1, 0) = 0; q(1, 1) = n; q(1, 2) = -sum_img.y;
+    q(2, 0) = -sum_img.x; q(2, 1) = -sum_img.y; q(2, 2) = sq_norm_sum;
+
+    double inv_n = 1.0 / n;
+    double detQ = n * (n * sq_norm_sum - sum_img.y * sum_img.y - sum_img.x * sum_img.x);
+    double point_coordinate_variance = detQ * inv_n * inv_n * inv_n;
+
+    CV_Assert(point_coordinate_variance >= POINT_VARIANCE_THRESHOLD);
+
+    Matx<double, 3, 3> q_inv;
+    analyticalInverse3x3Symm(q, q_inv);
+
+    p_ = -q_inv * qa_sum;
+
+    omega_ += qa_sum.t() * p_;
+
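+    // Omega is symmetric PSD, so its SVD doubles as an eigendecomposition: s_ holds the eigenvalues (descending) and u_ the eigenvectors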
+    cv::SVD omega_svd(omega_, cv::SVD::FULL_UV);
+    s_ = omega_svd.w;
+    u_ = cv::Mat(omega_svd.vt.t());
+
+    CV_Assert(s_(0) >= 1e-7);
+
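+    // Count the trailing near-zero singular values; the corresponding columns of u_ span Omega's (approximate) null space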
+    while (s_(7 - num_null_vectors_) < RANK_TOLERANCE) num_null_vectors_++;
+
+    CV_Assert(++num_null_vectors_ <= 6);
+
+    point_mean_ = cv::Vec3d(sum_obj.x / n, sum_obj.y / n, sum_obj.z / n);
+}
+
+void PoseSolver::solveInternal()
+{
+    double min_sq_err = std::numeric_limits<double>::max();
+    int num_eigen_points = num_null_vectors_ > 0 ? num_null_vectors_ : 1;
+
+    for (int i = 9 - num_eigen_points; i < 9; i++)
+    {
+        const cv::Matx<double, 9, 1> e = SQRT3 * u_.col(i);
+        double orthogonality_sq_err = orthogonalityError(e);
+
+        SQPSolution solutions[2];
+
+        //If e is orthogonal, we can skip SQP
+        if (orthogonality_sq_err < ORTHOGONALITY_SQUARED_ERROR_THRESHOLD)
+        {
+            solutions[0].r_hat = det3x3(e) * e;
+            solutions[0].t = p_ * solutions[0].r_hat;
+            checkSolution(solutions[0], min_sq_err);
+        }
+        else
+        {
+            Matx<double, 9, 1> r;
+            nearestRotationMatrix(e, r);
+            solutions[0] = runSQP(r);
+            solutions[0].t = p_ * solutions[0].r_hat;
+            checkSolution(solutions[0], min_sq_err);
+
+            nearestRotationMatrix(-e, r);
+            solutions[1] = runSQP(r);
+            solutions[1].t = p_ * solutions[1].r_hat;
+            checkSolution(solutions[1], min_sq_err);
+        }
+    }
+
+    int c = 1;
+
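+    // If the best error is still large relative to the next singular values, keep probing eigenvectors beyond the null space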
+    while (min_sq_err > 3 * s_[9 - num_eigen_points - c] && 9 - num_eigen_points - c > 0)
+    {
+        int index = 9 - num_eigen_points - c;
+
+        const cv::Matx<double, 9, 1> e = u_.col(index);
+        SQPSolution solutions[2];
+
+        Matx<double, 9, 1> r;
+        nearestRotationMatrix(e, r);
+        solutions[0] = runSQP(r);
+        solutions[0].t = p_ * solutions[0].r_hat;
+        checkSolution(solutions[0], min_sq_err);
+
+        nearestRotationMatrix(-e, r);
+        solutions[1] = runSQP(r);
+        solutions[1].t = p_ * solutions[1].r_hat;
+        checkSolution(solutions[1], min_sq_err);
+
+        c++;
+    }
+}
+
+PoseSolver::SQPSolution PoseSolver::runSQP(const cv::Matx<double, 9, 1>& r0)
+{
+    cv::Matx<double, 9, 1> r = r0;
+
+    double delta_squared_norm = std::numeric_limits<double>::max();
+    cv::Matx<double, 9, 1> delta;
+
+    int step = 0;
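+    // Iterate SQP steps until the update becomes negligible or the iteration cap is reached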
+    while (delta_squared_norm > SQP_SQUARED_TOLERANCE && step++ < SQP_MAX_ITERATION)
+    {
+        solveSQPSystem(r, delta);
+        r += delta;
+        delta_squared_norm = cv::norm(delta, cv::NORM_L2SQR);
+    }
+
+    SQPSolution solution;
+
+    double det_r = det3x3(r);
+    if (det_r < 0)
+    {
+        r = -r;
+        det_r = -det_r;
+    }
+
+    if (det_r > SQP_DET_THRESHOLD)
+    {
+        nearestRotationMatrix(r, solution.r_hat);
+    }
+    else
+    {
+        solution.r_hat = r;
+    }
+
+    return solution;
+}
+
+void PoseSolver::solveSQPSystem(const cv::Matx<double, 9, 1>& r, cv::Matx<double, 9, 1>& delta)
+{
+    double sqnorm_r1 = r(0) * r(0) + r(1) * r(1) + r(2) * r(2),
+        sqnorm_r2 = r(3) * r(3) + r(4) * r(4) + r(5) * r(5),
+        sqnorm_r3 = r(6) * r(6) + r(7) * r(7) + r(8) * r(8);
+    double dot_r1r2 = r(0) * r(3) + r(1) * r(4) + r(2) * r(5),
+        dot_r1r3 = r(0) * r(6) + r(1) * r(7) + r(2) * r(8),
+        dot_r2r3 = r(3) * r(6) + r(4) * r(7) + r(5) * r(8);
+
+    cv::Matx<double, 9, 3> N;
+    cv::Matx<double, 9, 6> H;
+    cv::Matx<double, 6, 6> JH;
+
+    computeRowAndNullspace(r, H, N, JH);
+
+    cv::Matx<double, 6, 1> g;
+    g(0) = 1 - sqnorm_r1; g(1) = 1 - sqnorm_r2; g(2) = 1 - sqnorm_r3; g(3) = -dot_r1r2; g(4) = -dot_r2r3; g(5) = -dot_r1r3;
+
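+    // JH is lower triangular (filled by computeRowAndNullspace), so JH * x = g is solved by forward substitution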
+    cv::Matx<double, 6, 1> x;
+    x(0) = g(0) / JH(0, 0);
+    x(1) = g(1) / JH(1, 1);
+    x(2) = g(2) / JH(2, 2);
+    x(3) = (g(3) - JH(3, 0) * x(0) - JH(3, 1) * x(1)) / JH(3, 3);
+    x(4) = (g(4) - JH(4, 1) * x(1) - JH(4, 2) * x(2) - JH(4, 3) * x(3)) / JH(4, 4);
+    x(5) = (g(5) - JH(5, 0) * x(0) - JH(5, 2) * x(2) - JH(5, 3) * x(3) - JH(5, 4) * x(4)) / JH(5, 5);
+
+    delta = H * x;
+
+
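+    // Correct the step with a null-space component: y minimizes the Omega-weighted objective along directions that preserve the constraints to first order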
+    cv::Matx<double, 3, 9> nt_omega = N.t() * omega_;
+    cv::Matx<double, 3, 3> W = nt_omega * N, W_inv;
+
+    analyticalInverse3x3Symm(W, W_inv);
+
+    cv::Matx<double, 3, 1> y = -W_inv * nt_omega * (delta + r);
+    delta += N * y;
+}
+
+bool PoseSolver::analyticalInverse3x3Symm(const cv::Matx<double, 3, 3>& Q,
+    cv::Matx<double, 3, 3>& Qinv,
+    const double& threshold)
+{
+    // 1. Get the elements of the matrix
+    double a = Q(0, 0),
+        b = Q(1, 0), d = Q(1, 1),
+        c = Q(2, 0), e = Q(2, 1), f = Q(2, 2);
+
+    // 2. Determinant
+    double t2, t4, t7, t9, t12;
+    t2 = e * e;
+    t4 = a * d;
+    t7 = b * b;
+    t9 = b * c;
+    t12 = c * c;
+    double det = -t4 * f + a * t2 + t7 * f - 2.0 * t9 * e + t12 * d;
+
+    if (fabs(det) < threshold) return false;
+
+    // 3. Inverse
+    double t15, t20, t24, t30;
+    t15 = 1.0 / det;
+    t20 = (-b * f + c * e) * t15;
+    t24 = (b * e - c * d) * t15;
+    t30 = (a * e - t9) * t15;
+    Qinv(0, 0) = (-d * f + t2) * t15;
+    Qinv(0, 1) = Qinv(1, 0) = -t20;
+    Qinv(0, 2) = Qinv(2, 0) = -t24;
+    Qinv(1, 1) = -(a * f - t12) * t15;
+    Qinv(1, 2) = Qinv(2, 1) = t30;
+    Qinv(2, 2) = -(t4 - t7) * t15;
+
+    return true;
+}
+
+void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
+    cv::Matx<double, 9, 6>& H,
+    cv::Matx<double, 9, 3>& N,
+    cv::Matx<double, 6, 6>& K,
+    const double& norm_threshold)
+{
+    H = cv::Matx<double, 9, 6>::zeros();
+
+    // 1. q1
+    double norm_r1 = sqrt(r(0) * r(0) + r(1) * r(1) + r(2) * r(2));
+    double inv_norm_r1 = norm_r1 > 1e-5 ? 1.0 / norm_r1 : 0.0;
+    H(0, 0) = r(0) * inv_norm_r1;
+    H(1, 0) = r(1) * inv_norm_r1;
+    H(2, 0) = r(2) * inv_norm_r1;
+    K(0, 0) = 2 * norm_r1;
+
+    // 2. q2
+    double norm_r2 = sqrt(r(3) * r(3) + r(4) * r(4) + r(5) * r(5));
+    double inv_norm_r2 = 1.0 / norm_r2;
+    H(3, 1) = r(3) * inv_norm_r2;
+    H(4, 1) = r(4) * inv_norm_r2;
+    H(5, 1) = r(5) * inv_norm_r2;
+    K(1, 0) = 0;
+    K(1, 1) = 2 * norm_r2;
+
+    // 3. q3 = r3 / norm(r3)
+    double norm_r3 = sqrt(r(6) * r(6) + r(7) * r(7) + r(8) * r(8));
+    double inv_norm_r3 = 1.0 / norm_r3;
+    H(6, 2) = r(6) * inv_norm_r3;
+    H(7, 2) = r(7) * inv_norm_r3;
+    H(8, 2) = r(8) * inv_norm_r3;
+    K(2, 0) = K(2, 1) = 0;
+    K(2, 2) = 2 * norm_r3;
+
+    // 4. q4
+    double dot_j4q1 = r(3) * H(0, 0) + r(4) * H(1, 0) + r(5) * H(2, 0),
+        dot_j4q2 = r(0) * H(3, 1) + r(1) * H(4, 1) + r(2) * H(5, 1);
+
+    H(0, 3) = r(3) - dot_j4q1 * H(0, 0);
+    H(1, 3) = r(4) - dot_j4q1 * H(1, 0);
+    H(2, 3) = r(5) - dot_j4q1 * H(2, 0);
+    H(3, 3) = r(0) - dot_j4q2 * H(3, 1);
+    H(4, 3) = r(1) - dot_j4q2 * H(4, 1);
+    H(5, 3) = r(2) - dot_j4q2 * H(5, 1);
+    double inv_norm_j4 = 1.0 / sqrt(H(0, 3) * H(0, 3) + H(1, 3) * H(1, 3) + H(2, 3) * H(2, 3) +
+        H(3, 3) * H(3, 3) + H(4, 3) * H(4, 3) + H(5, 3) * H(5, 3));
+
+    H(0, 3) *= inv_norm_j4;
+    H(1, 3) *= inv_norm_j4;
+    H(2, 3) *= inv_norm_j4;
+    H(3, 3) *= inv_norm_j4;
+    H(4, 3) *= inv_norm_j4;
+    H(5, 3) *= inv_norm_j4;
+
+    K(3, 0) = r(3) * H(0, 0) + r(4) * H(1, 0) + r(5) * H(2, 0);
+    K(3, 1) = r(0) * H(3, 1) + r(1) * H(4, 1) + r(2) * H(5, 1);
+    K(3, 2) = 0;
+    K(3, 3) = r(3) * H(0, 3) + r(4) * H(1, 3) + r(5) * H(2, 3) + r(0) * H(3, 3) + r(1) * H(4, 3) + r(2) * H(5, 3);
+
+    // 5. q5
+    double dot_j5q2 = r(6) * H(3, 1) + r(7) * H(4, 1) + r(8) * H(5, 1);
+    double dot_j5q3 = r(3) * H(6, 2) + r(4) * H(7, 2) + r(5) * H(8, 2);
+    double dot_j5q4 = r(6) * H(3, 3) + r(7) * H(4, 3) + r(8) * H(5, 3);
+
+    H(0, 4) = -dot_j5q4 * H(0, 3);
+    H(1, 4) = -dot_j5q4 * H(1, 3);
+    H(2, 4) = -dot_j5q4 * H(2, 3);
+    H(3, 4) = r(6) - dot_j5q2 * H(3, 1) - dot_j5q4 * H(3, 3);
+    H(4, 4) = r(7) - dot_j5q2 * H(4, 1) - dot_j5q4 * H(4, 3);
+    H(5, 4) = r(8) - dot_j5q2 * H(5, 1) - dot_j5q4 * H(5, 3);
+    H(6, 4) = r(3) - dot_j5q3 * H(6, 2); H(7, 4) = r(4) - dot_j5q3 * H(7, 2); H(8, 4) = r(5) - dot_j5q3 * H(8, 2);
+
+    Matx<double, 9, 1> q4 = H.col(4);
+    q4 /= cv::norm(q4);
+    set<double, 9, 1, 9, 6>(0, 4, H, q4);
+
+    K(4, 0) = 0;
+    K(4, 1) = r(6) * H(3, 1) + r(7) * H(4, 1) + r(8) * H(5, 1);
+    K(4, 2) = r(3) * H(6, 2) + r(4) * H(7, 2) + r(5) * H(8, 2);
+    K(4, 3) = r(6) * H(3, 3) + r(7) * H(4, 3) + r(8) * H(5, 3);
+    K(4, 4) = r(6) * H(3, 4) + r(7) * H(4, 4) + r(8) * H(5, 4) + r(3) * H(6, 4) + r(4) * H(7, 4) + r(5) * H(8, 4);
+
+
+    // 6. q6
+    double dot_j6q1 = r(6) * H(0, 0) + r(7) * H(1, 0) + r(8) * H(2, 0);
+    double dot_j6q3 = r(0) * H(6, 2) + r(1) * H(7, 2) + r(2) * H(8, 2);
+    double dot_j6q4 = r(6) * H(0, 3) + r(7) * H(1, 3) + r(8) * H(2, 3);
+    double dot_j6q5 = r(0) * H(6, 4) + r(1) * H(7, 4) + r(2) * H(8, 4) + r(6) * H(0, 4) + r(7) * H(1, 4) + r(8) * H(2, 4);
+
+    H(0, 5) = r(6) - dot_j6q1 * H(0, 0) - dot_j6q4 * H(0, 3) - dot_j6q5 * H(0, 4);
+    H(1, 5) = r(7) - dot_j6q1 * H(1, 0) - dot_j6q4 * H(1, 3) - dot_j6q5 * H(1, 4);
+    H(2, 5) = r(8) - dot_j6q1 * H(2, 0) - dot_j6q4 * H(2, 3) - dot_j6q5 * H(2, 4);
+
+    H(3, 5) = -dot_j6q5 * H(3, 4) - dot_j6q4 * H(3, 3);
+    H(4, 5) = -dot_j6q5 * H(4, 4) - dot_j6q4 * H(4, 3);
+    H(5, 5) = -dot_j6q5 * H(5, 4) - dot_j6q4 * H(5, 3);
+
+    H(6, 5) = r(0) - dot_j6q3 * H(6, 2) - dot_j6q5 * H(6, 4);
+    H(7, 5) = r(1) - dot_j6q3 * H(7, 2) - dot_j6q5 * H(7, 4);
+    H(8, 5) = r(2) - dot_j6q3 * H(8, 2) - dot_j6q5 * H(8, 4);
+
+    Matx<double, 9, 1> q5 = H.col(5);
+    q5 /= cv::norm(q5);
+    set<double, 9, 1, 9, 6>(0, 5, H, q5);
+
+    K(5, 0) = r(6) * H(0, 0) + r(7) * H(1, 0) + r(8) * H(2, 0);
+    K(5, 1) = 0; K(5, 2) = r(0) * H(6, 2) + r(1) * H(7, 2) + r(2) * H(8, 2);
+    K(5, 3) = r(6) * H(0, 3) + r(7) * H(1, 3) + r(8) * H(2, 3);
+    K(5, 4) = r(6) * H(0, 4) + r(7) * H(1, 4) + r(8) * H(2, 4) + r(0) * H(6, 4) + r(1) * H(7, 4) + r(2) * H(8, 4);
+    K(5, 5) = r(6) * H(0, 5) + r(7) * H(1, 5) + r(8) * H(2, 5) + r(0) * H(6, 5) + r(1) * H(7, 5) + r(2) * H(8, 5);
+
+    // Great! Now H is an orthogonalized, sparse basis of the Jacobian row space and K is filled.
+    //
+    // Now get a projector onto the null space H:
+    const cv::Matx<double, 9, 9> Pn = cv::Matx<double, 9, 9>::eye() - (H * H.t());
+
+    // Now we need to pick 3 columns of Pn with norm above norm_threshold and a reasonable mutual angle.
+    //
+    // Start with the column of Pn that has the largest norm
+    int index1 = 0,
+        index2 = 0,
+        index3 = 0;
+    double  max_norm1 = std::numeric_limits<double>::min();
+    double min_dot12 = std::numeric_limits<double>::max();
+    double min_dot1323 = std::numeric_limits<double>::max();
+
+
+    double col_norms[9];
+    for (int i = 0; i < 9; i++)
+    {
+        col_norms[i] = cv::norm(Pn.col(i));
+        if (col_norms[i] >= norm_threshold)
+        {
+            if (max_norm1 < col_norms[i])
+            {
+                max_norm1 = col_norms[i];
+                index1 = i;
+            }
+        }
+    }
+
+    Matx<double, 9, 1> v1 = Pn.col(index1);
+    v1 /= max_norm1;
+    set<double, 9, 1, 9, 3>(0, 0, N, v1);
+
+    for (int i = 0; i < 9; i++)
+    {
+        if (i == index1) continue;
+        if (col_norms[i] >= norm_threshold)
+        {
+            double cos_v1_x_col = fabs(Pn.col(i).dot(v1) / col_norms[i]);
+
+            if (cos_v1_x_col <= min_dot12)
+            {
+                index2 = i;
+                min_dot12 = cos_v1_x_col;
+            }
+        }
+    }
+
+    Matx<double, 9, 1> v2 = Pn.col(index2);
+    Matx<double, 9, 1> n0 = N.col(0);
+    v2 -= v2.dot(n0) * n0;
+    v2 /= cv::norm(v2);
+    set<double, 9, 1, 9, 3>(0, 1, N, v2);
+
+    for (int i = 0; i < 9; i++)
+    {
+        if (i == index2 || i == index1) continue;
+        if (col_norms[i] >= norm_threshold)
+        {
+            double cos_v1_x_col = fabs(Pn.col(i).dot(v1) / col_norms[i]);
+            double cos_v2_x_col = fabs(Pn.col(i).dot(v2) / col_norms[i]);
+
+            if (cos_v1_x_col + cos_v2_x_col <= min_dot1323)
+            {
+                index3 = i;
+                min_dot1323 = cos_v1_x_col + cos_v2_x_col;
+            }
+        }
+    }
+
+    Matx<double, 9, 1> v3 = Pn.col(index3);
+    Matx<double, 9, 1> n1 = N.col(1);
+    v3 -= (v3.dot(n1)) * n1 + (v3.dot(n0)) * n0; // subtract both projections (Gram-Schmidt)
+    v3 /= cv::norm(v3);
+    set<double, 9, 1, 9, 3>(0, 2, N, v3);
+
+}
+
+// faster nearest rotation computation based on FOAM (see: http://users.ics.forth.gr/~lourakis/publ/2018_iros.pdf )
+/* Solve the nearest orthogonal approximation problem
+    * i.e., given e, find R minimizing ||R-e||_F
+    *
+    * The computation borrows from Markley's FOAM algorithm
+    * "Attitude Determination Using Vector Observations: A Fast Optimal Matrix Algorithm", J. Astronaut. Sci.
+    *
+    * See also M. Lourakis: "An Efficient Solution to Absolute Orientation", ICPR 2016
+    *
+    *  Copyright (C) 2019 Manolis Lourakis (lourakis **at** ics forth gr)
+    *  Institute of Computer Science, Foundation for Research & Technology - Hellas
+    *  Heraklion, Crete, Greece.
+    */
+void PoseSolver::nearestRotationMatrix(const cv::Matx<double, 9, 1>& e,
+    cv::Matx<double, 9, 1>& r)
+{
+    register int i;
+    double l, lprev, det_e, e_sq, adj_e_sq, adj_e[9];
+
+    // e's adjoint
+    adj_e[0] = e(4) * e(8) - e(5) * e(7); adj_e[1] = e(2) * e(7) - e(1) * e(8); adj_e[2] = e(1) * e(5) - e(2) * e(4);
+    adj_e[3] = e(5) * e(6) - e(3) * e(8); adj_e[4] = e(0) * e(8) - e(2) * e(6); adj_e[5] = e(2) * e(3) - e(0) * e(5);
+    adj_e[6] = e(3) * e(7) - e(4) * e(6); adj_e[7] = e(1) * e(6) - e(0) * e(7); adj_e[8] = e(0) * e(4) - e(1) * e(3);
+
+    // det(e), ||e||^2, ||adj(e)||^2
+    det_e = e(0) * e(4) * e(8) - e(0) * e(5) * e(7) - e(1) * e(3) * e(8) + e(2) * e(3) * e(7) + e(1) * e(6) * e(5) - e(2) * e(6) * e(4);
+    e_sq = e(0) * e(0) + e(1) * e(1) + e(2) * e(2) + e(3) * e(3) + e(4) * e(4) + e(5) * e(5) + e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
+    adj_e_sq = adj_e[0] * adj_e[0] + adj_e[1] * adj_e[1] + adj_e[2] * adj_e[2] + adj_e[3] * adj_e[3] + adj_e[4] * adj_e[4] + adj_e[5] * adj_e[5] + adj_e[6] * adj_e[6] + adj_e[7] * adj_e[7] + adj_e[8] * adj_e[8];
+
+    // compute l_max with Newton-Raphson from FOAM's characteristic polynomial, i.e. eq.(23) - (26)
+    for (i = 200, l = 2.0, lprev = 0.0; fabs(l - lprev) > 1E-12 * fabs(lprev) && i > 0; --i) {
+        double tmp, p, pp;
+
+        tmp = (l * l - e_sq);
+        p = (tmp * tmp - 8.0 * l * det_e - 4.0 * adj_e_sq);
+        pp = 8.0 * (0.5 * tmp * l - det_e);
+
+        lprev = l;
+        l -= p / pp;
+    }
+
+    // the rotation matrix equals ((l^2 + e_sq)*e + 2*l*adj(e') - 2*e*e'*e) / (l*(l*l-e_sq) - 2*det(e)), i.e. eq.(14) using (18), (19)
+    {
+        // compute (l^2 + e_sq)*e
+        double tmp[9], e_et[9], denom;
+        const double a = l * l + e_sq;
+
+        // e_et=e*e'
+        e_et[0] = e(0) * e(0) + e(1) * e(1) + e(2) * e(2);
+        e_et[1] = e(0) * e(3) + e(1) * e(4) + e(2) * e(5);
+        e_et[2] = e(0) * e(6) + e(1) * e(7) + e(2) * e(8);
+
+        e_et[3] = e_et[1];
+        e_et[4] = e(3) * e(3) + e(4) * e(4) + e(5) * e(5);
+        e_et[5] = e(3) * e(6) + e(4) * e(7) + e(5) * e(8);
+
+        e_et[6] = e_et[2];
+        e_et[7] = e_et[5];
+        e_et[8] = e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
+
+        // tmp=e_et*e
+        tmp[0] = e_et[0] * e(0) + e_et[1] * e(3) + e_et[2] * e(6);
+        tmp[1] = e_et[0] * e(1) + e_et[1] * e(4) + e_et[2] * e(7);
+        tmp[2] = e_et[0] * e(2) + e_et[1] * e(5) + e_et[2] * e(8);
+
+        tmp[3] = e_et[3] * e(0) + e_et[4] * e(3) + e_et[5] * e(6);
+        tmp[4] = e_et[3] * e(1) + e_et[4] * e(4) + e_et[5] * e(7);
+        tmp[5] = e_et[3] * e(2) + e_et[4] * e(5) + e_et[5] * e(8);
+
+        tmp[6] = e_et[6] * e(0) + e_et[7] * e(3) + e_et[8] * e(6);
+        tmp[7] = e_et[6] * e(1) + e_et[7] * e(4) + e_et[8] * e(7);
+        tmp[8] = e_et[6] * e(2) + e_et[7] * e(5) + e_et[8] * e(8);
+
+        // compute R as (a*e + 2*(l*adj(e)' - tmp))*denom; note that adj(e')=adj(e)'
+        denom = l * (l * l - e_sq) - 2.0 * det_e;
+        denom = 1.0 / denom;
+        r(0) = (a * e(0) + 2.0 * (l * adj_e[0] - tmp[0])) * denom;
+        r(1) = (a * e(1) + 2.0 * (l * adj_e[3] - tmp[1])) * denom;
+        r(2) = (a * e(2) + 2.0 * (l * adj_e[6] - tmp[2])) * denom;
+
+        r(3) = (a * e(3) + 2.0 * (l * adj_e[1] - tmp[3])) * denom;
+        r(4) = (a * e(4) + 2.0 * (l * adj_e[4] - tmp[4])) * denom;
+        r(5) = (a * e(5) + 2.0 * (l * adj_e[7] - tmp[5])) * denom;
+
+        r(6) = (a * e(6) + 2.0 * (l * adj_e[2] - tmp[6])) * denom;
+        r(7) = (a * e(7) + 2.0 * (l * adj_e[5] - tmp[7])) * denom;
+        r(8) = (a * e(8) + 2.0 * (l * adj_e[8] - tmp[8])) * denom;
+    }
+}
+
+double PoseSolver::det3x3(const cv::Matx<double, 9, 1>& e)
+{
+    return e(0) * e(4) * e(8) + e(1) * e(5) * e(6) + e(2) * e(3) * e(7)
+        - e(6) * e(4) * e(2) - e(7) * e(5) * e(0) - e(8) * e(3) * e(1);
+}
+
+inline bool PoseSolver::positiveDepth(const SQPSolution& solution) const
+{
+    const cv::Matx<double, 9, 1>& r = solution.r_hat;
+    const cv::Matx<double, 3, 1>& t = solution.t;
+    const cv::Vec3d& mean = point_mean_;
+    return (r(6) * mean(0) + r(7) * mean(1) + r(8) * mean(2) + t(2) > 0);
+}
+
+void PoseSolver::checkSolution(SQPSolution& solution, double& min_error)
+{
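+    // Keep only solutions with positive depth; a clearly smaller error resets the list, while near-ties are deduplicated by rotation similarity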
+    if (positiveDepth(solution))
+    {
+        solution.sq_error = (omega_ * solution.r_hat).ddot(solution.r_hat);
+        if (fabs(min_error - solution.sq_error) > EQUAL_SQUARED_ERRORS_DIFF)
+        {
+            if (min_error > solution.sq_error)
+            {
+                min_error = solution.sq_error;
+                solutions_[0] = solution;
+                num_solutions_ = 1;
+            }
+        }
+        else
+        {
+            bool found = false;
+            for (int i = 0; i < num_solutions_; i++)
+            {
+                if (cv::norm(solutions_[i].r_hat - solution.r_hat, cv::NORM_L2SQR) < EQUAL_VECTORS_SQUARED_DIFF)
+                {
+                    if (solutions_[i].sq_error > solution.sq_error)
+                    {
+                        solutions_[i] = solution;
+                    }
+                    found = true;
+                    break;
+                }
+            }
+
+            if (!found)
+            {
+                solutions_[num_solutions_++] = solution;
+            }
+            if (min_error > solution.sq_error) min_error = solution.sq_error;
+        }
+    }
+}
+
+double PoseSolver::orthogonalityError(const cv::Matx<double, 9, 1>& e)
+{
+    double sq_norm_e1 = e(0) * e(0) + e(1) * e(1) + e(2) * e(2);
+    double sq_norm_e2 = e(3) * e(3) + e(4) * e(4) + e(5) * e(5);
+    double sq_norm_e3 = e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
+    double dot_e1e2 = e(0) * e(3) + e(1) * e(4) + e(2) * e(5);
+    double dot_e1e3 = e(0) * e(6) + e(1) * e(7) + e(2) * e(8);
+    double dot_e2e3 = e(3) * e(6) + e(4) * e(7) + e(5) * e(8);
+
+    return (sq_norm_e1 - 1) * (sq_norm_e1 - 1) + (sq_norm_e2 - 1) * (sq_norm_e2 - 1) + (sq_norm_e3 - 1) * (sq_norm_e3 - 1) +
+        2 * (dot_e1e2 * dot_e1e2 + dot_e1e3 * dot_e1e3 + dot_e2e3 * dot_e2e3);
+}
+
+}
+}
diff --git a/modules/calib3d/src/sqpnp.hpp b/modules/calib3d/src/sqpnp.hpp
new file mode 100644
index 0000000000..f8136324c9
--- /dev/null
+++ b/modules/calib3d/src/sqpnp.hpp
@@ -0,0 +1,194 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html
+
+// This file is based on file issued with the following license:
+
+/*
+BSD 3-Clause License
+
+Copyright (c) 2020, George Terzakis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef OPENCV_CALIB3D_SQPNP_HPP
+#define OPENCV_CALIB3D_SQPNP_HPP
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+namespace sqpnp {
+
+
+class PoseSolver {
+public:
+    /**
+    * @brief PoseSolver constructor
+    */
+    PoseSolver();
+
+    /**
+    * @brief                Finds the possible poses of a camera given a set of 3D points
+    *                       and their corresponding 2D image projections. The poses are
+    *                       sorted by lowest squared error (which corresponds to lowest
+    *                       reprojection error).
+    * @param objectPoints   Array or vector of 3 or more 3D points defined in object coordinates.
+    *                       1xN/Nx1 3-channel (float or double) where N is the number of points.
+    * @param imagePoints    Array or vector of corresponding 2D points, 1xN/Nx1 2-channel.
+    * @param rvec           The output rotation solutions (up to 18 3x1 rotation vectors)
+    * @param tvec           The output translation solutions (up to 18 3x1 vectors)
+    */
+    void solve(InputArray objectPoints, InputArray imagePoints, OutputArrayOfArrays rvec,
+        OutputArrayOfArrays tvec);
+
+private:
+    struct SQPSolution
+    {
+        cv::Matx<double, 9, 1> r_hat;
+        cv::Matx<double, 3, 1> t;
+        double sq_error;
+    };
+
+    /*
+    * @brief                Computes the 9x9 PSD Omega matrix and supporting matrices.
+    * @param objectPoints   Array or vector of 3 or more 3D points defined in object coordinates.
+    *                       1xN/Nx1 3-channel (float or double) where N is the number of points.
+    * @param imagePoints    Array or vector of corresponding 2D points, 1xN/Nx1 2-channel.
+    */
+    void computeOmega(InputArray objectPoints, InputArray imagePoints);
+
+    /*
+    * @brief                Computes the possible poses from the precomputed Omega matrix,
+    *                       using its null vectors as starting points for the SQP iterations.
+    */
+    void solveInternal();
+
+    /*
+    * @brief                Produces the distance from being orthogonal for a given 3x3 matrix
+    *                       in row-major form.
+    * @param e              The vector to test representing a 3x3 matrix in row major form.
+    * @return               The distance the matrix is from being orthogonal.
+    */
+    static double orthogonalityError(const cv::Matx<double, 9, 1>& e);
+
+    /*
+    * @brief                Evaluates a solution, keeping it if it has positive depth and a
+    *                       competitive squared error.
+    * @param solution       The solution to evaluate.
+    * @param min_error      The current minimum squared error.
+    */
+    void checkSolution(SQPSolution& solution, double& min_error);
+
+    /*
+    * @brief                Computes the determinant of a matrix stored in row-major format.
+    * @param e              Vector representing a 3x3 matrix stored in row-major format.
+    * @return               The determinant of the matrix.
+    */
+    static double det3x3(const cv::Matx<double, 9, 1>& e);
+
+    /*
+    * @brief                Tests the cheirality for a given solution.
+    * @param solution       The solution to evaluate.
+    */
+    inline bool positiveDepth(const SQPSolution& solution) const;
+
+    /*
+    * @brief                Determines the nearest rotation matrix to a given 3x3 matrix.
+    *                       Input and output are 9x1 vectors representing 3x3 matrices stored in row-major
+    *                       form.
+    * @param e              The input 3x3 matrix stored in a vector in row-major form.
+    * @param r              The nearest rotation matrix to the input e (again in row-major form).
+    */
+    static void nearestRotationMatrix(const cv::Matx<double, 9, 1>& e,
+        cv::Matx<double, 9, 1>& r);
+
+    /*
+    * @brief                Runs the sequential quadratic programming on orthogonal matrices.
+    * @param r0             The start point of the solver.
+    */
+    SQPSolution runSQP(const cv::Matx<double, 9, 1>& r0);
+
+    /*
+    * @brief                Computes a single SQP correction step for the given matrix r.
+    * @param r              The current matrix estimate.
+    * @param delta          The computed correction step.
+    */
+    void solveSQPSystem(const cv::Matx<double, 9, 1>& r, cv::Matx<double, 9, 1>& delta);
+
+    /*
+    * @brief                Analytically computes the inverse of a symmetric 3x3 matrix using the
+    *                       lower triangle.
+    * @param Q              The matrix to invert.
+    * @param Qinv           The inverse of Q.
+    * @param threshold      The threshold to determine if Q is singular and non-invertible.
+    */
+    bool analyticalInverse3x3Symm(const cv::Matx<double, 3, 3>& Q,
+        cv::Matx<double, 3, 3>& Qinv,
+        const double& threshold = 1e-8);
+
+    /*
+    * @brief                Computes the 3D null space and 6D normal space of the constraint Jacobian
+    *                       at a 9D vector r (representing a rank-3 matrix). Note that K is lower
+    *                       triangular so upper triangle is undefined.
+    * @param r              9D vector representing a rank-3 matrix.
+    * @param H              6D row space of the constraint Jacobian at r.
+    * @param N              3D null space of the constraint Jacobian at r.
+    * @param K              The constraint Jacobian at r.
+    * @param norm_threshold Threshold for column vector norm of Pn (the projection onto the null space
+    *                       of the constraint Jacobian).
+    */
+    void computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
+        cv::Matx<double, 9, 6>& H,
+        cv::Matx<double, 9, 3>& N,
+        cv::Matx<double, 6, 6>& K,
+        const double& norm_threshold = 0.1);
+
+    static const double RANK_TOLERANCE;
+    static const double SQP_SQUARED_TOLERANCE;
+    static const double SQP_DET_THRESHOLD;
+    static const double ORTHOGONALITY_SQUARED_ERROR_THRESHOLD;
+    static const double EQUAL_VECTORS_SQUARED_DIFF;
+    static const double EQUAL_SQUARED_ERRORS_DIFF;
+    static const double POINT_VARIANCE_THRESHOLD;
+    static const int SQP_MAX_ITERATION;
+    static const double SQRT3;
+
+    cv::Matx<double, 9, 9> omega_;
+    cv::Vec<double, 9> s_;
+    cv::Matx<double, 9, 9> u_;
+    cv::Matx<double, 3, 9> p_;
+    cv::Vec3d point_mean_;
+    int num_null_vectors_;
+
+    SQPSolution solutions_[18];
+    int num_solutions_;
+
+};
+
+}
+}
+
+#endif
diff --git a/modules/calib3d/test/test_solvepnp_ransac.cpp b/modules/calib3d/test/test_solvepnp_ransac.cpp
index 0d35fa7126..fb0e2965e6 100644
--- a/modules/calib3d/test/test_solvepnp_ransac.cpp
+++ b/modules/calib3d/test/test_solvepnp_ransac.cpp
@@ -190,6 +190,8 @@ static std::string printMethod(int method)
         return "SOLVEPNP_IPPE";
     case 7:
         return "SOLVEPNP_IPPE_SQUARE";
+    case 8:
+        return "SOLVEPNP_SQPNP";
     default:
         return "Unknown value";
     }
@@ -206,6 +208,7 @@ public:
         eps[SOLVEPNP_AP3P] = 1.0e-2;
         eps[SOLVEPNP_DLS] = 1.0e-2;
         eps[SOLVEPNP_UPNP] = 1.0e-2;
+        eps[SOLVEPNP_SQPNP] = 1.0e-2;
         totalTestsCount = 10;
         pointsCount = 500;
     }
@@ -436,6 +439,7 @@ public:
         eps[SOLVEPNP_UPNP] = 1.0e-6; //UPnP is remapped to EPnP, so we use the same threshold
         eps[SOLVEPNP_IPPE] = 1.0e-6;
         eps[SOLVEPNP_IPPE_SQUARE] = 1.0e-6;
+        eps[SOLVEPNP_SQPNP] = 1.0e-6;
 
         totalTestsCount = 1000;
 

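For reference, the solver added by this patch is reached through the generic solvePnP API via the SOLVEPNP_SQPNP flag registered in the tests above. A minimal usage sketch, assuming a build that includes this patch; the intrinsics and correspondences are made up so that the image points are exact projections for rvec = 0, tvec = (0, 0, 5):

```
#include <opencv2/calib3d.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Object points and their projections under rvec = 0, tvec = (0, 0, 5),
    // with fx = fy = 800, cx = 320, cy = 240.
    std::vector<cv::Point3d> obj = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}, {0.5, 0.5, 1} };
    std::vector<cv::Point2d> img = { {320, 240}, {480, 240}, {320, 400}, {480, 400},
                                     {320 + 400.0 / 6, 240 + 400.0 / 6} };
    cv::Matx33d K(800, 0, 320,
                  0, 800, 240,
                  0, 0, 1);

    cv::Vec3d rvec, tvec;
    cv::solvePnP(obj, img, K, cv::noArray(), rvec, tvec, false, cv::SOLVEPNP_SQPNP);
    std::cout << "rvec = " << rvec << ", tvec = " << tvec << std::endl; // expect ~0 and ~(0, 0, 5)
    return 0;
}
```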
From e05c2e0f1dea20af4f27b97c0798fc261d1b492a Mon Sep 17 00:00:00 2001
From: Joe <joegeisbauer@gmail.com>
Date: Tue, 17 Nov 2020 13:58:42 -0600
Subject: [PATCH 132/152] Fix Reduce Mean error for MobileNets DNN

Erasing reduced axes from the target shape one at a time shifted the indices of
the remaining axes, so later axes could drop the wrong dimension. Mark the
reduced axes first, then rebuild the target shape in a single pass.
---
 modules/dnn/src/onnx/onnx_importer.cpp | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 56683f4c14..01d84d9711 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -494,14 +494,17 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 MatShape inpShape = outShapes[node_proto.input(0)];
                 DictValue axes = layerParams.get("axes");
                 bool keepdims = layerParams.get<int>("keepdims");
-                MatShape targetShape = inpShape;
+                MatShape targetShape;
+                std::vector<bool> shouldDelete(inpShape.size(), false);
                 for (int i = 0; i < axes.size(); i++) {
                     int axis = clamp(axes.get<int>(i), inpShape.size());
-                    if (keepdims) {
-                        targetShape[axis] = 1;
-                    } else {
-                        targetShape.erase(targetShape.begin() + axis);
-                    }
+                    shouldDelete[axis] = true;
+                }
+                for (int axis = 0; axis < inpShape.size(); ++axis){
+                    if (!shouldDelete[axis])
+                        targetShape.push_back(inpShape[axis]);
+                    else if (keepdims)
+                        targetShape.push_back(1);
                 }
 
                 if (inpShape.size() == 3 && axes.size() <= 2)

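The fix above replaces in-place erasure, which shifted the indices of every axis after the erased one, with a mark-then-rebuild pass. A standalone sketch of the corrected logic (shape and axes are illustrative):

```
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> inpShape = {2, 3, 4}; // e.g. output shape of the previous layer
    std::vector<int> axes = {1, 2};        // axes reduced by ReduceMean
    bool keepdims = false;

    // Mark first: nothing shifts, so every axis index stays valid.
    std::vector<bool> shouldDelete(inpShape.size(), false);
    for (int a : axes)
        shouldDelete[a] = true;

    std::vector<int> targetShape;
    for (size_t i = 0; i < inpShape.size(); ++i)
    {
        if (!shouldDelete[i])
            targetShape.push_back(inpShape[i]);
        else if (keepdims)
            targetShape.push_back(1);
    }

    // Erasing in-place instead would move axis 2 to position 1 after the first
    // erase and delete the wrong dimension.
    for (int d : targetShape)
        std::cout << d << ' '; // prints: 2
    return 0;
}
```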
From bc434e8f67d488dfc334efde894c31dbba003d76 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 20 Nov 2020 20:32:59 +0000
Subject: [PATCH 133/152] calib3d: eliminate 'register' build warning

---
 modules/calib3d/src/sqpnp.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/calib3d/src/sqpnp.cpp b/modules/calib3d/src/sqpnp.cpp
index 10ea96c423..7117e61c96 100644
--- a/modules/calib3d/src/sqpnp.cpp
+++ b/modules/calib3d/src/sqpnp.cpp
@@ -631,7 +631,7 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
 void PoseSolver::nearestRotationMatrix(const cv::Matx<double, 9, 1>& e,
     cv::Matx<double, 9, 1>& r)
 {
-    register int i;
+    int i;
     double l, lprev, det_e, e_sq, adj_e_sq, adj_e[9];
 
     // e's adjoint

From ce31c9c448e093600b6daccbc82026c972a818b7 Mon Sep 17 00:00:00 2001
From: Jiri Kucera <jkucera@redhat.com>
Date: Tue, 14 Apr 2020 14:23:43 +0200
Subject: [PATCH 134/152] core(matrix): Negative values checks

Add checks that prevents indexing an array by negative values.
---
 modules/core/src/matrix_wrap.cpp |  32 +++----
 modules/core/test/test_mat.cpp   | 153 +++++++++++++++++++++++++++++++
 2 files changed, 165 insertions(+), 20 deletions(-)

diff --git a/modules/core/src/matrix_wrap.cpp b/modules/core/src/matrix_wrap.cpp
index 4c5efd6ba5..0a8d6c12d7 100644
--- a/modules/core/src/matrix_wrap.cpp
+++ b/modules/core/src/matrix_wrap.cpp
@@ -947,7 +947,7 @@ bool _InputArray::isContinuous(int i) const
     if( k == STD_ARRAY_MAT )
     {
         const Mat* vv = (const Mat*)obj;
-        CV_Assert(i > 0 && i < sz.height);
+        CV_Assert(i >= 0 && i < sz.height);
         return vv[i].isContinuous();
     }
 
@@ -981,21 +981,21 @@ bool _InputArray::isSubmatrix(int i) const
     if( k == STD_VECTOR_MAT )
     {
         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
-        CV_Assert((size_t)i < vv.size());
+        CV_Assert(i >= 0 && (size_t)i < vv.size());
         return vv[i].isSubmatrix();
     }
 
     if( k == STD_ARRAY_MAT )
     {
         const Mat* vv = (const Mat*)obj;
-        CV_Assert(i < sz.height);
+        CV_Assert(i >= 0 && i < sz.height);
         return vv[i].isSubmatrix();
     }
 
     if( k == STD_VECTOR_UMAT )
     {
         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
-        CV_Assert((size_t)i < vv.size());
+        CV_Assert(i >= 0 && (size_t)i < vv.size());
         return vv[i].isSubmatrix();
     }
 
@@ -1026,9 +1026,7 @@ size_t _InputArray::offset(int i) const
     if( k == STD_VECTOR_MAT )
     {
         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
-        if( i < 0 )
-            return 1;
-        CV_Assert( i < (int)vv.size() );
+        CV_Assert( i >= 0 && i < (int)vv.size() );
 
         return (size_t)(vv[i].ptr() - vv[i].datastart);
     }
@@ -1036,16 +1034,14 @@ size_t _InputArray::offset(int i) const
     if( k == STD_ARRAY_MAT )
     {
         const Mat* vv = (const Mat*)obj;
-        if( i < 0 )
-            return 1;
-        CV_Assert( i < sz.height );
+        CV_Assert( i >= 0 && i < sz.height );
         return (size_t)(vv[i].ptr() - vv[i].datastart);
     }
 
     if( k == STD_VECTOR_UMAT )
     {
         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
-        CV_Assert((size_t)i < vv.size());
+        CV_Assert(i >= 0 && (size_t)i < vv.size());
         return vv[i].offset;
     }
 
@@ -1059,7 +1055,7 @@ size_t _InputArray::offset(int i) const
     if (k == STD_VECTOR_CUDA_GPU_MAT)
     {
         const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
-        CV_Assert((size_t)i < vv.size());
+        CV_Assert(i >= 0 && (size_t)i < vv.size());
         return (size_t)(vv[i].data - vv[i].datastart);
     }
 
@@ -1089,25 +1085,21 @@ size_t _InputArray::step(int i) const
     if( k == STD_VECTOR_MAT )
     {
         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
-        if( i < 0 )
-            return 1;
-        CV_Assert( i < (int)vv.size() );
+        CV_Assert( i >= 0 && i < (int)vv.size() );
         return vv[i].step;
     }
 
     if( k == STD_ARRAY_MAT )
     {
         const Mat* vv = (const Mat*)obj;
-        if( i < 0 )
-            return 1;
-        CV_Assert( i < sz.height );
+        CV_Assert( i >= 0 && i < sz.height );
         return vv[i].step;
     }
 
     if( k == STD_VECTOR_UMAT )
     {
         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
-        CV_Assert((size_t)i < vv.size());
+        CV_Assert(i >= 0 && (size_t)i < vv.size());
         return vv[i].step;
     }
 
@@ -1119,7 +1111,7 @@ size_t _InputArray::step(int i) const
     if (k == STD_VECTOR_CUDA_GPU_MAT)
     {
         const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
-        CV_Assert((size_t)i < vv.size());
+        CV_Assert(i >= 0 && (size_t)i < vv.size());
         return vv[i].step;
     }
 
diff --git a/modules/core/test/test_mat.cpp b/modules/core/test/test_mat.cpp
index 58eafd0748..90ebd04755 100644
--- a/modules/core/test/test_mat.cpp
+++ b/modules/core/test/test_mat.cpp
@@ -9,6 +9,8 @@
 #include "opencv2/core/eigen.hpp"
 #endif
 
+#include "opencv2/core/cuda.hpp"
+
 namespace opencv_test { namespace {
 
 class Core_ReduceTest : public cvtest::BaseTest
@@ -1984,6 +1986,157 @@ TEST(Core_InputArray, fetch_MatExpr)
 }
 
 
+#ifdef CV_CXX11
+class TestInputArrayRangeChecking {
+    static const char *kind2str(cv::_InputArray ia)
+    {
+        switch (ia.kind())
+        {
+        #define C(x) case cv::_InputArray::x: return #x
+        C(MAT);
+        C(UMAT);
+        C(EXPR);
+        C(MATX);
+        C(STD_VECTOR);
+        C(STD_ARRAY);
+        C(NONE);
+        C(STD_VECTOR_VECTOR);
+        C(STD_BOOL_VECTOR);
+        C(STD_VECTOR_MAT);
+        C(STD_ARRAY_MAT);
+        C(STD_VECTOR_UMAT);
+        C(CUDA_GPU_MAT);
+        C(STD_VECTOR_CUDA_GPU_MAT);
+        #undef C
+        default:
+            return "<unsupported>";
+        }
+    }
+
+    static void banner(cv::_InputArray ia, const char *label, const char *name)
+    {
+        std::cout << std::endl
+                  << label << " = " << name << ", Kind: " << kind2str(ia)
+                  << std::endl;
+    }
+
+    template<typename I, typename F>
+    static void testA(I ia, F f, const char *mfname)
+    {
+        banner(ia, "f", mfname);
+        EXPECT_THROW(f(ia, -1), cv::Exception)
+            << "f(ia, " << -1 << ") should throw cv::Exception";
+        for (int i = 0; i < int(ia.size()); i++)
+        {
+            EXPECT_NO_THROW(f(ia, i))
+                << "f(ia, " << i << ") should not throw an exception";
+        }
+        EXPECT_THROW(f(ia, int(ia.size())), cv::Exception)
+            << "f(ia, " << ia.size() << ") should throw cv::Exception";
+    }
+
+    template<typename I, typename F>
+    static void testB(I ia, F f, const char *mfname)
+    {
+        banner(ia, "f", mfname);
+        EXPECT_THROW(f(ia, -1), cv::Exception)
+            << "f(ia, " << -1 << ") should throw cv::Exception";
+        for (int i = 0; i < int(ia.size()); i++)
+        {
+            EXPECT_NO_THROW(f(ia, i))
+                << "f(ia, " << i << ") should not throw an exception";
+        }
+        EXPECT_THROW(f(ia, int(ia.size())), cv::Exception)
+            << "f(ia, " << ia.size() << ") should throw cv::Exception";
+    }
+
+    static void test_isContinuous()
+    {
+        auto f = [](cv::_InputArray ia, int i) { (void)ia.isContinuous(i); };
+
+        cv::Mat M;
+        cv::UMat uM;
+
+        std::vector<cv::Mat> vec = {M, M};
+        std::array<cv::Mat, 2> arr = {M, M};
+        std::vector<cv::UMat> uvec = {uM, uM};
+
+        testA(vec, f, "isContinuous");
+        testA(arr, f, "isContinuous");
+        testA(uvec, f, "isContinuous");
+    }
+
+    static void test_isSubmatrix()
+    {
+        auto f = [](cv::_InputArray ia, int i) { (void)ia.isSubmatrix(i); };
+
+        cv::Mat M;
+        cv::UMat uM;
+
+        std::vector<cv::Mat> vec = {M, M};
+        std::array<cv::Mat, 2> arr = {M, M};
+        std::vector<cv::UMat> uvec = {uM, uM};
+
+        testA(vec, f, "isSubmatrix");
+        testA(arr, f, "isSubmatrix");
+        testA(uvec, f, "isSubmatrix");
+    }
+
+    static void test_offset()
+    {
+        auto f = [](cv::_InputArray ia, int i) { return ia.offset(i); };
+
+        cv::Mat M;
+        cv::UMat uM;
+        cv::cuda::GpuMat gM;
+
+        std::vector<cv::Mat> vec = {M, M};
+        std::array<cv::Mat, 2> arr = {M, M};
+        std::vector<cv::UMat> uvec = {uM, uM};
+        std::vector<cv::cuda::GpuMat> gvec = {gM, gM};
+
+        testB(vec, f, "offset");
+        testB(arr, f, "offset");
+        testB(uvec, f, "offset");
+        testB(gvec, f, "offset");
+    }
+
+    static void test_step()
+    {
+        auto f = [](cv::_InputArray ia, int i) { return ia.step(i); };
+
+        cv::Mat M;
+        cv::UMat uM;
+        cv::cuda::GpuMat gM;
+
+        std::vector<cv::Mat> vec = {M, M};
+        std::array<cv::Mat, 2> arr = {M, M};
+        std::vector<cv::UMat> uvec = {uM, uM};
+        std::vector<cv::cuda::GpuMat> gvec = {gM, gM};
+
+        testB(vec, f, "step");
+        testB(arr, f, "step");
+        testB(uvec, f, "step");
+        testB(gvec, f, "step");
+    }
+
+public:
+    static void run()
+    {
+        test_isContinuous();
+        test_isSubmatrix();
+        test_offset();
+        test_step();
+    }
+};
+
+TEST(Core_InputArray, range_checking)
+{
+    TestInputArrayRangeChecking::run();
+}
+#endif
+
+
 TEST(Core_Vectors, issue_13078)
 {
     float floats_[] = { 1, 2, 3, 4, 5, 6, 7, 8 };

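The user-visible effect of these checks: a negative index passed to the per-element accessors now fails CV_Assert (raising cv::Exception) instead of, for offset() and step(), silently returning the placeholder value 1. A minimal sketch, assuming a build that includes this patch:

```
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    std::vector<cv::Mat> mats = { cv::Mat::zeros(2, 2, CV_8U) };
    cv::_InputArray ia(mats);

    std::cout << ia.offset(0) << std::endl; // valid index: OK
    try
    {
        ia.offset(-1); // now rejected by CV_Assert(i >= 0 && ...)
    }
    catch (const cv::Exception& e)
    {
        std::cout << "caught: " << e.what() << std::endl;
    }
    return 0;
}
```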
From 0f8ab0557eb5eafa3be10a7986b698f452b2e844 Mon Sep 17 00:00:00 2001
From: YashasSamaga <yashas_2010@yahoo.com>
Date: Sat, 21 Nov 2020 17:35:20 +0530
Subject: [PATCH 135/152] enable fusion tests, update thresholds and fix missed
 eltwise fusions

---
 modules/dnn/src/dnn.cpp                    | 44 +++++++++++-----------
 modules/dnn/test/test_backends.cpp         |  3 +-
 modules/dnn/test/test_caffe_importer.cpp   |  2 +-
 modules/dnn/test/test_darknet_importer.cpp |  2 +
 modules/dnn/test/test_layers.cpp           | 39 ++++++++++++++++---
 modules/dnn/test/test_model.cpp            |  2 +-
 modules/dnn/test/test_onnx_importer.cpp    | 11 ++++--
 modules/dnn/test/test_tf_importer.cpp      |  2 +-
 modules/dnn/test/test_torch_importer.cpp   |  3 +-
 9 files changed, 73 insertions(+), 35 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index a056e8f5b5..0f60a393a5 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2681,7 +2681,6 @@ struct Net::Impl : public detail::NetImplBase
 
 #ifdef HAVE_CUDA
                     // CUDA backend supports fusion with eltwise sum (without variable channels)
-                    // `nextEltwiseLayer` is reset if eltwise layer doesn't have a compatible configuration for fusion
                     if (IS_DNN_CUDA_TARGET(preferableTarget) && !nextEltwiseLayer.empty())
                     {
                         // we create a temporary backend node for eltwise layer to obtain the eltwise configuration
@@ -2691,38 +2690,41 @@ struct Net::Impl : public detail::NetImplBase
                         // CUDA backend uses EltwiseOp when all operands have the same number of channels; otherwise, ShortcutOp is used.
                         // Hence, a successful cast to EltwiseOp implies that the number of channels is same in all operand tensors.
                         if (eltwiseNode.empty() || eltwiseNode->op != cuda4dnn::EltwiseOpType::SUM || !eltwiseNode->coeffs.empty())
-                            nextEltwiseLayer = Ptr<EltwiseLayer>();
+                            break;
                     }
 #endif
 
-                    if (pinsToKeep.count(lpNext) != 0)
+                    if (IS_DNN_OPENCL_TARGET(preferableTarget) && pinsToKeep.count(lpNext) != 0)
                         break;
                     if (nextData->inputBlobsId.size() != 2)
                         break;
 
-                    if (!nextData->params.has("operation") || toLowerCase(nextData->params.get<String>("operation")) == "sum")
+                    if (IS_DNN_OPENCL_TARGET(preferableTarget))
                     {
-                        if (nextData->params.has("coeff"))
+                        if (!nextData->params.has("operation") || toLowerCase(nextData->params.get<String>("operation")) == "sum")
                         {
-                            DictValue paramCoeff = nextData->params.get("coeff");
-                            int n = paramCoeff.size();
-                            bool isCoeffOneOne = (n == 2);
-                            for (int i = 0; isCoeffOneOne && i < n; i++)
-                            {
-                                float c = paramCoeff.get<float>(i);
-                                isCoeffOneOne &= (c == 1.0f);
-                            }
-                            if (!isCoeffOneOne)
+                            if (nextData->params.has("coeff"))
                             {
-                                CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion of 'Sum' without coeffs (or {1.0, 1.0}) is supported only");
-                                break;
+                                DictValue paramCoeff = nextData->params.get("coeff");
+                                int n = paramCoeff.size();
+                                bool isCoeffOneOne = (n == 2);
+                                for (int i = 0; isCoeffOneOne && i < n; i++)
+                                {
+                                    float c = paramCoeff.get<float>(i);
+                                    isCoeffOneOne &= (c == 1.0f);
+                                }
+                                if (!isCoeffOneOne)
+                                {
+                                    CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion of 'Sum' without coeffs (or {1.0, 1.0}) is supported only");
+                                    break;
+                                }
                             }
                         }
-                    }
-                    else
-                    {
-                        CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion with eltwise operation is not supported: " << nextData->params.get<String>("operation"));
-                        break;
+                        else
+                        {
+                            CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion with eltwise operation is not supported: " << nextData->params.get<String>("operation"));
+                            break;
+                        }
                     }
 
                     {
diff --git a/modules/dnn/test/test_backends.cpp b/modules/dnn/test/test_backends.cpp
index b3e425aef7..67f5782a2e 100644
--- a/modules/dnn/test/test_backends.cpp
+++ b/modules/dnn/test/test_backends.cpp
@@ -321,6 +321,7 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
     else if (target == DNN_TARGET_CUDA_FP16)
     {
         scoreDiff = 0.03;
+        iouDiff = 0.13;
     }
 
     processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel",
@@ -511,7 +512,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
     else if (target == DNN_TARGET_CUDA_FP16)
     {
         l1 = 0.3;
-        lInf = 7.2;
+        lInf = 7.6;
     }
     processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
 #if defined(HAVE_INF_ENGINE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp
index 5440f4734f..c0282207dd 100644
--- a/modules/dnn/test/test_caffe_importer.cpp
+++ b/modules/dnn/test/test_caffe_importer.cpp
@@ -749,7 +749,7 @@ TEST_P(Test_Caffe_nets, RFCN)
     if (target == DNN_TARGET_CUDA_FP16)
     {
         scoreDiff = 0.0034;
-        iouDiff = 0.11;
+        iouDiff = 0.12;
     }
     static Mat ref = (Mat_<float>(2, 7) << 0, 7, 0.991359, 491.822, 81.1668, 702.573, 178.234,
                                            0, 12, 0.94786, 132.093, 223.903, 338.077, 566.16);
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index 83ac0525f3..021603636e 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -677,6 +677,8 @@ TEST_P(Test_Darknet_nets, YOLOv4_tiny)
 
     double scoreDiff = 0.01f;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.15 : 0.01f;
+    if (target == DNN_TARGET_CUDA_FP16)
+        iouDiff = 0.02;
 
     std::string config_file = "yolov4-tiny.cfg";
     std::string weights_file = "yolov4-tiny.weights";
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 7aa74861a2..61537e0e01 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -2228,7 +2228,7 @@ public:
 
     static testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsForFusionTests()
     {
-        return dnnBackendsAndTargets(false, false, true, false, false, false); // OCV OpenCL + OCV CPU
+        return dnnBackendsAndTargets(false, false, true, false, true, false); // OCV OpenCL + OCV CPU + CUDA
     }
 };
 
@@ -2280,7 +2280,12 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
                 expectedFusedLayers.push_back(activId);
         }
     }
-
+    else if (backendId == DNN_BACKEND_CUDA)
+    {
+        if (actType == "ReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Swish" ||
+            actType == "Mish" || actType == "Sigmoid" || actType == "Power")
+                expectedFusedLayers.push_back(activId);
+    }
     TestLayerFusion::test(input, net, backendId, targetId, expectedFusedLayers);
 }
 INSTANTIATE_TEST_CASE_P(TestLayerFusion, ConvolutionActivationFusion, Combine(
@@ -2319,7 +2324,7 @@ TEST_P(ConvolutionEltwiseFusion, Accuracy)
     std::string eltwiseOp = get<1>(GetParam());
     bool weightedEltwise = get<2>(GetParam());
     if (eltwiseOp != "sum" && weightedEltwise)
-            throw SkipTestException("weighted eltwise not supported");
+        throw SkipTestException("weighted eltwise not supported");
     LayerParams eltwiseParams;
     TestLayerFusion::makeDefaultTestEltwiseLayer(eltwiseParams, eltwiseOp, weightedEltwise);
 
@@ -2332,7 +2337,11 @@ TEST_P(ConvolutionEltwiseFusion, Accuracy)
 
     Backend backendId = get<0>(get<3>(GetParam()));
     Target targetId = get<1>(get<3>(GetParam()));
-    TestLayerFusion::test(input, net, backendId, targetId);
+
+    std::vector<int> expectedFusedLayers;
+    if (backendId == DNN_BACKEND_CUDA && eltwiseOp == "sum" && !weightedEltwise)
+        expectedFusedLayers.push_back(eltwiseId);
+    TestLayerFusion::test(input, net, backendId, targetId, expectedFusedLayers);
 }
 INSTANTIATE_TEST_CASE_P(TestLayerFusion, ConvolutionEltwiseFusion, Combine(
 /* bias */              testing::Bool(),
@@ -2411,7 +2420,16 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
             }
         }
     }
-
+    else if(backendId == DNN_BACKEND_CUDA)
+    {
+        if (eltwiseOp == "sum" && !weightedEltwise)
+        {
+            expectedFusedLayers.push_back(eltwiseId);
+            if (actType == "ReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Swish" ||
+                actType == "Mish" || actType == "Sigmoid" || actType == "Power")
+                expectedFusedLayers.push_back(activId);
+        }
+    }
     TestLayerFusion::test(input, net, backendId, targetId, expectedFusedLayers);
 }
 INSTANTIATE_TEST_CASE_P(TestLayerFusion, ConvolutionEltwiseActivationFusion, Combine(
@@ -2486,7 +2504,16 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
                 expectedFusedLayers.push_back(activId); // activation fused with convolution
         }
     }
-
+    else if(backendId == DNN_BACKEND_CUDA)
+    {
+        if (actType == "ReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Swish" ||
+            actType == "Mish" || actType == "Sigmoid" || actType == "Power")
+        {
+                expectedFusedLayers.push_back(activId);
+                if (eltwiseOp == "sum" && !weightedEltwise)
+                    expectedFusedLayers.push_back(eltwiseId);
+        }
+    }
     TestLayerFusion::test(input, net, backendId, targetId, expectedFusedLayers);
 }
 INSTANTIATE_TEST_CASE_P(TestLayerFusion, ConvolutionActivationEltwiseFusion, Combine(
diff --git a/modules/dnn/test/test_model.cpp b/modules/dnn/test/test_model.cpp
index 5766684c41..7d516de73e 100644
--- a/modules/dnn/test/test_model.cpp
+++ b/modules/dnn/test/test_model.cpp
@@ -263,7 +263,7 @@ TEST_P(Test_Model, DetectionMobilenetSSD)
     }
     else if (target == DNN_TARGET_CUDA_FP16)
     {
-        scoreDiff = 4e-4;
+        scoreDiff = 0.002;
         iouDiff = 1e-2;
     }
     float confThreshold = FLT_MIN;
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 1a65fd4880..95e4bab193 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -221,7 +221,8 @@ TEST_P(Test_ONNX_layers, Deconvolution)
     testONNXModels("two_deconvolution", npy, 0, 0, false, false);
     testONNXModels("deconvolution_group", npy, 0, 0, false, false);
     testONNXModels("deconvolution_output_shape", npy, 0, 0, false, false);
-    testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
+    if (target != DNN_TARGET_CUDA_FP16) // bug
+        testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
 }
 
 TEST_P(Test_ONNX_layers, Deconvolution3D)
@@ -675,6 +676,8 @@ TEST_P(Test_ONNX_layers, LinearWithConstant)
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2020040000)
     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
 #endif
+    if (backend == DNN_BACKEND_CUDA)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
     testONNXModels("lin_with_constant");
 }
 
@@ -685,6 +688,8 @@ TEST_P(Test_ONNX_layers, MatmulWithTwoInputs)
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2020040000)
     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
 #endif
+    if (backend == DNN_BACKEND_CUDA)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
     testONNXModels("matmul_with_two_inputs");
 }
 
@@ -1159,8 +1164,8 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
     float l1 = 0.0013, lInf = 0.009;
     if (target == DNN_TARGET_CUDA_FP16)
     {
-        l1 = 0.008;
-        lInf = 0.04;
+        l1 = 0.01;
+        lInf = 0.06;
     }
 
     checkBackend(&input0, &ref0);
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index 89dc9e0836..e6cfbe6637 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -1256,7 +1256,7 @@ TEST_P(Test_TensorFlow_nets, EfficientDet)
     if (target == DNN_TARGET_CUDA_FP16)
     {
         scoreDiff = 0.002;
-        iouDiff = 0.004;
+        iouDiff = 0.005;
     }
     normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff);
     expectNoFallbacksFromIE(net);
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index 82dcf1dd02..54b7c1baa9 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -165,7 +165,8 @@ TEST_P(Test_Torch_layers, run_reshape_single_sample)
     }
     else if (target == DNN_TARGET_CUDA_FP16)
     {
-        l1 = 0.01;
+        l1 = 0.02;
+        lInf = 0.04;
     }
     runTorchNet("net_reshape_single_sample", "", false, false, true, l1, lInf);
 }

From 632a08ff4012aafc9988eea9f52947b868e8b6e0 Mon Sep 17 00:00:00 2001
From: Hollow Man <hollowman@hollowman.ml>
Date: Sun, 22 Nov 2020 00:00:07 +0800
Subject: [PATCH 136/152] Fix typo in docs

adatapted -> adapted
---
 .../how_to_use_OpenCV_parallel_for_.markdown                    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown b/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown
index eeeb94b4c4..9968cdb257 100644
--- a/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown
+++ b/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown
@@ -32,7 +32,7 @@ automatically available with the platform (e.g. APPLE GCD) but chances are that
 have access to a parallel framework either directly or by enabling the option in CMake and rebuild the library.
 
 The second (weak) precondition is more related to the task you want to achieve as not all computations
-are suitable / can be adatapted to be run in a parallel way. To remain simple, tasks that can be split
+are suitable / can be adapted to be run in a parallel way. To remain simple, tasks that can be split
 into multiple elementary operations with no memory dependency (no possible race condition) are easily
 parallelizable. Computer vision processing are often easily parallelizable as most of the time the processing of
 one pixel does not depend to the state of other pixels.

From 5a3a915a9ba35b31808577245f71d468319d311c Mon Sep 17 00:00:00 2001
From: Or Avital <oravital7@gmail.com>
Date: Sun, 22 Nov 2020 14:19:20 +0200
Subject: [PATCH 137/152] Remove unnecessary condition (will never reach)

---
 modules/core/src/copy.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/modules/core/src/copy.cpp b/modules/core/src/copy.cpp
index 7f4329df78..dcd585d834 100644
--- a/modules/core/src/copy.cpp
+++ b/modules/core/src/copy.cpp
@@ -1032,8 +1032,7 @@ void flip( InputArray _src, OutputArray _dst, int flip_mode )
     }
 
     if ((size.width == 1 && flip_mode > 0) ||
-        (size.height == 1 && flip_mode == 0) ||
-        (size.height == 1 && size.width == 1 && flip_mode < 0))
+        (size.height == 1 && flip_mode == 0))
     {
         return _src.copyTo(_dst);
     }

From ac418e999defbfb9852d20a6884118153e7a7151 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Sun, 22 Nov 2020 16:28:53 +0000
Subject: [PATCH 138/152] cmake: update condition for find_package(Eigen3
 CONFIG)

---
 cmake/OpenCVFindLibsPerf.cmake | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/cmake/OpenCVFindLibsPerf.cmake b/cmake/OpenCVFindLibsPerf.cmake
index 7b3874ff0e..3753084d28 100644
--- a/cmake/OpenCVFindLibsPerf.cmake
+++ b/cmake/OpenCVFindLibsPerf.cmake
@@ -51,7 +51,10 @@ endif(WITH_CUDA)
 
 # --- Eigen ---
 if(WITH_EIGEN AND NOT HAVE_EIGEN)
-  if(NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG)
+  if((OPENCV_FORCE_EIGEN_FIND_PACKAGE_CONFIG
+      OR NOT (CMAKE_VERSION VERSION_LESS "3.0.0")  # Eigen3Targets.cmake requires CMake 3.0.0+
+      ) AND NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG
+  )
     find_package(Eigen3 CONFIG QUIET)  # Ceres 2.0.0 CMake scripts doesn't work with CMake's FindEigen3.cmake module (due to missing EIGEN3_VERSION_STRING)
   endif()
   if(NOT Eigen3_FOUND)

From eafe6ccdbe8481bfa8b7d4dcc1430145ef11b16e Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Mon, 23 Nov 2020 19:05:57 +0000
Subject: [PATCH 139/152] objc: fix handling of std::vector<std::vector<T>>
 return type
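
Previously the generator declared the intermediate variable with one nesting level too few, so assigning a std::vector<std::vector<T>> return value could not compile. A sketch of the corrected shape of the generated code, with cv::Point standing in for the generated element type (the function name is hypothetical):

```
#include <vector>
#include <opencv2/core.hpp>

// Hypothetical stand-in for a wrapped C++ function returning a nested vector.
static std::vector<std::vector<cv::Point>> detectNested() { return {}; }

void generatedWrapperBody()
{
    // Old (broken) declaration, one nesting level short:
    //   std::vector<cv::Point> retValVector = detectNested();  // does not compile
    // Fixed declaration emitted by gen_objc.py:
    std::vector< std::vector<cv::Point> > retValVector = detectNested();
    (void)retValVector;  // CV2OBJC2 then converts it into the nested NSMutableArray
}
```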

---
 modules/objc/generator/gen_objc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py
index 469b82b938..bd9743c757 100755
--- a/modules/objc/generator/gen_objc.py
+++ b/modules/objc/generator/gen_objc.py
@@ -1123,7 +1123,7 @@ class ObjectiveCWrapperGenerator(object):
                 if cpp_type.find("::") == -1:
                     cpp_type = self.get_namespace_prefix(cpp_type) + cpp_type
                 prologue.append("NSMutableArray<NSMutableArray<" + objc_type + ">*>* retVal = [NSMutableArray new];")
-                ret_val = "std::vector<" + cpp_type + "> retValVector = "
+                ret_val = "std::vector< std::vector<" + cpp_type + "> > retValVector = "
                 epilogue.append("CV2OBJC2(" + cpp_type + ", " + objc_type[:-1] + ", retValVector, retVal);")
             elif ret_type.startswith("Ptr_"):
                 cpp_type = type_dict[ret_type]["c_type"]

From f4f462c50bd3e3c25c8ad7b88e3bfaca49d0f702 Mon Sep 17 00:00:00 2001
From: Sergei Slashchinin <62052793+sl-sergei@users.noreply.github.com>
Date: Tue, 24 Nov 2020 19:52:45 +0300
Subject: [PATCH 140/152] Merge pull request #18862 from
 sl-sergei:support_pool1d

Support for Pool1d layer for OpenCV and OpenCL targets (a short standalone sketch of the 1-D pooling path follows the change list below)

* Initial version of Pool1d support

* Fix variable naming

* Fix 1d pooling for OpenCL

* Change support logic, remove unnecessary variable, split the tests

* Remove other deprecated variables

* Fix warning. Check tests

* Change support check logic

* Change support check logic, 2
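
A minimal standalone sketch of the 1-D fast path added below (the function name and flat data layout are assumptions for illustration, not the layer's API): for 3-D inputs the kernel height degenerates to 1, and each output element reduces one [xstart, xend) window of an input row, exactly as the new std::max_element branch in pooling_layer.cpp does.

```
#include <algorithm>
#include <vector>

// Minimal sketch of 1-D max pooling (assumed helper, not OpenCV API):
// slide the window along the row and take the maximum of each window.
static std::vector<float> maxPool1d(const std::vector<float>& src,
                                    int kernel, int stride)
{
    std::vector<float> dst;
    for (int xstart = 0; xstart + kernel <= (int)src.size(); xstart += stride)
    {
        const float* first = src.data() + xstart;
        const float* last = first + kernel;  // xend, one past the window
        dst.push_back(*std::max_element(first, last));
    }
    return dst;
}
```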
---
 .../dnn/include/opencv2/dnn/all_layers.hpp    |   2 -
 modules/dnn/src/layers/pooling_layer.cpp      | 186 +++++++++++-------
 modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp  |  16 +-
 modules/dnn/test/test_onnx_importer.cpp       |  78 ++++++++
 4 files changed, 205 insertions(+), 77 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index c9455ab528..ffc2568a89 100644
--- a/modules/dnn/include/opencv2/dnn/all_layers.hpp
+++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -248,8 +248,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         int type;
         std::vector<size_t> kernel_size, strides;
         std::vector<size_t> pads_begin, pads_end;
-        CV_DEPRECATED_EXTERNAL Size kernel, stride, pad;
-        CV_DEPRECATED_EXTERNAL int pad_l, pad_t, pad_r, pad_b;
         bool globalPooling; //!< Flag is true if at least one of the axes is global pooled.
         std::vector<bool> isGlobalPooling;
         bool computeMaxIdx;
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 98417620ed..d4eb1f174a 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -85,8 +85,6 @@ public:
         computeMaxIdx = true;
         globalPooling = false;
         isGlobalPooling = std::vector<bool>(3, false);
-        stride = Size(1, 1);
-        pad_t = pad_l = pad_b = pad_r = 0;
 
         hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
         shapesInitialized = !hasDynamicShapes;
@@ -108,16 +106,6 @@ public:
 
             getPoolingKernelParams(params, kernel_size, isGlobalPooling, pads_begin, pads_end, strides, padMode);
             globalPooling = isGlobalPooling[0] || isGlobalPooling[1] || isGlobalPooling[2];
-            if (kernel_size.size() == 2) {
-                kernel = Size(kernel_size[1], kernel_size[0]);
-                stride = Size(strides[1], strides[0]);
-                pad = Size(pads_begin[1], pads_begin[0]);
-
-                pad_t = pads_begin[0];
-                pad_l = pads_begin[1];
-                pad_b = pads_end[0];
-                pad_r = pads_end[1];
-            }
         }
         else if (params.has("pooled_w") || params.has("pooled_h"))
         {
@@ -165,17 +153,20 @@ public:
                 finalKernel.push_back(isGlobalPooling[idx] ? inp[i] : kernel_size[idx]);
              }
              kernel_size = finalKernel;
-             kernel = Size(kernel_size[1], kernel_size[0]);
          }
 
         getConvPoolPaddings(inp, kernel_size, strides, padMode, pads_begin, pads_end);
-        if (pads_begin.size() == 2) {
-            pad_t = pads_begin[0];
-            pad_l = pads_begin[1];
-            pad_b = pads_end[0];
-            pad_r = pads_end[1];
+
+        if (inputs[0].dims == 3)
+        {
+            //Pool1D
+            kernel_size.erase(kernel_size.begin() + 1);
+            strides.erase(strides.begin() + 1);
+            pads_begin.erase(pads_begin.begin() + 1);
+            pads_end.erase(pads_end.begin() + 1);
         }
 
+
 #ifdef HAVE_OPENCL
         poolOp.release();
 #endif
@@ -191,9 +182,11 @@ public:
                 return false;
             if (kernel_size.size() == 3)
                 return preferableTarget == DNN_TARGET_CPU;
+            if (kernel_size.size() == 1)
+                return false;
             if (preferableTarget == DNN_TARGET_MYRIAD) {
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
-                if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) {
+                if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) {
                     return !isMyriadX();
                 }
 #endif
@@ -205,19 +198,23 @@ public:
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-            return !computeMaxIdx && type != STOCHASTIC;
+            return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1;
         }
-        else if (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE)
+        else if (backendId == DNN_BACKEND_OPENCV)
         {
             if (kernel_size.size() == 3)
-                return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU);
-            if (kernel_size.empty() || kernel_size.size() == 2)
-                return backendId == DNN_BACKEND_OPENCV ||
-                       (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
-                           (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r)));
+                return preferableTarget == DNN_TARGET_CPU;
+            if (kernel_size.size() <= 2)
+                return true;
             else
                 return false;
         }
+        else if (backendId == DNN_BACKEND_HALIDE)
+        {
+            if (kernel_size.empty() || kernel_size.size() == 2)
+                return haveHalide() &&
+                       (type == MAX || (type == AVE && !pads_begin[0] && !pads_begin[1] && !pads_end[0] && !pads_end[1]));
+        }
         return false;
     }
 
@@ -237,12 +234,25 @@ public:
 
             config.in_shape = shape(inputs[0]);
             config.out_shape = shape(outputs[0]);
-            config.kernel = kernel;
-            config.pad_l = pad_l;
-            config.pad_t = pad_t;
-            config.pad_r = pad_r;
-            config.pad_b = pad_b;
-            config.stride = stride;
+            if (inputs[0].dims == 3)
+            {
+                //Pool1D
+                config.kernel = Size(kernel_size[0], 1);
+                config.stride = Size(strides[0], 1);
+                config.pad_l = pads_begin[0];
+                config.pad_t = 0;
+                config.pad_r = pads_end[0];
+                config.pad_b = 0;
+            }
+            else
+            {
+                config.kernel = Size(kernel_size[1], kernel_size[0]);
+                config.stride = Size(strides[1], strides[0]);
+                config.pad_l = pads_begin[1];
+                config.pad_t = pads_begin[0];
+                config.pad_r = pads_end[1];
+                config.pad_b = pads_end[0];
+            }
             config.channels = inputs[0].size[1];
             config.pool_method = type == MAX ? LIBDNN_POOLING_METHOD_MAX :
                                 (type == AVE ? LIBDNN_POOLING_METHOD_AVE :
@@ -428,7 +438,6 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
     public:
         const Mat* src, *rois;
         Mat *dst, *mask;
-        Size kernel, stride;
         int pad_l, pad_t, pad_r, pad_b;
         bool avePoolPaddedArea;
         int nstripes;
@@ -453,7 +462,7 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
             CV_Assert_N(
                       src.isContinuous(), dst.isContinuous(),
                       src.type() == CV_32F, src.type() == dst.type(),
-                      src.dims == 4 || src.dims == 5, dst.dims == 4 || dst.dims == 5,
+                      src.dims == 3 || src.dims == 4 || src.dims == 5, dst.dims == 3 || dst.dims == 4 || dst.dims == 5,
                       (((poolingType == ROI || poolingType == PSROI) &&
                       dst.size[0] == rois.size[0]) || src.size[0] == dst.size[0]),
                       poolingType == PSROI || src.size[1] == dst.size[1],
@@ -461,6 +470,9 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
 
             PoolingInvoker p;
 
+            bool isPool1D = src.dims == 3;
+            bool isPool3D = src.dims == 5;
+
             p.src = &src;
             p.rois = &rois;
             p.dst = &dst;
@@ -471,12 +483,10 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
             p.pads_end = pads_end;
 
             p.mask = &mask;
-            p.kernel = Size(kernel_size[1], kernel_size[0]);
-            p.stride = Size(strides[1], strides[0]);
             p.pad_l = pads_begin.back();
-            p.pad_t = pads_begin[pads_begin.size() - 2];
+            p.pad_t = isPool1D ? 0 : pads_begin[pads_begin.size() - 2];
             p.pad_r = pads_end.back();
-            p.pad_b = pads_end[pads_end.size() - 2];
+            p.pad_b = isPool1D ? 0 : pads_end[pads_end.size() - 2];
 
             p.avePoolPaddedArea = avePoolPaddedArea;
             p.nstripes = nstripes;
@@ -486,11 +496,11 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
 
             if( !computeMaxIdx )
             {
-                int height = src.size[src.dims - 2];
+                int height = isPool1D ? 1 : src.size[src.dims - 2];
                 int width = src.size[src.dims - 1];
 
-                int kernel_d = (kernel_size.size() == 3) ? kernel_size[0] : 1;
-                int kernel_h = kernel_size[kernel_size.size() - 2];
+                int kernel_d = isPool3D ? kernel_size[0] : 1;
+                int kernel_h = isPool1D ? 1 : kernel_size[kernel_size.size() - 2];
                 int kernel_w = kernel_size.back();
 
                 p.ofsbuf.resize(kernel_d * kernel_h * kernel_w);
@@ -510,13 +520,15 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         {
             int channels = dst->size[1];
 
+            bool isPool3D = src->dims == 5;
             bool isPool2D = src->dims == 4;
-            int depth = !isPool2D? dst->size[2] : 1;
-            int height = dst->size[dst->dims - 2];
+            bool isPool1D = src->dims == 3;
+            int depth = isPool3D? dst->size[2] : 1;
+            int height = isPool1D? 1 : dst->size[dst->dims - 2];
             int width = dst->size[dst->dims - 1];
 
-            int inp_depth = !isPool2D? src->size[2] : 1;
-            int inp_height = src->size[src->dims - 2];
+            int inp_depth = isPool3D? src->size[2] : 1;
+            int inp_height = isPool1D? 1 : src->size[src->dims - 2];
             int inp_width = src->size[src->dims - 1];
 
             size_t total = dst->total();
@@ -524,12 +536,12 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
             size_t stripeStart = r.start*stripeSize;
             size_t stripeEnd = std::min(r.end*stripeSize, total);
 
-            int kernel_d = !isPool2D? kernel_size[0] : 1;
-            int kernel_h = kernel_size[kernel_size.size() - 2];
+            int kernel_d = isPool3D? kernel_size[0] : 1;
+            int kernel_h = isPool1D? 1 : kernel_size[kernel_size.size() - 2];
             int kernel_w = kernel_size.back();
 
-            int stride_d = !isPool2D? strides[0] : 0;
-            int stride_h = strides[strides.size() - 2];
+            int stride_d = isPool3D? strides[0] : 0;
+            int stride_h = isPool1D? 1 :strides[strides.size() - 2];
             int stride_w = strides.back();
             bool compMaxIdx = computeMaxIdx;
 
@@ -720,7 +732,24 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
                             }
                         }
                         else
+#else
+                        CV_UNUSED(isPool2D);
 #endif
+                        if( isPool1D )
+                        {
+                            const float* first = srcData + xstart;
+                            const float* last = srcData + xend;
+                            const float* max_elem = std::max_element(first, last);
+                            if (max_elem!=last)
+                            {
+                                dstData[x0] = *max_elem;
+                                if( compMaxIdx )
+                                {
+                                    dstMaskData[x0] = std::distance(first, max_elem);
+                                }
+                            }
+                        }
+                        else
                         {
                             float max_val = -FLT_MAX;
                             if( compMaxIdx )
@@ -794,6 +823,14 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
                         }
                         else
 #endif
+                        if( isPool1D )
+                        {
+                            const float* first = srcData + xstart;
+                            const float* last = srcData + xend;
+                            float sum_val = std::accumulate(first, last, 0.f);
+                            dstData[x0] = sum_val*inv_kernel_area;
+                        }
+                        else
                         {
                             float sum_val = 0.f;
                             for (int d = dstart; d < dend; ++d) {
@@ -907,20 +944,26 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
         const int inWidth = inputBuffer.width();
         const int inHeight = inputBuffer.height();
+        const size_t kernelHeight = kernel_size[0];
+        const size_t kernelWidth = kernel_size[1];
+        const size_t strideHeight = strides[0];
+        const size_t strideWidth = strides[1];
+        const size_t paddingTop = pads_begin[0];
+        const size_t paddingLeft = pads_begin[1];
 
         Halide::Var x("x"), y("y"), c("c"), n("n");
         Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
-        Halide::RDom r(0, kernel.width, 0, kernel.height);
+        Halide::RDom r(0, kernelWidth, 0, kernelHeight);
         Halide::Expr kx, ky;
-        if(pad_l || pad_t)
+        if(paddingLeft || paddingTop)
         {
-            kx = clamp(x * stride.width + r.x - pad_l, 0, inWidth - 1);
-            ky = clamp(y * stride.height + r.y - pad_t, 0, inHeight - 1);
+            kx = clamp(x * strideWidth + r.x - paddingLeft, 0, inWidth - 1);
+            ky = clamp(y * strideHeight + r.y - paddingTop, 0, inHeight - 1);
         }
         else
         {
-            kx = min(x * stride.width + r.x, inWidth - 1);
-            ky = min(y * stride.height + r.y, inHeight - 1);
+            kx = min(x * strideWidth + r.x, inWidth - 1);
+            ky = min(y * strideHeight + r.y, inHeight - 1);
         }
 
         // Halide::argmax returns tuple (r.x, r.y, max).
@@ -928,17 +971,17 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
 
         // Compute offset from argmax in range [0, kernel_size).
         Halide::Expr max_index;
-        if(pad_l || pad_t)
+        if(paddingLeft || paddingTop)
         {
-            max_index = clamp(y * stride.height + res[1] - pad_t,
+            max_index = clamp(y * strideHeight + res[1] - paddingTop,
                               0, inHeight - 1) * inWidth +
-                        clamp(x * stride.width + res[0] - pad_l,
+                        clamp(x * strideWidth + res[0] - paddingLeft,
                               0, inWidth - 1);
         }
         else
         {
-            max_index = min(y * stride.height + res[1], inHeight - 1) * inWidth +
-                        min(x * stride.width + res[0], inWidth - 1);
+            max_index = min(y * strideHeight + res[1], inHeight - 1) * inWidth +
+                        min(x * strideWidth + res[0], inWidth - 1);
         }
         top(x, y, c, n) = { res[2], Halide::cast<float>(max_index) };
         return Ptr<BackendNode>(new HalideBackendNode(top));
@@ -952,21 +995,25 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
 
         const int inW = inputBuffer.width(), inH = inputBuffer.height();
-        if ((inW - kernel.width) % stride.width || (inH - kernel.height) % stride.height)
+        const size_t kernelHeight = kernel_size[0];
+        const size_t kernelWidth = kernel_size[1];
+        const size_t strideHeight = strides[0];
+        const size_t strideWidth = strides[1];
+        if ((inW - kernelWidth) % strideWidth || (inH - kernelHeight) % strideHeight)
         {
             CV_Error(cv::Error::StsNotImplemented,
                      "Halide backend for average pooling with partial "
                      "kernels is not implemented");
         }
 
-        const float norm = 1.0f / (kernel.width * kernel.height);
+        const float norm = 1.0f / (kernelWidth * kernelHeight);
 
         Halide::Var x("x"), y("y"), c("c"), n("n");
         Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
-        Halide::RDom r(0, kernel.width, 0, kernel.height);
+        Halide::RDom r(0, kernelWidth, 0, kernelHeight);
         top(x, y, c, n) = sum(
-            inputBuffer(x * stride.width + r.x,
-                        y * stride.height + r.y, c, n)) * norm;
+            inputBuffer(x * strideWidth + r.x,
+                        y * strideHeight + r.y, c, n)) * norm;
         return Ptr<BackendNode>(new HalideBackendNode(top));
 #endif  // HAVE_HALIDE
         return Ptr<BackendNode>();
@@ -1028,6 +1075,7 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
     {
         CV_Assert(inputs.size() != 0);
 
+        bool isPool1D = inputs[0].size() == 3;
         std::vector<int> inpShape(inputs[0].begin() + 2, inputs[0].end());
         std::vector<int> outShape(inputs[0].begin(), inputs[0].begin() + 2);
 
@@ -1056,14 +1104,15 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
             }
             else if (padMode.empty())
             {
-                for (int i = 0; i < local_kernel.size(); i++) {
+                int addedDims = isPool1D? inpShape.size() : local_kernel.size();
+                for (int i = 0; i < addedDims; i++) {
                     float dst = (float) (inpShape[i] + pads_begin[i] + pads_end[i] - local_kernel[i]) / strides[i];
                     outShape.push_back(1 + (ceilMode ? ceil(dst) : floor(dst)));
                 }
 
                 // If we have padding, ensure that the last pooling starts strictly
                 // inside the image (instead of at the padding); otherwise clip the last.
-                for (int i = 0; i < pads_end.size(); i++) {
+                for (int i = 0; i < addedDims; i++) {
                     if (pads_end[i] && (outShape[2 + i] - 1) * strides[i] >= inpShape[i] + pads_end[i]) {
                         --outShape[2 + i];
                         CV_Assert((outShape[2 + i] - 1) * strides[i] < inpShape[i] + pads_end[i]);
@@ -1107,7 +1156,8 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
     {
         CV_UNUSED(inputs); // suppress unused variable warning
         long flops = 0;
-        size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
+        bool isPool1D = inputs[0].size() == 3;
+        size_t karea = std::accumulate(kernel_size.begin(), isPool1D? kernel_size.begin() + 1 : kernel_size.end(),
                                     1, std::multiplies<size_t>());
         for(int i = 0; i < outputs.size(); i++)
         {
diff --git a/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp b/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp
index 47b40cc6c2..b366c97ac8 100644
--- a/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp
+++ b/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp
@@ -51,18 +51,20 @@ template<typename Dtype>
 OCL4DNNPool<Dtype>::OCL4DNNPool(OCL4DNNPoolConfig config)
 {
     int dims = config.in_shape.size();
-    int spatial_dims = 2;
+    int spatial_dims = config.in_shape.size()-2;
 
     channels_ = config.channels;
     pool_method_ = config.pool_method;
     avePoolPaddedArea = config.avePoolPaddedArea;
     computeMaxIdx = config.computeMaxIdx;
     use_half = config.use_half;
+    kernel_shape_.push_back(config.kernel.height);
+    kernel_shape_.push_back(config.kernel.width);
+    stride_.push_back(config.stride.height);
+    stride_.push_back(config.stride.width);
 
     for (int i = 0; i < spatial_dims; ++i)
     {
-        kernel_shape_.push_back(i == 0 ? config.kernel.height : config.kernel.width);
-        stride_.push_back(i == 0 ? config.stride.height : config.stride.width);
         im_in_shape_.push_back(config.in_shape[dims - spatial_dims + i]);
         im_out_shape_.push_back(config.out_shape[dims - spatial_dims + i]);
     }
@@ -75,10 +77,10 @@ OCL4DNNPool<Dtype>::OCL4DNNPool(OCL4DNNPoolConfig config)
     pad_l_ = config.pad_l;
     pad_r_ = config.pad_r;
     pad_b_ = config.pad_b;
-    height_ = im_in_shape_[0];
-    width_ = im_in_shape_[1];
-    pooled_height_ = im_out_shape_[0];
-    pooled_width_ = im_out_shape_[1];
+    height_ = spatial_dims == 1? 1 : im_in_shape_[0];
+    width_ = im_in_shape_.back();
+    pooled_height_ = spatial_dims == 1? 1 : im_out_shape_[0];
+    pooled_width_ = im_out_shape_.back();
 
     count_ = 1;
     for (int i = 0; i < config.out_shape.size(); ++i)
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 897b95ad8e..f38ca6700f 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -747,6 +747,84 @@ TEST_P(Test_ONNX_layers, DynamicAxes)
     testONNXModels("maxpooling_sigmoid_dynamic_axes");
 }
 
+TEST_P(Test_ONNX_layers, MaxPool1d)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("maxpooling_1d");
+}
+
+TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("maxpooling_sigmoid_1d");
+}
+
+TEST_P(Test_ONNX_layers, MaxPool1d_Twise)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("two_maxpooling_1d");
+}
+
+TEST_P(Test_ONNX_layers, AvePool1d)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("average_pooling_1d");
+}
+
+TEST_P(Test_ONNX_layers, PoolConv1d)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("pool_conv_1d");
+}
+
+TEST_P(Test_ONNX_layers, ConvResizePool1d)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+    testONNXModels("conv_resize_pool_1d");
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
 
 class Test_ONNX_nets : public Test_ONNX_layers

From 19d825aa16fbc19e2cd124edd8027c078dae0f58 Mon Sep 17 00:00:00 2001
From: Maxim Pashchenkov <maxim.pashchenkov@intel.com>
Date: Tue, 24 Nov 2020 20:51:02 +0300
Subject: [PATCH 141/152] Merge pull request #18904 from
 mpashchenkov:mp/ocv-gapi-skip-gm-tests

G-API: Adding skip for GraphMeta tests

* Added skip for GraphMeta tests

* Removed false
---
 modules/gapi/test/gapi_graph_meta_tests.cpp | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/modules/gapi/test/gapi_graph_meta_tests.cpp b/modules/gapi/test/gapi_graph_meta_tests.cpp
index 73c0da3c9e..1cc4c0c7d8 100644
--- a/modules/gapi/test/gapi_graph_meta_tests.cpp
+++ b/modules/gapi/test/gapi_graph_meta_tests.cpp
@@ -99,7 +99,12 @@ TEST(GraphMeta, Streaming_AccessInput) {
     cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2));
 
     auto ccomp = graph.compileStreaming();
-    ccomp.setSource<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi", false));
+    const auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        ccomp.setSource<cv::gapi::wip::GCaptureSource>(path);
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     ccomp.start();
 
     cv::Mat out_mat;
@@ -122,7 +127,12 @@ TEST(GraphMeta, Streaming_AccessOutput) {
     cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2, out3));
 
     auto ccomp = graph.compileStreaming();
-    ccomp.setSource<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi", false));
+    const auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        ccomp.setSource<cv::gapi::wip::GCaptureSource>(path);
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     ccomp.start();
 
     cv::Mat out_mat;
@@ -155,7 +165,12 @@ TEST(GraphMeta, Streaming_AccessDesync) {
     cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2, out3, out4, out5));
 
     auto ccomp = graph.compileStreaming();
-    ccomp.setSource<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi", false));
+    const auto path = findDataFile("cv/video/768x576.avi");
+    try {
+        ccomp.setSource<cv::gapi::wip::GCaptureSource>(path);
+    } catch(...) {
+        throw SkipTestException("Video file cannot be opened");
+    }
     ccomp.start();
 
     cv::optional<int64_t> out_sync_id;

From 85b0fb2a9c46958f1bbe1b11bf2f2389ddbd14f2 Mon Sep 17 00:00:00 2001
From: Jonathan Cole <joncole04412@gmail.com>
Date: Tue, 24 Nov 2020 16:54:54 -0500
Subject: [PATCH 142/152] Merge pull request #18826 from
 Rightpoint:feature/colejd/build-catalyst-xcframework

Support XCFramework builds, Catalyst

* Early work on xcframework support

* Improve legibility

* Somehow this works

* Specify ABIs in a place where they won't get erased

If you pass in the C/CXX flags from the Python script, they won't be respected. By doing it in the actual toolchain, the options are respected and Catalyst successfully links.

* Clean up and push updates

* Actually use Catalyst ABI

Needed to specify EXE linker flags to get compiler tests to link to the Catalyst ABIs.

* Clean up

* Revert changes to common toolchain that don't matter

* Try some things

* Support Catalyst build in OSX scripts

* Remove unnecessary iOS reference to AssetsLibrary framework

* Getting closer

* Try some things, port to Python 3

* Some additional fixes

* Point Cmake Plist gen to osx directory for Catalyst targets

* Remove dynamic lib references for Catalyst, copy iOS instead of macos

* Add flag for building only specified archs, remove iOS catalyst refs

* Add build-xcframework.sh

* Update build-xcframework.sh

* Add presumptive Apple Silicon support

* Add arm64 iphonesimulator target

* Fix xcframework build

* Working on arm64 iOS simulator

* Support 2.7 (replace run with check_output)

* Correctly check output of uname_m against arch

* Clean up

* Use lipo for intermediate frameworks, add python script

Remove unneeded __init__.py

* Simplify python xcframework build script

* Add --only-64-bit flag

* Add --framework-name flag

* Document

* Commit to f-strings, improve console output

* Add i386 to iphonesimulator platform in xcframework generator

* Enable objc for non-Catalyst frameworks

* Fix xcframework builder for paths with spaces

* Use arch when specifying Catalyst build platform in build command

* Fix incorrect settings for framework_name argparse configuration

* Prefer underscores instead of hyphens in new flags

* Move Catalyst flags to where they'll actually get used

* Use --without=objc on Catalyst target for now

* Remove get_or_create_folder and simplify logic

* Remove unused import

* Tighten up help text

* Document

* Move common functions into cv_build_utils

* Improve documentation

* Remove old build script

* Add readme

* Check for required CMake and Xcode versions

* Clean up TODOs and re-enable `copy_samples()`

Remove TODO

Fixup

* Add missing print_function import

* Clarify CMake dependency documentation

* Revert python2 change in gen_objc

* Remove unnecessary builtins imports

* Remove trailing whitespace

* Avoid building Catalyst unless specified

This makes Catalyst support a non-breaking change, though defaults should be specified when a breaking change is possible.

* Prevent lipoing for the same archs on different platforms before build

* Rename build-xcframework.py to build_xcframework.py

* Check for duplicate archs more carefully

* Prevent sample copying error when directory already exists

This can happen when building multiple architectures for the same platform.

* Simplify code for checking for default archs

* Improve build_xcframework.py header text

* Correctly resolve Python script paths

* Parse only known args in ios/osx build_framework.py

* Pass through uncaptured args in build_xcframework to osx/ios build

* Fix typo

* Fix typo

* Fix unparameterized build path for intermediate frameworks

* Fix dynamic Info.plist path for Catalyst

* Fix utf-8 Python 3 issue

* Add dynamic flag to osx script

* Rename platform to platforms, remove armv7s and i386

* Fix creation of dynamic framework on maccatalyst and macos

* Update platforms/apple/readme.md

* Add `macos_archs` flag and deprecate `archs` flag

* Allow specification of archs when generating xcframework from terminal

* Change xcframework platform argument names to match archs flag names

* Remove platforms as a concept and shadow archs flags from ios/osx .py

* Improve documentation

* Fix building of objc module on Catalyst, excluding Swift

* Clean up build folder logic a bit

* Fix framework_name flag

* Drop passthrough_args, use unknown_args instead

* minor: coding style changes

Co-authored-by: Chris Ballinger <cballinger@rightpoint.com>
---
 cmake/OpenCVGenInfoPlist.cmake                |   6 +-
 cmake/OpenCVUtils.cmake                       |   8 +-
 modules/imgcodecs/CMakeLists.txt              |   2 +-
 .../generator/templates/cmakelists.template   |  10 +-
 platforms/apple/__init__.py                   |   0
 platforms/apple/build_xcframework.py          | 123 +++++++++
 platforms/apple/cv_build_utils.py             |  65 +++++
 platforms/apple/readme.md                     |  40 +++
 platforms/ios/__init__.py                     |   0
 platforms/ios/build_framework.py              | 236 ++++++++++++------
 .../Toolchains/Toolchain-Catalyst_Xcode.cmake |   4 +
 .../Toolchains/common-ios-toolchain.cmake     |   7 +-
 platforms/osx/__init__.py                     |   0
 platforms/osx/build_framework.py              |  77 +++++-
 14 files changed, 487 insertions(+), 91 deletions(-)
 create mode 100644 platforms/apple/__init__.py
 create mode 100755 platforms/apple/build_xcframework.py
 create mode 100644 platforms/apple/cv_build_utils.py
 create mode 100644 platforms/apple/readme.md
 create mode 100644 platforms/ios/__init__.py
 create mode 100644 platforms/ios/cmake/Toolchains/Toolchain-Catalyst_Xcode.cmake
 create mode 100644 platforms/osx/__init__.py

diff --git a/cmake/OpenCVGenInfoPlist.cmake b/cmake/OpenCVGenInfoPlist.cmake
index 90dd85479f..105087907f 100644
--- a/cmake/OpenCVGenInfoPlist.cmake
+++ b/cmake/OpenCVGenInfoPlist.cmake
@@ -2,7 +2,11 @@ set(OPENCV_APPLE_BUNDLE_NAME "OpenCV")
 set(OPENCV_APPLE_BUNDLE_ID "org.opencv")
 
 if(IOS)
-  if (APPLE_FRAMEWORK AND DYNAMIC_PLIST)
+  if(MAC_CATALYST)
+    # Copy the iOS plist over to the OSX directory if building iOS library for Catalyst
+    configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.plist.in"
+                  "${CMAKE_BINARY_DIR}/osx/Info.plist")
+  elseif(APPLE_FRAMEWORK AND DYNAMIC_PLIST)
     configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.Dynamic.plist.in"
                    "${CMAKE_BINARY_DIR}/ios/Info.plist")
   else()
diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake
index c8e7fdbd93..d07babdfbe 100644
--- a/cmake/OpenCVUtils.cmake
+++ b/cmake/OpenCVUtils.cmake
@@ -1512,10 +1512,16 @@ function(ocv_add_library target)
 
     set(CMAKE_SHARED_LIBRARY_RUNTIME_C_FLAG 1)
 
+    if(IOS AND NOT MAC_CATALYST)
+      set(OPENCV_APPLE_INFO_PLIST "${CMAKE_BINARY_DIR}/ios/Info.plist")
+    else()
+      set(OPENCV_APPLE_INFO_PLIST "${CMAKE_BINARY_DIR}/osx/Info.plist")
+    endif()
+
     set_target_properties(${target} PROPERTIES
       FRAMEWORK TRUE
       MACOSX_FRAMEWORK_IDENTIFIER org.opencv
-      MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_BINARY_DIR}/ios/Info.plist
+      MACOSX_FRAMEWORK_INFO_PLIST ${OPENCV_APPLE_INFO_PLIST}
       # "current version" in semantic format in Mach-O binary file
       VERSION ${OPENCV_LIBVERSION}
       # "compatibility version" in semantic format in Mach-O binary file
diff --git a/modules/imgcodecs/CMakeLists.txt b/modules/imgcodecs/CMakeLists.txt
index 5a8faa9d05..8ae85e62c5 100644
--- a/modules/imgcodecs/CMakeLists.txt
+++ b/modules/imgcodecs/CMakeLists.txt
@@ -119,7 +119,7 @@ if(APPLE OR APPLE_FRAMEWORK)
 endif()
 if(IOS)
   list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/ios_conversions.mm)
-  list(APPEND IMGCODECS_LIBRARIES "-framework UIKit" "-framework AssetsLibrary")
+  list(APPEND IMGCODECS_LIBRARIES "-framework UIKit")
 endif()
 if(APPLE AND (NOT IOS))
   list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/macosx_conversions.mm)
diff --git a/modules/objc/generator/templates/cmakelists.template b/modules/objc/generator/templates/cmakelists.template
index 67cacbbfa4..10e9379694 100644
--- a/modules/objc/generator/templates/cmakelists.template
+++ b/modules/objc/generator/templates/cmakelists.template
@@ -13,7 +13,13 @@ set (SUPPRESS_WARNINGS_FLAGS "-Wno-incomplete-umbrella")
 set (CMAKE_CXX_FLAGS  "$${CMAKE_CXX_FLAGS} $${OBJC_COMPILE_FLAGS} $${SUPPRESS_WARNINGS_FLAGS}")
 
 # grab the files
-file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.swift" "objc/*\.modulemap")
+if(SWIFT_DISABLED)
+  message(STATUS "Swift wrapper disabled")
+  file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.modulemap")
+else()
+  enable_language(Swift)
+  file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.swift" "objc/*\.modulemap")
+endif()
 file(GLOB_RECURSE objc_headers "*\.h")
 
 add_library($framework STATIC $${objc_sources})
@@ -29,8 +35,6 @@ endforeach()
 
 install(TARGETS $framework LIBRARY DESTINATION lib)
 
-enable_language(Swift)
-
 # Additional target properties
 if (CMAKE_XCODE_BUILD_SYSTEM GREATER_EQUAL 12)
   set_target_properties($framework PROPERTIES
diff --git a/platforms/apple/__init__.py b/platforms/apple/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/platforms/apple/build_xcframework.py b/platforms/apple/build_xcframework.py
new file mode 100755
index 0000000000..669d798ae4
--- /dev/null
+++ b/platforms/apple/build_xcframework.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+"""
+This script builds OpenCV into an xcframework compatible with the platforms
+of your choice. Just run it and grab a snack; you'll be waiting a while.
+"""
+
+import sys, os, argparse, pathlib, traceback
+from cv_build_utils import execute, print_error, print_header, get_xcode_version, get_cmake_version
+
+if __name__ == "__main__":
+
+    # Check for dependencies
+    assert sys.version_info >= (3, 6), f"Python 3.6 or later is required! Current version is {sys.version_info}"
+    # Need CMake 3.18.5/3.19 or later for a Silicon-related fix to building for the iOS Simulator.
+    # See https://gitlab.kitware.com/cmake/cmake/-/issues/21425 for context.
+    assert get_cmake_version() >= (3, 18, 5), f"CMake 3.18.5 or later is required. Current version is {get_cmake_version()}"
+    # Need Xcode 12.2 for Apple Silicon support
+    assert get_xcode_version() >= (12, 2), f"Xcode 12.2 command line tools or later are required! Current version is {get_xcode_version()}. \
+    Run xcode-select to switch if you have multiple Xcode installs."
+
+    # Parse arguments
+    description = """
+        This script builds OpenCV into an xcframework supporting the Apple platforms of your choice.
+        """
+    epilog = """
+        Any arguments that are not recognized by this script are passed through to the ios/osx build_framework.py scripts.
+        """
+    parser = argparse.ArgumentParser(description=description, epilog=epilog)
+    parser.add_argument('out', metavar='OUTDIR', help='The directory where the xcframework will be created')
+    parser.add_argument('--framework_name', default='opencv2', help='Name of OpenCV xcframework (default: opencv2, will change to OpenCV in future version)')
+    parser.add_argument('--iphoneos_archs', default=None, help='select iPhoneOS target ARCHS. Default is "armv7,arm64"')
+    parser.add_argument('--iphonesimulator_archs', default=None, help='select iPhoneSimulator target ARCHS. Default is "x86_64,arm64"')
+    parser.add_argument('--macos_archs', default=None, help='Select MacOS ARCHS. Default is "x86_64,arm64"')
+    parser.add_argument('--catalyst_archs', default=None, help='Select Catalyst ARCHS. Default is "x86_64,arm64"')
+    parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored')
+
+    args, unknown_args = parser.parse_known_args()
+    if unknown_args:
+        print(f"The following args are not recognized by this script and will be passed through to the ios/osx build_framework.py scripts: {unknown_args}")
+
+    # Parse architectures from args
+    iphoneos_archs = args.iphoneos_archs
+    if not iphoneos_archs and not args.build_only_specified_archs:
+        # Supply defaults
+        iphoneos_archs = "armv7,arm64"
+    print(f'Using iPhoneOS ARCHS={iphoneos_archs}')
+
+    iphonesimulator_archs = args.iphonesimulator_archs
+    if not iphonesimulator_archs and not args.build_only_specified_archs:
+        # Supply defaults
+        iphonesimulator_archs = "x86_64,arm64"
+    print(f'Using iPhoneSimulator ARCHS={iphonesimulator_archs}')
+
+    macos_archs = args.macos_archs
+    if not macos_archs and not args.build_only_specified_archs:
+        # Supply defaults
+        macos_archs = "x86_64,arm64"
+    print(f'Using MacOS ARCHS={macos_archs}')
+
+    catalyst_archs = args.catalyst_archs
+    if not catalyst_archs and not args.build_only_specified_archs:
+        # Supply defaults
+        catalyst_archs = "x86_64,arm64"
+    print(f'Using Catalyst ARCHS={catalyst_archs}')
+
+    # Build phase
+
+    try:
+        # Build .frameworks for each platform
+        osx_script_path = os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../osx/build_framework.py')
+        ios_script_path = os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios/build_framework.py')
+
+        build_folders = []
+
+        def get_or_create_build_folder(base_dir, platform):
+            build_folder = f"./{base_dir}/{platform}".replace(" ", "\\ ")  # Escape spaces in output path
+            pathlib.Path(build_folder).mkdir(parents=True, exist_ok=True)
+            return build_folder
+
+        if iphoneos_archs:
+            build_folder = get_or_create_build_folder(args.out, "iphoneos")
+            build_folders.append(build_folder)
+            command = ["python3", ios_script_path, "--iphoneos_archs", iphoneos_archs, "--framework_name", args.framework_name, "--build_only_specified_archs", build_folder] + unknown_args
+            print_header("Building iPhoneOS frameworks")
+            print(command)
+            execute(command, cwd=os.getcwd())
+        if iphonesimulator_archs:
+            build_folder = get_or_create_build_folder(args.out, "iphonesimulator")
+            build_folders.append(build_folder)
+            command = ["python3", ios_script_path, "--iphonesimulator_archs", iphonesimulator_archs, "--framework_name", args.framework_name, "--build_only_specified_archs", build_folder] + unknown_args
+            print_header("Building iPhoneSimulator frameworks")
+            execute(command, cwd=os.getcwd())
+        if macos_archs:
+            build_folder = get_or_create_build_folder(args.out, "macos")
+            build_folders.append(build_folder)
+            command = ["python3", osx_script_path, "--macos_archs", macos_archs, "--framework_name", args.framework_name, "--build_only_specified_archs", build_folder] + unknown_args
+            print_header("Building MacOS frameworks")
+            execute(command, cwd=os.getcwd())
+        if catalyst_archs:
+            build_folder = get_or_create_build_folder(args.out, "catalyst")
+            build_folders.append(build_folder)
+            command = ["python3", osx_script_path, "--catalyst_archs", catalyst_archs, "--framework_name", args.framework_name, "--build_only_specified_archs", build_folder] + unknown_args
+            print_header("Building Catalyst frameworks")
+            execute(command, cwd=os.getcwd())
+
+        # Put all the built .frameworks together into a .xcframework
+        print_header("Building xcframework")
+        xcframework_build_command = [
+            "xcodebuild",
+            "-create-xcframework",
+            "-output",
+            f"{args.out}/{args.framework_name}.xcframework",
+        ]
+        for folder in build_folders:
+            xcframework_build_command += ["-framework", f"{folder}/{args.framework_name}.framework"]
+        execute(xcframework_build_command, cwd=os.getcwd())
+
+        print("")
+        print_header(f"Finished building {args.out}/{args.framework_name}.xcframework")
+    except Exception as e:
+        print_error(e)
+        traceback.print_exc(file=sys.stderr)
+        sys.exit(1)
diff --git a/platforms/apple/cv_build_utils.py b/platforms/apple/cv_build_utils.py
new file mode 100644
index 0000000000..d764b70fd1
--- /dev/null
+++ b/platforms/apple/cv_build_utils.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Common utilities. These should be compatible with Python 2 and 3.
+"""
+
+from __future__ import print_function
+import sys, re
+from subprocess import check_call, check_output, CalledProcessError
+
+def execute(cmd, cwd = None):
+    print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)
+    print('Executing: ' + ' '.join(cmd))
+    retcode = check_call(cmd, cwd = cwd)
+    if retcode != 0:
+        raise Exception("Child returned:", retcode)
+
+def print_header(text):
+    print("="*60)
+    print(text)
+    print("="*60)
+
+def print_error(text):
+    print("="*60, file=sys.stderr)
+    print("ERROR: %s" % text, file=sys.stderr)
+    print("="*60, file=sys.stderr)
+
+def get_xcode_major():
+    ret = check_output(["xcodebuild", "-version"]).decode('utf-8')
+    m = re.match(r'Xcode\s+(\d+)\..*', ret, flags=re.IGNORECASE)
+    if m:
+        return int(m.group(1))
+    else:
+        raise Exception("Failed to parse Xcode version")
+
+def get_xcode_version():
+    """
+    Returns the major and minor version of the current Xcode
+    command line tools as a tuple of (major, minor)
+    """
+    ret = check_output(["xcodebuild", "-version"]).decode('utf-8')
+    m = re.match(r'Xcode\s+(\d+)\.(\d+)', ret, flags=re.IGNORECASE)
+    if m:
+        return (int(m.group(1)), int(m.group(2)))
+    else:
+        raise Exception("Failed to parse Xcode version")
+
+def get_xcode_setting(var, projectdir):
+    ret = check_output(["xcodebuild", "-showBuildSettings"], cwd = projectdir).decode('utf-8')
+    m = re.search("\s" + var + " = (.*)", ret)
+    if m:
+        return m.group(1)
+    else:
+        raise Exception("Failed to parse Xcode settings")
+
+def get_cmake_version():
+    """
+    Returns the major and minor version of the current CMake
+    command line tools as a tuple of (major, minor, revision)
+    """
+    ret = check_output(["cmake", "--version"]).decode('utf-8')
+    m = re.match(r'cmake\sversion\s+(\d+)\.(\d+).(\d+)', ret, flags=re.IGNORECASE)
+    if m:
+        return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
+    else:
+        raise Exception("Failed to parse CMake version")
diff --git a/platforms/apple/readme.md b/platforms/apple/readme.md
new file mode 100644
index 0000000000..f12446c060
--- /dev/null
+++ b/platforms/apple/readme.md
@@ -0,0 +1,40 @@
+# Building for Apple Platforms
+
+build_xcframework.py creates an xcframework supporting a variety of Apple platforms.
+
+You'll need the following to run these steps:
+- MacOS 10.15 or later
+- Python 3.6 or later
+- CMake 3.18.5/3.19.0 or later (make sure the `cmake` command is available on your PATH)
+- Xcode 12.2 or later (and its command line tools)
+
+You can then run build_xcframework.py, as below:
+```
+cd ~/<my_working_directory>
+python opencv/platforms/apple/build_xcframework.py ./build_xcframework
+```
+
+Grab a coffee, because you'll be here for a while. By default this builds OpenCV for 8 architectures across 4 platforms:
+
+- iOS (`--iphoneos_archs`): arm64, armv7
+- iOS Simulator (`--iphonesimulator_archs`): x86_64, arm64
+- macOS (`--macos_archs`): x86_64, arm64
+- Mac Catalyst (`--catalyst_archs`): x86_64, arm64
+
+If everything's fine, you will eventually get `opencv2.xcframework` in the output directory.
+
+The script has some configuration options to exclude platforms and architectures you don't want to build for. Use the `--help` flag for more information.
+
+## Examples
+
+You may override the defaults by specifying a value for any of the `*_archs` flags. For example, if you want to build for arm64 on every platform, you can do this:
+
+```
+python build_xcframework.py somedir --iphoneos_archs arm64 --iphonesimulator_archs arm64 --macos_archs arm64 --catalyst_archs arm64
+```
+
+If you want to build only for certain platforms, you can supply the `--build_only_specified_archs` flag, which makes the script build only the archs you directly ask for. For example, to build only for Catalyst, you can do this:
+
+```
+python build_xcframework.py somedir --catalyst_archs x86_64,arm64 --build_only_specified_archs
+```
diff --git a/platforms/ios/__init__.py b/platforms/ios/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py
index 5965cd0a96..b7936ceea7 100755
--- a/platforms/ios/build_framework.py
+++ b/platforms/ios/build_framework.py
@@ -32,34 +32,14 @@ Adding --dynamic parameter will build {framework_name}.framework as App Store dy
 """
 
 from __future__ import print_function, unicode_literals
-import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
+import glob, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
 from subprocess import check_call, check_output, CalledProcessError
 from distutils.dir_util import copy_tree
 
-IPHONEOS_DEPLOYMENT_TARGET='9.0'  # default, can be changed via command line options or environment variable
-
-def execute(cmd, cwd = None):
-    print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)
-    print('Executing: ' + ' '.join(cmd))
-    retcode = check_call(cmd, cwd = cwd)
-    if retcode != 0:
-        raise Exception("Child returned:", retcode)
-
-def getXCodeMajor():
-    ret = check_output(["xcodebuild", "-version"]).decode('utf-8')
-    m = re.match(r'Xcode\s+(\d+)\..*', ret, flags=re.IGNORECASE)
-    if m:
-        return int(m.group(1))
-    else:
-        raise Exception("Failed to parse Xcode version")
+sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../apple'))
+from cv_build_utils import execute, print_error, get_xcode_major, get_xcode_setting
 
-def getXCodeSetting(var, projectdir):
-    ret = check_output(["xcodebuild", "-showBuildSettings"], cwd = projectdir)
-    m = re.search("\s" + var + " = (.*)", ret)
-    if m:
-        return m.group(1)
-    else:
-        raise Exception("Failed to parse Xcode settings")
+IPHONEOS_DEPLOYMENT_TARGET='9.0'  # default, can be changed via command line options or environment variable
 
 class Builder:
     def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, disable, enablenonfree, targets, debug, debug_info, framework_name, run_tests, build_docs):
@@ -99,7 +79,7 @@ class Builder:
         main_working_dir = os.path.join(outdir, "build")
         dirs = []
 
-        xcode_ver = getXCodeMajor()
+        xcode_ver = get_xcode_major()
 
         # build each architecture separately
         alltargets = []
@@ -119,6 +99,30 @@ class Builder:
             if xcode_ver >= 7 and target[1] == 'iPhoneOS' and self.bitcodedisabled == False:
                 cmake_flags.append("-DCMAKE_C_FLAGS=-fembed-bitcode")
                 cmake_flags.append("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
+            if xcode_ver >= 7 and target[1] == 'Catalyst':
+                sdk_path = check_output(["xcodebuild", "-version", "-sdk", "macosx", "Path"]).decode('utf-8').rstrip()
+                c_flags = [
+                    "-target %s-apple-ios13.0-macabi" % target[0],  # e.g. x86_64-apple-ios13.2-macabi # -mmacosx-version-min=10.15
+                    "-isysroot %s" % sdk_path,
+                    "-iframework %s/System/iOSSupport/System/Library/Frameworks" % sdk_path,
+                    "-isystem %s/System/iOSSupport/usr/include" % sdk_path,
+                ]
+                if self.bitcodedisabled == False:
+                    c_flags.append("-fembed-bitcode")
+                cmake_flags.append("-DCMAKE_C_FLAGS=" + " ".join(c_flags))
+                cmake_flags.append("-DCMAKE_CXX_FLAGS=" + " ".join(c_flags))
+                cmake_flags.append("-DCMAKE_EXE_LINKER_FLAGS=" + " ".join(c_flags))
+
+                # CMake cannot compile Swift for Catalyst https://gitlab.kitware.com/cmake/cmake/-/issues/21436
+                # cmake_flags.append("-DCMAKE_Swift_FLAGS=" + " " + target_flag)
+                cmake_flags.append("-DSWIFT_DISABLED=1")
+
+                cmake_flags.append("-DIOS=1")  # Build the iOS codebase
+                cmake_flags.append("-DMAC_CATALYST=1")  # Set a flag for Mac Catalyst, just in case we need it
+                cmake_flags.append("-DWITH_OPENCL=OFF")  # Disable OpenCL; it isn't compatible with iOS
+                cmake_flags.append("-DCMAKE_OSX_SYSROOT=%s" % sdk_path)
+                cmake_flags.append("-DCMAKE_CXX_COMPILER_WORKS=TRUE")
+                cmake_flags.append("-DCMAKE_C_COMPILER_WORKS=TRUE")
             self.buildOne(target[0], target[1], main_build_dir, cmake_flags)
 
             if not self.dynamic:
@@ -128,10 +132,10 @@ class Builder:
         self.makeFramework(outdir, dirs)
         if self.build_objc_wrapper:
             if self.run_tests:
-                check_call([sys.argv[0].replace("build_framework", "run_tests"), "--framework_dir=" + outdir, "--framework_name=" + self.framework_name, dirs[0] +  "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget())])
+                check_call([sys.argv[0].replace("build_framework", "run_tests"), "--framework_dir=" + outdir, "--framework_name=" + self.framework_name, dirs[0] +  "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget(target[1]))])
             else:
                 print("To run tests call:")
-                print(sys.argv[0].replace("build_framework", "run_tests") + " --framework_dir=" + outdir + " --framework_name=" + self.framework_name + " " + dirs[0] +  "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget()))
+                print(sys.argv[0].replace("build_framework", "run_tests") + " --framework_dir=" + outdir + " --framework_name=" + self.framework_name + " " + dirs[0] +  "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget(target[1])))
             if self.build_docs:
                 check_call([sys.argv[0].replace("build_framework", "build_docs"), dirs[0] + "/modules/objc/framework_build"])
                 doc_path = os.path.join(dirs[0], "modules", "objc", "doc_build", "docs")
@@ -147,9 +151,7 @@ class Builder:
         try:
             self._build(outdir)
         except Exception as e:
-            print("="*60, file=sys.stderr)
-            print("ERROR: %s" % e, file=sys.stderr)
-            print("="*60, file=sys.stderr)
+            print_error(e)
             traceback.print_exc(file=sys.stderr)
             sys.exit(1)
 
@@ -170,17 +172,19 @@ class Builder:
             "-DOPENCV_INCLUDE_INSTALL_PATH=include",
             "-DOPENCV_3P_LIB_INSTALL_PATH=lib/3rdparty",
             "-DFRAMEWORK_NAME=%s" % self.framework_name,
-        ] + ([
-            "-DBUILD_SHARED_LIBS=ON",
-            "-DCMAKE_MACOSX_BUNDLE=ON",
-            "-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED=NO",
-        ] if self.dynamic and not self.build_objc_wrapper else []) + ([
-            "-DDYNAMIC_PLIST=ON"
-        ] if self.dynamic else []) + ([
-            "-DOPENCV_ENABLE_NONFREE=ON"
-        ] if self.enablenonfree else []) + ([
-            "-DBUILD_WITH_DEBUG_INFO=ON"
-        ] if self.debug_info else [])
+        ]
+        if self.dynamic:
+            args += [
+                "-DDYNAMIC_PLIST=ON"
+            ]
+        if self.enablenonfree:
+            args += [
+                "-DOPENCV_ENABLE_NONFREE=ON"
+            ]
+        if self.debug_info:
+            args += [
+                "-DBUILD_WITH_DEBUG_INFO=ON"
+            ]
 
         if len(self.exclude) > 0:
             args += ["-DBUILD_opencv_%s=OFF" % m for m in self.exclude]
@@ -202,21 +206,18 @@ class Builder:
         buildcmd += [
             "IPHONEOS_DEPLOYMENT_TARGET=" + os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
             "ARCHS=%s" % arch,
+            "-sdk", target.lower(),
+            "-configuration", self.getConfiguration(),
+            "-parallelizeTargets",
+            "-jobs", str(multiprocessing.cpu_count()),
         ]
 
-        buildcmd += [
-                "-sdk", target.lower(),
-                "-configuration", self.getConfiguration(),
-                "-parallelizeTargets",
-                "-jobs", str(multiprocessing.cpu_count()),
-            ]
-
         return buildcmd
 
     def getInfoPlist(self, builddirs):
         return os.path.join(builddirs[0], "ios", "Info.plist")
 
-    def getObjcTarget(self):
+    def getObjcTarget(self, target):
         # Obj-C generation target
         return 'ios'
 
@@ -226,9 +227,28 @@ class Builder:
             (["-DCMAKE_TOOLCHAIN_FILE=%s" % toolchain] if toolchain is not None else [])
         if target.lower().startswith("iphoneos"):
             cmakecmd.append("-DCPU_BASELINE=DETECT")
+        if target.lower().startswith("iphonesimulator"):
+            build_arch = check_output(["uname", "-m"]).decode('utf-8').rstrip()
+            if build_arch != arch:
+                print("build_arch (%s) != arch (%s)" % (build_arch, arch))
+                cmakecmd.append("-DCMAKE_SYSTEM_PROCESSOR=" + arch)
+                cmakecmd.append("-DCMAKE_OSX_ARCHITECTURES=" + arch)
+                cmakecmd.append("-DCPU_BASELINE=DETECT")
+                cmakecmd.append("-DCMAKE_CROSSCOMPILING=ON")
+                cmakecmd.append("-DOPENCV_WORKAROUND_CMAKE_20989=ON")
+        if target.lower() == "catalyst":
+            build_arch = check_output(["uname", "-m"]).decode('utf-8').rstrip()
+            if build_arch != arch:
+                print("build_arch (%s) != arch (%s)" % (build_arch, arch))
+                cmakecmd.append("-DCMAKE_SYSTEM_PROCESSOR=" + arch)
+                cmakecmd.append("-DCMAKE_OSX_ARCHITECTURES=" + arch)
+                cmakecmd.append("-DCPU_BASELINE=DETECT")
+                cmakecmd.append("-DCMAKE_CROSSCOMPILING=ON")
+                cmakecmd.append("-DOPENCV_WORKAROUND_CMAKE_20989=ON")
         if target.lower() == "macosx":
-            build_arch = check_output(["uname", "-m"]).rstrip()
+            build_arch = check_output(["uname", "-m"]).decode('utf-8').rstrip()
             if build_arch != arch:
+                print("build_arch (%s) != arch (%s)" % (build_arch, arch))
                 cmakecmd.append("-DCMAKE_SYSTEM_PROCESSOR=" + arch)
                 cmakecmd.append("-DCMAKE_OSX_ARCHITECTURES=" + arch)
                 cmakecmd.append("-DCPU_BASELINE=DETECT")
@@ -249,7 +269,17 @@ class Builder:
         #cmakecmd.append(self.opencv)
         #cmakecmd.extend(cmakeargs)
         cmakecmd = self.makeCMakeCmd(arch, target, self.opencv, cmakeargs)
+        print("")
+        print("=================================")
+        print("CMake")
+        print("=================================")
+        print("")
         execute(cmakecmd, cwd = builddir)
+        print("")
+        print("=================================")
+        print("Xcodebuild")
+        print("=================================")
+        print("")
 
         # Clean and build
         clean_dir = os.path.join(builddir, "install")
@@ -259,7 +289,9 @@ class Builder:
         execute(buildcmd + ["-target", "ALL_BUILD", "build"], cwd = builddir)
         execute(["cmake", "-DBUILD_TYPE=%s" % self.getConfiguration(), "-P", "cmake_install.cmake"], cwd = builddir)
         if self.build_objc_wrapper:
-            cmakecmd = self.makeCMakeCmd(arch, target, builddir + "/modules/objc_bindings_generator/{}/gen".format(self.getObjcTarget()), cmakeargs)
+            cmakecmd = self.makeCMakeCmd(arch, target, builddir + "/modules/objc_bindings_generator/{}/gen".format(self.getObjcTarget(target)), cmakeargs)
+            # cmakecmd.append("-DCMAKE_Swift_FLAGS=" + "-target x86_64-apple-ios13.0-macabi")
+            # cmakecmd.append("-DCMAKE_EXE_LINKER_FLAGS=" + "-target x86_64-apple-ios13.0-macabi")
             cmakecmd.append("-DBUILD_ROOT=%s" % builddir)
             cmakecmd.append("-DCMAKE_INSTALL_NAME_TOOL=install_name_tool")
             cmakecmd.append("--no-warn-unused-cli")
@@ -280,24 +312,51 @@ class Builder:
     def makeDynamicLib(self, builddir):
         target = builddir[(builddir.rfind("build-") + 6):]
         target_platform = target[(target.rfind("-") + 1):]
-        is_device = target_platform == "iphoneos"
-        res = os.path.join(builddir, "install", "lib", self.framework_name + ".framework", self.framework_name)
+        is_device = target_platform == "iphoneos" or target_platform == "catalyst"
+        framework_dir = os.path.join(builddir, "install", "lib", self.framework_name + ".framework")
+        if not os.path.exists(framework_dir):
+            os.makedirs(framework_dir)
+        res = os.path.join(framework_dir, self.framework_name)
         libs = glob.glob(os.path.join(builddir, "install", "lib", "*.a"))
-        module = [os.path.join(builddir, "lib", self.getConfiguration(), self.framework_name + ".framework", self.framework_name)]
+        if self.build_objc_wrapper:
+            module = [os.path.join(builddir, "lib", self.getConfiguration(), self.framework_name + ".framework", self.framework_name)]
+        else:
+            module = []
 
         libs3 = glob.glob(os.path.join(builddir, "install", "lib", "3rdparty", "*.a"))
 
-        link_target = target[:target.find("-")] + "-apple-ios" + os.environ['IPHONEOS_DEPLOYMENT_TARGET'] + ("-simulator" if target.endswith("simulator") else "")
+        if os.environ.get('IPHONEOS_DEPLOYMENT_TARGET'):
+            link_target = target[:target.find("-")] + "-apple-ios" + os.environ['IPHONEOS_DEPLOYMENT_TARGET'] + ("-simulator" if target.endswith("simulator") else "")
+        else:
+            if target_platform == "catalyst":
+                link_target = "%s-apple-ios13.0-macabi" % target[:target.find("-")]
+            else:
+                link_target = "%s-apple-darwin" % target[:target.find("-")]
         bitcode_flags = ["-fembed-bitcode", "-Xlinker", "-bitcode_verify"] if is_device and not self.bitcodedisabled else []
-        toolchain_dir = getXCodeSetting("TOOLCHAIN_DIR", builddir)
+        toolchain_dir = get_xcode_setting("TOOLCHAIN_DIR", builddir)
+        sdk_dir = get_xcode_setting("SDK_DIR", builddir)
+        framework_options = []
         swift_link_dirs = ["-L" + toolchain_dir + "/usr/lib/swift/" + target_platform, "-L/usr/lib/swift"]
-        sdk_dir = getXCodeSetting("SDK_DIR", builddir)
+        if target_platform == "catalyst":
+            swift_link_dirs = ["-L" + toolchain_dir + "/usr/lib/swift/" + "maccatalyst", "-L/usr/lib/swift"]
+            framework_options = [
+                "-iframework", "%s/System/iOSSupport/System/Library/Frameworks" % sdk_dir,
+                "-framework", "AVFoundation", "-framework", "UIKit", "-framework", "CoreGraphics",
+                "-framework", "CoreImage", "-framework", "CoreMedia", "-framework", "QuartzCore",
+            ]
+        elif target_platform == "macosx":
+            framework_options = [
+                "-framework", "AVFoundation", "-framework", "AppKit", "-framework", "CoreGraphics",
+                "-framework", "CoreImage", "-framework", "CoreMedia", "-framework", "QuartzCore",
+                "-framework", "Accelerate", "-framework", "OpenCL",
+            ]
         execute([
             "clang++",
             "-Xlinker", "-rpath",
             "-Xlinker", "/usr/lib/swift",
             "-target", link_target,
-            "-isysroot", sdk_dir,
+            "-isysroot", sdk_dir,] +
+            framework_options + [
             "-install_name", "@rpath/" + self.framework_name + ".framework/" + self.framework_name,
             "-dynamiclib", "-dead_strip", "-fobjc-link-runtime", "-all_load",
             "-o", res
@@ -402,6 +461,8 @@ class iOSBuilder(Builder):
     def copy_samples(self, outdir):
         print('Copying samples to: ' + outdir)
         samples_dir = os.path.join(outdir, "samples")
+        if os.path.exists(samples_dir):
+            shutil.rmtree(samples_dir)
         shutil.copytree(os.path.join(self.opencv, "samples", "swift", "ios"), samples_dir)
         if self.framework_name != "OpenCV":
             for dirname, dirs, files in os.walk(samples_dir):
@@ -430,8 +491,9 @@ if __name__ == "__main__":
     parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
     parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)')
     parser.add_argument('--iphoneos_deployment_target', default=os.environ.get('IPHONEOS_DEPLOYMENT_TARGET', IPHONEOS_DEPLOYMENT_TARGET), help='specify IPHONEOS_DEPLOYMENT_TARGET')
-    parser.add_argument('--iphoneos_archs', default='armv7,armv7s,arm64', help='select iPhoneOS target ARCHS')
-    parser.add_argument('--iphonesimulator_archs', default='i386,x86_64', help='select iPhoneSimulator target ARCHS')
+    parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored')
+    parser.add_argument('--iphoneos_archs', default=None, help='select iPhoneOS target ARCHS. Default is "armv7,armv7s,arm64"')
+    parser.add_argument('--iphonesimulator_archs', default=None, help='select iPhoneSimulator target ARCHS. Default is "i386,x86_64"')
     parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
     parser.add_argument('--debug', default=False, dest='debug', action='store_true', help='Build "Debug" binaries (disabled by default)')
     parser.add_argument('--debug_info', default=False, dest='debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
@@ -440,26 +502,58 @@ if __name__ == "__main__":
     parser.add_argument('--run_tests', default=False, dest='run_tests', action='store_true', help='Run tests')
     parser.add_argument('--build_docs', default=False, dest='build_docs', action='store_true', help='Build docs')
 
-    args = parser.parse_args()
+    args, unknown_args = parser.parse_known_args()
+    if unknown_args:
+        print("The following args are not recognized and will not be used: %s" % unknown_args)
 
     os.environ['IPHONEOS_DEPLOYMENT_TARGET'] = args.iphoneos_deployment_target
     print('Using IPHONEOS_DEPLOYMENT_TARGET=' + os.environ['IPHONEOS_DEPLOYMENT_TARGET'])
-    iphoneos_archs = args.iphoneos_archs.split(',')
+
+    iphoneos_archs = None
+    if args.iphoneos_archs:
+        iphoneos_archs = args.iphoneos_archs.split(',')
+    elif not args.build_only_specified_archs:
+        # Supply defaults
+        iphoneos_archs = ["armv7", "armv7s", "arm64"]
     print('Using iPhoneOS ARCHS=' + str(iphoneos_archs))
-    iphonesimulator_archs = args.iphonesimulator_archs.split(',')
+
+    iphonesimulator_archs = None
+    if args.iphonesimulator_archs:
+        iphonesimulator_archs = args.iphonesimulator_archs.split(',')
+    elif not args.build_only_specified_archs:
+        # Supply defaults
+        iphonesimulator_archs = ["i386", "x86_64"]
     print('Using iPhoneSimulator ARCHS=' + str(iphonesimulator_archs))
+
+    # Prevent the build from happening if the same architecture is specified for multiple platforms.
+    # When `lipo` is run to stitch the frameworks together into a fat framework, it'll fail, so it's
+    # better to stop here while we're ahead.
+    if iphoneos_archs and iphonesimulator_archs:
+        duplicate_archs = set(iphoneos_archs).intersection(iphonesimulator_archs)
+        if duplicate_archs:
+            print_error("Cannot have the same architecture for multiple platforms in a fat framework! Consider using build_xcframework.py in the apple platform folder instead. Duplicate archs are %s" % duplicate_archs)
+            sys.exit(1)
+
     if args.legacy_build:
         args.framework_name = "opencv2"
         if not "objc" in args.without:
             args.without.append("objc")
 
-    b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.disable, args.enablenonfree,
-        [
-            (iphoneos_archs, "iPhoneOS"),
-        ] if os.environ.get('BUILD_PRECOMMIT', None) else
-        [
-            (iphoneos_archs, "iPhoneOS"),
-            (iphonesimulator_archs, "iPhoneSimulator"),
-        ], args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs)
+    targets = []
+    if os.environ.get('BUILD_PRECOMMIT', None):
+        if not iphoneos_archs:
+            print_error("--iphoneos_archs must have at least one value")
+            sys.exit(1)
+        targets.append((iphoneos_archs, "iPhoneOS"))
+    else:
+        if not iphoneos_archs and not iphonesimulator_archs:
+            print_error("--iphoneos_archs and --iphonesimulator_archs are undefined; nothing will be built.")
+            sys.exit(1)
+        if iphoneos_archs:
+            targets.append((iphoneos_archs, "iPhoneOS"))
+        if iphonesimulator_archs:
+            targets.append((iphonesimulator_archs, "iPhoneSimulator"))
+
+    b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs)
 
     b.build(args.out)
diff --git a/platforms/ios/cmake/Toolchains/Toolchain-Catalyst_Xcode.cmake b/platforms/ios/cmake/Toolchains/Toolchain-Catalyst_Xcode.cmake
new file mode 100644
index 0000000000..a22c10a7af
--- /dev/null
+++ b/platforms/ios/cmake/Toolchains/Toolchain-Catalyst_Xcode.cmake
@@ -0,0 +1,4 @@
+set(MAC_CATALYST TRUE)
+message(STATUS "Setting up Catalyst toolchain for IOS_ARCH='${IOS_ARCH}'")
+include(${CMAKE_CURRENT_LIST_DIR}/common-ios-toolchain.cmake)
+message(STATUS "Catalyst toolchain loaded")
diff --git a/platforms/ios/cmake/Toolchains/common-ios-toolchain.cmake b/platforms/ios/cmake/Toolchains/common-ios-toolchain.cmake
index 13aea357f1..4cbe4f1729 100644
--- a/platforms/ios/cmake/Toolchains/common-ios-toolchain.cmake
+++ b/platforms/ios/cmake/Toolchains/common-ios-toolchain.cmake
@@ -79,8 +79,11 @@ endif()
 if(NOT DEFINED CMAKE_OSX_SYSROOT)
   if(IPHONEOS)
     set(CMAKE_OSX_SYSROOT "iphoneos")
-  else()
+  elseif(IPHONESIMULATOR)
     set(CMAKE_OSX_SYSROOT "iphonesimulator")
+  elseif(MAC_CATALYST)
+    # Use MacOS SDK for Catalyst builds
+    set(CMAKE_OSX_SYSROOT "macosx")
   endif()
 endif()
 set(CMAKE_MACOSX_BUNDLE YES)
@@ -90,7 +93,7 @@ if(APPLE_FRAMEWORK AND NOT BUILD_SHARED_LIBS)
   set(CMAKE_OSX_ARCHITECTURES "${IOS_ARCH}" CACHE INTERNAL "Build architecture for iOS" FORCE)
 endif()
 
-if(NOT DEFINED IPHONEOS_DEPLOYMENT_TARGET)
+if(NOT DEFINED IPHONEOS_DEPLOYMENT_TARGET AND NOT MAC_CATALYST)
   if(NOT DEFINED ENV{IPHONEOS_DEPLOYMENT_TARGET})
     message(FATAL_ERROR "IPHONEOS_DEPLOYMENT_TARGET is not specified")
   endif()
diff --git a/platforms/osx/__init__.py b/platforms/osx/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/platforms/osx/build_framework.py b/platforms/osx/build_framework.py
index de13e665fa..95c8268454 100755
--- a/platforms/osx/build_framework.py
+++ b/platforms/osx/build_framework.py
@@ -9,14 +9,19 @@ import os, os.path, sys, argparse, traceback, multiprocessing
 # import common code
 sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios'))
 from build_framework import Builder
+sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../apple'))
+from cv_build_utils import print_error
 
 MACOSX_DEPLOYMENT_TARGET='10.12'  # default, can be changed via command line options or environment variable
 
 class OSXBuilder(Builder):
 
-    def getObjcTarget(self):
+    def getObjcTarget(self, target):
         # Obj-C generation target
-        return 'osx'
+        if target == "Catalyst":
+            return 'ios'
+        else:
+            return 'osx'
 
     def getToolchain(self, arch, target):
         return None
@@ -26,11 +31,21 @@ class OSXBuilder(Builder):
             "xcodebuild",
             "MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'],
             "ARCHS=%s" % arch,
-            "-sdk", target.lower(),
+            "-sdk", "macosx" if target == "Catalyst" else target.lower(),
             "-configuration", "Debug" if self.debug else "Release",
             "-parallelizeTargets",
             "-jobs", str(multiprocessing.cpu_count())
         ]
+
+        if target == "Catalyst":
+            buildcmd.append("-destination 'platform=macOS,arch=%s,variant=Mac Catalyst'" % arch)
+            buildcmd.append("-UseModernBuildSystem=YES")
+            buildcmd.append("SKIP_INSTALL=NO")
+            buildcmd.append("BUILD_LIBRARY_FOR_DISTRIBUTION=YES")
+            buildcmd.append("TARGETED_DEVICE_FAMILY=\"1,2\"")
+            buildcmd.append("SDKROOT=iphoneos")
+            buildcmd.append("SUPPORTS_MAC_CATALYST=YES")
+
         return buildcmd
 
     def getInfoPlist(self, builddirs):
@@ -45,30 +60,68 @@ if __name__ == "__main__":
     parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
     parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
     parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
+    parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
     parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
     parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET')
-    parser.add_argument('--archs', default='x86_64', help='Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later)')
+    parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored')
+    parser.add_argument('--archs', default=None, help='(Deprecated! Prefer --macos_archs instead.) Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later). Default is "x86_64".')
+    parser.add_argument('--macos_archs', default=None, help='Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later). Default is "x86_64"')
+    parser.add_argument('--catalyst_archs', default=None, help='Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later). Default is None')
     parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)')
     parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
-    parser.add_argument('--framework_name', default='opencv2', dest='framework_name', action='store_true', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)')
+    parser.add_argument('--framework_name', default='opencv2', dest='framework_name', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)')
     parser.add_argument('--legacy_build', default=False, dest='legacy_build', action='store_true', help='Build legacy framework (default: False, equivalent to "--framework_name=opencv2 --without=objc")')
     parser.add_argument('--run_tests', default=False, dest='run_tests', action='store_true', help='Run tests')
     parser.add_argument('--build_docs', default=False, dest='build_docs', action='store_true', help='Build docs')
 
-    args = parser.parse_args()
+    args, unknown_args = parser.parse_known_args()
+    if unknown_args:
+        print("The following args are not recognized and will not be used: %s" % unknown_args)
 
     os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target
     print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET'])
-    archs = args.archs.split(',')
-    print('Using ARCHS=' + str(archs))
+
+    macos_archs = None
+    if args.archs:
+        # The archs flag is replaced by macos_archs. If the user specifies archs,
+        # treat it as if the user specified the macos_archs flag instead.
+        args.macos_archs = args.archs
+        print("--archs is deprecated! Prefer --macos_archs instead.")
+    if args.macos_archs:
+        macos_archs = args.macos_archs.split(',')
+    elif not args.build_only_specified_archs:
+        # Supply defaults
+        macos_archs = ["x86_64"]
+    print('Using MacOS ARCHS=' + str(macos_archs))
+
+    catalyst_archs = None
+    if args.catalyst_archs:
+        catalyst_archs = args.catalyst_archs.split(',')
+    # TODO: To avoid breaking existing CI, catalyst_archs has no defaults. When we can make a breaking change, this should specify a default arch.
+    print('Using Catalyst ARCHS=' + str(catalyst_archs))
+
+    # Prevent the build from happening if the same architecture is specified for multiple platforms.
+    # When `lipo` is run to stitch the frameworks together into a fat framework, it'll fail, so it's
+    # better to stop here while we're ahead.
+    if macos_archs and catalyst_archs:
+        duplicate_archs = set(macos_archs).intersection(catalyst_archs)
+        if duplicate_archs:
+            print_error("Cannot have the same architecture for multiple platforms in a fat framework! Consider using build_xcframework.py in the apple platform folder instead. Duplicate archs are %s" % duplicate_archs)
+            sys.exit(1)
 
     if args.legacy_build:
         args.framework_name = "opencv2"
         if not "objc" in args.without:
             args.without.append("objc")
 
-    b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable, args.enablenonfree,
-        [
-            (archs, "MacOSX")
-        ], args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs)
+    targets = []
+    if not macos_archs and not catalyst_archs:
+        print_error("--macos_archs and --catalyst_archs are undefined; nothing will be built.")
+        sys.exit(1)
+    if macos_archs:
+        targets.append((macos_archs, "MacOSX"))
+    if catalyst_archs:
+        targets.append((catalyst_archs, "Catalyst"))
+
+    b = OSXBuilder(args.opencv, args.contrib, args.dynamic, True, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs)
     b.build(args.out)

From 0800f6f91b0806bded8eb279aff93ba5d40847f2 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Tue, 24 Nov 2020 22:26:10 +0000
Subject: [PATCH 143/152] videoio: add missing getCaptureDomain() methods

---
 modules/videoio/src/cap_openni.cpp  | 1 +
 modules/videoio/src/cap_openni2.cpp | 1 +
 2 files changed, 2 insertions(+)

diff --git a/modules/videoio/src/cap_openni.cpp b/modules/videoio/src/cap_openni.cpp
index e4dbea80d7..1281dc2f99 100644
--- a/modules/videoio/src/cap_openni.cpp
+++ b/modules/videoio/src/cap_openni.cpp
@@ -311,6 +311,7 @@ public:
     virtual bool setProperty(int probIdx, double propVal) CV_OVERRIDE;
     virtual bool grabFrame() CV_OVERRIDE;
     virtual IplImage* retrieveFrame(int outputType) CV_OVERRIDE;
+    virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI; }
 
     bool isOpened() const;
 
diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp
index 926a004196..fa5a2fd699 100644
--- a/modules/videoio/src/cap_openni2.cpp
+++ b/modules/videoio/src/cap_openni2.cpp
@@ -119,6 +119,7 @@ public:
     virtual bool setProperty(int probIdx, double propVal) CV_OVERRIDE;
     virtual bool grabFrame() CV_OVERRIDE;
     virtual IplImage* retrieveFrame(int outputType) CV_OVERRIDE;
+    virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI2; }
 
     bool isOpened() const;
 

From c08e38acd017f6c73a1657e5e12b1609d3a80334 Mon Sep 17 00:00:00 2001
From: Dale Phurrough <dale@hidale.com>
Date: Wed, 25 Nov 2020 01:53:41 +0100
Subject: [PATCH 144/152] fix missing addref() in ocl::Context::create(str)

- fix https://github.com/opencv/opencv/issues/18906
- unable to add related test cases as there is
  no public access to Context::Impl refcounts
---
 modules/core/src/ocl.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp
index a9bd974b9a..44ee8f9c59 100644
--- a/modules/core/src/ocl.cpp
+++ b/modules/core/src/ocl.cpp
@@ -2437,6 +2437,7 @@ public:
         if (impl)
         {
             CV_LOG_INFO(NULL, "OpenCL: reuse context@" << impl->contextId << " for configuration: " << configuration)
+            impl->addref();
             return impl;
         }
 

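The one-line fix above is easy to misread, so here is a minimal, self-contained sketch of the refcounting contract it restores (the `Impl`/`create` names below are hypothetical, not OpenCV's real `ocl::Context::Impl`): a cache that hands out a shared object must bump the refcount on every cache hit, otherwise the second owner's release() destroys an object the first owner still uses.

```
// Sketch only: a configuration cache of refcounted objects.
#include <cassert>
#include <map>
#include <string>

struct Impl {
    int refcount = 1;                 // the creator holds the first reference
    void addref()  { ++refcount; }
    void release() { if (--refcount == 0) delete this; }
};

static std::map<std::string, Impl*> cache;

Impl* create(const std::string& configuration) {
    auto it = cache.find(configuration);
    if (it != cache.end()) {
        it->second->addref();         // the fix: every handed-out owner is counted
        return it->second;
    }
    Impl* impl = new Impl();
    cache[configuration] = impl;      // note: this sketch never prunes the cache
    return impl;
}

int main() {
    Impl* a = create("gpu");
    Impl* b = create("gpu");          // cache hit: refcount must become 2
    assert(a == b && a->refcount == 2);
    b->release();                     // refcount back to 1, 'a' is still valid
    a->release();                     // refcount 0, object destroyed
    return 0;
}
```

Without the addref() on the cache hit, the two release() calls above would double-delete the shared object.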
From f28895cd6bcade173a9765afb08d353e440c3fbf Mon Sep 17 00:00:00 2001
From: Gabriel Nascarella Hishida <gnhn19@inf.ufpr.br>
Date: Tue, 24 Nov 2020 22:14:55 -0300
Subject: [PATCH 145/152] doc: Fix example code using deprecated xrange

xrange was removed and doesn't exist in Python 3; range() works just the same.
---
 .../py_calib3d/py_calibration/py_calibration.markdown           | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown
index e337999efd..bba7b90b9f 100644
--- a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown
+++ b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown
@@ -209,7 +209,7 @@ find the average error, we calculate the arithmetical mean of the errors calcula
 calibration images.
 @code{.py}
 mean_error = 0
-for i in xrange(len(objpoints)):
+for i in range(len(objpoints)):
     imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
     error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)
     mean_error += error

From 387a76ba598fa1e177f8d79a2760ec75a4e9b0e1 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Wed, 25 Nov 2020 12:56:12 +0000
Subject: [PATCH 146/152] build: xcode 12 support, cmake fixes

---
 CMakeLists.txt                    |  5 +++++
 cmake/OpenCVCompilerOptions.cmake |  2 +-
 cmake/OpenCVFindLibsGrfmt.cmake   | 36 ++++++++++++++++---------------
 platforms/ios/build_framework.py  |  2 +-
 4 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d143a7aeaa..f1b5423871 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -32,6 +32,11 @@ endif()
 #
 # Configure CMake policies
 #
+
+if(POLICY CMP0025)
+  cmake_policy(SET CMP0025 NEW)  # CMAKE_CXX_COMPILER_ID=AppleClang
+endif()
+
 if(POLICY CMP0026)
   cmake_policy(SET CMP0026 NEW)
 endif()
diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake
index 9ac671dd34..21201c12dd 100644
--- a/cmake/OpenCVCompilerOptions.cmake
+++ b/cmake/OpenCVCompilerOptions.cmake
@@ -151,7 +151,7 @@ if(CV_GCC OR CV_CLANG)
     if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
       add_extra_compiler_option(-Wno-missing-field-initializers)  # GCC 4.x emits warnings about {}, fixed in GCC 5+
     endif()
-    if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0)
+    if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0)
       add_extra_compiler_option(-Wno-deprecated-enum-enum-conversion)
       add_extra_compiler_option(-Wno-deprecated-anon-enum-enum-conversion)
     endif()
diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake
index fcf716b976..4ad44fe833 100644
--- a/cmake/OpenCVFindLibsGrfmt.cmake
+++ b/cmake/OpenCVFindLibsGrfmt.cmake
@@ -15,11 +15,12 @@ else()
 endif()
 
 if(NOT ZLIB_FOUND)
-  ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIRS)
+  ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIR)
 
-  set(ZLIB_LIBRARY zlib)
+  set(ZLIB_LIBRARY zlib CACHE INTERNAL "")
   add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/zlib")
-  set(ZLIB_INCLUDE_DIRS "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}")
+  set(ZLIB_INCLUDE_DIR "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "")
+  set(ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR})
   set(ZLIB_LIBRARIES ${ZLIB_LIBRARY})
 
   ocv_parse_header2(ZLIB "${${ZLIB_LIBRARY}_SOURCE_DIR}/zlib.h" ZLIB_VERSION)
@@ -37,16 +38,17 @@ if(WITH_JPEG)
     ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
 
     if(NOT BUILD_JPEG_TURBO_DISABLE)
-      set(JPEG_LIBRARY libjpeg-turbo)
+      set(JPEG_LIBRARY libjpeg-turbo CACHE INTERNAL "")
       set(JPEG_LIBRARIES ${JPEG_LIBRARY})
       add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg-turbo")
-      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src")
+      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "")
     else()
-      set(JPEG_LIBRARY libjpeg)
+      set(JPEG_LIBRARY libjpeg CACHE INTERNAL "")
       set(JPEG_LIBRARIES ${JPEG_LIBRARY})
       add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
-      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
+      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
     endif()
+    set(JPEG_INCLUDE_DIRS "${JPEG_INCLUDE_DIR}")
   endif()
 
   macro(ocv_detect_jpeg_version header_file)
@@ -83,10 +85,10 @@ if(WITH_TIFF)
   if(NOT TIFF_FOUND)
     ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
 
-    set(TIFF_LIBRARY libtiff)
+    set(TIFF_LIBRARY libtiff CACHE INTERNAL "")
     set(TIFF_LIBRARIES ${TIFF_LIBRARY})
     add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
-    set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
+    set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "")
     ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
   endif()
 
@@ -128,12 +130,12 @@ endif()
 if(WITH_WEBP AND NOT WEBP_FOUND
     AND (NOT ANDROID OR HAVE_CPUFEATURES)
 )
-
-  set(WEBP_LIBRARY libwebp)
+  ocv_clear_vars(WEBP_LIBRARY WEBP_INCLUDE_DIR)
+  set(WEBP_LIBRARY libwebp CACHE INTERNAL "")
   set(WEBP_LIBRARIES ${WEBP_LIBRARY})
 
   add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libwebp")
-  set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src")
+  set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "")
   set(HAVE_WEBP 1)
 endif()
 
@@ -164,10 +166,10 @@ if(WITH_JASPER)
   if(NOT JASPER_FOUND)
     ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
 
-    set(JASPER_LIBRARY libjasper)
+    set(JASPER_LIBRARY libjasper CACHE INTERNAL "")
     set(JASPER_LIBRARIES ${JASPER_LIBRARY})
     add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
-    set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
+    set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
   endif()
 
   set(HAVE_JASPER YES)
@@ -197,10 +199,10 @@ if(WITH_PNG)
   if(NOT PNG_FOUND)
     ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
 
-    set(PNG_LIBRARY libpng)
+    set(PNG_LIBRARY libpng CACHE INTERNAL "")
     set(PNG_LIBRARIES ${PNG_LIBRARY})
     add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
-    set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
+    set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
     set(PNG_DEFINITIONS "")
     ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
   endif()
@@ -242,7 +244,7 @@ if(WITH_GDAL)
     endif()
 endif()
 
-if (WITH_GDCM)
+if(WITH_GDCM)
   find_package(GDCM QUIET)
   if(NOT GDCM_FOUND)
     set(HAVE_GDCM NO)
diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py
index 223542d9f5..bc765872a0 100755
--- a/platforms/ios/build_framework.py
+++ b/platforms/ios/build_framework.py
@@ -31,7 +31,7 @@ from __future__ import print_function
 import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
 from subprocess import check_call, check_output, CalledProcessError
 
-IPHONEOS_DEPLOYMENT_TARGET='8.0'  # default, can be changed via command line options or environment variable
+IPHONEOS_DEPLOYMENT_TARGET='9.0'  # default, can be changed via command line options or environment variable
 
 def execute(cmd, cwd = None):
     print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)

From 7afd48658c18b688ef757ab050fb766cb244b75b Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Thu, 26 Nov 2020 08:55:15 +0000
Subject: [PATCH 147/152] gapi: eliminate std::rand() and RAND_MAX from tests

---
 modules/gapi/test/common/gapi_parsers_tests_common.hpp | 7 ++++---
 modules/gapi/test/rmat/rmat_tests.cpp                  | 4 ++--
 modules/gapi/test/test_precomp.hpp                     | 3 +++
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/modules/gapi/test/common/gapi_parsers_tests_common.hpp b/modules/gapi/test/common/gapi_parsers_tests_common.hpp
index 91dcca7b3e..328f86b851 100644
--- a/modules/gapi/test/common/gapi_parsers_tests_common.hpp
+++ b/modules/gapi/test/common/gapi_parsers_tests_common.hpp
@@ -176,7 +176,7 @@ private:
     int randInRange(const int start, const int end)
     {
         GAPI_Assert(start <= end);
-        return start + std::rand() % (end - start + 1);
+        return theRNG().uniform(start, end);
     }
 
     cv::Rect generateBox(const cv::Size& in_sz)
@@ -211,7 +211,7 @@ private:
         SSDitem it;
         it.image_id = static_cast<float>(i);
         it.label = static_cast<float>(randInRange(0, 9));
-        it.confidence = static_cast<float>(std::rand()) / RAND_MAX;
+        it.confidence = theRNG().uniform(0.f, 1.f);
         auto box = generateBox(in_sz);
         it.rc_left   = normalize(box.x, in_sz.width);
         it.rc_right  = normalize(box.x + box.width, in_sz.width);
@@ -245,9 +245,10 @@ public:
         auto data = mat.ptr<float>();
 
         const size_t range = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
+        cv::RNG& rng = theRNG();
         for (size_t i = 0; i < range; ++i)
         {
-            data[i] = static_cast<float>(std::rand()) / RAND_MAX;
+            data[i] = rng.uniform(0.f, 1.f);
         }
         return mat;
     }
diff --git a/modules/gapi/test/rmat/rmat_tests.cpp b/modules/gapi/test/rmat/rmat_tests.cpp
index 9980925a3b..52c3806c5b 100644
--- a/modules/gapi/test/rmat/rmat_tests.cpp
+++ b/modules/gapi/test/rmat/rmat_tests.cpp
@@ -116,11 +116,11 @@ public:
 // we have some specific data hidden under RMat,
 // test that we can obtain it via RMat.as<T>() method
 TEST(RMat, UsageInBackend) {
-    int i = std::rand();
+    int i = 123456;
     auto rmat = cv::make_rmat<RMatAdapterForBackend>(i);
 
     auto adapter = rmat.get<RMatAdapterForBackend>();
-    EXPECT_NE(nullptr, adapter);
+    ASSERT_NE(nullptr, adapter);
     EXPECT_EQ(i, adapter->deviceSpecificData());
 }
 } // namespace opencv_test
diff --git a/modules/gapi/test/test_precomp.hpp b/modules/gapi/test/test_precomp.hpp
index 6253acfcb3..7b3c695443 100644
--- a/modules/gapi/test/test_precomp.hpp
+++ b/modules/gapi/test/test_precomp.hpp
@@ -34,4 +34,7 @@ static inline void countNonZero_is_forbidden_in_tests_use_norm_instead() {}
 }
 #define countNonZero() countNonZero_is_forbidden_in_tests_use_norm_instead()
 
+#undef RAND_MAX
+#define RAND_MAX RAND_MAX_is_banned_in_tests__use_cv_theRNG_instead
+
 #endif // __OPENCV_GAPI_TEST_PRECOMP_HPP__

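For context, a hedged sketch of the replacement API used above: cv::theRNG() returns the per-thread cv::RNG, which tests can seed via cv::setRNGSeed() for reproducibility, unlike the process-global std::rand() state. One caveat worth noting: RNG::uniform(a, b) draws from the half-open range [a, b), whereas the old `start + std::rand() % (end - start + 1)` included `end`.

```
// Sketch of the cv::theRNG() pattern (requires OpenCV core to build).
#include <opencv2/core/utility.hpp>
#include <iostream>

int main() {
    cv::setRNGSeed(42);                        // reproducible sequence per run
    cv::RNG& rng = cv::theRNG();

    int   label      = rng.uniform(0, 10);     // int in [0, 10)
    float confidence = rng.uniform(0.f, 1.f);  // float in [0, 1)

    std::cout << "label=" << label << " confidence=" << confidence << std::endl;
    return 0;
}
```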
From ece14eae2403b8106d7532e4350a856d80e38769 Mon Sep 17 00:00:00 2001
From: Ruslan Garnov <ruslan.garnov@intel.com>
Date: Thu, 26 Nov 2020 20:00:45 +0300
Subject: [PATCH 148/152] Removed redundant call to handleNewStream in
 streaming executor

---
 modules/gapi/src/executor/gstreamingexecutor.cpp | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp
index 58789889a3..70686699d0 100644
--- a/modules/gapi/src/executor/gstreamingexecutor.cpp
+++ b/modules/gapi/src/executor/gstreamingexecutor.cpp
@@ -1231,10 +1231,6 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
                                real_video_completion_cb);
     }
 
-    for (auto &&op : m_ops) {
-        op.isl_exec->handleNewStream();
-    }
-
     // Now do this for every island (in a topological order)
     for (auto &&op : m_ops)
     {

From 2cf2456f4c39788ba167d04579fb2251f4fd9a07 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Thu, 26 Nov 2020 21:30:21 +0000
Subject: [PATCH 149/152] dnn(test): skip unstable GatherMultiOutput OCL_FP16
 test

---
 modules/dnn/test/test_onnx_importer.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index f38ca6700f..9ba10d4b47 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -718,6 +718,9 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
 
 TEST_P(Test_ONNX_layers, GatherMultiOutput)
 {
+    if (cvtest::skipUnstableTests && backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
+        throw SkipTestException("Skip unstable test: https://github.com/opencv/opencv/issues/18937");
+
 #if defined(INF_ENGINE_RELEASE)
     if (target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE);

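The skip above relies on OpenCV's test framework treating a thrown SkipTestException as a skipped test rather than a failed one. A simplified stand-in, with hypothetical names rather than the real cvtest classes, models that contract:

```
// Stand-in sketch of exception-based test skipping (hypothetical names).
#include <iostream>
#include <stdexcept>

struct SkipTestException : std::runtime_error {
    using std::runtime_error::runtime_error;
};

void gather_multi_output_test(bool unstable_config) {
    if (unstable_config)
        throw SkipTestException("unstable on OpenCL FP16");
    // ... real assertions would run here ...
}

int main() {
    try {
        gather_multi_output_test(/*unstable_config=*/true);
        std::cout << "PASSED" << std::endl;
    } catch (const SkipTestException& e) {
        std::cout << "SKIPPED: " << e.what() << std::endl;  // a skip, not a failure
    }
    return 0;
}
```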
From 7efc0011fd8b438bc435635c0475fa0b9a4c256d Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Thu, 26 Nov 2020 21:21:05 +0000
Subject: [PATCH 150/152] gapi(test): avoid anonymous namespace types as
 template parameters

---
 .../test/cpu/gapi_ocv_stateful_kernel_test_utils.hpp     | 9 ++++++---
 modules/gapi/test/gapi_array_tests.cpp                   | 3 ++-
 modules/gapi/test/gapi_opaque_tests.cpp                  | 3 ++-
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_test_utils.hpp b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_test_utils.hpp
index 040e628460..0caf0115f1 100644
--- a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_test_utils.hpp
+++ b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_test_utils.hpp
@@ -22,8 +22,10 @@ struct Name                                      \
 
 namespace opencv_test
 {
-namespace
-{
+
+// types from an anonymous namespace don't work well with templates
+inline namespace gapi_ocv_stateful_kernel_test_utils {
+
 struct UserStruct
 {
     UserStruct() = default;
@@ -41,7 +43,8 @@ private:
     short _myShortVal;
     float _myFloatVal;
 };
-} // anonymous namespace
+
+} // namespace
 } // opencv_test
 
 #endif // OPENCV_GAPI_OCV_STATEFUL_KERNEL_TESTS_UTILS_HPP
diff --git a/modules/gapi/test/gapi_array_tests.cpp b/modules/gapi/test/gapi_array_tests.cpp
index b4c8378799..8bdc0854f0 100644
--- a/modules/gapi/test/gapi_array_tests.cpp
+++ b/modules/gapi/test/gapi_array_tests.cpp
@@ -240,7 +240,8 @@ TEST(GArray_VectorRef, TestMov)
     EXPECT_EQ(V{}, vtest);
 }
 
-namespace {
+// types from an anonymous namespace don't work well with templates
+inline namespace gapi_array_tests {
     struct MyTestStruct {
         int i;
         float f;
diff --git a/modules/gapi/test/gapi_opaque_tests.cpp b/modules/gapi/test/gapi_opaque_tests.cpp
index 4cadb918b8..de3572c4bd 100644
--- a/modules/gapi/test/gapi_opaque_tests.cpp
+++ b/modules/gapi/test/gapi_opaque_tests.cpp
@@ -284,7 +284,8 @@ TEST(GOpaque_OpaqueRef, TestMov)
     EXPECT_NE(test, mov.rref<I>());         // ref lost the data
 }
 
-namespace {
+// types from an anonymous namespace don't work well with templates
+inline namespace gapi_opaque_tests {
     struct MyTestStruct {
         int i;
         float f;

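Why an inline namespace fixes this, in a hedged sketch (the namespace name below is hypothetical): an inline namespace gives the test types external linkage and stable mangled names for template instantiation, while unqualified access from the enclosing namespace keeps working exactly as it did with the anonymous namespace.

```
// Sketch: an inline namespace keeps unqualified access but adds external linkage.
#include <iostream>

namespace opencv_test {
inline namespace demo_test_types {     // hypothetical name
struct MyTestStruct { int i; float f; };
}  // inline namespace demo_test_types
}  // namespace opencv_test

template <typename T>
void describe(const T& v) {            // T's mangled name is now stable across TUs
    std::cout << "i=" << v.i << " f=" << v.f << std::endl;
}

int main() {
    opencv_test::MyTestStruct s{7, 0.5f};  // no demo_test_types:: qualifier needed
    describe(s);
    return 0;
}
```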
From 5c987e4c75b9ab84a8a6af60a7d4b353d5730fea Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Date: Fri, 27 Nov 2020 01:56:59 +0300
Subject: [PATCH 151/152] Merge pull request #18924 from alalek:4.x-xcode12

(4.x) build: Xcode 12 support

* build: xcode 12 support, cmake fixes

* ts: eliminate clang 11 warnings

* 3rdparty: clang 11 warnings

* features2d: eliminate build warnings

* test: warnings

* gapi: warnings from 18928
---
 3rdparty/openexr/CMakeLists.txt          |   1 +
 3rdparty/openjpeg/CMakeLists.txt         |   4 +
 cmake/OpenCVCompilerOptions.cmake        |   2 +-
 cmake/OpenCVFindLibsGrfmt.cmake          |  36 ++---
 modules/core/test/test_math.cpp          |   2 +-
 modules/features2d/src/evaluation.cpp    |   2 +-
 modules/features2d/src/keypoint.cpp      |   2 +-
 modules/gapi/CMakeLists.txt              |   4 +
 modules/ts/include/opencv2/ts/ts_gtest.h | 162 +++++++++++------------
 modules/ts/src/ts_perf.cpp               |   2 +-
 platforms/ios/build_framework.py         |   2 +-
 11 files changed, 115 insertions(+), 104 deletions(-)

diff --git a/3rdparty/openexr/CMakeLists.txt b/3rdparty/openexr/CMakeLists.txt
index 88f60b23c0..8d10e7d968 100644
--- a/3rdparty/openexr/CMakeLists.txt
+++ b/3rdparty/openexr/CMakeLists.txt
@@ -109,6 +109,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow -Wunused -Wsign-compare -Wundef -W
                                      -Wmissing-prototypes  # gcc/clang
                                      -Wreorder
                                      -Wunused-result
+                                     -Wimplicit-const-int-float-conversion  # clang
 )
 if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 8.0)
   ocv_warnings_disable(CMAKE_CXX_FLAGS -Wclass-memaccess)
diff --git a/3rdparty/openjpeg/CMakeLists.txt b/3rdparty/openjpeg/CMakeLists.txt
index ec15bba850..b38bf28f05 100644
--- a/3rdparty/openjpeg/CMakeLists.txt
+++ b/3rdparty/openjpeg/CMakeLists.txt
@@ -11,6 +11,10 @@ set(OPENJPEG_LIBRARY_NAME libopenjp2)
 
 project(openjpeg C)
 
+ocv_warnings_disable(CMAKE_C_FLAGS
+    -Wimplicit-const-int-float-conversion  # clang
+)
+
 #-----------------------------------------------------------------------------
 # OPENJPEG version number, useful for packaging and doxygen doc:
 set(OPENJPEG_VERSION_MAJOR 2)
diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake
index 080c78c547..929c5b5e51 100644
--- a/cmake/OpenCVCompilerOptions.cmake
+++ b/cmake/OpenCVCompilerOptions.cmake
@@ -153,7 +153,7 @@ if(CV_GCC OR CV_CLANG)
     if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
       add_extra_compiler_option(-Wno-missing-field-initializers)  # GCC 4.x emits warnings about {}, fixed in GCC 5+
     endif()
-    if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0)
+    if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0)
       add_extra_compiler_option(-Wno-deprecated-enum-enum-conversion)
       add_extra_compiler_option(-Wno-deprecated-anon-enum-enum-conversion)
     endif()
diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake
index 22e20b6b79..28aa47ba9c 100644
--- a/cmake/OpenCVFindLibsGrfmt.cmake
+++ b/cmake/OpenCVFindLibsGrfmt.cmake
@@ -15,11 +15,12 @@ else()
 endif()
 
 if(NOT ZLIB_FOUND)
-  ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIRS)
+  ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIR)
 
-  set(ZLIB_LIBRARY zlib)
+  set(ZLIB_LIBRARY zlib CACHE INTERNAL "")
   add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/zlib")
-  set(ZLIB_INCLUDE_DIRS "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}")
+  set(ZLIB_INCLUDE_DIR "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "")
+  set(ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR})
   set(ZLIB_LIBRARIES ${ZLIB_LIBRARY})
 
   ocv_parse_header2(ZLIB "${${ZLIB_LIBRARY}_SOURCE_DIR}/zlib.h" ZLIB_VERSION)
@@ -37,16 +38,17 @@ if(WITH_JPEG)
     ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
 
     if(NOT BUILD_JPEG_TURBO_DISABLE)
-      set(JPEG_LIBRARY libjpeg-turbo)
+      set(JPEG_LIBRARY libjpeg-turbo CACHE INTERNAL "")
       set(JPEG_LIBRARIES ${JPEG_LIBRARY})
       add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg-turbo")
-      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src")
+      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "")
     else()
-      set(JPEG_LIBRARY libjpeg)
+      set(JPEG_LIBRARY libjpeg CACHE INTERNAL "")
       set(JPEG_LIBRARIES ${JPEG_LIBRARY})
       add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
-      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
+      set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
     endif()
+    set(JPEG_INCLUDE_DIRS "${JPEG_INCLUDE_DIR}")
   endif()
 
   macro(ocv_detect_jpeg_version header_file)
@@ -83,10 +85,10 @@ if(WITH_TIFF)
   if(NOT TIFF_FOUND)
     ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
 
-    set(TIFF_LIBRARY libtiff)
+    set(TIFF_LIBRARY libtiff CACHE INTERNAL "")
     set(TIFF_LIBRARIES ${TIFF_LIBRARY})
     add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
-    set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
+    set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "")
     ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
   endif()
 
@@ -128,12 +130,12 @@ endif()
 if(WITH_WEBP AND NOT WEBP_FOUND
     AND (NOT ANDROID OR HAVE_CPUFEATURES)
 )
-
-  set(WEBP_LIBRARY libwebp)
+  ocv_clear_vars(WEBP_LIBRARY WEBP_INCLUDE_DIR)
+  set(WEBP_LIBRARY libwebp CACHE INTERNAL "")
   set(WEBP_LIBRARIES ${WEBP_LIBRARY})
 
   add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libwebp")
-  set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src")
+  set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "")
   set(HAVE_WEBP 1)
 endif()
 
@@ -192,10 +194,10 @@ if(WITH_JASPER AND NOT HAVE_OPENJPEG)
   if(NOT JASPER_FOUND)
     ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
 
-    set(JASPER_LIBRARY libjasper)
+    set(JASPER_LIBRARY libjasper CACHE INTERNAL "")
     set(JASPER_LIBRARIES ${JASPER_LIBRARY})
     add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
-    set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
+    set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
   endif()
 
   set(HAVE_JASPER YES)
@@ -225,10 +227,10 @@ if(WITH_PNG)
   if(NOT PNG_FOUND)
     ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
 
-    set(PNG_LIBRARY libpng)
+    set(PNG_LIBRARY libpng CACHE INTERNAL "")
     set(PNG_LIBRARIES ${PNG_LIBRARY})
     add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
-    set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
+    set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
     set(PNG_DEFINITIONS "")
     ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
   endif()
@@ -270,7 +272,7 @@ if(WITH_GDAL)
     endif()
 endif()
 
-if (WITH_GDCM)
+if(WITH_GDCM)
   find_package(GDCM QUIET)
   if(NOT GDCM_FOUND)
     set(HAVE_GDCM NO)
diff --git a/modules/core/test/test_math.cpp b/modules/core/test/test_math.cpp
index 066475b19e..d5b80dcb89 100644
--- a/modules/core/test/test_math.cpp
+++ b/modules/core/test/test_math.cpp
@@ -2579,7 +2579,7 @@ TEST(Core_CheckRange_INT_MAX, accuracy)
 TEST(Core_CheckRange_INT_MAX1, accuracy)
 {
     cv::Mat m(3, 3, CV_32SC1, cv::Scalar(INT_MAX));
-    ASSERT_TRUE( cv::checkRange(m, true, 0, 0, INT_MAX+1.0f) );
+    ASSERT_TRUE( cv::checkRange(m, true, 0, 0, (float)((double)INT_MAX+1.0f)) );
     ASSERT_TRUE( cv::checkRange(m) );
 }
 
diff --git a/modules/features2d/src/evaluation.cpp b/modules/features2d/src/evaluation.cpp
index 2c1a446a57..ca7ab14500 100644
--- a/modules/features2d/src/evaluation.cpp
+++ b/modules/features2d/src/evaluation.cpp
@@ -314,7 +314,7 @@ struct SIdx
         UsedFinder(const SIdx& _used) : used(_used) {}
         const SIdx& used;
         bool operator()(const SIdx& v) const { return  (v.i1 == used.i1 || v.i2 == used.i2); }
-        UsedFinder& operator=(const UsedFinder&);
+        UsedFinder& operator=(const UsedFinder&) = delete;
     };
 };
 
diff --git a/modules/features2d/src/keypoint.cpp b/modules/features2d/src/keypoint.cpp
index bab1e22b45..e14c9da94c 100644
--- a/modules/features2d/src/keypoint.cpp
+++ b/modules/features2d/src/keypoint.cpp
@@ -151,7 +151,7 @@ public:
 
 private:
     const Mat mask;
-    MaskPredicate& operator=(const MaskPredicate&);
+    MaskPredicate& operator=(const MaskPredicate&) = delete;
 };
 
 void KeyPointsFilter::runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask )
diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
index ee275fe1af..0067cfa389 100644
--- a/modules/gapi/CMakeLists.txt
+++ b/modules/gapi/CMakeLists.txt
@@ -38,6 +38,10 @@ if(MSVC)
   endif()
 endif()
 
+if(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")  # don't add Clang here: issue should be investigated and fixed (workaround for Apple only)
+  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wrange-loop-analysis)  # https://github.com/opencv/opencv/issues/18928
+endif()
+
 file(GLOB gapi_ext_hdrs
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/*.hpp"
     "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.hpp"
diff --git a/modules/ts/include/opencv2/ts/ts_gtest.h b/modules/ts/include/opencv2/ts/ts_gtest.h
index bf0ff9134e..b1c6c12152 100644
--- a/modules/ts/include/opencv2/ts/ts_gtest.h
+++ b/modules/ts/include/opencv2/ts/ts_gtest.h
@@ -9795,7 +9795,7 @@ class GTEST_API_ ExitedWithCode {
   bool operator()(int exit_status) const;
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ExitedWithCode& other);
+  void operator=(const ExitedWithCode& other) = delete;
 
   const int exit_code_;
 };
@@ -11769,7 +11769,7 @@ class RangeGenerator : public ParamGeneratorInterface<T> {
           step_(other.step_) {}
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<T>* const base_;
     T value_;
@@ -11787,7 +11787,7 @@ class RangeGenerator : public ParamGeneratorInterface<T> {
   }
 
   // No implementation - assignment is unsupported.
-  void operator=(const RangeGenerator& other);
+  void operator=(const RangeGenerator& other) = delete;
 
   const T begin_;
   const T end_;
@@ -11878,7 +11878,7 @@ class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
   };  // class ValuesInIteratorRangeGenerator::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const ValuesInIteratorRangeGenerator& other);
+  void operator=(const ValuesInIteratorRangeGenerator& other) = delete;
 
   const ContainerType container_;
 };  // class ValuesInIteratorRangeGenerator
@@ -12329,7 +12329,7 @@ class ValueArray1 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray1& other);
+  void operator=(const ValueArray1& other) = delete;
 
   const T1 v1_;
 };
@@ -12349,7 +12349,7 @@ class ValueArray2 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray2& other);
+  void operator=(const ValueArray2& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12372,7 +12372,7 @@ class ValueArray3 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray3& other);
+  void operator=(const ValueArray3& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12397,7 +12397,7 @@ class ValueArray4 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray4& other);
+  void operator=(const ValueArray4& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12423,7 +12423,7 @@ class ValueArray5 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray5& other);
+  void operator=(const ValueArray5& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12452,7 +12452,7 @@ class ValueArray6 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray6& other);
+  void operator=(const ValueArray6& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12483,7 +12483,7 @@ class ValueArray7 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray7& other);
+  void operator=(const ValueArray7& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12516,7 +12516,7 @@ class ValueArray8 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray8& other);
+  void operator=(const ValueArray8& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12551,7 +12551,7 @@ class ValueArray9 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray9& other);
+  void operator=(const ValueArray9& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12587,7 +12587,7 @@ class ValueArray10 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray10& other);
+  void operator=(const ValueArray10& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12626,7 +12626,7 @@ class ValueArray11 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray11& other);
+  void operator=(const ValueArray11& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12667,7 +12667,7 @@ class ValueArray12 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray12& other);
+  void operator=(const ValueArray12& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12710,7 +12710,7 @@ class ValueArray13 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray13& other);
+  void operator=(const ValueArray13& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12754,7 +12754,7 @@ class ValueArray14 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray14& other);
+  void operator=(const ValueArray14& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12801,7 +12801,7 @@ class ValueArray15 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray15& other);
+  void operator=(const ValueArray15& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12851,7 +12851,7 @@ class ValueArray16 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray16& other);
+  void operator=(const ValueArray16& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12902,7 +12902,7 @@ class ValueArray17 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray17& other);
+  void operator=(const ValueArray17& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -12955,7 +12955,7 @@ class ValueArray18 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray18& other);
+  void operator=(const ValueArray18& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13010,7 +13010,7 @@ class ValueArray19 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray19& other);
+  void operator=(const ValueArray19& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13067,7 +13067,7 @@ class ValueArray20 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray20& other);
+  void operator=(const ValueArray20& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13127,7 +13127,7 @@ class ValueArray21 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray21& other);
+  void operator=(const ValueArray21& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13188,7 +13188,7 @@ class ValueArray22 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray22& other);
+  void operator=(const ValueArray22& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13252,7 +13252,7 @@ class ValueArray23 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray23& other);
+  void operator=(const ValueArray23& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13318,7 +13318,7 @@ class ValueArray24 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray24& other);
+  void operator=(const ValueArray24& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13385,7 +13385,7 @@ class ValueArray25 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray25& other);
+  void operator=(const ValueArray25& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13454,7 +13454,7 @@ class ValueArray26 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray26& other);
+  void operator=(const ValueArray26& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13527,7 +13527,7 @@ class ValueArray27 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray27& other);
+  void operator=(const ValueArray27& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13601,7 +13601,7 @@ class ValueArray28 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray28& other);
+  void operator=(const ValueArray28& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13676,7 +13676,7 @@ class ValueArray29 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray29& other);
+  void operator=(const ValueArray29& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13754,7 +13754,7 @@ class ValueArray30 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray30& other);
+  void operator=(const ValueArray30& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13835,7 +13835,7 @@ class ValueArray31 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray31& other);
+  void operator=(const ValueArray31& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -13917,7 +13917,7 @@ class ValueArray32 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray32& other);
+  void operator=(const ValueArray32& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14002,7 +14002,7 @@ class ValueArray33 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray33& other);
+  void operator=(const ValueArray33& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14088,7 +14088,7 @@ class ValueArray34 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray34& other);
+  void operator=(const ValueArray34& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14176,7 +14176,7 @@ class ValueArray35 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray35& other);
+  void operator=(const ValueArray35& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14267,7 +14267,7 @@ class ValueArray36 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray36& other);
+  void operator=(const ValueArray36& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14360,7 +14360,7 @@ class ValueArray37 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray37& other);
+  void operator=(const ValueArray37& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14454,7 +14454,7 @@ class ValueArray38 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray38& other);
+  void operator=(const ValueArray38& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14551,7 +14551,7 @@ class ValueArray39 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray39& other);
+  void operator=(const ValueArray39& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14650,7 +14650,7 @@ class ValueArray40 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray40& other);
+  void operator=(const ValueArray40& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14751,7 +14751,7 @@ class ValueArray41 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray41& other);
+  void operator=(const ValueArray41& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14854,7 +14854,7 @@ class ValueArray42 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray42& other);
+  void operator=(const ValueArray42& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -14959,7 +14959,7 @@ class ValueArray43 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray43& other);
+  void operator=(const ValueArray43& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15066,7 +15066,7 @@ class ValueArray44 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray44& other);
+  void operator=(const ValueArray44& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15175,7 +15175,7 @@ class ValueArray45 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray45& other);
+  void operator=(const ValueArray45& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15286,7 +15286,7 @@ class ValueArray46 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray46& other);
+  void operator=(const ValueArray46& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15400,7 +15400,7 @@ class ValueArray47 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray47& other);
+  void operator=(const ValueArray47& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15516,7 +15516,7 @@ class ValueArray48 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray48& other);
+  void operator=(const ValueArray48& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15633,7 +15633,7 @@ class ValueArray49 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray49& other);
+  void operator=(const ValueArray49& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15751,7 +15751,7 @@ class ValueArray50 {
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const ValueArray50& other);
+  void operator=(const ValueArray50& other) = delete;
 
   const T1 v1_;
   const T2 v2_;
@@ -15904,7 +15904,7 @@ class CartesianProductGenerator2
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -15919,7 +15919,7 @@ class CartesianProductGenerator2
   };  // class CartesianProductGenerator2::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator2& other);
+  void operator=(const CartesianProductGenerator2& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -16032,7 +16032,7 @@ class CartesianProductGenerator3
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -16050,7 +16050,7 @@ class CartesianProductGenerator3
   };  // class CartesianProductGenerator3::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator3& other);
+  void operator=(const CartesianProductGenerator3& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -16179,7 +16179,7 @@ class CartesianProductGenerator4
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -16200,7 +16200,7 @@ class CartesianProductGenerator4
   };  // class CartesianProductGenerator4::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator4& other);
+  void operator=(const CartesianProductGenerator4& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -16342,7 +16342,7 @@ class CartesianProductGenerator5
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -16366,7 +16366,7 @@ class CartesianProductGenerator5
   };  // class CartesianProductGenerator5::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator5& other);
+  void operator=(const CartesianProductGenerator5& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -16524,7 +16524,7 @@ class CartesianProductGenerator6
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -16551,7 +16551,7 @@ class CartesianProductGenerator6
   };  // class CartesianProductGenerator6::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator6& other);
+  void operator=(const CartesianProductGenerator6& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -16723,7 +16723,7 @@ class CartesianProductGenerator7
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -16753,7 +16753,7 @@ class CartesianProductGenerator7
   };  // class CartesianProductGenerator7::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator7& other);
+  void operator=(const CartesianProductGenerator7& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -16941,7 +16941,7 @@ class CartesianProductGenerator8
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -16974,7 +16974,7 @@ class CartesianProductGenerator8
   };  // class CartesianProductGenerator8::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator8& other);
+  void operator=(const CartesianProductGenerator8& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -17176,7 +17176,7 @@ class CartesianProductGenerator9
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -17212,7 +17212,7 @@ class CartesianProductGenerator9
   };  // class CartesianProductGenerator9::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator9& other);
+  void operator=(const CartesianProductGenerator9& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -17428,7 +17428,7 @@ class CartesianProductGenerator10
     }
 
     // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
+    void operator=(const Iterator& other) = delete;
 
     const ParamGeneratorInterface<ParamType>* const base_;
     // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
@@ -17467,7 +17467,7 @@ class CartesianProductGenerator10
   };  // class CartesianProductGenerator10::Iterator
 
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator10& other);
+  void operator=(const CartesianProductGenerator10& other) = delete;
 
   const ParamGenerator<T1> g1_;
   const ParamGenerator<T2> g2_;
@@ -17503,7 +17503,7 @@ CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder2& other);
+  void operator=(const CartesianProductHolder2& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17526,7 +17526,7 @@ CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder3& other);
+  void operator=(const CartesianProductHolder3& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17552,7 +17552,7 @@ CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder4& other);
+  void operator=(const CartesianProductHolder4& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17580,7 +17580,7 @@ CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder5& other);
+  void operator=(const CartesianProductHolder5& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17612,7 +17612,7 @@ CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder6& other);
+  void operator=(const CartesianProductHolder6& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17647,7 +17647,7 @@ CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder7& other);
+  void operator=(const CartesianProductHolder7& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17686,7 +17686,7 @@ CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder8& other);
+  void operator=(const CartesianProductHolder8& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17729,7 +17729,7 @@ CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder9& other);
+  void operator=(const CartesianProductHolder9& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
@@ -17775,7 +17775,7 @@ CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
 
  private:
   // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder10& other);
+  void operator=(const CartesianProductHolder10& other) = delete;
 
   const Generator1 g1_;
   const Generator2 g2_;
diff --git a/modules/ts/src/ts_perf.cpp b/modules/ts/src/ts_perf.cpp
index 1639932088..2a9169fd13 100644
--- a/modules/ts/src/ts_perf.cpp
+++ b/modules/ts/src/ts_perf.cpp
@@ -2168,7 +2168,7 @@ struct KeypointComparator
         return cmp(pts_[idx1], pts_[idx2]);
     }
 private:
-    const KeypointComparator& operator=(const KeypointComparator&); // quiet MSVC
+    KeypointComparator& operator=(const KeypointComparator&) = delete;
 };
 }//namespace
 
diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py
index b7936ceea7..7b05a019d3 100755
--- a/platforms/ios/build_framework.py
+++ b/platforms/ios/build_framework.py
@@ -113,7 +113,7 @@ class Builder:
                 cmake_flags.append("-DCMAKE_CXX_FLAGS=" + " ".join(c_flags))
                 cmake_flags.append("-DCMAKE_EXE_LINKER_FLAGS=" + " ".join(c_flags))
 
-                # CMake annot compile Swift for Catalyst https://gitlab.kitware.com/cmake/cmake/-/issues/21436
+                # CMake cannot compile Swift for Catalyst https://gitlab.kitware.com/cmake/cmake/-/issues/21436
                 # cmake_flags.append("-DCMAKE_Swift_FLAGS=" + " " + target_flag)
                 cmake_flags.append("-DSWIFT_DISABLED=1")
 

From 7521f207b182744e9354a61e969b2e4f87903cd2 Mon Sep 17 00:00:00 2001
From: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Date: Fri, 27 Nov 2020 20:39:46 +0300
Subject: [PATCH 152/152] Merge pull request #18762 from
 TolyaTalamanov:at/support-garray

[G-API] Wrap GArray

* Wrap GArray for output (see the usage sketch below)

* Collect in/out info in graph

* Add imgproc tests

* Add cv::Point2f

* Update test_gapi_imgproc.py

* Address review comments
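
In short: a G-API graph can now return a GArray<cv::Point2f> to Python, and
apply()/pull() unpack it into a numpy array. A minimal sketch of the enabled
usage, mirroring the new test_gapi_imgproc.py (the zero image is just a
stand-in input; any 8-bit single-channel image works):

```
import numpy as np
import cv2 as cv

img = np.zeros((480, 640), dtype=np.uint8)  # stand-in for a real grayscale image

g_in  = cv.GMat()
g_out = cv.gapi.goodFeaturesToTrack(g_in, 50, 0.01, 10, None, 3, True, 0.04)
comp  = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

corners = comp.apply(cv.gin(img), args=cv.compile_args(cv.gapi.core.cpu.kernels()))
# corners has shape (num_points, 2) in G-API, vs. OpenCV's (num_points, 1, 2);
# it is empty for this flat test image
```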
---
 modules/gapi/include/opencv2/gapi/garray.hpp  |  2 +
 modules/gapi/include/opencv2/gapi/gcommon.hpp |  6 +-
 modules/gapi/include/opencv2/gapi/gkernel.hpp | 13 ++-
 modules/gapi/include/opencv2/gapi/imgproc.hpp |  4 +-
 .../include/opencv2/gapi/opencv_includes.hpp  | 11 +--
 .../gapi/include/opencv2/gapi/own/types.hpp   | 10 +++
 modules/gapi/include/opencv2/gapi/s11n.hpp    |  3 +
 modules/gapi/misc/python/pyopencv_gapi.hpp    | 21 ++++-
 modules/gapi/misc/python/shadow_gapi.hpp      |  2 +
 .../gapi/misc/python/test/test_gapi_core.py   | 75 ++++++++++--------
 .../misc/python/test/test_gapi_imgproc.py     | 79 +++++++++++++++++++
 .../python/test/test_gapi_sample_pipelines.py | 20 ++---
 .../misc/python/test/test_gapi_streaming.py   | 73 +++++++++++++++++
 modules/gapi/src/api/gcomputation.cpp         | 30 +++++--
 .../src/backends/common/serialization.cpp     | 52 ++++++------
 modules/gapi/src/compiler/gcompiled_priv.hpp  | 10 +++
 modules/gapi/src/compiler/gcompiler.cpp       | 39 +++++++--
 modules/gapi/src/compiler/gstreaming.cpp      | 24 ++++--
 modules/gapi/src/compiler/gstreaming_priv.hpp | 14 ++--
 modules/gapi/test/own/gapi_types_tests.cpp    | 16 ++++
 20 files changed, 400 insertions(+), 104 deletions(-)
 create mode 100644 modules/gapi/misc/python/test/test_gapi_imgproc.py

diff --git a/modules/gapi/include/opencv2/gapi/garray.hpp b/modules/gapi/include/opencv2/gapi/garray.hpp
index 0798655666..5d4b3c59e0 100644
--- a/modules/gapi/include/opencv2/gapi/garray.hpp
+++ b/modules/gapi/include/opencv2/gapi/garray.hpp
@@ -368,6 +368,8 @@ private:
     detail::GArrayU m_ref;
 };
 
+using GArrayP2f = GArray<cv::Point2f>;
+
 /** @} */
 
 } // namespace cv
diff --git a/modules/gapi/include/opencv2/gapi/gcommon.hpp b/modules/gapi/include/opencv2/gapi/gcommon.hpp
index 0242020f6a..a474140baa 100644
--- a/modules/gapi/include/opencv2/gapi/gcommon.hpp
+++ b/modules/gapi/include/opencv2/gapi/gcommon.hpp
@@ -49,6 +49,7 @@ namespace detail
         CV_UINT64,     // uint64_t user G-API data
         CV_STRING,     // std::string user G-API data
         CV_POINT,      // cv::Point user G-API data
+        CV_POINT2F,    // cv::Point2f user G-API data
         CV_SIZE,       // cv::Size user G-API data
         CV_RECT,       // cv::Rect user G-API data
         CV_SCALAR,     // cv::Scalar user G-API data
@@ -68,15 +69,16 @@ namespace detail
     template<> struct GOpaqueTraits<cv::Size>    { static constexpr const OpaqueKind kind = OpaqueKind::CV_SIZE; };
     template<> struct GOpaqueTraits<cv::Scalar>  { static constexpr const OpaqueKind kind = OpaqueKind::CV_SCALAR; };
     template<> struct GOpaqueTraits<cv::Point>   { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT; };
+    template<> struct GOpaqueTraits<cv::Point2f> { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT2F; };
     template<> struct GOpaqueTraits<cv::Mat>     { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; };
     template<> struct GOpaqueTraits<cv::Rect>    { static constexpr const OpaqueKind kind = OpaqueKind::CV_RECT; };
     template<> struct GOpaqueTraits<cv::GMat>    { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; };
     template<> struct GOpaqueTraits<cv::gapi::wip::draw::Prim>
                                                  { static constexpr const OpaqueKind kind = OpaqueKind::CV_DRAW_PRIM; };
-    using GOpaqueTraitsArrayTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Scalar, cv::Point,
+    using GOpaqueTraitsArrayTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Scalar, cv::Point, cv::Point2f,
                                                cv::Mat, cv::Rect, cv::gapi::wip::draw::Prim>;
     // GOpaque is not supporting cv::Mat and cv::Scalar since there are GScalar and GMat types
-    using GOpaqueTraitsOpaqueTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Point, cv::Rect,
+    using GOpaqueTraitsOpaqueTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Point, cv::Point2f, cv::Rect,
                                                 cv::gapi::wip::draw::Prim>;
 } // namespace detail
 
diff --git a/modules/gapi/include/opencv2/gapi/gkernel.hpp b/modules/gapi/include/opencv2/gapi/gkernel.hpp
index d4c3e6c634..0ec7dd07c0 100644
--- a/modules/gapi/include/opencv2/gapi/gkernel.hpp
+++ b/modules/gapi/include/opencv2/gapi/gkernel.hpp
@@ -26,9 +26,16 @@
 
 namespace cv {
 
-using GShapes = std::vector<GShape>;
-using GKinds = std::vector<cv::detail::OpaqueKind>;
-using GCtors  = std::vector<detail::HostCtor>;
+struct GTypeInfo
+{
+    GShape                 shape;
+    cv::detail::OpaqueKind kind;
+};
+
+using GShapes    = std::vector<GShape>;
+using GKinds     = std::vector<cv::detail::OpaqueKind>;
+using GCtors     = std::vector<detail::HostCtor>;
+using GTypesInfo = std::vector<GTypeInfo>;
 
 // GKernel describes kernel API to the system
 // FIXME: add attributes of a kernel, (e.g. number and types
diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index e41c2507f2..7435ec1e1d 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -1035,7 +1035,7 @@ or #cornerMinEigenVal.
 
 @return vector of detected corners.
  */
-GAPI_EXPORTS GArray<Point2f> goodFeaturesToTrack(const GMat  &image,
+GAPI_EXPORTS_W GArray<Point2f> goodFeaturesToTrack(const GMat  &image,
                                                        int    maxCorners,
                                                        double qualityLevel,
                                                        double minDistance,
@@ -1350,7 +1350,7 @@ Resulting gray color value computed as
 @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC1.
 @sa RGB2YUV
  */
-GAPI_EXPORTS GMat RGB2Gray(const GMat& src);
+GAPI_EXPORTS_W GMat RGB2Gray(const GMat& src);
 
 /** @overload
 Resulting gray color value computed as
diff --git a/modules/gapi/include/opencv2/gapi/opencv_includes.hpp b/modules/gapi/include/opencv2/gapi/opencv_includes.hpp
index 5f25fe4af7..08b2d6ed02 100644
--- a/modules/gapi/include/opencv2/gapi/opencv_includes.hpp
+++ b/modules/gapi/include/opencv2/gapi/opencv_includes.hpp
@@ -21,11 +21,12 @@
 #  include <opencv2/gapi/own/mat.hpp>
 // replacement of cv's structures:
 namespace cv {
-    using Rect   = gapi::own::Rect;
-    using Size   = gapi::own::Size;
-    using Point  = gapi::own::Point;
-    using Scalar = gapi::own::Scalar;
-    using Mat    = gapi::own::Mat;
+    using Rect    = gapi::own::Rect;
+    using Size    = gapi::own::Size;
+    using Point   = gapi::own::Point;
+    using Point2f = gapi::own::Point2f;
+    using Scalar  = gapi::own::Scalar;
+    using Mat     = gapi::own::Mat;
 }  // namespace cv
 #endif // !defined(GAPI_STANDALONE)
 
diff --git a/modules/gapi/include/opencv2/gapi/own/types.hpp b/modules/gapi/include/opencv2/gapi/own/types.hpp
index 20445ee0fd..c77a62ca53 100644
--- a/modules/gapi/include/opencv2/gapi/own/types.hpp
+++ b/modules/gapi/include/opencv2/gapi/own/types.hpp
@@ -28,6 +28,16 @@ public:
     int y = 0;
 };
 
+class Point2f
+{
+public:
+    Point2f() = default;
+    Point2f(float _x, float _y) : x(_x), y(_y) {}
+
+    float x = 0.f;
+    float y = 0.f;
+};
+
 class Rect
 {
 public:
diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp
index 0e3e382328..0e2c4c239b 100644
--- a/modules/gapi/include/opencv2/gapi/s11n.hpp
+++ b/modules/gapi/include/opencv2/gapi/s11n.hpp
@@ -121,6 +121,9 @@ GAPI_EXPORTS std::unique_ptr<IIStream> getInStream(const std::vector<char> &p);
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Point &pt);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::Point &pt);
 
+GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Point2f &pt);
+GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::Point2f &pt);
+
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Size &sz);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is,       cv::Size &sz);
 
diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp
index 57c0b3db4f..e25328e64f 100644
--- a/modules/gapi/misc/python/pyopencv_gapi.hpp
+++ b/modules/gapi/misc/python/pyopencv_gapi.hpp
@@ -47,8 +47,20 @@ static PyObject* from_grunarg(const GRunArg& v)
             const auto& s = util::get<cv::Scalar>(v);
             return pyopencv_from(s);
         }
-
+        case GRunArg::index_of<cv::detail::VectorRef>():
+        {
+            const auto& vref = util::get<cv::detail::VectorRef>(v);
+            switch (vref.getKind())
+            {
+                case cv::detail::OpaqueKind::CV_POINT2F:
+                    return pyopencv_from(vref.rref<cv::Point2f>());
+                default:
+                    PyErr_SetString(PyExc_TypeError, "Unsupported kind for GArray");
+                    return NULL;
+            }
+        }
         default:
+            PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
             return NULL;
     }
     GAPI_Assert(false);
@@ -65,7 +77,6 @@ PyObject* pyopencv_from(const GRunArgs& value)
         PyObject* item = from_grunarg(value[0]);
         if(!item)
         {
-            PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
             return NULL;
         }
         return item;
@@ -117,9 +128,13 @@ static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
         {
             args.emplace_back(reinterpret_cast<pyopencv_GMat_t*>(item)->v);
         }
+        else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayP2f_TypePtr)))
+        {
+            args.emplace_back(reinterpret_cast<pyopencv_GArrayP2f_t*>(item)->v.strip());
+        }
         else
         {
-            PyErr_SetString(PyExc_TypeError, "cv.GIn() supports only cv.GMat and cv.GScalar");
+            PyErr_SetString(PyExc_TypeError, "Unsupported type for cv.GIn()/cv.GOut()");
             return NULL;
         }
     }
diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp
index 0fac222212..792314512c 100644
--- a/modules/gapi/misc/python/shadow_gapi.hpp
+++ b/modules/gapi/misc/python/shadow_gapi.hpp
@@ -16,6 +16,8 @@ namespace cv
    class GAPI_EXPORTS_W_SIMPLE GRunArg { };
    class GAPI_EXPORTS_W_SIMPLE GMetaArg { };
 
+   class GAPI_EXPORTS_W_SIMPLE GArrayP2f { };
+
    using GProtoInputArgs  = GIOProtoArgs<In_Tag>;
    using GProtoOutputArgs = GIOProtoArgs<Out_Tag>;
 
diff --git a/modules/gapi/misc/python/test/test_gapi_core.py b/modules/gapi/misc/python/test/test_gapi_core.py
index cd85d9cadb..267037a78d 100644
--- a/modules/gapi/misc/python/test/test_gapi_core.py
+++ b/modules/gapi/misc/python/test/test_gapi_core.py
@@ -2,26 +2,27 @@
 
 import numpy as np
 import cv2 as cv
+import os
 
 from tests_common import NewOpenCVTests
 
 
 # Plaidml is an optional backend
 pkgs = [
-         cv.gapi.core.ocl.kernels(),
-         cv.gapi.core.cpu.kernels(),
-         cv.gapi.core.fluid.kernels()
-         # cv.gapi.core.plaidml.kernels()
-       ]
+          ('ocl'    , cv.gapi.core.ocl.kernels()),
+          ('cpu'    , cv.gapi.core.cpu.kernels()),
+          ('fluid'  , cv.gapi.core.fluid.kernels())
+          # ('plaidml', cv.gapi.core.plaidml.kernels())
+      ]
 
 
 class gapi_core_test(NewOpenCVTests):
 
     def test_add(self):
         # TODO: Extend to use any type and size here
-        sz = (1280, 720)
-        in1 = np.random.randint(0, 100, sz)
-        in2 = np.random.randint(0, 100, sz)
+        sz = (720, 1280)
+        in1 = np.full(sz, 100)
+        in2 = np.full(sz, 50)
 
         # OpenCV
         expected = cv.add(in1, in2)
@@ -32,17 +33,18 @@ class gapi_core_test(NewOpenCVTests):
         g_out = cv.gapi.add(g_in1, g_in2)
         comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
 
-        for pkg in pkgs:
+        for pkg_name, pkg in pkgs:
             actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
             # Comparison
-            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
-            self.assertEqual(expected.dtype, actual.dtype)
+            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
+            self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
 
 
     def test_add_uint8(self):
-        sz = (1280, 720)
-        in1 = np.random.randint(0, 100, sz).astype(np.uint8)
-        in2 = np.random.randint(0, 100, sz).astype(np.uint8)
+        sz = (720, 1280)
+        in1 = np.full(sz, 100, dtype=np.uint8)
+        in2 = np.full(sz, 50 , dtype=np.uint8)
 
         # OpenCV
         expected = cv.add(in1, in2)
@@ -53,16 +55,17 @@ class gapi_core_test(NewOpenCVTests):
         g_out = cv.gapi.add(g_in1, g_in2)
         comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
 
-        for pkg in pkgs:
+        for pkg_name, pkg in pkgs:
             actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
             # Comparison
-            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
-            self.assertEqual(expected.dtype, actual.dtype)
+            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
+            self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
 
 
     def test_mean(self):
-        sz = (1280, 720, 3)
-        in_mat = np.random.randint(0, 100, sz)
+        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        in_mat = cv.imread(img_path)
 
         # OpenCV
         expected = cv.mean(in_mat)
@@ -72,15 +75,16 @@ class gapi_core_test(NewOpenCVTests):
         g_out = cv.gapi.mean(g_in)
         comp = cv.GComputation(g_in, g_out)
 
-        for pkg in pkgs:
+        for pkg_name, pkg in pkgs:
             actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
             # Comparison
-            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
+            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
 
 
     def test_split3(self):
-        sz = (1280, 720, 3)
-        in_mat = np.random.randint(0, 100, sz)
+        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        in_mat = cv.imread(img_path)
 
         # OpenCV
         expected = cv.split(in_mat)
@@ -90,19 +94,19 @@ class gapi_core_test(NewOpenCVTests):
         b, g, r = cv.gapi.split3(g_in)
         comp = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))
 
-        for pkg in pkgs:
+        for pkg_name, pkg in pkgs:
             actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
             # Comparison
             for e, a in zip(expected, actual):
-                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF))
-                self.assertEqual(e.dtype, a.dtype)
+                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF),
+                                 'Failed on ' + pkg_name + ' backend')
+                self.assertEqual(e.dtype, a.dtype, 'Failed on ' + pkg_name + ' backend')
 
 
     def test_threshold(self):
-        sz = (1280, 720)
-        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)
-        rand_int = np.random.randint(0, 50)
-        maxv = (rand_int, rand_int)
+        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
+        maxv = (30, 30)
 
         # OpenCV
         expected_thresh, expected_mat = cv.threshold(in_mat, maxv[0], maxv[0], cv.THRESH_TRIANGLE)
@@ -113,12 +117,15 @@ class gapi_core_test(NewOpenCVTests):
         mat, threshold = cv.gapi.threshold(g_in, g_sc, cv.THRESH_TRIANGLE)
         comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(mat, threshold))
 
-        for pkg in pkgs:
+        for pkg_name, pkg in pkgs:
             actual_mat, actual_thresh = comp.apply(cv.gin(in_mat, maxv), args=cv.compile_args(pkg))
             # Comparison
-            self.assertEqual(0.0, cv.norm(expected_mat, actual_mat, cv.NORM_INF))
-            self.assertEqual(expected_mat.dtype, actual_mat.dtype)
-            self.assertEqual(expected_thresh, actual_thresh[0])
+            self.assertEqual(0.0, cv.norm(expected_mat, actual_mat, cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
+            self.assertEqual(expected_mat.dtype, actual_mat.dtype,
+                             'Failed on ' + pkg_name + ' backend')
+            self.assertEqual(expected_thresh, actual_thresh[0],
+                             'Failed on ' + pkg_name + ' backend')
 
 
 if __name__ == '__main__':
diff --git a/modules/gapi/misc/python/test/test_gapi_imgproc.py b/modules/gapi/misc/python/test/test_gapi_imgproc.py
new file mode 100644
index 0000000000..dd1e397081
--- /dev/null
+++ b/modules/gapi/misc/python/test/test_gapi_imgproc.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+import numpy as np
+import cv2 as cv
+import os
+
+from tests_common import NewOpenCVTests
+
+
+# Plaidml is an optional backend
+pkgs = [
+           ('ocl'    , cv.gapi.core.ocl.kernels()),
+           ('cpu'    , cv.gapi.core.cpu.kernels()),
+           ('fluid'  , cv.gapi.core.fluid.kernels())
+           # ('plaidml', cv.gapi.core.plaidml.kernels())
+       ]
+
+
+class gapi_imgproc_test(NewOpenCVTests):
+
+    def test_good_features_to_track(self):
+        # TODO: Extend to use any type and size here
+        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        in1 = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
+
+        # NB: goodFeaturesToTrack configuration
+        max_corners         = 50
+        quality_lvl         = 0.01
+        min_distance        = 10
+        block_sz            = 3
+        use_harris_detector = True
+        k                   = 0.04
+        mask                = None
+
+        # OpenCV
+        expected = cv.goodFeaturesToTrack(in1, max_corners, quality_lvl,
+                                          min_distance, mask=mask,
+                                          blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)
+
+        # G-API
+        g_in = cv.GMat()
+        g_out = cv.gapi.goodFeaturesToTrack(g_in, max_corners, quality_lvl,
+                                            min_distance, mask, block_sz, use_harris_detector, k)
+
+        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
+
+        for pkg_name, pkg in pkgs:
+            actual = comp.apply(cv.gin(in1), args=cv.compile_args(pkg))
+            # NB: OpenCV & G-API have different output shapes:
+            # OpenCV - (num_points, 1, 2)
+            # G-API  - (num_points, 2)
+            # Comparison
+            self.assertEqual(0.0, cv.norm(expected.flatten(), actual.flatten(), cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
+
+
+    def test_rgb2gray(self):
+        # TODO: Extend to use any type and size here
+        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        in1 = cv.imread(img_path)
+
+        # OpenCV
+        expected = cv.cvtColor(in1, cv.COLOR_RGB2GRAY)
+
+        # G-API
+        g_in = cv.GMat()
+        g_out = cv.gapi.RGB2Gray(g_in)
+
+        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
+
+        for pkg_name, pkg in pkgs:
+            actual = comp.apply(cv.gin(in1), args=cv.compile_args(pkg))
+            # Comparison
+            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
+
+
+if __name__ == '__main__':
+    NewOpenCVTests.bootstrap()
diff --git a/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py b/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py
index 8000496f79..53304fcb26 100644
--- a/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py
+++ b/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py
@@ -2,25 +2,26 @@
 
 import numpy as np
 import cv2 as cv
+import os
 
 from tests_common import NewOpenCVTests
 
 
 # Plaidml is an optional backend
 pkgs = [
-         cv.gapi.core.ocl.kernels(),
-         cv.gapi.core.cpu.kernels(),
-         cv.gapi.core.fluid.kernels()
-         # cv.gapi.core.plaidml.kernels()
-       ]
+         ('ocl'    , cv.gapi.core.ocl.kernels()),
+         ('cpu'    , cv.gapi.core.cpu.kernels()),
+         ('fluid'  , cv.gapi.core.fluid.kernels())
+         # ('plaidml', cv.gapi.core.plaidml.kernels())
+     ]
 
 
 class gapi_sample_pipelines(NewOpenCVTests):
 
     # NB: This test check multiple outputs for operation
     def test_mean_over_r(self):
-        sz = (100, 100, 3)
-        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)
+        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
+        in_mat = cv.imread(img_path)
 
         # # OpenCV
         _, _, r_ch = cv.split(in_mat)
@@ -32,10 +33,11 @@ class gapi_sample_pipelines(NewOpenCVTests):
         g_out = cv.gapi.mean(r)
         comp = cv.GComputation(g_in, g_out)
 
-        for pkg in pkgs:
+        for pkg_name, pkg in pkgs:
             actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
             # Comparison
-            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
+            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
+                             'Failed on ' + pkg_name + ' backend')
 
 
 if __name__ == '__main__':
diff --git a/modules/gapi/misc/python/test/test_gapi_streaming.py b/modules/gapi/misc/python/test/test_gapi_streaming.py
index bf182d9c91..ae7ef5d338 100644
--- a/modules/gapi/misc/python/test/test_gapi_streaming.py
+++ b/modules/gapi/misc/python/test/test_gapi_streaming.py
@@ -47,6 +47,8 @@ class test_gapi_streaming(NewOpenCVTests):
         ccomp.start()
 
         # Assert
+        max_num_frames  = 10
+        proc_num_frames = 0
         while cap.isOpened():
             has_expected, expected = cap.read()
             has_actual,   actual   = ccomp.pull()
@@ -58,6 +60,10 @@ class test_gapi_streaming(NewOpenCVTests):
 
             self.assertEqual(0.0, cv.norm(cv.medianBlur(expected, ksize), actual, cv.NORM_INF))
 
+            proc_num_frames += 1
+            if proc_num_frames == max_num_frames:
+                break
+
 
     def test_video_split3(self):
         path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])
@@ -76,6 +82,8 @@ class test_gapi_streaming(NewOpenCVTests):
         ccomp.start()
 
         # Assert
+        max_num_frames  = 10
+        proc_num_frames = 0
         while cap.isOpened():
             has_expected, frame = cap.read()
             has_actual,   actual   = ccomp.pull()
@@ -89,6 +97,10 @@ class test_gapi_streaming(NewOpenCVTests):
             for e, a in zip(expected, actual):
                 self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF))
 
+            proc_num_frames += 1
+            if proc_num_frames == max_num_frames:
+                break
+
 
     def test_video_add(self):
         sz = (576, 768, 3)
@@ -111,6 +123,8 @@ class test_gapi_streaming(NewOpenCVTests):
         ccomp.start()
 
         # Assert
+        max_num_frames  = 10
+        proc_num_frames = 0
         while cap.isOpened():
             has_expected, frame  = cap.read()
             has_actual,   actual = ccomp.pull()
@@ -123,6 +137,65 @@ class test_gapi_streaming(NewOpenCVTests):
             expected = cv.add(frame, in_mat)
             self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
 
+            proc_num_frames += 1
+            if proc_num_frames == max_num_frames:
+                break
+
+
+    def test_video_good_features_to_track(self):
+        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])
+
+        # NB: goodFeaturesToTrack configuration
+        max_corners         = 50
+        quality_lvl         = 0.01
+        min_distance        = 10
+        block_sz            = 3
+        use_harris_detector = True
+        k                   = 0.04
+        mask                = None
+
+        # OpenCV
+        cap = cv.VideoCapture(path)
+
+        # G-API
+        g_in = cv.GMat()
+        g_gray = cv.gapi.RGB2Gray(g_in)
+        g_out = cv.gapi.goodFeaturesToTrack(g_gray, max_corners, quality_lvl,
+                                            min_distance, mask, block_sz, use_harris_detector, k)
+
+        c = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
+
+        ccomp = c.compileStreaming()
+        source = cv.gapi.wip.make_capture_src(path)
+        ccomp.setSource(source)
+        ccomp.start()
+
+        # Assert
+        max_num_frames  = 10
+        proc_num_frames = 0
+        while cap.isOpened():
+            has_expected, frame  = cap.read()
+            has_actual,   actual = ccomp.pull()
+
+            self.assertEqual(has_expected, has_actual)
+
+            if not has_actual:
+                break
+
+            # OpenCV
+            frame = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
+            expected = cv.goodFeaturesToTrack(frame, max_corners, quality_lvl,
+                                              min_distance, mask=mask,
+                                              blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)
+            for e, a in zip(expected, actual):
+                # NB: OpenCV & G-API have different output shapes:
+                # OpenCV - (num_points, 1, 2)
+                # G-API  - (num_points, 2)
+                self.assertEqual(0.0, cv.norm(e.flatten(), a.flatten(), cv.NORM_INF))
+
+            proc_num_frames += 1
+            if proc_num_frames == max_num_frames:
+                break
 
 
 if __name__ == '__main__':
diff --git a/modules/gapi/src/api/gcomputation.cpp b/modules/gapi/src/api/gcomputation.cpp
index 9ff0273b40..5668cddc93 100644
--- a/modules/gapi/src/api/gcomputation.cpp
+++ b/modules/gapi/src/api/gcomputation.cpp
@@ -9,6 +9,7 @@
 #include <algorithm> // remove_if
 #include <cctype>    // isspace (non-locale version)
 #include <ade/util/algorithm.hpp>
+#include <ade/util/zip_range.hpp>   // util::indexed
 
 #include "logger.hpp" // GAPI_LOG
 
@@ -21,6 +22,7 @@
 
 #include "compiler/gmodelbuilder.hpp"
 #include "compiler/gcompiler.hpp"
+#include "compiler/gcompiled_priv.hpp"
 
 // cv::GComputation private implementation /////////////////////////////////////
 // <none>
@@ -174,28 +176,42 @@ cv::GRunArgs cv::GComputation::apply(GRunArgs &&ins, GCompileArgs &&args)
 {
     recompile(descr_of(ins), std::move(args));
 
-    const auto& out_metas = m_priv->m_lastCompiled.outMetas();
+    const auto& out_info = m_priv->m_lastCompiled.priv().outInfo();
+
     GRunArgs run_args;
     GRunArgsP outs;
-    run_args.reserve(out_metas.size());
-    outs.reserve(out_metas.size());
+    run_args.reserve(out_info.size());
+    outs.reserve(out_info.size());
 
-    for (auto&& meta : out_metas)
+    for (auto&& info : out_info)
     {
-        switch (meta.index())
+        switch (info.shape)
         {
-            case cv::GMetaArg::index_of<cv::GMatDesc>():
+            case cv::GShape::GMAT:
             {
                 run_args.emplace_back(cv::Mat{});
                 outs.emplace_back(&cv::util::get<cv::Mat>(run_args.back()));
                 break;
             }
-            case cv::GMetaArg::index_of<cv::GScalarDesc>():
+            case cv::GShape::GSCALAR:
             {
                 run_args.emplace_back(cv::Scalar{});
                 outs.emplace_back(&cv::util::get<cv::Scalar>(run_args.back()));
                 break;
             }
+            case cv::GShape::GARRAY:
+            {
+                switch (info.kind)
+                {
+                    case cv::detail::OpaqueKind::CV_POINT2F:
+                        run_args.emplace_back(cv::detail::VectorRef{std::vector<cv::Point2f>{}});
+                        outs.emplace_back(cv::util::get<cv::detail::VectorRef>(run_args.back()));
+                        break;
+                    default:
+                        util::throw_error(std::logic_error("Unsupported kind for GArray"));
+                }
+                break;
+            }
             default:
                 util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output"));
         }
diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp
index 592c03cfed..8c2313b292 100644
--- a/modules/gapi/src/backends/common/serialization.cpp
+++ b/modules/gapi/src/backends/common/serialization.cpp
@@ -152,6 +152,13 @@ IIStream& operator>> (IIStream& is, cv::Point& pt) {
     return is >> pt.x >> pt.y;
 }
 
+IOStream& operator<< (IOStream& os, const cv::Point2f &pt) {
+    return os << pt.x << pt.y;
+}
+IIStream& operator>> (IIStream& is, cv::Point2f& pt) {
+    return is >> pt.x >> pt.y;
+}
+
 IOStream& operator<< (IOStream& os, const cv::Size &sz) {
     return os << sz.width << sz.height;
 }
@@ -516,17 +523,17 @@ IOStream& operator<< (IOStream& os, const cv::GArg &arg) {
         GAPI_Assert(arg.kind == cv::detail::ArgKind::OPAQUE_VAL);
         GAPI_Assert(arg.opaque_kind != cv::detail::OpaqueKind::CV_UNKNOWN);
         switch (arg.opaque_kind) {
-        case cv::detail::OpaqueKind::CV_BOOL:   os << arg.get<bool>();         break;
-        case cv::detail::OpaqueKind::CV_INT:    os << arg.get<int>();          break;
-        case cv::detail::OpaqueKind::CV_UINT64: os << arg.get<uint64_t>();     break;
-        case cv::detail::OpaqueKind::CV_DOUBLE: os << arg.get<double>();       break;
-        case cv::detail::OpaqueKind::CV_FLOAT:  os << arg.get<float>();        break;
-        case cv::detail::OpaqueKind::CV_STRING: os << arg.get<std::string>();  break;
-        case cv::detail::OpaqueKind::CV_POINT:  os << arg.get<cv::Point>();    break;
-        case cv::detail::OpaqueKind::CV_SIZE:   os << arg.get<cv::Size>();     break;
-        case cv::detail::OpaqueKind::CV_RECT:   os << arg.get<cv::Rect>();     break;
-        case cv::detail::OpaqueKind::CV_SCALAR: os << arg.get<cv::Scalar>();   break;
-        case cv::detail::OpaqueKind::CV_MAT:    os << arg.get<cv::Mat>();      break;
+        case cv::detail::OpaqueKind::CV_BOOL:    os << arg.get<bool>();         break;
+        case cv::detail::OpaqueKind::CV_INT:     os << arg.get<int>();          break;
+        case cv::detail::OpaqueKind::CV_UINT64:  os << arg.get<uint64_t>();     break;
+        case cv::detail::OpaqueKind::CV_DOUBLE:  os << arg.get<double>();       break;
+        case cv::detail::OpaqueKind::CV_FLOAT:   os << arg.get<float>();        break;
+        case cv::detail::OpaqueKind::CV_STRING:  os << arg.get<std::string>();  break;
+        case cv::detail::OpaqueKind::CV_POINT:   os << arg.get<cv::Point>();    break;
+        case cv::detail::OpaqueKind::CV_SIZE:    os << arg.get<cv::Size>();     break;
+        case cv::detail::OpaqueKind::CV_RECT:    os << arg.get<cv::Rect>();     break;
+        case cv::detail::OpaqueKind::CV_SCALAR:  os << arg.get<cv::Scalar>();   break;
+        case cv::detail::OpaqueKind::CV_MAT:     os << arg.get<cv::Mat>();      break;
         default: GAPI_Assert(false && "GArg: Unsupported (unknown?) opaque value type");
         }
     }
@@ -550,17 +557,18 @@ IIStream& operator>> (IIStream& is, cv::GArg &arg) {
         switch (arg.opaque_kind) {
 #define HANDLE_CASE(E,T) case cv::detail::OpaqueKind::CV_##E:           \
             { T t{}; is >> t; arg = (cv::GArg(t)); } break
-            HANDLE_CASE(BOOL   , bool);
-            HANDLE_CASE(INT    , int);
-            HANDLE_CASE(UINT64 , uint64_t);
-            HANDLE_CASE(DOUBLE , double);
-            HANDLE_CASE(FLOAT  , float);
-            HANDLE_CASE(STRING , std::string);
-            HANDLE_CASE(POINT  , cv::Point);
-            HANDLE_CASE(SIZE   , cv::Size);
-            HANDLE_CASE(RECT   , cv::Rect);
-            HANDLE_CASE(SCALAR , cv::Scalar);
-            HANDLE_CASE(MAT    , cv::Mat);
+            HANDLE_CASE(BOOL    , bool);
+            HANDLE_CASE(INT     , int);
+            HANDLE_CASE(UINT64  , uint64_t);
+            HANDLE_CASE(DOUBLE  , double);
+            HANDLE_CASE(FLOAT   , float);
+            HANDLE_CASE(STRING  , std::string);
+            HANDLE_CASE(POINT   , cv::Point);
+            HANDLE_CASE(POINT2F , cv::Point2f);
+            HANDLE_CASE(SIZE    , cv::Size);
+            HANDLE_CASE(RECT    , cv::Rect);
+            HANDLE_CASE(SCALAR  , cv::Scalar);
+            HANDLE_CASE(MAT     , cv::Mat);
 #undef HANDLE_CASE
         default: GAPI_Assert(false && "GArg: Unsupported (unknown?) opaque value type");
         }
diff --git a/modules/gapi/src/compiler/gcompiled_priv.hpp b/modules/gapi/src/compiler/gcompiled_priv.hpp
index f21bfc80bc..b08b1f9c59 100644
--- a/modules/gapi/src/compiler/gcompiled_priv.hpp
+++ b/modules/gapi/src/compiler/gcompiled_priv.hpp
@@ -38,6 +38,10 @@ class GAPI_EXPORTS GCompiled::Priv
     GMetaArgs  m_outMetas; // inferred by compiler
     std::unique_ptr<cv::gimpl::GExecutor> m_exec;
 
+    // NB: Used by python wrapper to clarify input/output types
+    GTypesInfo m_out_info;
+    GTypesInfo m_in_info;
+
     void checkArgs(const cv::gimpl::GRuntimeArgs &args) const;
 
 public:
@@ -55,6 +59,12 @@ public:
     const GMetaArgs& outMetas() const;
 
     const cv::gimpl::GModel::Graph& model() const;
+
+    void setOutInfo(const GTypesInfo& info) { m_out_info = std::move(info); }
+    const GTypesInfo& outInfo() const { return m_out_info; }
+
+    void setInInfo(const GTypesInfo& info) { m_in_info = std::move(info); }
+    const GTypesInfo& inInfo() const { return m_in_info; }
 };
 
 }
diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp
index f6fa398c17..4d050dbabd 100644
--- a/modules/gapi/src/compiler/gcompiler.cpp
+++ b/modules/gapi/src/compiler/gcompiler.cpp
@@ -417,6 +417,19 @@ void cv::gimpl::GCompiler::compileIslands(ade::Graph &g, const cv::GCompileArgs
     GIslandModel::compileIslands(gim, g, args);
 }
 
+static cv::GTypesInfo collectInfo(const cv::gimpl::GModel::ConstGraph& g,
+                                  const std::vector<ade::NodeHandle>& nhs) {
+    cv::GTypesInfo info;
+    info.reserve(nhs.size());
+
+    ade::util::transform(nhs, std::back_inserter(info), [&g](const ade::NodeHandle& nh) {
+        const auto& data = g.metadata(nh).get<cv::gimpl::Data>();
+        return cv::GTypeInfo{data.shape, data.kind};
+    });
+
+    return info;
+}
+
 cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg)
 {
     // This is the final compilation step. Here:
@@ -435,6 +448,8 @@ cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg)
     //     an execution plan for it (backend-specific execution)
     // ...before call to produceCompiled();
 
+    GModel::ConstGraph cgr(*pg);
+
     const auto &outMetas = GModel::ConstGraph(*pg).metadata()
         .get<OutputMeta>().outMeta;
     std::unique_ptr<GExecutor> pE(new GExecutor(std::move(pg)));
@@ -443,6 +458,14 @@ cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg)
 
     GCompiled compiled;
     compiled.priv().setup(m_metas, outMetas, std::move(pE));
+
+    // NB: Need to store input/output GTypeInfo to allocate output arrays for python bindings
+    auto out_meta = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().out_nhs);
+    auto in_meta  = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().in_nhs);
+
+    compiled.priv().setOutInfo(std::move(out_meta));
+    compiled.priv().setInInfo(std::move(in_meta));
+
     return compiled;
 }
 
@@ -458,13 +481,15 @@ cv::GStreamingCompiled cv::gimpl::GCompiler::produceStreamingCompiled(GPtr &&pg)
         outMetas = GModel::ConstGraph(*pg).metadata().get<OutputMeta>().outMeta;
     }
 
-    auto out_desc = GModel::ConstGraph(*pg).metadata().get<cv::gimpl::Protocol>().outputs;
-    GShapes out_shapes;
-    for (auto&& desc : out_desc)
-    {
-        out_shapes.push_back(desc.shape);
-    }
-    compiled.priv().setOutShapes(std::move(out_shapes));
+
+    GModel::ConstGraph cgr(*pg);
+
+    // NB: Need to store input/output GTypeInfo to allocate output arrays for python bindings
+    auto out_meta = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().out_nhs);
+    auto in_meta  = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().in_nhs);
+
+    compiled.priv().setOutInfo(std::move(out_meta));
+    compiled.priv().setInInfo(std::move(in_meta));
 
     std::unique_ptr<GStreamingExecutor> pE(new GStreamingExecutor(std::move(pg),
                                                                   m_args));
diff --git a/modules/gapi/src/compiler/gstreaming.cpp b/modules/gapi/src/compiler/gstreaming.cpp
index eb06f3f6f2..fa736d592e 100644
--- a/modules/gapi/src/compiler/gstreaming.cpp
+++ b/modules/gapi/src/compiler/gstreaming.cpp
@@ -8,6 +8,7 @@
 #include "precomp.hpp"
 
 #include <ade/graph.hpp>
+#include <ade/util/zip_range.hpp>   // util::indexed
 
 #include <opencv2/gapi/gproto.hpp> // can_describe
 #include <opencv2/gapi/gcompiled.hpp>
@@ -121,13 +122,13 @@ std::tuple<bool, cv::GRunArgs> cv::GStreamingCompiled::pull()
     // FIXME: Why it is not @ priv??
     GRunArgs run_args;
     GRunArgsP outs;
-    const auto& out_shapes = m_priv->outShapes();
-    run_args.reserve(out_shapes.size());
-    outs.reserve(out_shapes.size());
+    const auto& out_info = m_priv->outInfo();
+    run_args.reserve(out_info.size());
+    outs.reserve(out_info.size());
 
-    for (auto&& shape : out_shapes)
+    for (auto&& info : out_info)
     {
-        switch (shape)
+        switch (info.shape)
         {
             case cv::GShape::GMAT:
             {
@@ -141,6 +142,19 @@ std::tuple<bool, cv::GRunArgs> cv::GStreamingCompiled::pull()
                 outs.emplace_back(&cv::util::get<cv::Scalar>(run_args.back()));
                 break;
             }
+            case cv::GShape::GARRAY:
+            {
+                switch (info.kind)
+                {
+                    case cv::detail::OpaqueKind::CV_POINT2F:
+                        run_args.emplace_back(cv::detail::VectorRef{std::vector<cv::Point2f>{}});
+                        outs.emplace_back(cv::util::get<cv::detail::VectorRef>(run_args.back()));
+                        break;
+                    default:
+                        util::throw_error(std::logic_error("Unsupported kind for GArray"));
+                }
+                break;
+            }
             default:
-                util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output"));
+                util::throw_error(std::logic_error("Only cv::GMat, cv::GScalar and cv::GArray<cv::Point2f> are supported for Python output"));
         }
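
Two details of the `pull()` allocation loop are easy to miss: storage is default-constructed per declared output shape, and `reserve()` is called before any pointer into `run_args` is taken, so later `emplace_back` calls cannot reallocate and invalidate those pointers. A compressed standalone analogue with stand-in types, not the real `GRunArg`/`GRunArgP` variants:

```
#include <stdexcept>
#include <variant>
#include <vector>

enum class Shape { MAT, SCALAR, ARRAY };
struct Mat {};
struct Scalar {};
using Arg  = std::variant<Mat, Scalar, std::vector<float>>;
using ArgP = std::variant<Mat*, Scalar*, std::vector<float>*>;

void allocate(const std::vector<Shape>& shapes,
              std::vector<Arg>& args, std::vector<ArgP>& outs)
{
    // Reserve up front: pointers into 'args' are recorded while elements
    // are still being appended, and a reallocation would invalidate them.
    args.reserve(shapes.size());
    outs.reserve(shapes.size());
    for (auto shape : shapes)
    {
        switch (shape)
        {
            case Shape::MAT:
                args.emplace_back(Mat{});
                outs.emplace_back(&std::get<Mat>(args.back()));
                break;
            case Shape::SCALAR:
                args.emplace_back(Scalar{});
                outs.emplace_back(&std::get<Scalar>(args.back()));
                break;
            case Shape::ARRAY: // default-construct an empty vector to be filled
                args.emplace_back(std::vector<float>{});
                outs.emplace_back(&std::get<std::vector<float>>(args.back()));
                break;
            default:
                throw std::logic_error("unsupported output shape");
        }
    }
}
```
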
diff --git a/modules/gapi/src/compiler/gstreaming_priv.hpp b/modules/gapi/src/compiler/gstreaming_priv.hpp
index 2f195ca226..be0869e663 100644
--- a/modules/gapi/src/compiler/gstreaming_priv.hpp
+++ b/modules/gapi/src/compiler/gstreaming_priv.hpp
@@ -27,7 +27,10 @@ class GAPI_EXPORTS GStreamingCompiled::Priv
     GMetaArgs  m_metas;    // passed by user
     GMetaArgs  m_outMetas; // inferred by compiler
     std::unique_ptr<cv::gimpl::GStreamingExecutor> m_exec;
-    GShapes m_out_shapes;
+
+    // NB: Used by the Python wrapper to determine input/output types
+    GTypesInfo m_out_info;
+    GTypesInfo m_in_info;
 
 public:
     void setup(const GMetaArgs &metaArgs,
@@ -48,10 +51,11 @@ public:
 
     bool running() const;
 
-    // NB: std::tuple<bool, cv::GRunArgs> pull() creates GRunArgs for outputs,
-    // so need to know out shapes to create corresponding GRunArg
-    void setOutShapes(GShapes shapes) { m_out_shapes = std::move(shapes); }
-    const GShapes& outShapes() const { return m_out_shapes; }
+    void setOutInfo(GTypesInfo info) { m_out_info = std::move(info); }
+    const GTypesInfo& outInfo() const { return m_out_info; }
+
+    void setInInfo(GTypesInfo info) { m_in_info = std::move(info); }
+    const GTypesInfo& inInfo() const { return m_in_info; }
 };
 
 } // namespace cv
diff --git a/modules/gapi/test/own/gapi_types_tests.cpp b/modules/gapi/test/own/gapi_types_tests.cpp
index b40bb1df88..602a931de1 100644
--- a/modules/gapi/test/own/gapi_types_tests.cpp
+++ b/modules/gapi/test/own/gapi_types_tests.cpp
@@ -27,6 +27,22 @@ TEST(Point, CreateWithParams)
     EXPECT_EQ(2, p.y);
 }
 
+TEST(Point2f, CreateEmpty)
+{
+    cv::gapi::own::Point2f p;
+
+    EXPECT_EQ(0.f, p.x);
+    EXPECT_EQ(0.f, p.y);
+}
+
+TEST(Point2f, CreateWithParams)
+{
+    cv::gapi::own::Point2f p = {3.14f, 2.71f};
+
+    EXPECT_EQ(3.14f, p.x);
+    EXPECT_EQ(2.71f, p.y);
+}
+
 TEST(Rect, CreateEmpty)
 {
     cv::gapi::own::Rect r;