Ported all demos and examples to the new API

pull/3471/head
kallaballa 2 years ago
parent 834ba6e891
commit cbc6082fcf
  1. 39
      modules/v4d/include/opencv2/v4d/v4d.hpp
  2. 2
      modules/v4d/samples/beauty-demo.cpp
  3. 2
      modules/v4d/samples/cube-demo.cpp
  4. 2
      modules/v4d/samples/custom_source_and_sink.cpp
  5. 2
      modules/v4d/samples/display_image.cpp
  6. 2
      modules/v4d/samples/display_image_fb.cpp
  7. 2
      modules/v4d/samples/display_image_nvg.cpp
  8. 2
      modules/v4d/samples/font-demo.cpp
  9. 2
      modules/v4d/samples/font_rendering.cpp
  10. 2
      modules/v4d/samples/font_with_gui.cpp
  11. 2
      modules/v4d/samples/many_cubes-demo.cpp
  12. 2
      modules/v4d/samples/nanovg-demo.cpp
  13. 2
      modules/v4d/samples/optflow-demo.cpp
  14. 34
      modules/v4d/samples/pedestrian-demo.cpp
  15. 2
      modules/v4d/samples/render_opengl.cpp
  16. 2
      modules/v4d/samples/shader-demo.cpp
  17. 2
      modules/v4d/samples/vector_graphics.cpp
  18. 2
      modules/v4d/samples/vector_graphics_and_fb.cpp
  19. 2
      modules/v4d/samples/video-demo.cpp
  20. 2
      modules/v4d/samples/video_editing.cpp

@ -71,7 +71,7 @@ class Plan {
public:
virtual ~Plan() {};
virtual void setup(cv::Ptr<V4D> window) {};
virtual void infere(cv::Ptr<V4D> window) = 0;
virtual void infer(cv::Ptr<V4D> window) = 0;
virtual void teardown(cv::Ptr<V4D> window) {};
};
/*!
@ -487,7 +487,7 @@ public:
}
void capture() {
cv::UMat tmp;
static thread_local cv::UMat tmp(fbSize(), CV_8UC3);
capture(tmp);
}
@ -507,10 +507,10 @@ public:
}
void write() {
cv::UMat frame;
static thread_local cv::UMat frame(fbSize(), CV_8UC3);
fb([](cv::UMat& frameBuffer, const cv::UMat& f) {
f.copyTo(frameBuffer);
fb([](const cv::UMat& frameBuffer, cv::UMat& f) {
frameBuffer.copyTo(f);
}, frame);
write([](cv::UMat& outputFrame, const cv::UMat& f){
@ -540,13 +540,6 @@ public:
});
}
/*!
* Execute function object fn inside a nanovg context.
* The context takes care of setting up opengl and nanovg states.
* A function object passed like that can use the functions in cv::viz::nvg.
* @param fn A function that is passed the size of the framebuffer
* and performs drawing using cv::v4d::nvg
*/
template <typename Tfn, typename ... Args>
void nvg(Tfn fn, Args&&... args) {
CV_Assert(detail::is_stateless<std::remove_cv_t<std::remove_reference_t<decltype(fn)>>>::value);
@ -572,17 +565,15 @@ public:
}
template <typename Tfn, typename ... Args>
void parallel(Tfn fn, Args&&... args) {
CV_Assert(detail::is_stateless<std::remove_cv_t<std::remove_reference_t<decltype(fn)>>>::value);
const string id = make_id("parallel", fn);
TimeTracker::getInstance()->execute(id, [this, fn, id, &args...](){
(emit_access<std::true_type, std::remove_reference_t<Args>, Args...>(id, std::is_const_v<std::remove_reference_t<Args>>, &args),...);
std::function functor(fn);
typename detail::function_traits<decltype(fn)>::argument_types t = std::make_tuple(args...);
std::apply([=](auto &&... Targs) { add_transaction<decltype(functor)>(parallelCtx(), id, std::forward<decltype(functor)>(fn), std::forward<std::add_lvalue_reference_t<decltype(Targs)>>(Targs)...); }, t);
});
}
void parallel(Tfn fn, Args&&... args) {
CV_Assert(detail::is_stateless<std::remove_cv_t<std::remove_reference_t<decltype(fn)>>>::value);
const string id = make_id("parallel", fn);
TimeTracker::getInstance()->execute(id, [this, fn, id, &args...](){
(emit_access<std::true_type, std::remove_reference_t<Args>, Args...>(id, std::is_const_v<std::remove_reference_t<Args>>, &args),...);
std::function functor(fn);
add_transaction<decltype(functor)>(parallelCtx(), id, std::forward<decltype(functor)>(fn), std::forward<Args>(args)...);
});
}
CV_EXPORTS void imgui(std::function<void(ImGuiContext* ctx)> fn);
/*!
@ -688,7 +679,7 @@ public:
this->runPlan();
this->display();
this->clearPlan();
plan->infere(self());
plan->infer(self());
this->makePlan();
do {
this->runPlan();

@ -215,7 +215,7 @@ class BeautyDemoPlan : public Plan {
bool faceFound_ = false;
FaceFeatures features_;
public:
void infere(cv::Ptr<V4D> window) override {
void infer(cv::Ptr<V4D> window) override {
auto always = [](){ return true; };
auto isTrue = [](bool& ff){ return ff; };
auto isFalse = [](bool& ff){ return !ff; };

@ -219,7 +219,7 @@ public:
init_scene(sz, v, sp, ut);
}, window->fbSize(), vao, shaderProgram, uniformTransform);
}
void infere(cv::Ptr<V4D> window) {
void infer(cv::Ptr<V4D> window) {
window->gl([](){
//Clear the background
glClearColor(0.2, 0.24, 0.4, 1);

@ -41,7 +41,7 @@ int main() {
class CustomSourceAndSinkPlan : public Plan {
string hr_ = "Hello Rainbow!";
void infere(cv::Ptr<V4D> win) override {
void infer(cv::Ptr<V4D> win) override {
win->capture();
//Render "Hello Rainbow!" over the video

@ -21,7 +21,7 @@ int main() {
#endif
}
//Display the framebuffer in the native window in an endless loop.
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Feeds the image to the video pipeline
win->feed(image_);
}

@ -27,7 +27,7 @@ int main() {
}, image_, resized_, converted_, win->fbSize());
}
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Create a fb context and copy the prepared image to the framebuffer. The fb context
//takes care of retrieving and storing the data on the graphics card (using CL-GL
//interop if available), ready for other contexts to use

@ -40,7 +40,7 @@ public:
}, image_);
}
void infere(Ptr<V4D> win) override{
void infer(Ptr<V4D> win) override{
//Creates a NanoVG context to draw the loaded image_ over again to the screen.
win->nvg([](const Image_t& img, const cv::Size& sz) {
using namespace cv::v4d::nvg;

@ -95,7 +95,7 @@ class FontDemoPlan : public Plan {
int32_t translateY_;
public:
void infere(cv::Ptr<V4D> window) override {
void infer(cv::Ptr<V4D> window) override {
auto always = []() { return true; };
auto isTrue = [](const bool& b) { return b; };

@ -10,7 +10,7 @@ int main() {
//The text to render
string hw_ = "Hello World";
public:
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Render the text at the center of the screen. Note that you can load you own fonts.
win->nvg([](const Size &sz, const string &str) {
using namespace cv::v4d::nvg;

@ -25,7 +25,7 @@ int main() {
//The text
string hw_ = "hello world";
public:
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Render the text at the center of the screen using parameters from the GUI.
win->nvg([](const Size& sz, const string& str, const float& s, const std::vector<float>& c) {
using namespace cv::v4d::nvg;

@ -232,7 +232,7 @@ public:
}, window->fbSize(), vao[i], shaderProgram[i], uniformTransform[i]);
}
}
void infere(cv::Ptr<V4D> window) {
void infer(cv::Ptr<V4D> window) {
window->gl([](){
//Clear the background
glClearColor(0.2, 0.24, 0.4, 1);

@ -133,7 +133,7 @@ class NanoVGDemoPlan : public Plan {
cv::UMat hueChannel_;
double hue_;
public:
void infere(cv::Ptr<V4D> window) override {
void infer(cv::Ptr<V4D> window) override {
window->parallel([](const uint64_t& frameCount, double& hue){
//we use frame count to calculate the current hue

@ -403,7 +403,7 @@ class OptflowPlan : public Plan {
vector<cv::Point2f> detectedPoints;
public:
virtual ~OptflowPlan() override {};
virtual void infere(cv::Ptr<V4D> window) override {
virtual void infer(cv::Ptr<V4D> window) override {
window->capture([](const cv::UMat& videoFrame, cv::UMat& d, cv::UMat& b) {
//resize to foreground scale
cv::resize(videoFrame, d, cv::Size(videoFrame.size().width * fg_scale, videoFrame.size().height * fg_scale));

@ -33,8 +33,6 @@ constexpr const char* OUTPUT_FILENAME = "pedestrian-demo.mkv";
#endif
const int BLUR_KERNEL_SIZE = std::max(int(DIAG / 200 % 2 == 0 ? DIAG / 200 + 1 : DIAG / 200), 1);
//Descriptor used for pedestrian detection
static thread_local cv::HOGDescriptor hog;
//adapted from cv::dnn_objdetect::InferBbox
static inline bool pair_comparator(std::pair<double, size_t> l1, std::pair<double, size_t> l2) {
@ -138,16 +136,20 @@ class PedestrianDemoPlan : public Plan {
bool trackerInitialized_ = false;
//If tracking fails re-detect
bool redetect_ = true;
//Descriptor used for pedestrian detection
cv::HOGDescriptor hog_;
public:
void setup(cv::Ptr<V4D> window) override {
params_.desc_pca = cv::TrackerKCF::GRAY;
params_.compress_feature = false;
params_.compressed_size = 1;
tracker_ = cv::TrackerKCF::create(params_);
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
window->parallel([](cv::TrackerKCF::Params& params, cv::Ptr<cv::Tracker>& tracker, cv::HOGDescriptor& hog){
params.desc_pca = cv::TrackerKCF::GRAY;
params.compress_feature = false;
params.compressed_size = 1;
tracker = cv::TrackerKCF::create(params);
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
}, params_, tracker_, hog_);
}
void infere(cv::Ptr<V4D> window) override {
void infer(cv::Ptr<V4D> window) override {
static auto always = [](){ return true; };
static auto doRedect = [](const bool& trackerInit, const bool& redetect){ return !trackerInit || redetect; };
static auto dontRedect = [](const bool& trackerInit, const bool& redetect){ return trackerInit && !redetect; };
@ -156,11 +158,11 @@ public:
{
window->capture();
window->fb([&](const cv::UMat& frameBuffer){
window->fb([](const cv::UMat& frameBuffer, cv::UMat& videoFrame){
//copy video frame
cvtColor(frameBuffer,videoFrame_,cv::COLOR_BGRA2RGB);
//downsample video frame for hog detection
});
cvtColor(frameBuffer,videoFrame,cv::COLOR_BGRA2RGB);
//downsample video frame for hog_ detection
}, videoFrame_);
window->parallel([](const cv::UMat& videoFrame, cv::UMat& videoFrameDown, cv::UMat& videoFrameDownGrey, cv::UMat& background){
cv::resize(videoFrame, videoFrameDown, cv::Size(DOWNSIZE_WIDTH, DOWNSIZE_HEIGHT));
@ -173,7 +175,7 @@ public:
//Try to track the pedestrian (if we currently are tracking one), else re-detect using HOG descriptor
window->graph(doRedect, trackerInitialized_, redetect_);
{
window->parallel([](bool& redetect, cv::UMat& videoFrameDownGrey, std::vector<cv::Rect>& locations, vector<vector<double>>& boxes, vector<double>& probs, cv::Ptr<cv::TrackerKCF>& tracker, cv::Rect& tracked, bool& trackerInitialized){
window->parallel([](cv::HOGDescriptor& hog, bool& redetect, cv::UMat& videoFrameDownGrey, std::vector<cv::Rect>& locations, vector<vector<double>>& boxes, vector<double>& probs, cv::Ptr<cv::Tracker>& tracker, cv::Rect& tracked, bool& trackerInitialized){
redetect = false;
//Detect pedestrians
hog.detectMultiScale(videoFrameDownGrey, locations, 0, cv::Size(), cv::Size(), 1.15, 2.0, false);
@ -203,12 +205,12 @@ public:
trackerInitialized = true;
}
}
}, redetect_, videoFrameDownGrey_, locations_, boxes_, probs_, tracker_, tracked_, trackerInitialized_);
}, hog_, redetect_, videoFrameDownGrey_, locations_, boxes_, probs_, tracker_, tracked_, trackerInitialized_);
}
window->endgraph(doRedect, trackerInitialized_, redetect_);
window->graph(dontRedect, trackerInitialized_, redetect_);
{
window->parallel([](bool& redetect, const cv::UMat& videoFrameDownGrey, cv::Ptr<cv::TrackerKCF>& tracker, cv::Rect& tracked){
window->parallel([](bool& redetect, cv::UMat& videoFrameDownGrey, cv::Ptr<cv::Tracker>& tracker, cv::Rect& tracked){
if(!tracker->update(videoFrameDownGrey, tracked)) {
//detection failed - re-detect
redetect = true;
@ -269,7 +271,7 @@ int main(int argc, char **argv) {
window->setSource(src);
#endif
window->run<PedestrianDemoPlan>(0);
window->run<PedestrianDemoPlan>(2);
return 0;
}

@ -15,7 +15,7 @@ int main() {
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
});
}
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
win->gl([]() {
//Clears the screen. The clear color and other GL-states are preserved between context-calls.
glClear(GL_COLOR_BUFFER_BIT);

@ -276,7 +276,7 @@ void setup(cv::Ptr<V4D> window) override {
}, window->fbSize());
}
void infere(cv::Ptr<V4D> window) override {
void infer(cv::Ptr<V4D> window) override {
window->capture(frame_);
window->gl([](const cv::Size &sz) {

@ -5,7 +5,7 @@ using namespace cv::v4d;
class VectorGraphicsPlan: public Plan {
public:
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Creates a NanoVG context and draws googly eyes that occasionally blink.
win->nvg([](const Size &sz) {
//Calls from this namespace may only be used inside a nvg context.

@ -9,7 +9,7 @@ int main() {
class VectorGraphicsAndFBPlan : public Plan {
public:
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Again creates a NanoVG context and draws googly eyes
win->nvg([](const Size& sz) {
//Calls from this namespace may only be used inside a nvg context

@ -197,7 +197,7 @@ public:
init_scene();
});
}
void infere(cv::Ptr<V4D> window) override {
void infer(cv::Ptr<V4D> window) override {
window->capture(frame_);
window->gl([]() {

@ -14,7 +14,7 @@ int main(int argc, char** argv) {
cv::UMat frame_;
const string hv_ = "Hello Video!";
public:
void infere(Ptr<V4D> win) override {
void infer(Ptr<V4D> win) override {
//Capture video from the source
win->capture();

Loading…
Cancel
Save