diff --git a/modules/dnn/src/net_impl.cpp b/modules/dnn/src/net_impl.cpp
index dc0c53191f..775016b3b7 100644
--- a/modules/dnn/src/net_impl.cpp
+++ b/modules/dnn/src/net_impl.cpp
@@ -1400,6 +1400,7 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
     Mat blob_ = blob.getMat();  // can't use InputArray directly due MatExpr stuff
     MatShape blobShape = shape(blob_);
 
+#if 0  // TODO: DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0
     if (pin.lid == 0)
     {
         CV_Assert(!netInputLayer.empty());
@@ -1411,7 +1412,6 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
             if (!inputShapeLimitation.empty())
             {
                 CV_CheckEQ(inputShapeLimitation.size(), blobShape.size(), "");
-#if 0  // TODO: DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0
                 const size_t dims = inputShapeLimitation.size();
                 for (size_t dim = 0; dim < dims; dim++)
                 {
@@ -1419,10 +1419,10 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
                         continue;  // don't limit batch
                     CV_CheckEQ(inputShapeLimitation[dim], blobShape[dim], "");
                 }
-#endif
             }
         }
     }
+#endif
 
     LayerData& ld = layers[pin.lid];
     const int numInputs = std::max(pin.oid + 1, (int)ld.requiredOutputs.size());
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 307a05ef4b..027326c69e 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -891,6 +891,12 @@ void ONNXImporter::populateNet()
     }
 
     dstNet.setInputsNames(netInputs);
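+    // When the model declares no dynamic shapes, register the static input shapes with the network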
+    if (!hasDynamicShapes)
+    {
+        for (int i = 0; i < netInputs.size(); ++i)
+            dstNet.setInputShape(netInputs[i], outShapes[netInputs[i]]);
+    }
 
     // dump outputs
     for (int i = 0; i < graph_proto.output_size(); ++i)
diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp
index d556bf6f6f..cc09ec14eb 100644
--- a/modules/dnn/src/tflite/tflite_importer.cpp
+++ b/modules/dnn/src/tflite/tflite_importer.cpp
@@ -163,6 +163,8 @@ void TFLiteImporter::populateNet()
     CV_Assert(modelTensors);
     layouts.resize(modelTensors->size(), DATA_LAYOUT_UNKNOWN);
     size_t subgraph_inputs_size = subgraph_inputs->size();
+    std::vector<std::string> inputsNames(subgraph_inputs_size);
+    std::vector<MatShape> inputsShapes(subgraph_inputs_size);
     for (size_t i = 0; i < subgraph_inputs_size; ++i)
     {
         int idx = subgraph_inputs->Get(i);
@@ -171,7 +173,25 @@ void TFLiteImporter::populateNet()
         if (!tensor)
             CV_Error(Error::StsError, cv::format("DNN/TFLite: subgraph input %d (%d) is NULL", (int)i, idx));
         layouts[idx] = estimateLayout(*tensor);
+
+        // Keep info about the original input names and shapes
+        inputsNames[i] = tensor->name()->str();
+        std::vector<int> shape(tensor->shape()->begin(), tensor->shape()->end());
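+        // Convert an NHWC tensor shape to the NCHW layout used by the network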
+        if (layouts[idx] == DATA_LAYOUT_NHWC) {
+            CV_CheckEQ(shape.size(), (size_t)4, "");
+            std::swap(shape[2], shape[3]);
+            std::swap(shape[1], shape[2]);
+        }
+        inputsShapes[i] = shape;
     }
+
+    dstNet.setInputsNames(inputsNames);
+    for (size_t i = 0; i < subgraph_inputs_size; ++i)
+    {
+        dstNet.setInputShape(inputsNames[i], inputsShapes[i]);
+    }
+
     const auto& all_operators = *subgraph_operators;
     const size_t all_operators_size = all_operators.size();
     for (size_t op_idx = 0; op_idx < all_operators_size; ++op_idx)
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 6698174521..b5a97770b1 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -30,6 +30,27 @@ public:
         pb
     };
 
+    void testInputShapes(const Net& net, const std::vector<Mat>& inps)
+    {
+        std::vector<MatShape> inLayerShapes;
+        std::vector<MatShape> outLayerShapes;
+        net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
+        ASSERT_EQ(inLayerShapes.size(), inps.size());
+
+        for (int i = 0; i < inps.size(); ++i) {
+            bool hasDynamicShapes = inLayerShapes[i].empty();
+            if (hasDynamicShapes)
+                continue;
+            if (inLayerShapes[i].size() == 1) {  // 1D input
+                ASSERT_EQ(shape(inLayerShapes[i][0], 1), shape(inps[i]));
+            } else {
+                // Compare all axes except the batch dimension, which is variable.
+                inLayerShapes[i][0] = inps[i].size[0];
+                ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
+            }
+        }
+    }
+
     void testONNXModels(const String& basename, const Extension ext = npy,
                         const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
                         bool checkNoFallbacks = true, int numInps = 1)
@@ -54,6 +75,8 @@ public:
         Net net = readNetFromONNX(onnxmodel);
         ASSERT_FALSE(net.empty());
 
+        testInputShapes(net, inps);
+
         net.setPreferableBackend(backend);
         net.setPreferableTarget(target);
 
@@ -2315,6 +2338,8 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
         lInf = 0.06;
     }
 
+    testInputShapes(net, {input0});
+
     checkBackend(&input0, &ref0);
     net.setInput(input0);
     Mat out = net.forward().clone();
diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp
index bffdaa5b03..bce826b96f 100644
--- a/modules/dnn/test/test_tflite_importer.cpp
+++ b/modules/dnn/test/test_tflite_importer.cpp
@@ -11,6 +11,7 @@ Test for TFLite models loading
 
 #include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS
 #include <opencv2/dnn/utils/debug_utils.hpp>
+#include <opencv2/dnn/shape_utils.hpp>
 
 #ifdef OPENCV_TEST_DNN_TFLITE
 
@@ -19,9 +20,21 @@ namespace opencv_test { namespace {
 using namespace cv;
 using namespace cv::dnn;
 
+void testInputShapes(const Net& net, const std::vector<Mat>& inps) {
+    std::vector<MatShape> inLayerShapes;
+    std::vector<MatShape> outLayerShapes;
+    net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
+    ASSERT_EQ(inLayerShapes.size(), inps.size());
+
+    for (int i = 0; i < inps.size(); ++i) {
+        ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
+    }
+}
+
 void testModel(const std::string& modelName, const Mat& input, double l1 = 1e-5, double lInf = 1e-4)
 {
     Net net = readNet(findDataFile("dnn/tflite/" + modelName + ".tflite", false));
+    testInputShapes(net, {input});
     net.setInput(input);
 
     std::vector<String> outNames = net.getUnconnectedOutLayersNames();
@@ -72,6 +85,7 @@ TEST(Test_TFLite, max_unpooling)
     cvtColor(input, input, COLOR_BGR2RGBA);
     input = input.mul(Scalar(1, 1, 1, 0));
     input = blobFromImage(input, 1.0 / 255);
+    testInputShapes(net, {input});
     net.setInput(input);
 
     std::vector<std::vector<Mat> > outs;