@@ -1937,6 +1937,117 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Eltwise_unequal, Combine(
    dnnBackendsAndTargets()
));

struct Layer_Test_Eltwise_bcast : testing::TestWithParam<tuple<string, int, tuple<Backend, Target>>>
{
public:
    void test_bcast()
    {
        string op = get<0>(GetParam());
        int dim = get<1>(GetParam());
        tuple<Backend, Target> backend_target = get<2>(GetParam());
        int backend = get<0>(backend_target);
        int target = get<1>(backend_target);

        vector<vector<int>> dim_shape_list;
        get_all_arr(dim_shape_list, dim);
        replace(dim_shape_list, 1, 3);
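        // e.g. dim = 2 yields the shapes {1,1}, {1,3}, {3,1}, {3,3}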

        // same shape
        for (int i = 0; i < dim_shape_list.size(); i++)
            for (int j = 0; j < dim_shape_list.size(); j++)
                run(dim_shape_list[i], dim_shape_list[j], op, backend, target);

        vector<vector<int>> sub_shape_list;
        vector<vector<int>> tmp;
        for (int i = 1; i < dim; i++)
        {
            get_all_arr(tmp, i);
            replace(tmp, 1, 3);
            sub_shape_list.insert(sub_shape_list.end(), tmp.begin(), tmp.end());
        }
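        // e.g. dim = 3 collects every 1-D and 2-D shape over {1,3}:
        // {1}, {3}, {1,1}, {1,3}, {3,1}, {3,3}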

        // diff shape: dim-D tensor vs. lower-rank tensor
        for (const auto &shp1 : dim_shape_list)
            for (const auto &shp2 : sub_shape_list)
                run(shp1, shp2, op, backend, target);

        // diff shape: lower-rank tensor vs. dim-D tensor
        for (const auto &shp1 : sub_shape_list)
            for (const auto &shp2 : dim_shape_list)
                run(shp1, shp2, op, backend, target);
    }

private:
    // generate all 2^n n-dimensional arrays whose entries are 0 or 1
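    // e.g. get_all_arr(arr, 2) produces {0,0}, {0,1}, {1,0}, {1,1}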
    static void get_all_arr(vector<vector<int>> &arr, int n)
    {
        int total = 1 << n;
        arr.assign(total, vector<int>(n, -1));
        for (int i = 0; i < total; i++)
            for (int j = 0; j < n; j++)
                arr[i][j] = (i >> (n - j - 1)) & 1;
    }

    // map every 0 entry to `zero` and every 1 entry to `one`
    static void replace(vector<vector<int>> &arr, int zero, int one)
    {
        for (int i = 0; i < arr.size(); i++)
            for (int j = 0; j < arr[0].size(); j++)
                arr[i][j] = arr[i][j] ? one : zero;
    }

    static void run(const vector<int> &a_shape, const vector<int> &b_shape, const String &op, const int backend, const int target)
    {
        Mat a = Mat::zeros((int) a_shape.size(), a_shape.data(), CV_32FC1);
        Mat b = Mat::ones((int) b_shape.size(), b_shape.data(), CV_32FC1);
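        // a is all zeros and b is all ones, so for any broadcast
        // the "sum" output must be filled with ones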

        Net net;
        LayerParams lp;
        lp.type = "NaryEltwise";
        lp.name = "testLayer";
        lp.set("operation", op);
        int id = net.addLayerToPrev(lp.name, lp.type, lp);
        net.connect(0, 1, id, 1);
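        // addLayerToPrev wires the first network input to the layer's input 0;
        // connect() routes output 1 of the data layer (layer 0) to the layer's input 1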

        vector<String> inpNames(2);
        inpNames[0] = "a";
        inpNames[1] = "b";
        net.setInputsNames(inpNames);
        net.setInput(a, inpNames[0]);
        net.setInput(b, inpNames[1]);

        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);

        Mat re;
        ASSERT_NO_THROW(re = net.forward()); // forward() must not throw a runtime error
        auto ptr_re = (float *) re.data;
        for (int i = 0; i < re.total(); i++)
            if (op == "sum")
            {
                ASSERT_EQ(1, ptr_re[i]); // sum result should be 1
            }
    }
};

TEST_P(Layer_Test_Eltwise_bcast, DISABLED_brute_force)
{
    test_bcast();
}

// This test verifies that broadcast operations, both unidirectional and bidirectional,
// on tensors of the same and of different shapes are forwarded correctly,
// i.e. that the elementwise layer forwards without errors.
//
// To find out in which cases a backend falls back to the CPU, replace its fallback
// statement, e.g. `return Ptr<BackendNode>();` in `initCUDA()`, with
// `throw std::runtime_error("fallback");`
//
// To test more operators, add them after "sum".
// By default only "sum" is tested, because most operators share the same implementation.
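// A hypothetical extension could be `Values("sum", "max", "min")`, assuming those
// operations are implemented by the NaryEltwise layer.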
INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Eltwise_bcast, Combine(
    Values("sum"),
    Values(1, 2, 3, 4, 5),
    dnnBackendsAndTargets()
));

typedef testing::TestWithParam<tuple<Backend, Target> > Layer_Test_Resize;
TEST_P(Layer_Test_Resize, change_input)
{