@@ -618,6 +618,204 @@ TEST(Layer_LSTM_Test_Accuracy_with_, HiddenParams)
    normAssert(h_t_reference, outputs[0]);
}
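
// Gather along a configurable axis on small 1D/2D blobs; checks output shape and values.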
typedef testing::TestWithParam<tuple<int, int>> Layer_Gather_1d_Test;
TEST_P(Layer_Gather_1d_Test, Accuracy) {
    int batch_size = get<0>(GetParam());
    int axis = get<1>(GetParam());

    LayerParams lp;
    lp.type = "Gather";
    lp.name = "gatherLayer";
    lp.set("axis", axis);
    lp.set("real_ndims", 1);

    Ptr<GatherLayer> layer = GatherLayer::create(lp);

    std::vector<int> input_shape = {batch_size, 1};
    std::vector<int> indices_shape = {1, 1};
    std::vector<int> output_shape = {batch_size, 1};
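    // batch_size == 0 selects the 1D case: drop the leading (batch) dimension from every shape.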
    if (batch_size == 0) {
        input_shape.erase(input_shape.begin());
        indices_shape.erase(indices_shape.begin());
        output_shape.erase(output_shape.begin());
    } else if (axis == 0) {
        output_shape[0] = 1;
    }

    cv::Mat input = cv::Mat(input_shape, CV_32F, 1.0);
    cv::randu(input, 0.0, 1.0);
    cv::Mat indices = cv::Mat(indices_shape, CV_32F, 0.0);
    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, input(cv::Range::all(), cv::Range(0, 1)).data);

    std::vector<Mat> inputs{input, indices};
    std::vector<Mat> outputs;
    runLayer(layer, inputs, outputs);

    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
    normAssert(output_ref, outputs[0]);
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Gather_1d_Test, Combine(
/*input blob shape*/ Values(0, 1, 2, 3),
/*axis*/             Values(0, 1)
));
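
// ArgMax/ArgMin reduction on small 1D/2D blobs; checks the shape and values of the returned index blob.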
typedef testing::TestWithParam<tuple<int, int, std::string>> Layer_Arg_1d_Test;
TEST_P(Layer_Arg_1d_Test, Accuracy) {
    int batch_size = get<0>(GetParam());
    int axis = get<1>(GetParam());
    std::string operation = get<2>(GetParam());

    LayerParams lp;
    lp.type = "Arg";
    lp.name = "arg" + operation + "_Layer";
    lp.set("op", operation);
    lp.set("axis", axis);
    lp.set("keepdims", 1);
    lp.set("select_last_index", 0);

    Ptr<ArgLayer> layer = ArgLayer::create(lp);

    std::vector<int> input_shape = {batch_size, 1};
    std::vector<int> output_shape = {1, 1};
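    // With keepdims == 1 the reduced axis keeps size 1; reducing a non-zero axis leaves the batch dimension intact.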
    if (batch_size == 0) {
        input_shape.erase(input_shape.begin());
        output_shape.erase(output_shape.begin());
    }
    if (axis != 0 && batch_size != 0) {
        output_shape[0] = batch_size;
    }

    cv::Mat input = cv::Mat(input_shape, CV_32F, 1);
    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, 0);
    for (int i = 0; i < batch_size; ++i)
        input.at<float>(i, 0) = static_cast<float>(i + 1);

    std::vector<Mat> inputs{input};
    std::vector<Mat> outputs;
    runLayer(layer, inputs, outputs);

    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
    normAssert(output_ref, outputs[0]);
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Arg_1d_Test, Combine(
/*input blob shape*/ Values(0, 1, 2, 3),
/*axis*/             Values(0, 1),
/*operation*/        Values("max", "min")
));
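
// NaryEltwise binary ops on two blobs of identical shape; the reference is computed with cv::Mat arithmetic.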
typedef testing::TestWithParam<tuple<int, std::string>> Layer_NaryElemwise_1d_Test;
TEST_P(Layer_NaryElemwise_1d_Test, Accuracy) {
    int batch_size = get<0>(GetParam());
    std::string operation = get<1>(GetParam());

    LayerParams lp;
    lp.type = "Eltwise";
    lp.name = operation + "_Layer";
    lp.set("operation", operation);
    Ptr<NaryEltwiseLayer> layer = NaryEltwiseLayer::create(lp);

    std::vector<int> input_shape = {batch_size, 1};
    if (batch_size == 0)
        input_shape.erase(input_shape.begin());

    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 0.0);
    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 0.0);
    cv::randu(input1, 0.0, 1.0);
    cv::randu(input2, 0.0, 1.0);
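    // Compute the reference result for the selected operation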
    cv::Mat output_ref;
    if (operation == "sum") {
        output_ref = input1 + input2;
    } else if (operation == "mul") {
        output_ref = input1.mul(input2);
    } else if (operation == "div") {
        output_ref = input1 / input2;
    } else if (operation == "sub") {
        output_ref = input1 - input2;
    } else {
        output_ref = cv::Mat();
    }

    std::vector<Mat> inputs{input1, input2};
    std::vector<Mat> outputs;
    runLayer(layer, inputs, outputs);

    if (!output_ref.empty()) {
        ASSERT_EQ(shape(output_ref), shape(outputs[0]));
        normAssert(output_ref, outputs[0]);
    } else {
        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
    }
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_NaryElemwise_1d_Test, Combine(
/*input blob shape*/ Values(0, 1),
/*operation*/        Values("div", "mul", "sum", "sub")
));
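
// Eltwise ops on two blobs of identical shape; the reference is computed with cv::Mat arithmetic.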
typedef testing::TestWithParam<tuple<int, std::string>> Layer_Elemwise_1d_Test;
TEST_P(Layer_Elemwise_1d_Test, Accuracy) {
    int batch_size = get<0>(GetParam());
    std::string operation = get<1>(GetParam());

    LayerParams lp;
    lp.type = "Eltwise";
    lp.name = operation + "_Layer";
    lp.set("operation", operation);
    Ptr<EltwiseLayer> layer = EltwiseLayer::create(lp);

    std::vector<int> input_shape = {batch_size, 1};
    if (batch_size == 0)
        input_shape.erase(input_shape.begin());

    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 1.0);
    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 1.0);
    cv::randu(input1, 0.0, 1.0);
    cv::randu(input2, 0.0, 1.0);

    // Dynamically select the operation
    cv::Mat output_ref;
    if (operation == "sum") {
        output_ref = input1 + input2;
    } else if (operation == "max") {
        output_ref = cv::max(input1, input2);
    } else if (operation == "min") {
        output_ref = cv::min(input1, input2);
    } else if (operation == "prod") {
        output_ref = input1.mul(input2);
    } else if (operation == "div") {
        output_ref = input1 / input2;
    } else {
        output_ref = cv::Mat();
    }

    std::vector<Mat> inputs{input1, input2};
    std::vector<Mat> outputs;
    runLayer(layer, inputs, outputs);

    if (!output_ref.empty()) {
        ASSERT_EQ(shape(output_ref), shape(outputs[0]));
        normAssert(output_ref, outputs[0]);
    } else {
        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
    }
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Elemwise_1d_Test, Combine(
/*input blob shape*/ Values(0, 1, 2, 3),
/*operation*/        Values("div", "prod", "max", "min", "sum")
));

TEST(Layer_GRU_Test_Accuracy_with_, Pytorch)
{
    Mat Wx = blobFromNPY(_tf("gru.W.npy"));