
Lines Matching refs:layer

70 static void icvCNNetworkAddLayer( CvCNNetwork* network, CvCNNLayer* layer );
73 /* In all layer functions we denote input by X and output by Y, where
78 /*------------------------ functions for convolutional layer ---------------------------*/
81 static void icvCNNConvolutionForward( CvCNNLayer* layer, const CvMat* X, CvMat* Y );
83 static void icvCNNConvolutionBackward( CvCNNLayer* layer, int t,
86 /*------------------------ functions for sub-sampling layer ----------------------------*/
89 static void icvCNNSubSamplingForward( CvCNNLayer* layer, const CvMat* X, CvMat* Y );
91 static void icvCNNSubSamplingBackward( CvCNNLayer* layer, int t,
94 /*------------------------ functions for fully connected layer -------------------------*/
97 static void icvCNNFullConnectForward( CvCNNLayer* layer, const CvMat* X, CvMat* Y );
99 static void icvCNNFullConnectBackward( CvCNNLayer* layer, int,
108 CvCNNLayer* first_layer, *layer, *last_layer; \
115 for( i = 0, layer = first_layer; i < n_layers && layer; i++ ) \
117 if( !ICV_IS_CNN_LAYER(layer) ) \
119 last_layer = layer; \
120 layer = layer->next_layer; \
123 if( i == 0 || i != n_layers || first_layer->prev_layer || layer ) \
127 CV_ERROR( CV_StsBadArg, "First layer must contain only one input plane" ); \
130 CV_ERROR( CV_StsBadArg, "Invalid input sizes of the first layer" ); \
134 CV_ERROR( CV_StsBadArg, "Invalid output sizes of the last layer" ); \
233 CvCNNLayer* layer;
244 for( k = 0, layer = first_layer; k < n_layers; k++, layer = layer->next_layer )
246 CV_CALL(X[k+1] = cvCreateMat( layer->n_output_planes*layer->output_height*
247 layer->output_width, 1, CV_32FC1 ));
267 for( k = 0, layer = first_layer; k < n_layers; k++, layer = layer->next_layer )
268 CV_CALL(layer->forward( layer, X[k], X[k+1] ));
288 for( k = 0, layer = first_layer; k < n_layers - 1; k++, layer = layer->next_layer )
289 CV_CALL(layer->forward( layer, X[k], X[k+1] ));
290 CV_CALL(layer->forward( layer, X[k], X[k+1] ));
298 for( k = n_layers; k > 0; k--, layer = layer->prev_layer )
299 CV_CALL(layer->backward( layer, n + start_iter, X[k-1], dE_dX[k], dE_dX[k-1] ));
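
The split at lines 288-290 looks deliberate: the loop stops one layer early so that, after the extra forward call, the running pointer is still on the last layer, and the backward sweep at 298 can then walk prev_layer from that point. A hedged restatement of that shape; dE_dX[n_layers] is assumed to hold the loss gradient w.r.t. the network output, computed in the unmatched lines in between:

    /* forward sweep: leave `layer` pointing at the last layer */
    for( k = 0, layer = first_layer; k < n_layers - 1; k++, layer = layer->next_layer )
        CV_CALL(layer->forward( layer, X[k], X[k+1] ));
    CV_CALL(layer->forward( layer, X[k], X[k+1] ));   /* last layer, pointer not advanced */

    /* backward sweep: reuse `layer`, walking prev_layer pointers */
    for( k = n_layers; k > 0; k--, layer = layer->prev_layer )
        CV_CALL(layer->backward( layer, n + start_iter, X[k-1], dE_dX[k], dE_dX[k-1] ));
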
328 CvCNNLayer* first_layer, *layer = 0;
351 for( k = 0, layer = first_layer; k < n_layers; k++, layer = layer->next_layer )
353 CV_CALL(X[k+1] = cvCreateMat( layer->n_output_planes*layer->output_height*
354 layer->output_width, 1, CV_32FC1 ));
359 for( k = 0, layer = first_layer; k < n_layers; k++, layer = layer->next_layer )
360 CV_CALL(layer->forward( layer, X[k], X[k+1] ));
475 CV_ERROR( CV_StsBadArg, "Invalid layer" );
495 static void icvCNNetworkAddLayer( CvCNNetwork* network, CvCNNLayer* layer )
509 if( ICV_IS_CNN_FULLCONNECT_LAYER(layer) )
511 if( layer->n_input_planes != prev_layer->output_width*prev_layer->output_height*
513 CV_ERROR( CV_StsBadArg, "Unmatched size of the new layer" );
514 if( layer->input_height != 1 || layer->output_height != 1 ||
515 layer->input_width != 1 || layer->output_width != 1 )
516 CV_ERROR( CV_StsBadArg, "Invalid size of the new layer" );
518 else if( ICV_IS_CNN_CONVOLUTION_LAYER(layer) || ICV_IS_CNN_SUBSAMPLING_LAYER(layer) )
520 if( prev_layer->n_output_planes != layer->n_input_planes ||
521 prev_layer->output_height != layer->input_height ||
522 prev_layer->output_width != layer->input_width )
523 CV_ERROR( CV_StsBadArg, "Unmatched size of the new layer" );
526 CV_ERROR( CV_StsBadArg, "Invalid layer" );
528 layer->prev_layer = prev_layer;
529 prev_layer->next_layer = layer;
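
The checks at 509-523 encode two compatibility rules for an appended layer: a fully connected layer must consume the flattened output volume of its predecessor, while convolution and sub-sampling layers must match plane count, height and width exactly. A hedged sketch of the first rule as a standalone predicate; line 512, which completes the condition started at 511, is not among the matched lines, so the n_output_planes factor is an assumption:

    /* hypothetical check mirroring lines 509-516 */
    static int icv_fc_layer_fits( const CvCNNLayer* prev_layer, const CvCNNLayer* layer )
    {
        return layer->n_input_planes == prev_layer->output_width *
                                        prev_layer->output_height *
                                        prev_layer->n_output_planes
            && layer->input_height == 1 && layer->output_height == 1
            && layer->input_width  == 1 && layer->output_width  == 1;
    }
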
542 CvCNNLayer* layer = 0, *next_layer = 0;
551 layer = network->layers;
552 if( layer == NULL )
553 CV_ERROR( CV_StsBadArg, "CNN is empty (does not contain any layer)" );
555 // k counts the layers as they are released
556 for( k = 0; k < network->n_layers && layer; k++ )
558 next_layer = layer->next_layer;
559 layer->release( &layer );
560 layer = next_layer;
563 if( k != network->n_layers || layer)
572 * Layer functions *
580 CvCNNLayer* layer = 0;
601 CV_CALL(layer = (CvCNNLayer*)cvAlloc( header_size ));
602 memset( layer, 0, header_size );
604 layer->flags = ICV_CNN_LAYER|layer_type;
605 CV_ASSERT( ICV_IS_CNN_LAYER(layer) )
607 layer->n_input_planes = n_input_planes;
608 layer->input_height = input_height;
609 layer->input_width = input_width;
611 layer->n_output_planes = n_output_planes;
612 layer->output_height = output_height;
613 layer->output_width = output_width;
615 layer->init_learn_rate = init_learn_rate;
616 layer->learn_rate_decrease_type = learn_rate_decrease_type;
618 layer->release = release;
619 layer->forward = forward;
620 layer->backward = backward;
624 if( cvGetErrStatus() < 0 && layer)
625 cvFree( &layer );
627 return layer;
638 CvCNNConvolutionLayer* layer = 0;
649 CV_CALL(layer = (CvCNNConvolutionLayer*)icvCreateCNNLayer( ICV_CNN_CONVOLUTION_LAYER,
655 layer->K = K;
656 CV_CALL(layer->weights = cvCreateMat( n_output_planes, K*K+1, CV_32FC1 ));
657 CV_CALL(layer->connect_mask = cvCreateMat( n_output_planes, n_input_planes, CV_8UC1));
663 if( !CV_ARE_SIZES_EQ( weights, layer->weights ) )
665 CV_CALL(cvCopy( weights, layer->weights ));
670 cvRandArr( &rng, layer->weights, CV_RAND_UNI, cvRealScalar(-1), cvRealScalar(1) );
677 if( !CV_ARE_SIZES_EQ( connect_mask, layer->connect_mask ) )
679 CV_CALL(cvCopy( connect_mask, layer->connect_mask ));
682 CV_CALL(cvSet( layer->connect_mask, cvRealScalar(1) ));
686 if( cvGetErrStatus() < 0 && layer )
688 cvReleaseMat( &layer->weights );
689 cvReleaseMat( &layer->connect_mask );
690 cvFree( &layer );
693 return (CvCNNLayer*)layer;
703 CvCNNSubSamplingLayer* layer = 0;
715 CV_CALL(layer = (CvCNNSubSamplingLayer*)icvCreateCNNLayer( ICV_CNN_SUBSAMPLING_LAYER,
721 layer->sub_samp_scale = sub_samp_scale;
722 layer->a = a;
723 layer->s = s;
725 CV_CALL(layer->sumX =
727 CV_CALL(layer->exp2ssumWX =
730 cvZero( layer->sumX );
731 cvZero( layer->exp2ssumWX );
733 CV_CALL(layer->weights = cvCreateMat( n_output_planes, 2, CV_32FC1 ));
738 if( !CV_ARE_SIZES_EQ( weights, layer->weights ) )
740 CV_CALL(cvCopy( weights, layer->weights ));
745 cvRandArr( &rng, layer->weights, CV_RAND_UNI, cvRealScalar(-1), cvRealScalar(1) );
750 if( cvGetErrStatus() < 0 && layer )
752 cvReleaseMat( &layer->exp2ssumWX );
753 cvFree( &layer );
756 return (CvCNNLayer*)layer;
764 CvCNNFullConnectLayer* layer = 0;
772 CV_CALL(layer = (CvCNNFullConnectLayer*)icvCreateCNNLayer( ICV_CNN_FULLCONNECT_LAYER,
777 layer->a = a;
778 layer->s = s;
780 CV_CALL(layer->exp2ssumWX = cvCreateMat( n_outputs, 1, CV_32FC1 ));
781 cvZero( layer->exp2ssumWX );
783 CV_CALL(layer->weights = cvCreateMat( n_outputs, n_inputs+1, CV_32FC1 ));
788 if( !CV_ARE_SIZES_EQ( weights, layer->weights ) )
790 CV_CALL(cvCopy( weights, layer->weights ));
795 cvRandArr( &rng, layer->weights, CV_RAND_UNI, cvRealScalar(-1), cvRealScalar(1) );
800 if( cvGetErrStatus() < 0 && layer )
802 cvReleaseMat( &layer->exp2ssumWX );
803 cvReleaseMat( &layer->weights );
804 cvFree( &layer );
807 return (CvCNNLayer*)layer;
812 * Layer FORWARD functions *
821 CV_ERROR( CV_StsBadArg, "Invalid layer" );
825 const CvCNNConvolutionLayer* layer = (CvCNNConvolutionLayer*) _layer;
827 const int K = layer->K;
830 const int nXplanes = layer->n_input_planes;
831 const int Xheight = layer->input_height;
832 const int Xwidth = layer->input_width ;
835 const int nYplanes = layer->n_output_planes;
836 const int Yheight = layer->output_height;
837 const int Ywidth = layer->output_width;
850 connect_mask_data = layer->connect_mask->data.ptr;
851 w = layer->weights->data.fl;
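
Lines 825-851 fix the shapes used by the convolutional forward pass: each of the n_output_planes rows of layer->weights carries K*K kernel taps plus one extra value (line 656), presumably a bias added once per output pixel, and connect_mask (line 657) gates which input planes contribute to which output plane. A hedged per-pixel sketch of the tap sum, assuming 'valid' convolution with stride 1 and the bias stored as the last weight, neither of which is confirmed by the matched lines:

    /* hypothetical helper: K*K tap sum of one input plane at output position (x, y);
       the caller accumulates this over the connected input planes and adds w[K*K] once */
    static float icv_conv_taps( const float* X_plane, int Xwidth, int x, int y,
                                const float* w, int K )
    {
        int u, v;
        float s = 0.f;
        for( v = 0; v < K; v++ )
            for( u = 0; u < K; u++ )
                s += w[v*K + u]*X_plane[(y + v)*Xwidth + (x + u)];
        return s;
    }
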
892 CV_ERROR( CV_StsBadArg, "Invalid layer" );
896 const CvCNNSubSamplingLayer* layer = (CvCNNSubSamplingLayer*) _layer;
898 const int sub_sampl_scale = layer->sub_samp_scale;
899 const int nplanes = layer->n_input_planes;
901 const int Xheight = layer->input_height;
902 const int Xwidth = layer->input_width ;
905 const int Yheight = layer->output_height;
906 const int Ywidth = layer->output_width;
914 CV_ASSERT(layer->exp2ssumWX->cols == 1 && layer->exp2ssumWX->rows == nplanes*Ysize);
916 // update inner variable layer->exp2ssumWX, which will be used in back-propagation
917 cvZero( layer->sumX );
918 cvZero( layer->exp2ssumWX );
924 sumX_data = layer->sumX->data.fl;
933 w = layer->weights->data.fl;
934 cvGetRows( layer->sumX, &sumX_sub_col, 0, Ysize );
935 cvGetRows( layer->exp2ssumWX, &exp2ssumWX_sub_col, 0, Ysize );
943 CV_CALL(cvScale( layer->exp2ssumWX, layer->exp2ssumWX, 2.0*layer->s ));
944 CV_CALL(cvExp( layer->exp2ssumWX, layer->exp2ssumWX ));
945 CV_CALL(cvMinS( layer->exp2ssumWX, FLT_MAX, layer->exp2ssumWX ));
948 float* exp2ssumWX_data = layer->exp2ssumWX->data.fl;
949 for( ni = 0; ni < layer->exp2ssumWX->rows; ni++, exp2ssumWX_data++ )
956 // compute the output variable Y == ( a - 2a/(layer->exp2ssumWX + 1))
957 CV_CALL(cvAddS( layer->exp2ssumWX, cvRealScalar(1), Y ));
958 CV_CALL(cvDiv( 0, Y, Y, -2.0*layer->a ));
959 CV_CALL(cvAddS( Y, cvRealScalar(layer->a), Y ));
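
Lines 898-959 outline the sub-sampling forward pass: each output cell sums a sub_samp_scale x sub_samp_scale block of its input plane, the sum goes through a per-plane affine map (two weights per plane, allocated at line 733), and the result is squashed by the same tanh-like function used by the fully connected layer below. A hedged per-cell sketch; which of the two weights is the gain and which the bias is not visible in the matched lines, so the (gain, bias) order is assumed:

    #include <math.h>

    /* hypothetical per-cell computation for one plane of a sub-sampling layer */
    static float icv_subsamp_cell( const float* X_plane, int Xwidth, int x, int y,
                                   int scale, float w_gain, float w_bias,
                                   float a, float s )
    {
        int dx, dy;
        float sum = 0.f, u;
        for( dy = 0; dy < scale; dy++ )
            for( dx = 0; dx < scale; dx++ )
                sum += X_plane[(y*scale + dy)*Xwidth + (x*scale + dx)];
        u = w_gain*sum + w_bias;
        return a - 2.f*a/(expf( 2.f*s*u ) + 1.f);   /* == a*tanh(s*u), see the check below */
    }
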
970 CV_ERROR( CV_StsBadArg, "Invalid layer" );
974 const CvCNNFullConnectLayer* layer = (CvCNNFullConnectLayer*)_layer;
975 CvMat* weights = layer->weights;
978 CV_ASSERT(X->cols == 1 && X->rows == layer->n_input_planes);
979 CV_ASSERT(Y->cols == 1 && Y->rows == layer->n_output_planes);
985 // update inner variable layer->exp2ssumWX, which will be used in Back-Propagation
986 CV_CALL(cvGEMM( &sub_weights, X, 2*layer->s, &bias, 2*layer->s, layer->exp2ssumWX ));
987 CV_CALL(cvExp( layer->exp2ssumWX, layer->exp2ssumWX ));
988 CV_CALL(cvMinS( layer->exp2ssumWX, FLT_MAX, layer->exp2ssumWX ));
991 float* exp2ssumWX_data = layer->exp2ssumWX->data.fl;
993 for( i = 0; i < layer->exp2ssumWX->rows; i++, exp2ssumWX_data++ )
1000 // compute the output variable Y == ( a - 2a/(layer->exp2ssumWX + 1))
1001 CV_CALL(cvAddS( layer->exp2ssumWX, cvRealScalar(1), Y ));
1002 CV_CALL(cvDiv( 0, Y, Y, -2.0*layer->a ));
1003 CV_CALL(cvAddS( Y, cvRealScalar(layer->a), Y ));
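
The three calls at 1001-1003 (and the identical ones at 957-959) evaluate Y = a - 2a/(exp(2s*u) + 1), where u = W*X + bias comes from the cvGEMM at 986. Algebraically that is a*tanh(s*u), the scaled hyperbolic tangent. A small self-contained check of the identity; the numeric values are arbitrary test inputs, not taken from the code:

    #include <assert.h>
    #include <math.h>

    int main( void )
    {
        double a = 1.7159, s = 2.0/3.0, u = 0.5;   /* arbitrary test values */
        double E = exp( 2.0*s*u );
        double lhs = a - 2.0*a/(E + 1.0);          /* form used at 957-959 and 1001-1003 */
        double rhs = a*tanh( s*u );                /* scaled tanh */
        assert( fabs( lhs - rhs ) < 1e-12 );
        return 0;
    }
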
1009 * Layer BACKWARD functions *
1015 of the previous layer (X).
1019 of the current layer. */
1030 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1034 const CvCNNConvolutionLayer* layer = (CvCNNConvolutionLayer*) _layer;
1036 const int K = layer->K;
1038 const int n_X_planes = layer->n_input_planes;
1039 const int X_plane_height = layer->input_height;
1040 const int X_plane_width = layer->input_width;
1043 const int n_Y_planes = layer->n_output_planes;
1044 const int Y_plane_height = layer->output_height;
1045 const int Y_plane_width = layer->output_width;
1053 CvMat* weights = layer->weights;
1073 if( layer->connect_mask->data.ptr[ni*n_Y_planes+no] )
1109 if( layer->learn_rate_decrease_type == CV_CNN_LEARN_RATE_DECREASE_LOG_INV )
1110 eta = -layer->init_learn_rate/logf(1+(float)t);
1111 else if( layer->learn_rate_decrease_type == CV_CNN_LEARN_RATE_DECREASE_SQRT_INV )
1112 eta = -layer->init_learn_rate/sqrtf((float)t);
1114 eta = -layer->init_learn_rate/(float)t;
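
The same three-way choice of step size reappears in every backward function (lines 1109-1114 here, 1224-1229 and 1312-1317 below). Factored out as a hedged helper, assuming the CV_CNN_LEARN_RATE_DECREASE_* constants from the ML header are in scope; eta comes out negative, presumably so the weight update can be written as an addition:

    #include <math.h>

    /* hypothetical helper mirroring the schedules at 1109-1114, 1224-1229, 1312-1317;
       t is the training-iteration counter passed to each backward function */
    static float icv_learn_rate( int decrease_type, float init_learn_rate, int t )
    {
        if( decrease_type == CV_CNN_LEARN_RATE_DECREASE_LOG_INV )
            return -init_learn_rate/logf( 1 + (float)t );
        else if( decrease_type == CV_CNN_LEARN_RATE_DECREASE_SQRT_INV )
            return -init_learn_rate/sqrtf( (float)t );
        return -init_learn_rate/(float)t;
    }
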
1138 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1142 const CvCNNSubSamplingLayer* layer = (CvCNNSubSamplingLayer*) _layer;
1144 const int Xwidth = layer->input_width;
1145 const int Ywidth = layer->output_width;
1146 const int Yheight = layer->output_height;
1148 const int scale = layer->sub_samp_scale;
1149 const int k_max = layer->n_output_planes * Yheight;
1157 CV_CALL(dY_dX_elems = cvCreateMat( layer->sumX->rows, 1, CV_32FC1 ));
1158 CV_CALL(dY_dW_elems = cvCreateMat( 2, layer->sumX->rows, CV_32FC1 ));
1159 CV_CALL(dE_dW = cvCreateMat( 1, 2*layer->n_output_planes, CV_32FC1 ));
1162 // ==<dY_dX_elems> = 4as*(layer->exp2ssumWX)/(layer->exp2ssumWX + 1)^2
1163 CV_CALL(cvAddS( layer->exp2ssumWX, cvRealScalar(1), dY_dX_elems ));
1165 CV_CALL(cvMul( dY_dX_elems, layer->exp2ssumWX, dY_dX_elems, 4.0*layer->a*layer->s ));
1175 cvReshape( layer->sumX, &sumX_row, 0, 1 );
1183 for( i = 0; i < layer->n_output_planes; i++ )
1193 // compute <dY_dX> = layer->weights*<dY_dX>
1194 w = layer->weights->data.fl;
1196 for( i = 0; i < layer->n_input_planes; i++, w++, dY_dX_sub_col.data.fl += Ysize )
1222 CvMat dE_dW_mat, *weights = layer->weights;
1224 if( layer->learn_rate_decrease_type == CV_CNN_LEARN_RATE_DECREASE_LOG_INV )
1225 eta = -layer->init_learn_rate/logf(1+(float)t);
1226 else if( layer->learn_rate_decrease_type == CV_CNN_LEARN_RATE_DECREASE_SQRT_INV )
1227 eta = -layer->init_learn_rate/sqrtf((float)t);
1229 eta = -layer->init_learn_rate/(float)t;
1245 of the previous layer (X) and the weights of the current layer (W)
1246 and updates weights of the current layer by using <dE_dW>.
1250 of the current layer. */
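
The comment at 1245-1250 compresses the usual chain rule for a fully connected layer. Writing Xb = [X;1] for the input with the bias term appended (the n_inputs+1 weight columns at line 783 suggest the bias lives in an extra column), u = W*Xb, and f for the tanh-like activation above, a hedged restatement of what the function must produce; the row/column orientation of the gradient vectors is not visible in the matched lines:

    delta = dE/dY (elementwise) f'(u)
    dE/dW = delta * Xb^T                        (scaled by eta from 1312-1317, then added into layer->weights)
    dE/dX = W^T * delta, bias column dropped    (handed back as the previous layer's dE_dX)
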
1263 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1267 const CvCNNFullConnectLayer* layer = (CvCNNFullConnectLayer*)_layer;
1268 const int n_outputs = layer->n_output_planes;
1269 const int n_inputs = layer->n_input_planes;
1273 CvMat* weights = layer->weights;
1286 // activ_func_der == 4as*(layer->exp2ssumWX)/(layer->exp2ssumWX + 1)^2
1287 CV_CALL(cvReshape( layer->exp2ssumWX, &exp2ssumWXrow, 0, layer->exp2ssumWX->cols ));
1291 4.0*layer->a*layer->s ));
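
The cvReshape/cvMul pair at 1287-1291 builds the activation derivative named in the comment at 1286: with E = exp(2s*u), d/du [ a - 2a/(E + 1) ] = 4as*E/(E + 1)^2, which equals a*s*(1 - tanh(s*u)^2). A small check of that equality, again with arbitrary test values:

    #include <assert.h>
    #include <math.h>

    int main( void )
    {
        double a = 1.7159, s = 2.0/3.0, u = -0.3;          /* arbitrary test values */
        double E = exp( 2.0*s*u );
        double lhs = 4.0*a*s*E/((E + 1.0)*(E + 1.0));      /* form at 1162-1165 and 1286-1291 */
        double rhs = a*s*(1.0 - tanh( s*u )*tanh( s*u ));  /* a*s*sech^2(s*u) */
        assert( fabs( lhs - rhs ) < 1e-12 );
        return 0;
    }
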
1312 if( layer->learn_rate_decrease_type == CV_CNN_LEARN_RATE_DECREASE_LOG_INV )
1313 eta = -layer->init_learn_rate/logf(1+(float)t);
1314 else if( layer->learn_rate_decrease_type == CV_CNN_LEARN_RATE_DECREASE_SQRT_INV )
1315 eta = -layer->init_learn_rate/sqrtf((float)t);
1317 eta = -layer->init_learn_rate/(float)t;
1329 * Layer RELEASE functions *
1336 CvCNNConvolutionLayer* layer = 0;
1341 layer = *(CvCNNConvolutionLayer**)p_layer;
1343 if( !layer )
1345 if( !ICV_IS_CNN_CONVOLUTION_LAYER(layer) )
1346 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1348 cvReleaseMat( &layer->weights );
1349 cvReleaseMat( &layer->connect_mask );
1361 CvCNNSubSamplingLayer* layer = 0;
1366 layer = *(CvCNNSubSamplingLayer**)p_layer;
1368 if( !layer )
1370 if( !ICV_IS_CNN_SUBSAMPLING_LAYER(layer) )
1371 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1373 cvReleaseMat( &layer->exp2ssumWX );
1374 cvReleaseMat( &layer->weights );
1386 CvCNNFullConnectLayer* layer = 0;
1391 layer = *(CvCNNFullConnectLayer**)p_layer;
1393 if( !layer )
1395 if( !ICV_IS_CNN_FULLCONNECT_LAYER(layer) )
1396 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1398 cvReleaseMat( &layer->exp2ssumWX );
1399 cvReleaseMat( &layer->weights );
1431 CvCNNLayer* layer = 0;
1470 CV_CALL(layer = cvCreateCNNConvolutionLayer(
1487 CV_CALL(layer = cvCreateCNNSubSamplingLayer(
1502 CV_CALL(layer = cvCreateCNNFullConnectLayer( n_input_planes, n_output_planes,
1510 if( cvGetErrStatus() < 0 && layer )
1511 layer->release( &layer );
1516 return layer;
1520 static void icvWriteCNNLayer( CvFileStorage* fs, CvCNNLayer* layer )
1525 if( !ICV_IS_CNN_LAYER(layer) )
1526 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1528 CV_CALL( cvStartWriteStruct( fs, NULL, CV_NODE_MAP, "opencv-ml-cnn-layer" ));
1530 CV_CALL(cvWriteInt( fs, "n_input_planes", layer->n_input_planes ));
1531 CV_CALL(cvWriteInt( fs, "input_height", layer->input_height ));
1532 CV_CALL(cvWriteInt( fs, "input_width", layer->input_width ));
1533 CV_CALL(cvWriteInt( fs, "n_output_planes", layer->n_output_planes ));
1534 CV_CALL(cvWriteInt( fs, "output_height", layer->output_height ));
1535 CV_CALL(cvWriteInt( fs, "output_width", layer->output_width ));
1536 CV_CALL(cvWriteInt( fs, "learn_rate_decrease_type", layer->learn_rate_decrease_type));
1537 CV_CALL(cvWriteReal( fs, "init_learn_rate", layer->init_learn_rate ));
1538 CV_CALL(cvWrite( fs, "weights", layer->weights ));
1540 if( ICV_IS_CNN_CONVOLUTION_LAYER( layer ))
1542 CvCNNConvolutionLayer* l = (CvCNNConvolutionLayer*)layer;
1546 else if( ICV_IS_CNN_SUBSAMPLING_LAYER( layer ) )
1548 CvCNNSubSamplingLayer* l = (CvCNNSubSamplingLayer*)layer;
1553 else if( ICV_IS_CNN_FULLCONNECT_LAYER( layer ) )
1555 CvCNNFullConnectLayer* l = (CvCNNFullConnectLayer*)layer;
1561 CV_ERROR( CV_StsBadArg, "Invalid layer" );
1563 CV_CALL( cvEndWriteStruct( fs )); //"opencv-ml-cnn-layer"
1572 CvCNNLayer* layer = 0;
1598 CV_CALL(layer = icvReadCNNLayer( fs, (CvFileNode*)reader.ptr ));
1599 CV_CALL(cnn->network = cvCreateCNNetwork( layer ));
1604 CV_CALL(layer = icvReadCNNLayer( fs, (CvFileNode*)reader.ptr ));
1605 CV_CALL(cnn->network->add_layer( cnn->network, layer ));
1613 if( layer ) layer->release( &layer );
1629 CvCNNLayer* layer;
1643 layer = cnn->network->layers;
1644 for( i = 0; i < n_layers && layer; i++, layer = layer->next_layer )
1645 CV_CALL(icvWriteCNNLayer( fs, layer ));
1646 if( i < n_layers || layer )