From fe062deba59cfbc8296ea1052d84f998b10afa95 Mon Sep 17 00:00:00 2001
From: Qiea <1310371422@qq.com>
Date: Mon, 11 Nov 2024 11:42:08 +0800
Subject: [PATCH] Implement the CNN computation functionality
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cnn.c | 52 +++++++++++++++-------------------------------------
 1 file changed, 15 insertions(+), 37 deletions(-)

diff --git a/cnn.c b/cnn.c
index 9564b17..b7c445d 100644
--- a/cnn.c
+++ b/cnn.c
@@ -62,10 +62,10 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
     for(int k=0; k < model_w.num_kernels; k++) {
         for (int row = 0; row < cr_l; row++) {
             for (int col = 0; col < cr_l; col++) {
+                conv_temp = 0; // reset the accumulator for each output pixel
                 for (int c = 0; c < model_w.channel; c++) {
                     conv_temp += _conv_rlst[(c * model_w.num_kernels * cr_l * cr_l) + (k * cr_l * cr_l) + (row * cr_l + col)];
                 }
-                conv_rlst[(k * cr_l * cr_l) + (row * cr_l + col)] = conv_temp;
                 // add the bias of the corresponding kernel
                 conv_temp += model_b.array[k];
                 // activation: ReLU (set values below 0 to 0)
@@ -77,27 +77,6 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
             }
         }
     }
-
-
-
-
-//    if(model_w.channel>1){
-//        float sum=0;
-//        for(int _l=0 ; _l < model_w.channel ; _l++){
-//            sum += input_matrix[
-//                (_l*(im_l*im_l)) + ((row+1)*(im_l)+(col+1))
-//            ];
-//        }
-//        conv_temp = sum;
-//    }
-//
-//    if(_debug < 10 && strcmp(model_w.name, "conv2_weight") == 0){
-//        printf("[%d]%f, input_matrix:[%f]\r\k",_debug,conv_temp,input_matrix[
-//            (0*(im_l*im_l)) + ((row+1)*(im_l)+(col+1))
-//        ]);
-//    }
-//    _debug++;
-
     return conv_rlst;
 }
@@ -174,9 +153,9 @@ int main(){
     // layer 2: pad to 32 * 52 * 52
     float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
-    print_rslt(expand_matrix_2, 52, (1*10*52));
+//    print_rslt(expand_matrix_2, 52, (1*10*52));
     float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52);
-    print_rslt(conv_rlst_2, 50, (1*10*50));
+//    print_rslt(conv_rlst_2, 50, (64*50*50));
     float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50);
 //    print_rslt(pool_rslt_2, 25, (1*25*25));
@@ -184,7 +163,7 @@ int main(){
     float* expand_matrix_3 = expand(pool_rslt_2, 25, 64);
 //    print_rslt(expand_matrix_2, 52, (1*52*52));
     float* conv_rlst_3 = convolution(conv3_weight,conv3_bias,expand_matrix_3, 27);
-//    print_rslt(conv_rlst_3, 25, (1*25*25));
+    print_rslt(conv_rlst_3, 25, (1*25*25));
     float* pool_rslt_3 = pooling(conv3_weight, conv_rlst_3, 25);
 //    print_rslt(pool_rslt_3, 12, (1*12*12));
@@ -228,31 +207,29 @@ int main(){
             affine1_rslt[n] = 0; // otherwise store 0
         }
-//    print_rslt(affine1_rslt,1,128);
-
+    print_rslt(affine1_rslt,1,128);
 
     float affine2_temp; // temporary for the output layer's intermediate results
-    float* affine2_rslt = (float *) malloc(sizeof(float)*7);
-    memset(affine2_rslt, 0, 7);
+    float affine2_rslt[7]; // output-layer results (the output layer has 7 neurons)
 
     // find the maximum value of the output layer
     float temp = -100; // running maximum, initialised to a very small value
-    u8 predict_num; // the predicted digit (index of the maximum)
+    int predict_num; // the predicted digit (index of the maximum)
 
 // iterate over the 7 output neurons (assuming 7 classes)
-    for(u8 n=0; n<7; n++)
+    for(int n=0; n<7; n++)
     {
         affine2_temp = 0; // initialise the current neuron's output to 0
         // matrix multiplication: dot product of the hidden-layer output and the output-layer weights
-        for(u8 i=0; i<128; i++)
+        for(int i=0; i<128; i++)
         {
-            affine2_temp = affine2_temp + fc2_weight.array[i+100*n] * affine1_rslt[i];
+            affine2_temp = affine2_temp + fc2_weight.array[i+128*n] * affine1_rslt[i];
         }
         // add the corresponding neuron's bias
         affine2_temp = affine2_temp + fc2_bias.array[n];
         affine2_rslt[n] = affine2_temp; // store the output-layer result
 
         // find the maximum
@@ -263,8 +240,9 @@ int main(){
         }
     }
-    print_rslt(affine2_rslt,7,7);
-    printf("RES is:%d",predict_num+1);
+    print_rslt(affine2_rslt,1,7);
+
+    printf("Label is:%d",predict_num+1);
     return 0;
 }
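
Editor's note: for reference, below is a minimal standalone sketch of the output-layer step this patch corrects -- a dot product of the 128-wide hidden activation against row-major [7][128] weights (w[i + 128*n], matching the fixed index), plus a per-neuron bias, followed by an argmax. The names predict, NUM_CLASSES, and HIDDEN are hypothetical; the flat weight layout and the use of fc2_bias for the bias term are assumptions read off the code above, not confirmed elsewhere in the patch.

    #include <stdio.h>

    #define NUM_CLASSES 7   /* output neurons, as in affine2_rslt[7] (assumed) */
    #define HIDDEN      128 /* hidden-layer width, as in affine1_rslt (assumed) */

    /* Returns the index of the largest logit. */
    static int predict(const float *w, const float *b, const float *hidden)
    {
        float best = -100.0f; /* running maximum, initialised as in the patch */
        int best_idx = 0;

        for (int n = 0; n < NUM_CLASSES; n++) {
            float logit = 0.0f;
            /* weight of (neuron n, input i) sits at w[i + HIDDEN*n] */
            for (int i = 0; i < HIDDEN; i++)
                logit += w[i + HIDDEN * n] * hidden[i];
            logit += b[n]; /* add the neuron's bias (fc2_bias, not a weight) */
            if (logit > best) {
                best = logit;
                best_idx = n;
            }
        }
        return best_idx;
    }

    int main(void)
    {
        float w[NUM_CLASSES * HIDDEN] = {0}; /* dummy weights */
        float b[NUM_CLASSES] = {0};          /* dummy biases */
        float hidden[HIDDEN] = {0};          /* dummy hidden activations */
        printf("predicted class: %d\n", predict(w, b, hidden));
        return 0;
    }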