From b91bb81d81e1dce4a95ba3393a487d20d2f362de Mon Sep 17 00:00:00 2001 From: Qiea <1310371422@qq.com> Date: Mon, 11 Nov 2024 11:04:38 +0800 Subject: [PATCH] cnn: rename Model.layer to Model.channel and sum convolution over input channels --- cnn.c | 58 +++++++++++++++++++++++++++++++++++++++-------------- cnn_model.c | 6 +++--- cnn_model.h | 2 +- 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/cnn.c b/cnn.c index 49873a2..fed1873 100644 --- a/cnn.c +++ b/cnn.c @@ -30,12 +30,17 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int int im_l = input_matrix_length; int cr_l = input_matrix_length - 2; float conv_temp; // 临时变量,用于存储卷积计算的中间结果 + + //用于合并前的数组,具有32*64*50*50(第二层)的大小 + float* _conv_rlst = (float *) malloc(sizeof (float) * model_w.channel * model_w.num_kernels * (cr_l * cr_l)); + memset(_conv_rlst, 0, sizeof (float) * model_w.channel * model_w.num_kernels * (cr_l * cr_l)); + //子图合并后的数组 float* conv_rlst = (float *) malloc(sizeof (float) * model_w.num_kernels * (cr_l * cr_l)); memset(conv_rlst, 0, sizeof (float) * model_w.num_kernels * (cr_l * cr_l)); - // 遍历30个卷积核(假设有30个通道) - for(int l=0;l1){ +// float sum=0; +// for(int _l=0 ; _l < model_w.channel ; _l++){ +// sum += input_matrix[ +// (_l*(im_l*im_l)) + ((row+1)*(im_l)+(col+1)) +// ]; +// } +// conv_temp = sum; +// } +// +// if(_debug < 10 && strcmp(model_w.name, "conv2_weight") == 0){ +// printf("[%d]%f, input_matrix:[%f]\r\n",_debug,conv_temp,input_matrix[ +// (0*(im_l*im_l)) + ((row+1)*(im_l)+(col+1)) +// ]); +// } +// _debug++; + // 加上对应卷积核的偏置 - conv_temp += model_b.array[n]; + conv_temp += model_b.array[k]; // 激活函数:ReLU(将小于0的值设为0) if (conv_temp > 0) - conv_rlst[(n*(cr_l*cr_l)) + (row*cr_l) + (col)] = conv_temp; // 如果卷积结果大于0,存入结果数组 + conv_rlst[(k * (cr_l * cr_l)) + (row * cr_l) + (col)] = conv_temp; // 如果卷积结果大于0,存入结果数组 else - conv_rlst[(n*(cr_l*cr_l)) + (row*cr_l) + (col)] = 0; // 否则存入0 + conv_rlst[(k * (cr_l * cr_l)) + (row * cr_l) + (col)] = 0; // 否则存入0 } } } + + + + + + + } + + return conv_rlst; } @@ -130,7 +158,7 @@ int main(){ 
//第一层:填充102 * 102 float* expand_matrix_1 = expand(data.array, 100, 1); -// print_rslt(expand_matrix_1, 102, (1*102*102)); +// print_rslt(expand_matrix_1, 102, (1*10*102)); float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102); // print_rslt(conv_rlst_1, 100*0.01, (0.01*10*100)); float* pool_rslt_1 = pooling(conv1_weight, conv_rlst_1, 100); @@ -138,9 +166,9 @@ int main(){ //第二层:填充32 * 52 * 52 float* expand_matrix_2 = expand(pool_rslt_1, 50, 32); -// print_rslt(expand_matrix_2, 52, (1*52*52)); + print_rslt(expand_matrix_2, 52, (1*10*52)); float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52); -// print_rslt(conv_rlst_2, 50*0.02, (0.02*10*50)); + print_rslt(conv_rlst_2, 50, (1*10*50)); float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50); // print_rslt(pool_rslt_2, 25, (1*25*25)); diff --git a/cnn_model.c b/cnn_model.c index cb87d66..e866b5c 100644 --- a/cnn_model.c +++ b/cnn_model.c @@ -341,7 +341,7 @@ void model_init(){ conv1_weight.name = "conv1_weight"; conv1_weight.array = modelmym_init(conv1_weight.name); conv1_weight.maxlength = CONV1_WEIGHT_ARRSIZE; - conv1_weight.layer = 1; + conv1_weight.channel = 1; conv1_weight.num_kernels = 32; conv2_bias.name = "conv2_bias"; @@ -351,7 +351,7 @@ void model_init(){ conv2_weight.name = "conv2_weight"; conv2_weight.array = modelmym_init(conv2_weight.name); conv2_weight.maxlength = CONV2_WEIGHT_ARRSIZE; - conv2_weight.layer = 32; + conv2_weight.channel = 32; conv2_weight.num_kernels = 64; conv3_bias.name = "conv3_bias"; @@ -361,7 +361,7 @@ void model_init(){ conv3_weight.name = "conv3_weight"; conv3_weight.array = modelmym_init(conv3_weight.name); conv3_weight.maxlength = CONV3_WEIGHT_ARRSIZE; - conv3_weight.layer = 64; + conv3_weight.channel = 64; conv3_weight.num_kernels = 128; fc1_bias.name = "fc1_bias"; diff --git a/cnn_model.h b/cnn_model.h index 459f13d..65207cc 100644 --- a/cnn_model.h +++ b/cnn_model.h @@ -15,7 +15,7 @@ typedef struct { u32 maxlength; u32 
realength; - u8 layer; + u8 channel; u8 num_kernels; } Model;