From 2a6580ac3000883d6471bb9cac1a91e91dde3489 Mon Sep 17 00:00:00 2001
From: Qiea <1310371422@qq.com>
Date: Mon, 11 Nov 2024 12:40:52 +0800
Subject: [PATCH] Simplify code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cnn.c       | 154 ++++++++++++++++++++++++++++------------------------
 cnn_model.c |   4 +-
 cnn_model.h |   1 -
 3 files changed, 84 insertions(+), 75 deletions(-)

diff --git a/cnn.c b/cnn.c
index caf704a..e02d06a 100644
--- a/cnn.c
+++ b/cnn.c
@@ -2,6 +2,20 @@
 
+void print_rslt(float* rslt, u8 input_matrix_length, u32 length){
+    int _tmp = 0;
+    printf("[0:0]");
+    for (int i = 0; i < length; i++) {
+        printf("%f ",rslt[i]);
+        if ((i + 1) % input_matrix_length == 0) {
+            printf("\n[%d:%d]",++_tmp,i+1);
+        }
+    }
+    printf("\r\n");
+}
+
+
+
 // Copy the original matrix into the center of the padded matrix
 float* expand(const float* old_matrix, int old_matrix_length, int layer){
     float* new_matrix = (float *)malloc(sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
@@ -125,16 +139,75 @@ float* pooling(Model model_w, const float* input_matrix, u8 input_matrix_length)
 
-void print_rslt(float* rslt, u8 input_matrix_length, u32 length){
-    int _tmp = 0;
-    printf("[0:0]");
-    for (int i = 0; i < length; i++) {
-        printf("%f ",rslt[i]);
-        if ((i + 1) % input_matrix_length == 0) {
-            printf("\n[%d:%d]",++_tmp,i+1);
+float* hidden(const float* input_matrix){
+    float affine1_temp;  // scratch accumulator for the fully connected layer
+    float *affine1_rslt = (float *) malloc(sizeof(float)*128);
+    memset(affine1_rslt, 0, sizeof(float)*128);
+
+    // Loop over the 128 hidden-layer neurons
+    for(u8 n=0; n<128; n++)
+    {
+        affine1_temp = 0;  // reset the accumulator for this neuron
+
+        // Matrix multiply: dot the flattened pooling output with this neuron's fc1 weights
+        for(int i=0; i<(128*12*12); i++)
+        {
+            affine1_temp = affine1_temp + input_matrix[i] * fc1_weight.array[i+(128*12*12)*n];
+        }
+
+        // Add this neuron's bias
+        affine1_temp = affine1_temp + fc1_bias.array[n];
+
+        // Activation: ReLU (clamp negative values to zero)
+        if(affine1_temp > 0)
+            affine1_rslt[n] = affine1_temp;  // keep positive results
+        else
+            affine1_rslt[n] = 0;             // otherwise store zero
+    }
+
+    // print_rslt(affine1_rslt,1,128);
+
+    return affine1_rslt;
+}
+
+
+
+float* output(const float* input_matrix){
+    float affine2_temp;  // scratch accumulator for the output layer
+    float *affine2_rslt = (float *) malloc(sizeof(float)*7);
+    memset(affine2_rslt, 0, sizeof(float)*7);
+
+    // Track the maximum output value
+    float temp = -100;    // running maximum, initialized to a very small value
+    int predict_num = 0;  // predicted class (index of the maximum)
+
+    // Loop over the 7 output neurons (one per class)
+    for(int n=0; n<7; n++)
+    {
+        affine2_temp = 0;  // reset the accumulator for this neuron
+
+        // Matrix multiply: dot the hidden-layer output with this neuron's fc2 weights
+        for(int i=0; i<128; i++)
+        {
+            affine2_temp = affine2_temp + fc2_weight.array[i+128*n] * input_matrix[i];
+        }
+
+        // Add this neuron's bias
+        affine2_temp = affine2_temp + fc2_bias.array[n];
+        affine2_rslt[n] = affine2_temp;  // store the output-layer result
+
+        // Find the maximum
+        if(temp <= affine2_rslt[n])
+        {
+            temp = affine2_rslt[n];  // update the running maximum
+            predict_num = n;         // record the class index of the maximum
         }
     }
-    printf("\r\n");
+
+    print_rslt(affine2_rslt,7,7);
+    printf("Label is:%d\r\n",predict_num+1);
+
+    return affine2_rslt;
 }
 
@@ -154,68 +227,7 @@ void cnn_run(){
     float* conv_rlst_3 = convolution(conv3_weight,conv3_bias,expand_matrix_3, 27);
     float* pool_rslt_3 = pooling(conv3_weight, conv_rlst_3, 25);
 
-    {
-        // Hidden-layer parameter addresses
-        float *affine1_rslt = (float *) malloc(sizeof(float)*128);
-        memset(affine1_rslt, 0, sizeof(float)*128);
-        float affine1_temp;  // scratch for the fully connected layer's intermediate result
+    float* affine1_rslt = hidden(pool_rslt_3);
+    float* affine2_rslt = output(affine1_rslt);
 
-// Loop over the 128 hidden-layer neurons
-    for(u8 n=0; n<128; n++)
-    {
-        affine1_temp = 0;  // each neuron's output starts at zero
-
-        // Matrix multiply: dot the flattened pooling output with this neuron's fc1 weights
-        for(int i=0; i<(128*12*12); i++)
-        {
-            affine1_temp = affine1_temp + pool_rslt_3[i] * fc1_weight.array[i+(128*12*12)*n];
-        }
-
-        // Add this neuron's bias
-        affine1_temp = affine1_temp + fc1_bias.array[n];
-
-        // Activation: ReLU (clamp negative values to zero)
-        if(affine1_temp > 0)
-            affine1_rslt[n] = affine1_temp;  // keep positive results
-        else
-            affine1_rslt[n] = 0;             // otherwise store zero
-    }
-
-// print_rslt(affine1_rslt,1,128);
-
-
-    float affine2_temp;     // scratch for the output layer's intermediate result
-    float affine2_rslt[7];  // output-layer results (assuming 7 output neurons)
-
-// Track the maximum output value
-    float temp = -100;  // running maximum, initialized to a very small value
-    int predict_num;    // predicted class (index of the maximum)
-
-// Loop over the 10 output neurons (assuming 10 classes)
-    for(int n=0; n<7; n++)
-    {
-        affine2_temp = 0;  // each neuron's output starts at zero
-
-        // Matrix multiply: dot the hidden-layer output with this neuron's fc2 weights
-        for(int i=0; i<128; i++)
-        {
-            affine2_temp = affine2_temp + fc2_weight.array[i+128*n] * affine1_rslt[i];
-        }
-
-        // Add this neuron's bias
-        affine2_temp = affine2_temp + fc2_weight.array[n];
-        affine2_rslt[n] = affine2_temp;  // store the output-layer result
-
-        // Find the maximum
-        if(temp <= affine2_rslt[n])
-        {
-            temp = affine2_rslt[n];  // update the running maximum
-            predict_num = n;         // record the class index of the maximum
-        }
-    }
-
-    print_rslt(affine2_rslt,7,7);
-
-    printf("Label is:%d\r\n",predict_num+1);
-    }
 }
diff --git a/cnn_model.c b/cnn_model.c
index e6969ce..40e10c1 100644
--- a/cnn_model.c
+++ b/cnn_model.c
@@ -287,9 +287,7 @@ u8 model_switchdata(char* data_name){
     }
 }
 
-void model_dataset(){
-printf("\r\ndataset is: %s\r\n",data.dname);
-}
+
 
 u8 model_info(char* model_name){
     if(strcmp(model_name, "all") == 0){
diff --git a/cnn_model.h b/cnn_model.h
index 65207cc..db32eb5 100644
--- a/cnn_model.h
+++ b/cnn_model.h
@@ -56,7 +56,6 @@ u8 modelmym_free(char* model_name);
 u8 model_write(char* model_name);
 u8 model_read(char* model_name, u32 start, u32 end, u32 gap);
 u8 model_switchdata(char* model_name);
-void model_dataset(void);
 u8 model_info(char* model_name);
 void* model(char* model_name);
 void model_init(void);
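
Note: the extracted hidden() and output() both return buffers allocated with malloc(), and cnn_run() never frees them (nor the conv/pool intermediates). Below is a minimal sketch of how a caller could release each stage's buffer once the next stage has consumed it; the ownership convention and the helper name classify_and_free are assumptions for illustration, not code from this patch.

#include <stdlib.h>

/* Sketch only: assumes each stage's malloc'd result is owned by the caller.
 * hidden() and output() are the functions introduced by this patch;
 * pool_rslt_3 stands in for the conv/pool pipeline output. */
static void classify_and_free(float* pool_rslt_3)
{
    float* affine1_rslt = hidden(pool_rslt_3);   /* 128-wide fc1 + ReLU */
    free(pool_rslt_3);                           /* pooled features consumed */

    float* affine2_rslt = output(affine1_rslt);  /* 7 logits; prints the label */
    free(affine1_rslt);

    free(affine2_rslt);                          /* logits not needed afterwards */
}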