Full functionality implemented and verified; not yet streamlined

Qiea
2024-11-11 14:34:09 +08:00
parent 2a6580ac30
commit f52b9f1aee
4 changed files with 115 additions and 27 deletions

132
cnn.c

@@ -14,11 +14,14 @@ void print_rslt(float* rslt, u8 input_matrix_length, u32 length){
printf("\r\n");
}
// Copy the original matrix into the center of the padded matrix
float* expand(const float* old_matrix, int old_matrix_length, int layer){
float* new_matrix = (float *)malloc(sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
if (new_matrix == NULL) {
// Handle malloc failure: report the error and exit
perror("Memory allocation failed");
exit(1); // exit the program
}
memset(new_matrix, 0, sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
for(int l=0; l < layer; l++){
for (int i = 0; i < old_matrix_length; i++) {
@@ -33,8 +36,6 @@ float* expand(const float* old_matrix, int old_matrix_length, int layer){
return new_matrix;
}
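A minimal usage sketch of expand() as defined above (the 3x3 input values are hypothetical): it returns a heap buffer of layer*(n+2)*(n+2) floats with each n*n layer copied into the middle of a zero border, and the caller is responsible for freeing it; print_rslt() is assumed to dump length values in rows of input_matrix_length, matching its use later in this file.

/* Hypothetical data: pad one 3x3 layer to 5x5 with a zero border. */
float m[9] = {1,2,3, 4,5,6, 7,8,9};
float* padded = expand(m, 3, 1);   /* 1 layer, 3x3 -> 5x5 */
print_rslt(padded, 5, 5*5);        /* dump the padded matrix */
free(padded);                      /* expand() allocates; the caller frees */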
//model  model name
//input_matrix  the input image
//input_matrix_length  side length of the input image (102)
@@ -42,7 +43,6 @@ float* expand(const float* old_matrix, int old_matrix_length, int layer){
//returns the convolution result
float* convolution(Model model_w, Model model_b, const float* input_matrix, int input_matrix_length){
// Initialize the convolution-layer parameters
int _debug=0;
int im_l = input_matrix_length;
int cr_l = input_matrix_length - 2;
float conv_temp; // temporary variable holding intermediate convolution results
@@ -93,11 +93,10 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
}
}
}
free(_conv_rlst);
return conv_rlst;
}
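Most of the convolution() body is elided by the hunk above; the dimensions it uses (output side cr_l = im_l - 2) imply a 3x3 kernel applied without padding. A generic, self-contained sketch of that operation follows; the weight layout and the ReLU at the end are assumptions, not taken from this diff.

/* Sketch of a 3x3 valid convolution, assuming weights laid out as [out_ch][in_ch][3][3]. */
void conv3x3_valid(const float* in, int in_ch, int n,   /* input:  in_ch x n x n          */
                   const float* w, const float* b,
                   int out_ch, float* out)              /* output: out_ch x (n-2) x (n-2) */
{
    int m = n - 2;
    for (int oc = 0; oc < out_ch; oc++)
        for (int r = 0; r < m; r++)
            for (int c = 0; c < m; c++) {
                float acc = b[oc];
                for (int ic = 0; ic < in_ch; ic++)
                    for (int kr = 0; kr < 3; kr++)
                        for (int kc = 0; kc < 3; kc++)
                            acc += in[(ic*n + r + kr)*n + c + kc]
                                 * w[((oc*in_ch + ic)*3 + kr)*3 + kc];
                out[(oc*m + r)*m + c] = acc > 0 ? acc : 0;  /* ReLU assumed */
            }
}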
//num_kernels  number of convolution kernels (32)
//area  pooling window area (2*2)
//input_matrix  the input image
@@ -137,8 +136,6 @@ float* pooling(Model model_w, const float* input_matrix, u8 input_matrix_length)
return pool_rslt;
}
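pooling() halves each side (50 -> 25, 25 -> 12), consistent with the 2*2 window noted above. A sketch of one way that reduction can be done; max pooling is an assumption here, since the actual reduction is not visible in this hunk.

/* 2x2 pooling with stride 2 per channel; max pooling assumed. */
void pool2x2(const float* in, int ch, int n, float* out)   /* out: ch x (n/2) x (n/2) */
{
    int m = n / 2;                                         /* 50 -> 25, 25 -> 12 */
    for (int c = 0; c < ch; c++)
        for (int r = 0; r < m; r++)
            for (int q = 0; q < m; q++) {
                float v = in[(c*n + 2*r)*n + 2*q];
                for (int dr = 0; dr < 2; dr++)
                    for (int dq = 0; dq < 2; dq++) {
                        float t = in[(c*n + 2*r + dr)*n + 2*q + dq];
                        if (t > v) v = t;
                    }
                out[(c*m + r)*m + q] = v;
            }
}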
float* hidden(const float* input_matrix){
float affine1_temp; // temporary variable holding intermediate results of the fully connected layer
float *affine1_rslt = (float *) malloc(sizeof(float)*128);
@@ -170,8 +167,6 @@ float* hidden(const float* input_matrix){
return affine1_rslt;
}
float* output(const float* input_matrix){
float affine2_temp; // temporary variable holding intermediate results of the output layer
float *affine2_rslt = (float *) malloc(sizeof(float)*7);
@@ -212,22 +207,107 @@ float* output(const float* input_matrix){
// value[0] = max |x|, value[1] = mean of |x|, value[2] = standard deviation of |x|
void calculate_statistics(Model model, float* value)
{
value[0] = fabsf(model.array[0]);
float sum = 0;
float sum_sq = 0;
for (int i = 0; i < model.maxlength; i++) {
float abs_val = fabsf(model.array[i]);
if (abs_val > value[0]) {
value[0] = abs_val;
}
sum += abs_val;
sum_sq += abs_val * abs_val;
}
value[1] = sum / (float)model.maxlength;
float variance = (sum_sq / (float)model.maxlength) - (value[1] * value[1]);
value[2] = sqrtf(variance);
}
u8 check_threshold(Model model, const float* value)
{
const float threshold = 20;
for (int i = 0; i < model.maxlength; i++) {
float K = (fabsf(model.array[i]) - value[1]) / value[2];
if (K > threshold) {
return 1;
}
}
return 0;
}
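calculate_statistics() fills value[] with the peak, mean, and standard deviation of |array[i]|; check_threshold() then flags the record when any sample's deviation score K = (|x| - mean) / std exceeds 20, which is what gates the CNN below. A worked example with hypothetical statistics:

/* Hypothetical: value[1] = 0.5 (mean), value[2] = 0.1 (std dev). */
float K = (fabsf(-3.0f) - 0.5f) / 0.1f;   /* K = 25 > 20, so check_threshold() returns 1 */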
float* generateMatrix(Model model, const float* value)
{
float* CNN_data = (float*) malloc(sizeof(float)*100*100);
memset(CNN_data, 0, sizeof(float)*100*100);
u16 x = model.maxlength / 100;
float y = value[0] / 100;
for (int i = 0; i < model.maxlength; i++) {
float absolutevalue = fabsf(model.array[i]);
if (!absolutevalue) {
continue;
}
int xIndex = i / x;
if (xIndex >= 100) xIndex = 99;
int yIndex = (int)(absolutevalue / y);
if (yIndex < 0) yIndex = 0;
if (yIndex > 99) yIndex = 99; // clamp: the maximum-magnitude sample would otherwise index row 100 (out of bounds)
CNN_data[yIndex * 100 + xIndex]++;
}
return CNN_data;
}
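generateMatrix() bins the record into a 100x100 time/amplitude histogram: each column covers maxlength/100 consecutive samples, each row covers value[0]/100 units of magnitude, and every non-zero sample increments its cell. A worked example (the peak of 500 is hypothetical; 1250000 is DATA_ARRSIZE from cnn_model.h):

u16 x = 1250000 / 100;           /* 12500 samples per column        */
float y = 500.0f / 100;          /* 5.0 magnitude units per row     */
int xIndex = 30000 / x;          /* sample 30000 -> column 2        */
int yIndex = (int)(37.5f / y);   /* magnitude 37.5 -> row 7         */
/* CNN_data[yIndex * 100 + xIndex] == CNN_data[702] is incremented. */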
void cnn_run(){
float value[3] = {0};
calculate_statistics(data,&value[0]);
if (check_threshold(data,&value[0])){
float* _data = generateMatrix(data,&value[0]);
// printf("max: %f  mean: %f  std dev: %f\r\n",value[0],value[1],value[2]);
// print_rslt(_data,100,1*100*100);
//Layer 1: pad to 102 * 102
float* expand_matrix_1 = expand(_data, 100, 1);
float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102);
float* pool_rslt_1 = pooling(conv1_weight, conv_rlst_1, 100);
free(_data);
free(expand_matrix_1);
free(conv_rlst_1);
//Layer 2: pad to 32 * 52 * 52
float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52);
float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50);
free(pool_rslt_1);
free(expand_matrix_2);
free(conv_rlst_2);
//Layer 3: pad to 64 * 27 * 27
float* expand_matrix_3 = expand(pool_rslt_2, 25, 64);
float* conv_rlst_3 = convolution(conv3_weight,conv3_bias,expand_matrix_3, 27);
float* pool_rslt_3 = pooling(conv3_weight, conv_rlst_3, 25);
free(pool_rslt_2);
free(expand_matrix_3);
free(conv_rlst_3);
float* affine1_rslt = hidden(pool_rslt_3);
float* affine2_rslt = output(affine1_rslt);
free(pool_rslt_3);
free(affine1_rslt);
free(affine2_rslt);
}
}
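The sizes passed through cnn_run() above follow a pad-by-1 / 3x3-conv / 2x2-pool pattern. A small stand-alone check of that arithmetic (PAD/CONV/POOL are hypothetical helpers, not part of this commit); the final flattened size matches FC1_WEIGHT_ARRSIZE = 128*18432 in cnn_model.h:

#include <assert.h>
#define PAD(n)  ((n) + 2)     /* expand(): zero border of width 1 */
#define CONV(n) ((n) - 2)     /* 3x3 valid convolution            */
#define POOL(n) ((n) / 2)     /* 2x2 pooling                      */
int main(void)
{
    int s = 100;                    /* 100x100 input from generateMatrix() */
    s = POOL(CONV(PAD(s)));         /* 102 -> 100 -> 50                     */
    s = POOL(CONV(PAD(s)));         /* 52  -> 50  -> 25                     */
    s = POOL(CONV(PAD(s)));         /* 27  -> 25  -> 12                     */
    assert(128 * s * s == 18432);   /* 128 channels x 12 x 12 feed hidden() */
    return 0;
}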

1
cnn.h

@@ -2,6 +2,7 @@
#define _CNN_H_
#include "cnn_model.h"
#include <stdio.h>
#include <math.h>

cnn_model.h

@@ -8,6 +8,7 @@
#include <stdint.h>
typedef struct {
char* name;
char* dname;
@@ -33,7 +34,11 @@ typedef struct {
#define FC1_WEIGHT_ARRSIZE (128*18432) //2359296
#define FC2_BIAS_ARRSIZE (7)
#define FC2_WEIGHT_ARRSIZE (7*128) //896
#define DATA_ARRSIZE (100*100) //1250000
#if 1
#define DATA_ARRSIZE (1250000)
#else
#define DATA_ARRSIZE (100 * 100)
#endif

2
main.c

@@ -83,6 +83,8 @@ int main(){
model_write("all");
run_dataset();
// model_switchdata("C1autosave00095_right_new_2");
// cnn_run();
DEBUG_PRINTF("\r\nEnd");
}