This commit is contained in:
Qiea
2024-11-11 11:04:38 +08:00
parent c948bd7c17
commit b91bb81d81
3 changed files with 47 additions and 19 deletions

58
cnn.c
View File

@@ -30,12 +30,17 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
int im_l = input_matrix_length;
int cr_l = input_matrix_length - 2;
float conv_temp; // 临时变量,用于存储卷积计算的中间结果
//用于合并前的数组具有32*64*50*50(第二层)的大小
float* _conv_rlst = (float *) malloc(sizeof (float) * model_w.channel * model_w.num_kernels * (cr_l * cr_l));
memset(_conv_rlst, 0, sizeof (float) * model_w.channel * model_w.num_kernels * (cr_l * cr_l));
//子图合并后的数组
float* conv_rlst = (float *) malloc(sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
memset(conv_rlst, 0, sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
// 遍历30个卷积核假设有30个通道
for(int l=0;l<model_w.layer;l++){
for(int n=0; n<model_w.num_kernels; n++){
// 遍历所有输入通道与卷积核channel 与 num_kernels 由模型参数决定
for(int c=0; c < model_w.channel; c++){
for(int k=0; k < model_w.num_kernels; k++){
for(int row = 0; row < cr_l; row++) {
for (int col = 0; col < cr_l; col++) {
conv_temp = 0; // 每个输出像素初始化为0
@@ -43,25 +48,48 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
for (int x = 0; x < 3; x++) {
for (int y = 0; y < 3; y++) {
// 将输入图像的对应像素与卷积核权重相乘并累加到conv_temp
conv_temp += input_matrix[(l*(im_l*im_l)) + (row*im_l+col) + (x*im_l+y)] *
model_w.array[(l+n*(model_w.layer))*(3*3) + (x*3) + y];
conv_temp += input_matrix[(c*im_l*im_l) + (row*(im_l)+col) + (x*(im_l)+y)]
* model_w.array[() + (x*3+y) + (k*3*3)];
}
}
if(_debug < 10 && strcmp(model_w.name, "conv2_weight") == 0){
printf("[%d]%f\r\n",_debug,conv_temp);
}
_debug++;
// if(model_w.channel>1){
// float sum=0;
// for(int _l=0 ; _l < model_w.channel ; _l++){
// sum += input_matrix[
// (_l*(im_l*im_l)) + ((row+1)*(im_l)+(col+1))
// ];
// }
// conv_temp = sum;
// }
//
// if(_debug < 10 && strcmp(model_w.name, "conv2_weight") == 0){
// printf("[%d]%f, input_matrix:[%f]\r\n",_debug,conv_temp,input_matrix[
// (0*(im_l*im_l)) + ((row+1)*(im_l)+(col+1))
// ]);
// }
// _debug++;
// 加上对应卷积核的偏置
conv_temp += model_b.array[n];
conv_temp += model_b.array[k];
// 激活函数ReLU将小于0的值设为0
if (conv_temp > 0)
conv_rlst[(n*(cr_l*cr_l)) + (row*cr_l) + (col)] = conv_temp; // 如果卷积结果大于0存入结果数组
conv_rlst[(k * (cr_l * cr_l)) + (row * cr_l) + (col)] = conv_temp; // 如果卷积结果大于0存入结果数组
else
conv_rlst[(n*(cr_l*cr_l)) + (row*cr_l) + (col)] = 0; // 否则存入0
conv_rlst[(k * (cr_l * cr_l)) + (row * cr_l) + (col)] = 0; // 否则存入0
}
}
}
}
return conv_rlst;
}
@@ -130,7 +158,7 @@ int main(){
//第一层填充102 * 102
float* expand_matrix_1 = expand(data.array, 100, 1);
// print_rslt(expand_matrix_1, 102, (1*102*102));
// print_rslt(expand_matrix_1, 102, (1*10*102));
float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102);
// print_rslt(conv_rlst_1, 100*0.01, (0.01*10*100));
float* pool_rslt_1 = pooling(conv1_weight, conv_rlst_1, 100);
@@ -138,9 +166,9 @@ int main(){
//第二层填充32 * 52 * 52
float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
// print_rslt(expand_matrix_2, 52, (1*52*52));
print_rslt(expand_matrix_2, 52, (1*10*52));
float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52);
// print_rslt(conv_rlst_2, 50*0.02, (0.02*10*50));
print_rslt(conv_rlst_2, 50, (1*10*50));
float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50);
// print_rslt(pool_rslt_2, 25, (1*25*25));

View File

@@ -341,7 +341,7 @@ void model_init(){
conv1_weight.name = "conv1_weight";
conv1_weight.array = modelmym_init(conv1_weight.name);
conv1_weight.maxlength = CONV1_WEIGHT_ARRSIZE;
conv1_weight.layer = 1;
conv1_weight.channel = 1;
conv1_weight.num_kernels = 32;
conv2_bias.name = "conv2_bias";
@@ -351,7 +351,7 @@ void model_init(){
conv2_weight.name = "conv2_weight";
conv2_weight.array = modelmym_init(conv2_weight.name);
conv2_weight.maxlength = CONV2_WEIGHT_ARRSIZE;
conv2_weight.layer = 32;
conv2_weight.channel = 32;
conv2_weight.num_kernels = 64;
conv3_bias.name = "conv3_bias";
@@ -361,7 +361,7 @@ void model_init(){
conv3_weight.name = "conv3_weight";
conv3_weight.array = modelmym_init(conv3_weight.name);
conv3_weight.maxlength = CONV3_WEIGHT_ARRSIZE;
conv3_weight.layer = 64;
conv3_weight.channel = 64;
conv3_weight.num_kernels = 128;
fc1_bias.name = "fc1_bias";

View File

@@ -15,7 +15,7 @@ typedef struct {
u32 maxlength;
u32 realength;
u8 layer;
u8 channel;
u8 num_kernels;
} Model;