完成三层卷积池化,但还没有验证

This commit is contained in:
Qiea
2024-11-10 22:35:33 +08:00
parent fb74170c00
commit 63cbfb9693
2 changed files with 56 additions and 55 deletions

107
cnn.c
View File

@@ -26,43 +26,37 @@ float* expand(const float* old_matrix, u8 old_matrix_length, u8 layer){
// 3x3 "valid" convolution over a multi-channel input, followed by ReLU.
//
// model_w: kernel weights, laid out as [num_kernels][layer][3][3]
//          (model_w.layer = number of input channels).
// model_b: one bias per output kernel (model_b.array[n]).
// input_matrix: input image, laid out as [layer][im_l][im_l].
// input_matrix_length: side length of one input channel (im_l).
//
// Returns a malloc'd buffer laid out as [num_kernels][im_l-2][im_l-2];
// caller owns it and must free(). Returns NULL on allocation failure.
float* convolution(Model model_w, Model model_b, const float* input_matrix, u8 input_matrix_length){
    u8 im_l = input_matrix_length;      // input side length per channel
    u8 cr_l = input_matrix_length - 2;  // output side length for a 3x3 valid conv
    float* conv_rlst = (float *) malloc(sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
    if (conv_rlst == NULL)
        return NULL;
    memset(conv_rlst, 0, sizeof (float) * model_w.num_kernels * (cr_l * cr_l));

    // One output pixel per (kernel n, row, col).
    for (u8 n = 0; n < model_w.num_kernels; n++) {
        for (u8 row = 0; row < cr_l; row++) {
            for (u8 col = 0; col < cr_l; col++) {
                float conv_temp = 0; // accumulator for this output pixel
                // Sum the 3x3 window over EVERY input channel before
                // applying bias/ReLU (previously each channel overwrote
                // the result and the weight index ignored the channel).
                for (u8 l = 0; l < model_w.layer; l++) {
                    for (u8 x = 0; x < 3; x++) {
                        for (u8 y = 0; y < 3; y++) {
                            conv_temp += input_matrix[(l * (im_l * im_l)) + ((row + x) * im_l) + (col + y)]
                                       * model_w.array[(n * (model_w.layer * 3 * 3)) + (l * (3 * 3)) + (x * 3) + y];
                        }
                    }
                }
                conv_temp += model_b.array[n]; // bias once per output pixel
                // ReLU: clamp negatives to 0.
                conv_rlst[(n * (cr_l * cr_l)) + (row * cr_l) + col] =
                    (conv_temp > 0) ? conv_temp : 0;
            }
        }
    }
    return conv_rlst;
}
@@ -73,33 +67,33 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, u8 i
//input_matrix_length 输入图像的边长100
//输出图像的边长50
//返回池化的结果
float* pooling(u8 num_kernels, u8 area, const float* input_matrix, u8 input_matrix_length){
float* pooling(Model model_w, const float* input_matrix, u8 input_matrix_length){
u8 im_l = input_matrix_length;
float pool_temp = 0; // 临时变量,用于存储池化操作的最大值
float* pool_rslt = (float *) malloc(sizeof (float)*num_kernels*input_matrix_length*input_matrix_length);
memset(pool_rslt, 0, sizeof (float)*num_kernels*input_matrix_length*input_matrix_length);
float* pool_rslt = (float *) malloc(sizeof (float)*model_w.num_kernels*im_l*im_l);
memset(pool_rslt, 0, sizeof (float)*model_w.num_kernels*im_l*im_l);
// 遍历30个通道与卷积核数量相同
for(u8 n=0; n<num_kernels; n++)
for(u8 n=0; n<model_w.num_kernels; n++)
{
// 遍历输入图像的每一行步长为22x2的池化窗口
for(u8 row=0; row<input_matrix_length; row=row+2)
for(u8 row=0; row<im_l; row=row+2)
{
// 遍历输入图像的每一列步长为2
for(u8 col=0; col<input_matrix_length; col=col+2)
for(u8 col=0; col<im_l; col=col+2)
{
pool_temp = 0; // 每个池化区域的最大值初始化为0
// 进行2x2的最大池化操作
for(u8 x=0; x<area; x++)
for(u8 x=0; x<2; x++)
{
for(u8 y=0; y<area; y++)
for(u8 y=0; y<2; y++)
{
// 更新当前池化区域的最大值
if(pool_temp <= input_matrix[row*input_matrix_length+col+x*input_matrix_length+y+n*(input_matrix_length*input_matrix_length)])
pool_temp = input_matrix[row*input_matrix_length+col+x*input_matrix_length+y+n*(input_matrix_length*input_matrix_length)];
if(pool_temp <= input_matrix[row*im_l+col+x*im_l+y+n*(im_l*im_l)])
pool_temp = input_matrix[row*im_l+col+x*im_l+y+n*(im_l*im_l)];
}
}
// 将最大值存入池化结果数组
pool_rslt[(row/2)*(input_matrix_length/2)+col/2+n*((input_matrix_length/2)*(input_matrix_length/2))] = pool_temp;
pool_rslt[(row/2)*(im_l/2)+col/2+n*((im_l/2)*(im_l/2))] = pool_temp;
}
}
}
@@ -131,20 +125,27 @@ int main(){
//第一层填充102 * 102
float* expand_matrix_1 = expand(data.array, 100, 1);
print_rslt(expand_matrix_1, 102, (0*102*102));
// print_rslt(expand_matrix_1, 102, (1*102*102));
float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102);
print_rslt(conv_rlst_1, 32, (1*100));
// float* pool_rslt_1 = pooling(32, 2, conv_rlst_1, 100);
//
////第二层填充102 * 102
// float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
// print_rslt(expand_matrix_2, 52, (0*52*52));
// float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 102);
// print_rslt(conv_rlst_2, 64, (1*50*50));
// float* pool_rslt_2 = pooling(64, 2, conv_rlst_2, 50);
//
// print_rslt(conv_rlst_1, 100, (32*100*100));
float* pool_rslt_1 = pooling(conv1_weight, conv_rlst_1, 100);
// print_rslt(pool_rslt_1, 50, (32*50*50));
//第二层填充32 * 52 * 52
float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
// print_rslt(expand_matrix_2, 52, (1*52*52));
float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52);
// print_rslt(conv_rlst_2, 50, (1*50*50));
float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50);
// print_rslt(pool_rslt_2, 25, (1*25*25));
//第三层:填充 64 * 27 * 27
float* expand_matrix_3 = expand(pool_rslt_2, 25, 64);
// print_rslt(expand_matrix_2, 52, (1*52*52));
float* conv_rlst_3 = convolution(conv3_weight,conv3_bias,expand_matrix_3, 27);
print_rslt(conv_rlst_3, 25, (1*25*25));
float* pool_rslt_3 = pooling(conv3_weight, conv_rlst_3, 25);
print_rslt(pool_rslt_3, 25, (1*12*12));

View File

@@ -361,8 +361,8 @@ void model_init(){
conv3_weight.name = "conv3_weight";
conv3_weight.array = modelmym_init(conv3_weight.name);
conv3_weight.maxlength = CONV3_WEIGHT_ARRSIZE;
conv2_weight.layer = 64;
conv2_weight.num_kernels = 128;
conv3_weight.layer = 64;
conv3_weight.num_kernels = 128;
fc1_bias.name = "fc1_bias";
fc1_bias.array = modelmym_init(fc1_bias.name);