commit fb74170c00 (parent a477bd8606)
Author: Qiea
Date: 2024-11-10 21:15:37 +08:00

3 changed files with 53 additions and 49 deletions

cnn.c

@@ -19,50 +19,44 @@ float* expand(const float* old_matrix, u8 old_matrix_length, u8 layer){
     return new_matrix;
 }
-//num_kernels  number of convolution kernels, 32
-//area  area of the convolution window, 3*3
+//model  model name
 //input_matrix  input image
 //input_matrix_length  side length of the input image, 102
-//side length of the output image: 100
+//c_rl  side length of the output image, 100
 //returns the result of the convolution
-float* convolution(u8 num_kernels, u8 area, const float* input_matrix, u8 input_matrix_length, u8 layer){
+float* convolution(Model model_w, Model model_b, const float* input_matrix, u8 input_matrix_length){
     // initialize the convolution-layer parameters
+    u8 c_rl = input_matrix_length - 2;
     float conv_temp; // temporary variable holding the running convolution sum
-    float* conv_rlst = (float *) malloc(sizeof (float)*layer*num_kernels*(input_matrix_length-2)*(input_matrix_length-2));
-    memset(conv_rlst, 0, sizeof (float)*layer*num_kernels*(input_matrix_length-2)*(input_matrix_length-2));
+    float* conv_rlst = (float *) malloc(sizeof (float) * model_w.num_kernels * model_w.layer * (c_rl*c_rl));
+    memset(conv_rlst, 0, sizeof (float) * model_w.num_kernels * model_w.layer * (c_rl*c_rl));
     // iterate over the 30 convolution kernels (assuming 30 channels)
-    for(u8 l=0;l<layer;l++){
-        for(u8 n=0; n<num_kernels; n++)
-        {
+    for(u8 n=0; n<model_w.num_kernels; n++){
+        for(u8 l=0;l<model_w.layer;l++){
             // iterate over each row of the output image (convolution output is 24x24)
-            for(u8 row=0; row<(input_matrix_length-2); row++)
-            {
+            for(u8 row=0; row<(c_rl); row++) {
                 // iterate over each column of the output image
-                for(u8 col=0; col<(input_matrix_length-2); col++)
-                {
+                for (u8 col = 0; col < (input_matrix_length - 2); col++) {
                     conv_temp = 0; // initialize each output pixel to 0
-                    // perform the 5x5 convolution
-                    for(u8 x=0; x<area; x++)
-                    {
-                        for(u8 y=0; y<area; y++)
-                        {
+                    // perform the 3x3 convolution
+                    for (u8 x = 0; x < 3; x++) {
+                        for (u8 y = 0; y < 3; y++) {
                             // multiply the corresponding input pixel by the kernel weight and accumulate into conv_temp
-                            conv_temp += input_matrix[row*input_matrix_length+col+x*input_matrix_length+y] * conv1_weight.array[x*area+y+n*(area*area)];
+                            conv_temp += input_matrix[model_w.layer*(input_matrix_length*input_matrix_length)+row*28+col+(x*(input_matrix_length)+y)] * model_w.array[n*(model_w.layer*3*3)+l*(3*3)+x*3+y];
                         }
                     }
                     // add the bias of the corresponding kernel
-                    conv_temp += conv1_bias.array[n];
+                    conv_temp += model_b.array[n];
                     // ReLU activation: set values below 0 to 0
                     if (conv_temp > 0)
                         conv_rlst[row*(input_matrix_length-2)+col+n*(input_matrix_length-2)*(input_matrix_length-2)] = conv_temp; // if the convolution result is greater than 0, store it in the result array
                     else
                         conv_rlst[row*(input_matrix_length-2)+col+n*(input_matrix_length-2)*(input_matrix_length-2)] = 0; // otherwise store 0
                 }
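Three spots in the rewritten inner loop look suspect: `row*28` (the padded side length here is 102, not 28), the constant `model_w.layer*(input_matrix_length*input_matrix_length)` offset into `input_matrix` (the channel index `l` seems intended), and the output index, which never involves `l`, so each input channel overwrites the previous result instead of accumulating. A minimal corrected sketch, assuming the file's u8 typedef and the extended Model struct, weights laid out as [kernel][channel][3][3], and one summed output plane per kernel (hypothetical, not the committed code):

    float* convolution(Model model_w, Model model_b, const float* input_matrix, u8 input_matrix_length){
        u8 c_rl = input_matrix_length - 2;  // a valid 3x3 convolution shrinks each side by 2
        float* conv_rlst = (float *) malloc(sizeof (float) * model_w.num_kernels * (c_rl*c_rl));
        for(u8 n = 0; n < model_w.num_kernels; n++){            // one output plane per kernel
            for(u8 row = 0; row < c_rl; row++){
                for(u8 col = 0; col < c_rl; col++){
                    float conv_temp = 0;
                    for(u8 l = 0; l < model_w.layer; l++){      // sum over all input channels
                        for(u8 x = 0; x < 3; x++){
                            for(u8 y = 0; y < 3; y++){
                                conv_temp += input_matrix[l*(input_matrix_length*input_matrix_length)
                                                          + (row+x)*input_matrix_length + (col+y)]
                                           * model_w.array[n*(model_w.layer*3*3) + l*(3*3) + x*3 + y];
                            }
                        }
                    }
                    conv_temp += model_b.array[n];              // per-kernel bias
                    // ReLU: clamp negative values to 0
                    conv_rlst[n*(c_rl*c_rl) + row*c_rl + col] = conv_temp > 0 ? conv_temp : 0;
                }
            }
        }
        return conv_rlst;
    }

In this form the memset also becomes unnecessary, since every output element is written exactly once.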
@@ -138,17 +132,17 @@ int main(){
     //first layer: pad to 102 * 102
     float* expand_matrix_1 = expand(data.array, 100, 1);
     print_rslt(expand_matrix_1, 102, (0*102*102));
-    float* conv_rlst_1 = convolution(32, 3, expand_matrix_1, 102,1);
+    float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102);
     print_rslt(conv_rlst_1, 32, (1*100));
-    float* pool_rslt_1 = pooling(32, 2, conv_rlst_1, 100);
-
-    //second layer: pad to 102 * 102
-    float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
-    print_rslt(expand_matrix_2, 52, (0*52*52));
-    float* conv_rlst_2 = convolution(64, 3, expand_matrix_2, 52,1);
-    print_rslt(conv_rlst_2, 64, (1*50*50));
-    float* pool_rslt_2 = pooling(64, 2, conv_rlst_2, 50);
+//    float* pool_rslt_1 = pooling(32, 2, conv_rlst_1, 100);
+//
+//    //second layer: pad to 102 * 102
+//    float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
+//    print_rslt(expand_matrix_2, 52, (0*52*52));
+//    float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 102);
+//    print_rslt(conv_rlst_2, 64, (1*50*50));
+//    float* pool_rslt_2 = pooling(64, 2, conv_rlst_2, 50);
+//
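A note for when the second layer is re-enabled: the commented-out call now passes 102 as the input side length, but `expand(pool_rslt_1, 50, 32)` yields a 52x52 map, and the pre-rewrite code passed 52. The intended sequence is presumably:

    float* pool_rslt_1 = pooling(32, 2, conv_rlst_1, 100);                            // 100x100 -> 50x50
    float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);                             // pad to 52x52
    float* conv_rlst_2 = convolution(conv2_weight, conv2_bias, expand_matrix_2, 52);  // 52, not 102
    float* pool_rslt_2 = pooling(64, 2, conv_rlst_2, 50);                             // 50x50 -> 25x25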


@@ -341,6 +341,8 @@ void model_init(){
     conv1_weight.name = "conv1_weight";
     conv1_weight.array = modelmym_init(conv1_weight.name);
     conv1_weight.maxlength = CONV1_WEIGHT_ARRSIZE;
+    conv1_weight.layer = 1;
+    conv1_weight.num_kernels = 32;
 
     conv2_bias.name = "conv2_bias";
     conv2_bias.array = modelmym_init(conv2_bias.name);
@@ -349,6 +351,8 @@ void model_init(){
     conv2_weight.name = "conv2_weight";
     conv2_weight.array = modelmym_init(conv2_weight.name);
     conv2_weight.maxlength = CONV2_WEIGHT_ARRSIZE;
+    conv2_weight.layer = 32;
+    conv2_weight.num_kernels = 64;
 
     conv3_bias.name = "conv3_bias";
     conv3_bias.array = modelmym_init(conv3_bias.name);
@@ -357,6 +361,8 @@ void model_init(){
     conv3_weight.name = "conv3_weight";
     conv3_weight.array = modelmym_init(conv3_weight.name);
     conv3_weight.maxlength = CONV3_WEIGHT_ARRSIZE;
+    conv2_weight.layer = 64;
+    conv2_weight.num_kernels = 128;
 
     fc1_bias.name = "fc1_bias";
     fc1_bias.array = modelmym_init(fc1_bias.name);
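The two lines added in this hunk assign to `conv2_weight` a second time, so `conv3_weight.layer` and `conv3_weight.num_kernels` are never set and conv2's values are clobbered. Following the pattern of the two hunks above, this presumably should read:

    conv3_weight.layer = 64;
    conv3_weight.num_kernels = 128;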


@@ -14,6 +14,10 @@ typedef struct {
     float* array;
     u32 maxlength;
     u32 realength;
+    u8 layer;
+    u8 num_kernels;
 } Model;
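With the two new fields, the expected weight count follows directly from the kernel geometry: num_kernels * layer * 3 * 3 floats for the 3x3 kernels used in cnn.c (e.g. 32 * 1 * 9 = 288 for conv1 and 64 * 32 * 9 = 18432 for conv2). A small hypothetical sanity check, callable after model_init(), could catch mismatched dimensions early:

    #include <assert.h>

    // hypothetical helper: the declared kernel geometry must fit the loaded array
    static void model_check(const Model* m){
        assert((u32)m->num_kernels * m->layer * 3 * 3 <= m->maxlength);
    }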