First convolution layer passes verification

Qiea
2024-11-10 20:03:36 +08:00
parent 643eca68a3
commit a477bd8606

cnn.c · 122 changed lines

@@ -3,12 +3,17 @@
// Copy the original matrix into the center of the padded matrix
float* expand(const float* old_matrix, u8 old_matrix_num){
float* new_matrix = (float *)malloc(sizeof(float)*(old_matrix_num+2)*(old_matrix_num+2));
memset(new_matrix, 0, sizeof(float)*(old_matrix_num+2)*(old_matrix_num+2));
for (u8 i = 0; i < old_matrix_num; i++) {
for (u8 j = 0; j < old_matrix_num; j++) {
new_matrix[(i + 1) * (old_matrix_num+2) + (j + 1)] = old_matrix[i * old_matrix_num + j];
float* expand(const float* old_matrix, u8 old_matrix_length, u8 layer){
float* new_matrix = (float *)malloc(sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
memset(new_matrix, 0, sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
for(u8 l=0; l < layer; l++){
for (u8 i = 0; i < old_matrix_length; i++) {
for (u8 j = 0; j < old_matrix_length; j++) {
new_matrix[(i + 1) * (old_matrix_length+2) + (j + 1) +
l * (old_matrix_length+2) * (old_matrix_length+2)]
= old_matrix[i * old_matrix_length + j +
l * (old_matrix_length) * (old_matrix_length)];
}
}
}
return new_matrix;
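For reference, a minimal standalone sketch of the same zero-padding scheme (a one-pixel zero border around an n x n matrix). The u8 typedef is assumed to be uint8_t, as elsewhere in cnn.c:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
typedef uint8_t u8;

int main(void){
    const float src[4] = {1, 2, 3, 4};  // 2x2 input
    u8 n = 2;
    // calloc zero-fills, so only the center copy is needed
    float *dst = (float *)calloc((n+2)*(n+2), sizeof(float));
    for (u8 i = 0; i < n; i++)
        for (u8 j = 0; j < n; j++)
            dst[(i+1)*(n+2) + (j+1)] = src[i*n + j];
    for (u8 i = 0; i < n+2; i++){
        for (u8 j = 0; j < n+2; j++)
            printf("%.0f ", dst[i*(n+2)+j]);
        printf("\n");
    }
    free(dst);
    return 0;
}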
@@ -20,37 +25,47 @@ float* expand(const float* old_matrix, u8 old_matrix_num){
// input_matrix_length: side length of the input image (102)
// side length of the output image: 100
// returns the convolution result
float* convolution(u8 num_kernels, u8 area, const float* input_matrix, u8 input_matrix_length){
float* convolution(u8 num_kernels, u8 area, const float* input_matrix, u8 input_matrix_length, u8 layer){
// Initialize convolution layer parameters
float conv_temp; // temporary variable holding the intermediate convolution sum
float* conv_rlst = (float *) malloc(sizeof (float)*num_kernels*(input_matrix_length-2)*(input_matrix_length-2));
memset(conv_rlst, 0, sizeof (float)*num_kernels*(input_matrix_length-2)*(input_matrix_length-2));
float* conv_rlst = (float *) malloc(sizeof (float)*layer*num_kernels*(input_matrix_length-2)*(input_matrix_length-2));
memset(conv_rlst, 0, sizeof (float)*layer*num_kernels*(input_matrix_length-2)*(input_matrix_length-2));
// Iterate over the convolution kernels (num_kernels of them, one per output channel)
for(u8 n=0; n<num_kernels; n++)
{
// Iterate over each row of the output image; the output side is input_matrix_length-2
for(u8 row=0; row<(input_matrix_length-2); row++)
for(u8 l=0;l<layer;l++){
for(u8 n=0; n<num_kernels; n++)
{
// Iterate over each column of the output image
for(u8 col=0; col<(input_matrix_length-2); col++)
// Iterate over each row of the output image; the output side is input_matrix_length-2
for(u8 row=0; row<(input_matrix_length-2); row++)
{
conv_temp = 0; // each output pixel starts at 0
// Perform the area x area convolution
for(u8 x=0; x<area; x++)
// Iterate over each column of the output image
for(u8 col=0; col<(input_matrix_length-2); col++)
{
for(u8 y=0; y<area; y++)
conv_temp = 0; // each output pixel starts at 0
// Perform the area x area convolution
for(u8 x=0; x<area; x++)
{
// Multiply the corresponding input pixel by the kernel weight and accumulate into conv_temp
conv_temp += input_matrix[row*102+col+x*102+y] * conv1_weight.array[x*area+y+n*(area*area)];
for(u8 y=0; y<area; y++)
{
// Multiply the corresponding input pixel by the kernel weight and accumulate into conv_temp
conv_temp += input_matrix[row*input_matrix_length+col+x*input_matrix_length+y +
0 /* per-layer input offset placeholder, still 0: only the first layer is wired up */
] * conv1_weight.array[x*area+y+n*(area*area)+
0 /* per-layer weight offset placeholder, still 0 */
];
}
}
// Add the bias of the corresponding kernel
conv_temp += conv1_bias.array[n];
// ReLU activation: values below 0 are set to 0
if(conv_temp > 0)
conv_rlst[row*(input_matrix_length-2)+col+n*(input_matrix_length-2)*(input_matrix_length-2)+
0 /* per-layer output offset placeholder, still 0 */
] = conv_temp; // convolution result is positive: store it
else
conv_rlst[row*(input_matrix_length-2)+col+n*(input_matrix_length-2)*(input_matrix_length-2)+
0 /* per-layer output offset placeholder, still 0 */
] = 0; // otherwise store 0
}
// Add the bias of the corresponding kernel
conv_temp += conv1_bias.array[n];
// ReLU activation: values below 0 are set to 0
if(conv_temp > 0)
conv_rlst[row*(input_matrix_length-2)+col+n*(input_matrix_length-2)*(input_matrix_length-2)] = conv_temp; // convolution result is positive: store it
else
conv_rlst[row*(input_matrix_length-2)+col+n*(input_matrix_length-2)*(input_matrix_length-2)] = 0; // otherwise store 0
}
}
}
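As a hedged cross-check of the indexing above, one output pixel of the valid convolution can be computed in isolation. `weights` here stands in for the slice of conv1_weight.array belonging to kernel n, and the ReLU matches the branch in convolution():

#include <stdint.h>
typedef uint8_t u8;

// One output pixel of a stride-1 valid convolution over a padded input of
// side L (output side L-2), followed by the same ReLU as in convolution().
float conv_pixel(const float *in, u8 L, const float *weights, float bias,
                 u8 area, u8 row, u8 col){
    float acc = 0.0f;
    for (u8 x = 0; x < area; x++)
        for (u8 y = 0; y < area; y++)
            acc += in[(row + x) * L + (col + y)] * weights[x * area + y];
    acc += bias;
    return acc > 0 ? acc : 0;
}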
@@ -98,7 +113,17 @@ float* pooling(u8 num_kernels, u8 area, const float* input_matrix, u8 input_matr
}
void print_rslt(float* rslt, u8 input_matrix_length, u32 length){
int _tmp = 0;
printf("[0:0]");
for (int i = 0; i < length; i++) {
printf("%f ",rslt[i]);
if ((i + 1) % input_matrix_length == 0) {
printf("\n[%d:%d]",++_tmp,i+1);
}
}
printf("\r\n\r\n");
}
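A quick self-contained check of print_rslt's row tagging, assuming the u8/u32 typedefs used elsewhere in cnn.c; a 3x3 array prints as three rows, each followed by a "[row:count]" tag:

#include <stdio.h>
#include <stdint.h>
typedef uint8_t u8;
typedef uint32_t u32;

// Same logic as print_rslt above, duplicated so the sketch compiles alone.
void print_rslt(float* rslt, u8 input_matrix_length, u32 length){
    int _tmp = 0;
    printf("[0:0]");
    for (u32 i = 0; i < length; i++) {
        printf("%f ", rslt[i]);
        if ((i + 1) % input_matrix_length == 0)
            printf("\n[%d:%d]", ++_tmp, i + 1);
    }
    printf("\r\n\r\n");
}

int main(void){
    float a[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    print_rslt(a, 3, 9);
    return 0;
}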
@@ -110,32 +135,19 @@ int main(){
model_write("all");
model_switchdata("data");
// pad to 102 * 102
float* expand1_matrix = expand(data.array, 100);
float* conv_rlst = convolution(32,3,expand1_matrix,102);
for (int i = 0; i < 1000; i++) {
printf("%f ", conv_rlst[i]);
if ((i + 1) % 102 == 0) {
printf("\n");
}
}
printf("\r\n\r\n");
float* pool_rslt = pooling(32,2,conv_rlst,100);
for (int i = 0; i < 1000; i++) {
printf("%f ", pool_rslt[i]);
if ((i + 1) % 102 == 0) {
printf("\n");
}
}
printf("\r\n\r\n");
// Layer 1: pad to 102 * 102
float* expand_matrix_1 = expand(data.array, 100, 1);
print_rslt(expand_matrix_1, 102, (0*102*102));
float* conv_rlst_1 = convolution(32, 3, expand_matrix_1, 102,1);
print_rslt(conv_rlst_1, 32, (1*100));
float* pool_rslt_1 = pooling(32, 2, conv_rlst_1, 100);
// Layer 2: pad to 52 * 52
float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
print_rslt(expand_matrix_2, 52, (0*52*52));
float* conv_rlst_2 = convolution(64, 3, expand_matrix_2, 52,1);
print_rslt(conv_rlst_2, 64, (1*50*50));
float* pool_rslt_2 = pooling(64, 2, conv_rlst_2, 50);
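The sizes flowing through main() are easy to lose track of; a sketch under the assumption of 3x3 valid convolutions (side shrinks by 2) and 2x2 stride-2 pooling (side halves):

#include <stdio.h>
int main(void){
    int side = 100;
    side += 2;  printf("pad1:  %d\n", side);  // 102
    side -= 2;  printf("conv1: %d\n", side);  // 100
    side /= 2;  printf("pool1: %d\n", side);  // 50
    side += 2;  printf("pad2:  %d\n", side);  // 52
    side -= 2;  printf("conv2: %d\n", side);  // 50
    side /= 2;  printf("pool2: %d\n", side);  // 25
    return 0;
}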
@@ -169,7 +181,7 @@ int main(){
// // Matrix multiplication: flatten the pooling output into a 1-D vector, then take its dot product with the fully connected layer weights
// for(int i=0; i<4320; i++)
// {
// affine1_temp = affine1_temp + pool_rslt[i] * affine1_w[i+4320*n];
// affine1_temp = affine1_temp + pool_rslt_1[i] * affine1_w[i+4320*n];
// }
//
// // Add the bias of the corresponding neuron
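For when the commented block is revived, a sketch of the fully connected step it describes. affine1_b and out_dim are hypothetical names; only affine1_w and the 4320-element flattened input appear in the original:

// Fully connected forward pass: out[n] = dot(pool_rslt, row n of affine1_w) + bias.
// affine1_w is assumed laid out as in the original indexing, i + 4320*n.
void affine1(const float *pool_rslt, const float *affine1_w,
             const float *affine1_b, float *out, int out_dim){
    for (int n = 0; n < out_dim; n++) {
        float acc = 0.0f;
        for (int i = 0; i < 4320; i++)
            acc += pool_rslt[i] * affine1_w[i + 4320 * n];  // dot product
        out[n] = acc + affine1_b[n];  // hypothetical per-neuron bias
    }
}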