tmp

cnn.c (61 changed lines)
@@ -3,12 +3,12 @@
 // Copy the original matrix into the center of the padded matrix
-float* expand(const float* old_matrix, u8 old_matrix_length, u8 layer){
+float* expand(const float* old_matrix, int old_matrix_length, int layer){
     float* new_matrix = (float *)malloc(sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
     memset(new_matrix, 0, sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
-    for(u8 l=0; l < layer; l++){
-        for (u8 i = 0; i < old_matrix_length; i++) {
-            for (u8 j = 0; j < old_matrix_length; j++) {
+    for(int l=0; l < layer; l++){
+        for (int i = 0; i < old_matrix_length; i++) {
+            for (int j = 0; j < old_matrix_length; j++) {
                 new_matrix[(i + 1) * (old_matrix_length+2) + (j + 1) +
                            l * (old_matrix_length+2) * (old_matrix_length+2)]
                     = old_matrix[i * old_matrix_length + j +
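Note: the expand() hunk above only widens the parameter and loop-counter types from u8 to int; the zero-padding itself is unchanged. As a sanity reference, here is a minimal standalone sketch of the same one-pixel zero-padding (the name pad_one and the row-wise memcpy are my own, not from this commit):

    #include <stdlib.h>
    #include <string.h>

    /* Zero-pad each layer of an n*n matrix by one pixel on every side,
       using the same index layout as expand() above. Caller frees. */
    static float* pad_one(const float* src, int n, int layers) {
        int m = n + 2;                            /* padded side length */
        float* dst = calloc((size_t)layers * m * m, sizeof(float));
        if (!dst) return NULL;
        for (int l = 0; l < layers; l++)
            for (int i = 0; i < n; i++)
                memcpy(&dst[l*m*m + (i+1)*m + 1], /* row i, inside the border */
                       &src[l*n*n + i*n],
                       (size_t)n * sizeof(float));
        return dst;
    }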
@@ -24,28 +24,33 @@ float* expand(const float* old_matrix, u8 old_matrix_length, u8 layer){
 //input_matrix_length: side length of the input image (102)
 //cr_l: side length of the output image (100)
 //returns the convolution result
-float* convolution(Model model_w, Model model_b, const float* input_matrix, u8 input_matrix_length){
+float* convolution(Model model_w, Model model_b, const float* input_matrix, int input_matrix_length){
     // Initialize the convolution-layer parameters
-    u8 im_l = input_matrix_length;
-    u8 cr_l = input_matrix_length - 2;
+    int _debug=0;
+    int im_l = input_matrix_length;
+    int cr_l = input_matrix_length - 2;
     float conv_temp; // temporary variable for the intermediate convolution result
     float* conv_rlst = (float *) malloc(sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
     memset(conv_rlst, 0, sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
     // Iterate over the 30 convolution kernels (assuming 30 channels)

-    for(u8 l=0;l<model_w.layer;l++){
-        for(u8 n=0; n<model_w.num_kernels; n++){
-            for(u8 row = 0; row < cr_l; row++) {
-                for (u8 col = 0; col < cr_l; col++) {
+    for(int l=0;l<model_w.layer;l++){
+        for(int n=0; n<model_w.num_kernels; n++){
+            for(int row = 0; row < cr_l; row++) {
+                for (int col = 0; col < cr_l; col++) {
                     conv_temp = 0; // each output pixel starts at 0
                     // Perform the 3x3 convolution
-                    for (u8 x = 0; x < 3; x++) {
-                        for (u8 y = 0; y < 3; y++) {
+                    for (int x = 0; x < 3; x++) {
+                        for (int y = 0; y < 3; y++) {
                             // Multiply the matching input pixel by the kernel weight and accumulate into conv_temp
-                            conv_temp += input_matrix[(l*(im_l*im_l)) + (row*im_l) + (col) + (x*im_l) + (y)] *
-                                         model_w.array[(x*3) + y + (n*(3*3))];
+                            conv_temp += input_matrix[(l*(im_l*im_l)) + (row*im_l+col) + (x*im_l+y)] *
+                                         model_w.array[(l+n*(model_w.layer))*(3*3) + (x*3) + y];
                         }
                     }
+                    if(_debug < 10 && strcmp(model_w.name, "conv2_weight") == 0){
+                        printf("[%d]%f\r\n",_debug,conv_temp);
+                    }
+                    _debug++;
                     // Add the bias of the corresponding kernel
                     conv_temp += model_b.array[n];
                     // Activation: ReLU (values below 0 are set to 0)
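Note: the substantive change in this hunk is the weight index. The old term (x*3) + y + (n*(3*3)) never used the input layer l, so every input channel of kernel n was convolved with the same 3x3 slice; the new term (l+n*(model_w.layer))*(3*3) + (x*3) + y picks a distinct slice per (layer, kernel) pair. A sketch of the new offset, assuming weights are stored as model_w.layer contiguous 3x3 slices per kernel as the index implies:

    /* Offset of weight (x, y) for input channel l of kernel n. */
    static int w_off(int l, int n, int num_layers, int x, int y) {
        return (l + n * num_layers) * 9 + x * 3 + y;
    }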
@@ -127,15 +132,15 @@ int main(){
     float* expand_matrix_1 = expand(data.array, 100, 1);
     // print_rslt(expand_matrix_1, 102, (1*102*102));
     float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102);
-    // print_rslt(conv_rlst_1, 100, (32*100*100));
+    // print_rslt(conv_rlst_1, 100*0.01, (0.01*10*100));
     float* pool_rslt_1 = pooling(conv1_weight, conv_rlst_1, 100);
-    // print_rslt(pool_rslt_1, 50, (32*50*50));
+    // print_rslt(pool_rslt_1, 50, (1*50*50));

     //Second layer: pad to 32 * 52 * 52
     float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
     // print_rslt(expand_matrix_2, 52, (1*52*52));
     float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52);
-    // print_rslt(conv_rlst_2, 50, (1*50*50));
+    // print_rslt(conv_rlst_2, 50*0.02, (0.02*10*50));
     float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50);
     // print_rslt(pool_rslt_2, 25, (1*25*25));

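Note: the sizes threaded through main() follow from pad-by-1, a 3x3 valid convolution, and pooling that halves the side (the halving is inferred from 100 -> 50 and 50 -> 25 in this diff, not shown explicitly):

    /* layer 1: 100 --pad--> 102 --3x3 conv--> 100 --pool--> 50
       layer 2:  50 --pad-->  52 --3x3 conv-->  50 --pool--> 25 */
    static int conv_out(int padded_side) { return padded_side - 2; } /* 3x3, stride 1 */
    static int pool_out(int side)        { return side / 2; }       /* 2x2, stride 2 assumed */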
@@ -143,9 +148,9 @@ int main(){
     float* expand_matrix_3 = expand(pool_rslt_2, 25, 64);
     // print_rslt(expand_matrix_2, 52, (1*52*52));
     float* conv_rlst_3 = convolution(conv3_weight,conv3_bias,expand_matrix_3, 27);
-    print_rslt(conv_rlst_3, 25, (1*25*25));
+    // print_rslt(conv_rlst_3, 25, (1*25*25));
     float* pool_rslt_3 = pooling(conv3_weight, conv_rlst_3, 25);
-    print_rslt(pool_rslt_3, 12, (1*12*12));
+    // print_rslt(pool_rslt_3, 12, (1*12*12));


@@ -167,7 +172,7 @@ int main(){
     float affine1_temp; // temporary variable for the fully connected layer's intermediate result

     // Iterate over the 128 neurons (assuming a 128-neuron hidden layer)
-    for(int n=0; n<128; n++)
+    for(u8 n=0; n<128; n++)
     {
         affine1_temp = 0; // each neuron's output starts at 0

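Note: u8 is not defined in the hunks shown; assuming it is the usual 8-bit alias, a u8 counter is safe for the bounds used here (128 and 7 both fit in 0..255), but would loop forever against a bound of 256:

    #include <stdint.h>
    typedef uint8_t u8;   /* assumption: matches the project's u8 */
    /* for(u8 i=0; i<256; i++) never terminates: i wraps 255 -> 0. */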
@@ -187,24 +192,25 @@ int main(){
             affine1_rslt[n] = 0; // otherwise store 0
         }

-    print_rslt(affine1_rslt,1,128);
+    // print_rslt(affine1_rslt,1,128);



     float affine2_temp; // temporary variable for the output layer's intermediate result
-    float affine2_rslt[7]; // stores the output-layer result (assuming 7 output neurons)
+    float* affine2_rslt = (float *) malloc(sizeof(float)*7);
+    memset(affine2_rslt, 0, 7);

     // Find the maximum of the output layer
     float temp = -100; // holds the running maximum, initialized to a very small value
-    int predict_num; // the predicted digit (index of the maximum)
+    u8 predict_num; // the predicted digit (index of the maximum)

     // Iterate over the 7 output neurons (assuming 7 classes)
-    for(int n=0; n<7; n++)
+    for(u8 n=0; n<7; n++)
     {
         affine2_temp = 0; // current neuron's output starts at 0

         // Matrix multiply: dot the hidden-layer output with the output-layer weights
-        for(int i=0; i<128; i++)
+        for(u8 i=0; i<128; i++)
         {
             affine2_temp = affine2_temp + fc2_weight.array[i+100*n] * affine1_rslt[i];
         }
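Note: memset counts bytes, so the added memset(affine2_rslt, 0, 7) clears only 7 of the 28 bytes behind the 7 floats. A full clear would be:

    float* affine2_rslt = (float *) malloc(sizeof(float)*7);
    memset(affine2_rslt, 0, sizeof(float)*7);   /* or: calloc(7, sizeof(float)) */

The stride in fc2_weight.array[i+100*n] also looks suspect next to the 128-wide hidden layer, but the weight layout is not visible in this diff.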
@@ -221,8 +227,7 @@ int main(){
         }
     }

-    print_rslt(affine2_rslt,1,7);
-
+    print_rslt(affine2_rslt,7,7);
     printf("RES is:%d",predict_num+1);

     return 0;
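Note: the argmax that sets predict_num falls between the hunks shown. A hedged sketch of what the surrounding lines imply, returning the 1-based label printed by "RES is:%d" (the function name predict_class is mine):

    /* Index of the largest of count scores, as a 1-based class label. */
    static int predict_class(const float* scores, int count) {
        int best = 0;
        for (int n = 1; n < count; n++)
            if (scores[n] > scores[best]) best = n;
        return best + 1;
    }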