Qiea
2024-12-19 14:06:05 +08:00
parent 1c0f3b676f
commit dcd484c1bd
58 changed files with 14859 additions and 863 deletions


@@ -18,7 +18,7 @@ void print_rslt(float* rslt, u8 input_matrix_length, u32 length){
printf("\r\n");
}
// Copy the original matrix into the center of the padded matrix
float* expand(const float* old_matrix, int old_matrix_length, int layer){
float* new_matrix = (float *)mymalloc(SRAMEX,sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
memset(new_matrix, 0, sizeof(float)*layer*(old_matrix_length+2)*(old_matrix_length+2));
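// The padded matrix has side old_matrix_length+2 per layer: a one-element zero border on every side, so a following 3x3 convolution keeps the original side length.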
@@ -35,34 +35,34 @@ float* expand(const float* old_matrix, int old_matrix_length, int layer){
return new_matrix;
}
//model  the model name
//input_matrix  the input image
//input_matrix_length  side length of the input image: 102
//c_rl  side length of the output image: 100
//returns the result of the convolution
float* convolution(Model model_w, Model model_b, const float* input_matrix, int input_matrix_length){
// Initialize the convolution layer parameters
int im_l = input_matrix_length;
int cr_l = input_matrix_length - 2;
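// For a 3x3 kernel with stride 1 and no extra padding, the output side length is the input side length minus 2.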
float conv_temp; // Temporary variable holding the intermediate result of the convolution
//Array used before merging, sized 32*64*50*50 (second layer)
float* _conv_rlst = (float *) mymalloc(SRAMEX,sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
memset(_conv_rlst, 0, sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
//Array holding the sub-images after merging
float* conv_rlst = (float *) mymalloc(SRAMEX,sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
memset(conv_rlst, 0, sizeof (float) * model_w.num_kernels * (cr_l * cr_l));
// Loop over every input channel and convolution kernel
for(int c=0; c < model_w.channel; c++){
for(int k=0; k < model_w.num_kernels; k++){
for(int row = 0; row < cr_l; row++) {
for (int col = 0; col < cr_l; col++) {
conv_temp = 0; // Initialize each output pixel to 0
// Perform the 3x3 convolution
for (int x = 0; x < 3; x++) {
for (int y = 0; y < 3; y++) {
// Multiply the corresponding input pixel by the kernel weight and accumulate into conv_temp
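// Index layout: input_matrix is flattened as [channel][row+x][col+y] with side im_l; model_w.array as [kernel][channel][3][3].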
conv_temp += input_matrix[(c*im_l*im_l) + (row*(im_l)+col) + (x*(im_l)+y)]
* model_w.array[((c+k*model_w.channel)*3*3) + (x*3+y)];
}
@@ -71,7 +71,7 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
}
}
}
//Merge the sub-images
for(int k=0; k < model_w.num_kernels; k++) {
for (int row = 0; row < cr_l; row++) {
for (int col = 0; col < cr_l; col++) {
@@ -84,13 +84,13 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
for (int row = 0; row < cr_l; row++) {
for (int col = 0; col < cr_l; col++) {
conv_temp = conv_rlst[(k * cr_l * cr_l) + (row * cr_l + col)];
// Add the bias of the corresponding convolution kernel
conv_temp += model_b.array[k];
// ReLU activation (values below 0 are set to 0)
if (conv_temp > 0)
conv_rlst[(k * (cr_l * cr_l)) + (row * cr_l) + (col)] = conv_temp; // If the convolution result is greater than 0, store it in the result array
else
conv_rlst[(k * (cr_l * cr_l)) + (row * cr_l) + (col)] = 0; // Otherwise store 0
}
}
}
@@ -99,38 +99,38 @@ float* convolution(Model model_w, Model model_b, const float* input_matrix, int
return conv_rlst;
}
//num_kernels  number of convolution kernels: 32
//area  pooling area: 2*2
//input_matrix  the input image
//input_matrix_length  side length of the input image: 100
//side length of the output image: 50
//returns the result of the pooling
float* pooling(Model model_w, const float* input_matrix, u8 input_matrix_length){
u8 im_l = input_matrix_length;
float pool_temp = 0; // Temporary variable holding the maximum value during pooling
float* pool_rslt = (float *) mymalloc(SRAMEX,sizeof (float)*model_w.num_kernels*im_l*im_l);
memset(pool_rslt, 0, sizeof (float)*model_w.num_kernels*im_l*im_l);
// Iterate over the channels (the same number as the convolution kernels)
for(u8 n=0; n<model_w.num_kernels; n++)
{
// Iterate over the rows of the input image with a stride of 2 (2x2 pooling window)
for(u8 row=0; row<im_l; row=row+2)
{
// Iterate over the columns of the input image with a stride of 2
for(u8 col=0; col<im_l; col=col+2)
{
pool_temp = 0; // Reset the maximum of each pooling region to 0
// Perform 2x2 max pooling
for(u8 x=0; x<2; x++)
{
for(u8 y=0; y<2; y++)
{
// Update the maximum of the current pooling region
if(pool_temp <= input_matrix[row*im_l+col+x*im_l+y+n*(im_l*im_l)])
pool_temp = input_matrix[row*im_l+col+x*im_l+y+n*(im_l*im_l)];
}
}
// Store the maximum in the pooling result array
pool_rslt[(row/2)*(im_l/2)+col/2+n*((im_l/2)*(im_l/2))] = pool_temp;
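// The 2x2 window at (row,col) of channel n maps to element (row/2, col/2) of an (im_l/2) x (im_l/2) output plane.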
}
}
@@ -139,29 +139,29 @@ float* pooling(Model model_w, const float* input_matrix, u8 input_matrix_length)
}
float* hidden(const float* input_matrix){
float affine1_temp; // Temporary variable holding the intermediate result of the fully connected layer
float *affine1_rslt = (float *) mymalloc(SRAMEX,sizeof(float)*128);
memset(affine1_rslt, 0, sizeof(float)*128);
// Iterate over the 128 neurons of the hidden layer
for(u8 n=0; n<128; n++)
{
affine1_temp = 0; // Initialize each neuron's output to 0
// Matrix multiplication: the pooling output, flattened into a 1-D vector, is dotted with the fully connected layer weights
for(int i=0; i<(128*12*12); i++)
{
affine1_temp = affine1_temp + input_matrix[i] * fc1_weight.array[i+(128*12*12)*n];
}
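// fc1_weight.array is laid out as [neuron][input]: the weights of neuron n occupy a contiguous block of 128*12*12 floats.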
// Add the bias of the corresponding neuron
affine1_temp = affine1_temp + fc1_bias.array[n];
// ReLU activation (values below 0 are set to 0)
if(affine1_temp > 0)
affine1_rslt[n] = affine1_temp; // If the result is greater than 0, store it in the result array
else
affine1_rslt[n] = 0; // Otherwise store 0
}
// print_rslt(affine1_rslt,1,128);
@@ -171,24 +171,24 @@ float* hidden(const float* input_matrix){
float* output(Model model_w, const float* input_matrix){
u8 num = model_w.num_kernels;
float affine2_temp; // Temporary variable holding the intermediate result of the output layer
float *affine2_rslt = (float *) mymalloc(SRAMEX,(sizeof(float)*num));
memset(affine2_rslt, 0, sizeof(float)*num);
// Iterate over the output neurons (assuming 10 classes)
for(int n=0; n<num; n++)
{
affine2_temp = 0; // Initialize the current neuron's output to 0
// Matrix multiplication: dot product of the hidden layer output with the output layer weights
for(int i=0; i<128; i++)
{
affine2_temp = affine2_temp + fc2_weight.array[i+128*n] * input_matrix[i];
}
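// fc2_weight.array is laid out as [neuron][input], 128 weights per output neuron.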
// Add the bias of the corresponding neuron
affine2_temp = affine2_temp + fc2_weight.array[n];
affine2_rslt[n] = affine2_temp; // Store the output layer result
}
return affine2_rslt;
@@ -306,8 +306,8 @@ u8 calculate_layer(Model model_w, float *input_array){
{
if(max_temp <= input_array[n])
{
max_temp = input_array[n]; // Update the maximum value
predict_num = n; // Record the class index of the maximum value
}
}
print_rslt(input_array,input_num,input_num);
@@ -319,20 +319,20 @@ void cnn_run(){
float value[3] = {0};
calculate_statistics(data,&value[0]);
if (check_threshold(data,&value[0])){
//Initialization: generate a 100 * 100 matrix
float* _data = generateMatrix(data,&value[0]);
char kind[50];
DEBUG_PRINTF("<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ŵ磡<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֵ:%f ƽ<><C6BD>ֵ:%f <20><>׼<EFBFBD><D7BC>:%f\r\n",value[0],value[1],value[2]);
DEBUG_PRINTF("<EFBFBD><EFBFBD>ԭʼ<EFBFBD><EFBFBD><EFBFBD>ݴ<EFBFBD><EFBFBD><EFBFBD>SD<EFBFBD><EFBFBD><EFBFBD><EFBFBD>\r\n");
DEBUG_PRINTF("检测到放电!最大值:%f 平均值:%f 标准差:%f\r\n",value[0],value[1],value[2]);
DEBUG_PRINTF("将原始数据存入SD卡中\r\n");
SDRAM_TO_SD();
char* _uuid = uuid();
CSTX_4G_RegALiYunIOT(1); //Subscribe to the thing model, used for reporting data
send_blocks(_data,_uuid);
// CSTX_4G_RegALiYunIOT(1); //Subscribe to the thing model, used for reporting data
// send_blocks(_data,_uuid);
//Layer 1: pad to 102 * 102
DEBUG_PRINTF("Layer 1 started\n");
float* expand_matrix_1 = expand(_data, 100, 1);
float* conv_rlst_1 = convolution(conv1_weight,conv1_bias,expand_matrix_1, 102);
float* pool_rslt_1 = pooling(conv1_weight, conv_rlst_1, 100);
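// Layer 1 shapes: 100x100 input -> 102x102 padded -> 100x100 after the 3x3 convolution -> 50x50 after 2x2 pooling.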
@@ -344,8 +344,8 @@ void cnn_run(){
myfree(SRAMEX,conv_rlst_1);
conv_rlst_1 = NULL;
//Layer 2: pad to 32 * 52 * 52
DEBUG_PRINTF("Layer 2 started\n");
float* expand_matrix_2 = expand(pool_rslt_1, 50, 32);
float* conv_rlst_2 = convolution(conv2_weight,conv2_bias,expand_matrix_2, 52);
float* pool_rslt_2 = pooling(conv2_weight, conv_rlst_2, 50);
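// Layer 2 shapes: 32 channels of 50x50 -> 52x52 padded -> 50x50 after convolution -> 25x25 after 2x2 pooling.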
@@ -357,8 +357,8 @@ void cnn_run(){
myfree(SRAMEX,conv_rlst_2);
conv_rlst_2 = NULL;
//Layer 3: pad to 64 * 27 * 27
DEBUG_PRINTF("Layer 3 started\n");
float* expand_matrix_3 = expand(pool_rslt_2, 25, 64);
float* conv_rlst_3 = convolution(conv3_weight,conv3_bias,expand_matrix_3, 27);
float* pool_rslt_3 = pooling(conv3_weight, conv_rlst_3, 25);
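// Layer 3 shapes: 64 channels of 25x25 -> 27x27 padded -> 25x25 after convolution -> 2x2-pooled planes indexed as 12x12 (25/2 truncated) for the fully connected layer.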
@@ -370,18 +370,18 @@ void cnn_run(){
myfree(SRAMEX,conv_rlst_3);
conv_rlst_3 = NULL;
DEBUG_PRINTF("<EFBFBD><EFBFBD><EFBFBD>IJ㿪ʼ\n");
DEBUG_PRINTF("第四层开始\n");
float* affine1_rslt = hidden(pool_rslt_3);
DEBUG_PRINTF("<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ʼ\r\n");
DEBUG_PRINTF("第五层开始\r\n");
float* affine2_rslt = output(fc2_weight, affine1_rslt);
DEBUG_PRINTF("<EFBFBD><EFBFBD><EFBFBD>ʣ<EFBFBD>%f\r\n",calculate_probabilities(fc2_weight, affine2_rslt));
DEBUG_PRINTF("概率:%f\r\n",calculate_probabilities(fc2_weight, affine2_rslt));
DEBUG_PRINTF("Label is:%d\r\n",calculate_layer(fc2_weight, affine2_rslt));
snprintf(kind, 50,"UUID:%s P:%f Label:%d", _uuid, calculate_probabilities(fc2_weight, affine2_rslt), calculate_layer(fc2_weight, affine2_rslt));
CSTX_4G_ALiYunIOTSenddata_string(kind,"kind_string");
// snprintf(kind, 50,"UUID:%s P:%f Label:%d", _uuid, calculate_probabilities(fc2_weight, affine2_rslt), calculate_layer(fc2_weight, affine2_rslt));
// CSTX_4G_ALiYunIOTSenddata_string(kind,"kind_string");
CSTX_4G_RegALiYunIOT(0); //Receive the downlink time / signal
// CSTX_4G_RegALiYunIOT(0); //Receive the downlink time / signal
myfree(SRAMEX,pool_rslt_3);
pool_rslt_3 = NULL;
@@ -391,6 +391,6 @@ void cnn_run(){
affine2_rslt = NULL;
} else{
DEBUG_PRINTF("δ<EFBFBD>ŵ磡<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֵ:%f ƽ<><C6BD>ֵ:%f <20><>׼<EFBFBD><D7BC>:%f\r\n",value[0],value[1],value[2]);
DEBUG_PRINTF("未放电!最大值:%f 平均值:%f 标准差:%f\r\n",value[0],value[1],value[2]);
}
}