Components and supplies
Arduino UNO
Any Arduino or Arduino compatible board
Apps and platforms
AIfES
Code::Blocks
Wokwi
Arduino IDE
Project description
Code
main.c
c_cpp
This is the main.c file for using AIfES in Code::Blocks.
1/* 2 www.aifes.ai 3 https://github.com/Fraunhofer-IMS/AIfES_for_Arduino 4 Copyright (C) 2020-2021 Fraunhofer Institute for Microelectronic Circuits and Systems. All rights reserved. 5 6 AIfES is free software: you can redistribute it and/or modify 7 it under the terms of the GNU General Public License as published by 8 the Free Software Foundation, either version 3 of the License, or 9 (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 GNU General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program. If not, see <https://www.gnu.org/licenses/>. 18 19 AIfES XOR training demo 20 -------------------- 21 22 Versions: 23 1.0.0 Initial version 24 25 The sketch shows an example of how a neural network is trained from scratch in AIfES using training data. 26 As in the example "0_XOR_Inference", an XOR gate is mapped here using a neural network. 27 The 4 different states of an XOR gate are fed in as training data here. 28 The network structure is 2-3(Sigmoid)-1(Sigmoid) and Sigmoid is used as activation function. 29 In the example, the weights are initialized randomly in a range of values from -2 to +2. The Gotrot initialization was inserted as an alternative and commented out. 30 For the training the ADAM Optimizer is used, the SGD Optimizer was commented out. 31 The optimizer performs a batch training over 100 epochs. 32 The calculation is done in float 32. 
33 34 XOR truth table / training data 35 Input Output 36 0 0 0 37 0 1 1 38 1 0 1 39 1 1 0 40 */ 41 42#include <stdio.h> 43#include <stdlib.h> 44#include <windows.h> 45#include <time.h> 46 47#include "aifes.h" 48 49#define INPUTS 2 50#define NEURONS 3 51#define OUTPUTS 1 52 53//For AIfES Express 54#define DATASETS 4 55#define FNN_3_LAYERS 3 56#define PRINT_INTERVAL 10 57uint32_t global_epoch_counter = 0; 58 59 60 61void AIfES_demo() 62{ 63 printf("AIfES Demo:\ 64\ 65"); 66 67 uint32_t i; 68 69 // Tensor for the training data 70 // Corresponds to the XOR truth table 71 float input_data[] = {0.0f, 0.0f, 72 0.0f, 1.0f, 73 1.0f, 0.0f, 74 1.0f, 1.0f}; 75 // Two dimensional(2D)array example 76 // The "printf" output must then be modified 77 /* 78 float input_data[4][2] = { 79 {0.0f, 0.0f}, 80 {0.0f, 1.0f}, 81 {1.0f, 0.0f}, 82 {1.0f, 1.0f} 83 }; 84 */ 85 uint16_t input_shape[] = {4, INPUTS}; // Definition of the input shape 86 aitensor_t input_tensor = AITENSOR_2D_F32(input_shape, input_data); // Creation of the input AIfES tensor with two dimensions and data type F32 (float32) 87 88 // Tensor for the target data 89 // Corresponds to the XOR truth table 90 float target_data[] = {0.0f, 91 1.0f, 92 1.0f, 93 0.0f}; 94 uint16_t target_shape[] = {4, OUTPUTS}; // Definition of the output shape 95 aitensor_t target_tensor = AITENSOR_2D_F32(target_shape, target_data); // Assign the target_data array to the tensor. It expects a pointer to the array where the data is stored 96 97 // Tensor for the output data (result after training). 
98 // Same configuration as for the target tensor 99 // Corresponds to the XOR truth table 100 float output_data[4]; 101 uint16_t output_shape[] = {4, OUTPUTS}; 102 aitensor_t output_tensor = AITENSOR_2D_F32(output_shape, output_data); 103 104 // ---------------------------------- Layer definition --------------------------------------- 105 106 // Input layer 107 uint16_t input_layer_shape[] = {1, INPUTS}; // Definition of the input layer shape (Must fit to the input tensor) 108 109 ailayer_input_f32_t input_layer = AILAYER_INPUT_F32_A( /*input dimension=*/ 2, /*input shape=*/ input_layer_shape); // Creation of the AIfES input layer 110 ailayer_dense_f32_t dense_layer_1 = AILAYER_DENSE_F32_A( /*neurons=*/ 3); // Creation of the AIfES hidden dense layer with 3 neurons 111 ailayer_sigmoid_f32_t sigmoid_layer_1 = AILAYER_SIGMOID_F32_A(); // Hidden activation function 112 ailayer_dense_f32_t dense_layer_2 = AILAYER_DENSE_F32_A( /*neurons=*/ 1); // Creation of the AIfES output dense layer with 1 neuron 113 ailayer_sigmoid_f32_t sigmoid_layer_2 = AILAYER_SIGMOID_F32_A(); // Output activation function 114 115 ailoss_mse_t mse_loss; //Loss: mean squared error 116 117 // --------------------------- Define the structure of the model ---------------------------- 118 119 aimodel_t model; // AIfES model 120 ailayer_t *x; // Layer object from AIfES, contains the layers 121 122 // Passing the layers to the AIfES model 123 model.input_layer = ailayer_input_f32_default(&input_layer); 124 x = ailayer_dense_f32_default(&dense_layer_1, model.input_layer); 125 x = ailayer_sigmoid_f32_default(&sigmoid_layer_1, x); 126 x = ailayer_dense_f32_default(&dense_layer_2, x); 127 x = ailayer_sigmoid_f32_default(&sigmoid_layer_2, x); 128 model.output_layer = x; 129 130 // Add the loss to the AIfES model 131 model.loss = ailoss_mse_f32_default(&mse_loss, model.output_layer); 132 133 aialgo_compile_model(&model); // Compile the AIfES model 134 135 // ------------------------------- Allocate memory 
for the parameters of the model ------------------------------ 136 uint32_t parameter_memory_size = aialgo_sizeof_parameter_memory(&model); 137 printf("Required memory for parameter (Weights, Bias, ...):"); 138 printf("%d",parameter_memory_size); 139 printf("Byte\ 140"); 141 142 void *parameter_memory = malloc(parameter_memory_size); 143 144 // Distribute the memory to the trainable parameters of the model 145 aialgo_distribute_parameter_memory(&model, parameter_memory, parameter_memory_size); 146 147 // ------------------------------- Initialize the parameters ------------------------------ 148 149 150 // Alternative weight initialisation 151 /* 152 aimath_f32_default_init_glorot_uniform(&dense_layer_1.weights); 153 aimath_f32_default_init_zeros(&dense_layer_1.bias); 154 aimath_f32_default_init_glorot_uniform(&dense_layer_3.weights); 155 aimath_f32_default_init_zeros(&dense_layer_3.bias); 156 */ 157 158 // Random weights in the value range from -2 to +2 159 // The value range of the weights was chosen large, so that learning success is not always given ;) 160 float max = 2.0; 161 float min = -2.0; 162 aimath_f32_default_tensor_init_uniform(&dense_layer_1.weights,max,min); 163 aimath_f32_default_tensor_init_uniform(&dense_layer_1.bias,max,min); 164 aimath_f32_default_tensor_init_uniform(&dense_layer_2.weights,max,min); 165 aimath_f32_default_tensor_init_uniform(&dense_layer_2.bias,max,min); 166 167 // -------------------------------- Define the optimizer for training --------------------- 168 169 aiopti_t *optimizer; // Object for the optimizer 170 171 // Alternative: SGD Gradient descent optimizer 172 /* 173 aiopti_sgd_f32_t sgd_opti; 174 sgd_opti.learning_rate = 1.0f; 175 sgd_opti.momentum = 0.0f; 176 177 optimizer = aiopti_sgd_f32_default(&sgd_opti); 178 */ 179 180 //ADAM optimizer 181 aiopti_adam_f32_t adam_opti; 182 adam_opti.learning_rate = 0.1f; 183 adam_opti.beta1 = 0.9f; 184 adam_opti.beta2 = 0.999f; 185 adam_opti.eps = 1e-7; 186 187 // Choose the 
optimizer 188 optimizer = aiopti_adam_f32_default(&adam_opti); 189 190 // -------------------------------- Allocate and schedule the working memory for training --------- 191 192 uint32_t memory_size = aialgo_sizeof_training_memory(&model, optimizer); 193 printf("Required memory for the training (Intermediate results, gradients, optimization memory): %d Byte\ 194", memory_size); 195 196 void *memory_ptr = malloc(memory_size); 197 198 // Schedule the memory over the model 199 aialgo_schedule_training_memory(&model, optimizer, memory_ptr, memory_size); 200 201 // Initialize the AIfES model 202 aialgo_init_model_for_training(&model, optimizer); 203 204 // --------------------------------- Print the result before training ---------------------------------- 205 206 uint32_t input_counter = 0; // Counter to print the inputs/training data 207 208 // Do the inference before training 209 aialgo_inference_model(&model, &input_tensor, &output_tensor); 210 211 printf("\ 212"); 213 printf("Before training:\ 214"); 215 printf("Results:\ 216"); 217 printf("input 1:\ input 2:\ real output:\ calculated output:\ 218"); 219 220 for (i = 0; i < 4; i++) { 221 printf("%f",input_data[input_counter]); 222 //Serial.print(((float* ) input_tensor.data)[i]); //Alternative print for the tensor 223 input_counter++; 224 printf("\ "); 225 printf("%f",input_data[input_counter]); 226 input_counter++; 227 printf("\ "); 228 printf("%f",target_data[i]); 229 printf("\ "); 230 printf("%f\ 231",output_data[i]); 232 //Serial.println(((float* ) output_tensor.data)[i]); //Alternative print for the tensor 233 } 234 235 // ------------------------------------- Run the training ------------------------------------ 236 237 float loss; 238 uint32_t batch_size = 4; // Configuration tip: ADAM=4 / SGD=1 239 uint16_t epochs = 100; // Configuration tip: ADAM=100 / SGD=550 240 uint16_t print_interval = 10; 241 242 printf("\ 243"); 244 printf("Start training\ 245"); 246 for(i = 0; i < epochs; i++) 247 { 248 // One 
epoch of training. Iterates through the whole data once 249 aialgo_train_model(&model, &input_tensor, &target_tensor, optimizer, batch_size); 250 251 // Calculate and print loss every print_interval epochs 252 if(i % print_interval == 0) 253 { 254 aialgo_calc_loss_model_f32(&model, &input_tensor, &target_tensor, &loss); 255 printf("Epoch: "); 256 printf("%d",i); 257 printf(" Loss: "); 258 printf("%f\ 259",loss); 260 261 } 262 } 263 printf("Finished training\ 264\ 265"); 266 267 // ----------------------------------------- Evaluate the trained model -------------------------- 268 269 // Do the inference after training 270 aialgo_inference_model(&model, &input_tensor, &output_tensor); 271 272 273 printf("After training:\ 274"); 275 printf("Results:\ 276"); 277 printf("input 1:\ input 2:\ real output:\ calculated output:\ 278"); 279 280 input_counter = 0; 281 282 for (i = 0; i < 4; i++) { 283 printf("%f",input_data[input_counter]); 284 //Serial.print(((float* ) input_tensor.data)[i]); //Alternative print for the tensor 285 input_counter++; 286 printf("\ "); 287 printf("%f",input_data[input_counter]); 288 input_counter++; 289 printf("\ "); 290 printf("%f",target_data[i]); 291 printf("\ "); 292 printf("%f\ 293",output_data[i]); 294 //Serial.println(((float* ) output_tensor.data)[i]); //Alternative print for the tensor 295 } 296 297 //How to print the weights example 298 //Serial.println(((float *) dense_layer_1.weights.data)[0]); 299 //Serial.println(((float *) dense_layer_1.bias.data)[0]); 300 301 if(loss > 0.3f) 302 { 303 printf("\ 304"); 305 printf("WARNING\ 306"); 307 printf("The loss is very high\ 308"); 309 } 310 311 printf("\ 312"); 313 printf("A learning success is not guaranteed\ 314"); 315 printf("The weights were initialized randomly\ 316\ 317"); 318 printf("copy the weights in the (3_XOR_Inference_keras.ino) example:\ 319"); 320 printf("---------------------------------------------------------------------------------\ 321\ 322"); 323 324 325 printf("float 
weights_data_dense_1[] = {\ 326"); 327 328 for (i = 0; i < INPUTS * NEURONS; i++) { 329 330 if(i == INPUTS * NEURONS - 1) 331 { 332 printf("%ff\ 333",((float *) dense_layer_1.weights.data)[i]); 334 } 335 else 336 { 337 printf("%ff,\ 338",((float *) dense_layer_1.weights.data)[i]); 339 } 340 341 } 342 printf("};\ 343\ 344"); 345 346 printf("float bias_data_dense_1[] = {\ 347"); 348 349 for (i = 0; i < NEURONS; i++) { 350 351 if(i == NEURONS - 1) 352 { 353 printf("%ff\ 354",((float *) dense_layer_1.bias.data)[i]); 355 } 356 else 357 { 358 printf("%ff,\ 359",((float *) dense_layer_1.bias.data)[i]); 360 } 361 362 } 363 printf("};\ 364\ 365"); 366 367 printf("-------------------------------\ 368\ 369"); 370 371 printf("float weights_data_dense_2[] = {\ 372"); 373 374 for (i = 0; i < NEURONS * OUTPUTS; i++) { 375 376 if(i == NEURONS * OUTPUTS - 1) 377 { 378 printf("%ff\ 379",((float *) dense_layer_2.weights.data)[i]); 380 } 381 else 382 { 383 printf("%ff,\ 384",((float *) dense_layer_2.weights.data)[i]); 385 } 386 387 } 388 printf("};\ 389\ 390"); 391 392 printf("float bias_data_dense_2[] = {\ 393"); 394 395 for (i = 0; i < OUTPUTS; i++) { 396 397 if(i == OUTPUTS - 1) 398 { 399 printf("%ff\ 400",((float *) dense_layer_2.bias.data)[i]); 401 } 402 else 403 { 404 printf("%ff,\ 405",((float *) dense_layer_2.bias.data)[i]); 406 } 407 408 } 409 printf("};\ 410\ 411"); 412 413 free(parameter_memory); 414 free(memory_ptr); 415} 416 417// The AIfES Express print function for the loss. It can be customized. 
418void printLoss(float loss) 419{ 420 global_epoch_counter = global_epoch_counter + 1; 421 printf("Epoch: %d / Loss: %f\ 422",global_epoch_counter * PRINT_INTERVAL, loss); 423 424} 425 426void AIfES_Express_demo() 427{ 428 429 printf("AIfES-Express Demo:\ 430\ 431"); 432 433 global_epoch_counter = 0; 434 435 uint32_t i; 436 437 // -------------------------------- describe the feed forward neural network ---------------------------------- 438 // neurons each layer 439 // FNN_structure[0] = input layer with 2 inputs 440 // FNN_structure[1] = hidden (dense) layer with 3 neurons 441 // FNN_structure[2] = output (dense) layer with 1 output 442 uint32_t FNN_structure[FNN_3_LAYERS] = {2,3,1}; 443 444 // select the activation functions for the dense layer 445 AIFES_E_activations FNN_activations[FNN_3_LAYERS - 1]; 446 FNN_activations[0] = AIfES_E_sigmoid; // Sigmoid for hidden (dense) layer 447 FNN_activations[1] = AIfES_E_sigmoid; // Sigmoid for output (dense) layer 448 449 /* possible activation functions 450 AIfES_E_relu 451 AIfES_E_sigmoid 452 AIfES_E_softmax 453 AIfES_E_leaky_relu 454 AIfES_E_elu 455 AIfES_E_tanh 456 AIfES_E_softsign 457 AIfES_E_linear 458 */ 459 460 // AIfES Express function: calculate the number of weights needed 461 uint32_t weight_number = AIFES_E_flat_weights_number_fnn_f32(FNN_structure,FNN_3_LAYERS); 462 463 printf("Weights: %d\ 464",weight_number); 465 466 // FlatWeights array 467 //float FlatWeights[weight_number]; 468 469 // Alternative weight array 470 float *FlatWeights; 471 FlatWeights = (float *)malloc(sizeof(float)*weight_number); 472 473 474 // fill the AIfES Express struct 475 AIFES_E_model_parameter_fnn_f32 FNN; 476 FNN.layer_count = FNN_3_LAYERS; 477 FNN.fnn_structure = FNN_structure; 478 FNN.fnn_activations = FNN_activations; 479 FNN.flat_weights = FlatWeights; 480 481 // -------------------------------- create the tensors ---------------------------------- 482 483 float input_data[4][2] = { 484 {0.0f, 0.0f}, // Input data 485 
{0.0f, 1.0f}, 486 {1.0f, 0.0f}, 487 {1.0f, 1.0f} 488 }; 489 uint16_t input_shape[] = {DATASETS, (uint16_t)FNN_structure[0]}; // Definition of the input shape 490 aitensor_t input_tensor = AITENSOR_2D_F32(input_shape, input_data); // Macro for the simple creation of a float32 tensor. Also usable in the normal AIfES version 491 492 float target_data[] = {0.0f, 1.0f, 1.0f, 0.0f}; // Target Data 493 uint16_t target_shape[] = {DATASETS, (uint16_t)FNN_structure[FNN_3_LAYERS - 1]}; // Definition of the target shape 494 aitensor_t target_tensor = AITENSOR_2D_F32(target_shape, target_data); // Macro for the simple creation of a float32 tensor. Also usable in the normal AIfES version 495 496 float output_data[DATASETS]; // Output data 497 uint16_t output_shape[] = {DATASETS, (uint16_t)FNN_structure[FNN_3_LAYERS - 1]}; // Definition of the output shape 498 aitensor_t output_tensor = AITENSOR_2D_F32(output_shape, output_data); // Macro for the simple creation of a float32 tensor. Also usable in the normal AIfES version 499 500 // -------------------------------- init weights settings ---------------------------------- 501 502 AIFES_E_init_weights_parameter_fnn_f32 FNN_INIT_WEIGHTS; 503 FNN_INIT_WEIGHTS.init_weights_method = AIfES_E_init_uniform; 504 505 /* init methods 506 AIfES_E_init_uniform 507 AIfES_E_init_glorot_uniform 508 AIfES_E_init_no_init //If starting weights are already available or if you want to continue training 509 */ 510 511 FNN_INIT_WEIGHTS.min_init_uniform = -2; // only for the AIfES_E_init_uniform 512 FNN_INIT_WEIGHTS.max_init_uniform = 2; // only for the AIfES_E_init_uniform 513 // -------------------------------- set training parameter ---------------------------------- 514 AIFES_E_training_parameter_fnn_f32 FNN_TRAIN; 515 FNN_TRAIN.optimizer = AIfES_E_adam; 516 /* optimizers 517 AIfES_E_adam 518 AIfES_E_sgd 519 */ 520 FNN_TRAIN.loss = AIfES_E_mse; 521 /* loss 522 AIfES_E_mse, 523 AIfES_E_crossentropy 524 */ 525 FNN_TRAIN.learn_rate = 0.05f; // Learning 
rate is for all optimizers 526 FNN_TRAIN.sgd_momentum = 0.0; // Only interesting for SGD 527 FNN_TRAIN.batch_size = DATASETS; // Here a full batch 528 FNN_TRAIN.epochs = 1000; // Number of epochs 529 FNN_TRAIN.epochs_loss_print_interval = PRINT_INTERVAL; // Print the loss every x times 530 531 // Your individual print function 532 // it must look like this: void YourFunctionName(float x) 533 FNN_TRAIN.loss_print_function = printLoss; 534 535 //You can enable early stopping, so that learning is automatically stopped when a learning target is reached 536 FNN_TRAIN.early_stopping = AIfES_E_early_stopping_on; 537 /* early_stopping 538 AIfES_E_early_stopping_off, 539 AIfES_E_early_stopping_on 540 */ 541 //Define your target loss 542 FNN_TRAIN.early_stopping_target_loss = 0.004; 543 544 printf("\ 545"); 546 printf("Start training\ 547"); 548 printf("Early stopping at: %f\ 549",FNN_TRAIN.early_stopping_target_loss); 550 551 // -------------------------------- do the training ---------------------------------- 552 // In the training function, the FNN is set up, the weights are initialized and the training is performed. 
553 AIFES_E_training_fnn_f32(&input_tensor,&target_tensor,&FNN,&FNN_TRAIN,&FNN_INIT_WEIGHTS,&output_tensor); 554 555 556 // -------------------------------- do the inference ---------------------------------- 557 // AIfES Express function: do the inference 558 AIFES_E_inference_fnn_f32(&input_tensor,&FNN,&output_tensor); 559 560 // -------------------------------- print the results ---------------------------------- 561 562 printf("\ 563"); 564 printf("After training:\ 565"); 566 printf("Results:\ 567"); 568 printf("input 1:\ input 2:\ real output:\ calculated output:\ 569"); 570 571 for (i = 0; i < DATASETS; i++) { 572 printf("%f\ %f\ %f\ %f\ 573",input_data[i][0],input_data[i][1],target_data[i],output_data[i]); 574 } 575 576 printf("\ 577Weights for: 0_Universal/2_AIfES_Express_XOR_F32/0_AIfES_Express_XOR_F32_Inference/0_AIfES_Express_XOR_F32_Inference.ino\ 578"); 579 printf("copy and paste the weights\ 580\ 581"); 582 printf("float FlatWeights[] = {"); 583 584 for (i = 0; i < weight_number; i++) { 585 if(i == weight_number - 1) 586 { 587 printf("%ff",FlatWeights[i]); 588 } 589 else 590 { 591 printf("%ff,",FlatWeights[i]); 592 } 593 } 594 printf("};\ 595\ 596\ 597"); 598 599 600 free(FlatWeights); 601} 602 603int main(int argc, char *argv[]) 604{ 605 606 time_t t; 607 608 //IMPORTANT 609 //AIfES requires random weights for training 610 srand((unsigned) time(&t)); 611 612 printf("rand test: %d\ 613",rand()); 614 615 AIfES_demo(); 616 //AIfES_Express_demo(); 617 618 system("pause"); 619 620 return 0; 621} 622
Comments
Only logged in users can leave comments
aifes_team
0 Followers
•0 Projects
Table of contents
Intro
2
0