Code:
// A Neural Network for Arduino
// http://robotics.hobbizine.com/arduinoann.html
// 2-layer Backpropagation net
// modified by HaWe
// version 0.0.2
#include <math.h>
#define REPORT_N 100
/******************************************************************
* Network Configuration - customized per network
******************************************************************/
const int PatternCount = 10;
const int InputNodes = 7;
const int HiddenNodes = 8;
const int OutputNodes = 4;
const float LearningRate = 0.3;
const float Momentum = 0.9;
const float InitialWeightMax = 0.5;
const float Success = 0.00040;
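// LearningRate scales each weight update, Momentum re-applies a fraction of the
// previous update, InitialWeightMax bounds the random initial weights, and
// Success is the summed-error threshold at which training stops.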
const byte Input[PatternCount][InputNodes] = {
  { 1, 1, 1, 1, 1, 1, 0 },  // 0
  { 0, 1, 1, 0, 0, 0, 0 },  // 1
  { 1, 1, 0, 1, 1, 0, 1 },  // 2
  { 1, 1, 1, 1, 0, 0, 1 },  // 3
  { 0, 1, 1, 0, 0, 1, 1 },  // 4
  { 1, 0, 1, 1, 0, 1, 1 },  // 5
  { 0, 0, 1, 1, 1, 1, 1 },  // 6
  { 1, 1, 1, 0, 0, 0, 0 },  // 7
  { 1, 1, 1, 1, 1, 1, 1 },  // 8
  { 1, 1, 1, 0, 0, 1, 1 }   // 9
};
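// Each Input row is the 7-segment display pattern (segments a..g) of the digit noted in its comment.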
const byte Target[PatternCount][OutputNodes] = {
  { 0, 0, 0, 0 },
  { 0, 0, 0, 1 },
  { 0, 0, 1, 0 },
  { 0, 0, 1, 1 },
  { 0, 1, 0, 0 },
  { 0, 1, 0, 1 },
  { 0, 1, 1, 0 },
  { 0, 1, 1, 1 },
  { 1, 0, 0, 0 },
  { 1, 0, 0, 1 }
};
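// Each Target row is the 4-bit binary (BCD) code of the corresponding digit, most significant bit first.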
/******************************************************************
* End Network Configuration
******************************************************************/
int i, j, p, q, r;
int ReportEvery_n;
int RandomizedIndex[PatternCount];
long TrainingCycle;
float Rando;
float Error;
float Accum;
float Hidden[HiddenNodes];
float Output[OutputNodes];
float HiddenWeights[InputNodes+1][HiddenNodes];
float OutputWeights[HiddenNodes+1][OutputNodes];
float HiddenDelta[HiddenNodes];
float OutputDelta[OutputNodes];
float ChangeHiddenWeights[InputNodes+1][HiddenNodes];
float ChangeOutputWeights[HiddenNodes+1][OutputNodes];
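// Memory note: the weight, change and activation arrays above come to roughly
// 224 floats (about 0.9 KB of SRAM), so the net should still fit on an ATmega328-based board.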
//-----------------------------------------------------------------
//-----------------------------------------------------------------
void setup(){
  Serial.begin(115200);
  delay(1000);
  randomSeed(analogRead(3));   // seed the pseudo-random generator from analog pin 3
  ReportEvery_n = 1;
  for( p = 0 ; p < PatternCount ; p++ ) {
    RandomizedIndex[p] = p ;
  }
}
//-----------------------------------------------------------------
//-----------------------------------------------------------------
void loop (){
  /******************************************************************
  * Initialize HiddenWeights and ChangeHiddenWeights
  ******************************************************************/
  for( i = 0 ; i < HiddenNodes ; i++ ) {
    for( j = 0 ; j <= InputNodes ; j++ ) {
      ChangeHiddenWeights[j][i] = 0.0 ;
      Rando = float(random(100))/100;
      HiddenWeights[j][i] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax ;
    }
  }
  /******************************************************************
  * Initialize OutputWeights and ChangeOutputWeights
  ******************************************************************/
  for( i = 0 ; i < OutputNodes ; i ++ ) {
    for( j = 0 ; j <= HiddenNodes ; j++ ) {
      ChangeOutputWeights[j][i] = 0.0 ;
      Rando = float(random(100))/100;
      OutputWeights[j][i] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax ;
    }
  }
  Serial.println("Initial/Untrained Outputs: ");
  toTerminal();
  /******************************************************************
  * Begin training
  ******************************************************************/
  for( TrainingCycle = 1 ; TrainingCycle < 2147483647 ; TrainingCycle++) {
    /******************************************************************
    * Randomize order of training patterns
    ******************************************************************/
    for( p = 0 ; p < PatternCount ; p++) {
      q = random(PatternCount);
      r = RandomizedIndex[p] ;
      RandomizedIndex[p] = RandomizedIndex[q] ;
      RandomizedIndex[q] = r ;
    }
    Error = 0.0 ;
    /******************************************************************
    * Cycle through each training pattern in the randomized order
    ******************************************************************/
    for( q = 0 ; q < PatternCount ; q++ ) {
      p = RandomizedIndex[q];
      /******************************************************************
      * Compute hidden layer activations
      ******************************************************************/
      for( i = 0 ; i < HiddenNodes ; i++ ) {
        Accum = HiddenWeights[InputNodes][i] ;
        for( j = 0 ; j < InputNodes ; j++ ) {
          Accum += Input[p][j] * HiddenWeights[j][i] ;
        }
        Hidden[i] = 1.0/(1.0 + exp(-Accum)) ;
      }
      /******************************************************************
      * Compute output layer activations and calculate errors
      ******************************************************************/
      for( i = 0 ; i < OutputNodes ; i++ ) {
        Accum = OutputWeights[HiddenNodes][i] ;
        for( j = 0 ; j < HiddenNodes ; j++ ) {
          Accum += Hidden[j] * OutputWeights[j][i] ;
        }
        Output[i] = 1.0/(1.0 + exp(-Accum)) ;
        OutputDelta[i] = (Target[p][i] - Output[i]) * Output[i] * (1.0 - Output[i]) ;
        Error += 0.5 * (Target[p][i] - Output[i]) * (Target[p][i] - Output[i]) ;
      }
      /******************************************************************
      * Backpropagate errors to hidden layer
      ******************************************************************/
      for( i = 0 ; i < HiddenNodes ; i++ ) {
        Accum = 0.0 ;
        delay(1);   // brief pause; not required for the computation
        for( j = 0 ; j < OutputNodes ; j++ ) {
          Accum += OutputWeights[i][j] * OutputDelta[j] ;
        }
        HiddenDelta[i] = Accum * Hidden[i] * (1.0 - Hidden[i]) ;
      }
      /******************************************************************
      * Update Input-->Hidden Weights
      ******************************************************************/
      for( i = 0 ; i < HiddenNodes ; i++ ) {
        delay(1);   // brief pause; not required for the computation
        ChangeHiddenWeights[InputNodes][i] = LearningRate * HiddenDelta[i] + Momentum * ChangeHiddenWeights[InputNodes][i] ;
        HiddenWeights[InputNodes][i] += ChangeHiddenWeights[InputNodes][i] ;
        for( j = 0 ; j < InputNodes ; j++ ) {
          ChangeHiddenWeights[j][i] = LearningRate * Input[p][j] * HiddenDelta[i] + Momentum * ChangeHiddenWeights[j][i];
          HiddenWeights[j][i] += ChangeHiddenWeights[j][i] ;
        }
      }
      /******************************************************************
      * Update Hidden-->Output Weights
      ******************************************************************/
      for( i = 0 ; i < OutputNodes ; i ++ ) {
        ChangeOutputWeights[HiddenNodes][i] = LearningRate * OutputDelta[i] + Momentum * ChangeOutputWeights[HiddenNodes][i] ;
        OutputWeights[HiddenNodes][i] += ChangeOutputWeights[HiddenNodes][i] ;
        for( j = 0 ; j < HiddenNodes ; j++ ) {
          ChangeOutputWeights[j][i] = LearningRate * Hidden[j] * OutputDelta[i] + Momentum * ChangeOutputWeights[j][i] ;
          OutputWeights[j][i] += ChangeOutputWeights[j][i] ;
        }
      }
    }
    /******************************************************************
    * Every REPORT_N cycles send data to terminal for display
    ******************************************************************/
    ReportEvery_n = ReportEvery_n - 1;
    if (ReportEvery_n == 0)
    {
      Serial.println();
      Serial.println();
      Serial.print ("TrainingCycle: ");
      Serial.print (TrainingCycle);
      Serial.print (" Error = ");
      Serial.println (Error, 5);
      toTerminal();
      if (TrainingCycle==1)
      {
        ReportEvery_n = REPORT_N-1;
      }
      else
      {
        ReportEvery_n = REPORT_N;
      }
    }
    /******************************************************************
    * If error rate is less than pre-determined threshold then end
    ******************************************************************/
    if( Error < Success ) break ;
  }
  Serial.println ();
  Serial.println();
  Serial.print ("TrainingCycle: ");
  Serial.print (TrainingCycle);
  Serial.print (" Error = ");
  Serial.println (Error, 5);
  toTerminal();
  Serial.println ();
  Serial.println ();
  Serial.println ("Training Set Solved! ");
  Serial.println ("--------");
  Serial.println ();
  Serial.println ();
  ReportEvery_n = 1;
}
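// Note: when loop() returns here, the Arduino core simply calls loop() again,
// so the sketch re-initializes the weights and trains a fresh network from scratch.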
void toTerminal()
{
  for( p = 0 ; p < PatternCount ; p++ ) {
    Serial.println();
    Serial.print (" Training Pattern: ");
    Serial.println (p);
    Serial.print (" Input ");
    for( i = 0 ; i < InputNodes ; i++ ) {
      Serial.print (Input[p][i], DEC);
      Serial.print (" ");
    }
    Serial.print (" Target ");
    for( i = 0 ; i < OutputNodes ; i++ ) {
      Serial.print (Target[p][i], DEC);
      Serial.print (" ");
    }
    /******************************************************************
    * Compute hidden layer activations
    ******************************************************************/
    for( i = 0 ; i < HiddenNodes ; i++ ) {
      Accum = HiddenWeights[InputNodes][i] ;
      for( j = 0 ; j < InputNodes ; j++ ) {
        Accum += Input[p][j] * HiddenWeights[j][i] ;
      }
      Hidden[i] = 1.0/(1.0 + exp(-Accum)) ;
    }
    /******************************************************************
    * Compute output layer activations
    ******************************************************************/
    for( i = 0 ; i < OutputNodes ; i++ ) {
      Accum = OutputWeights[HiddenNodes][i] ;
      for( j = 0 ; j < HiddenNodes ; j++ ) {
        Accum += Hidden[j] * OutputWeights[j][i] ;
      }
      Output[i] = 1.0/(1.0 + exp(-Accum)) ;
    }
    Serial.print (" Output ");
    for( i = 0 ; i < OutputNodes ; i++ ) {
      Serial.print (Output[i], 5);
      Serial.print (" ");
    }
  }
}
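For anyone who wants to use the net after it has converged: the sketch above only trains and prints. A minimal inference helper could be appended to the sketch and reuse the trained weight arrays. This is my own addition, not part of HaWe's code; the function name and the round-to-nearest-bit decoding are assumptions, but the forward pass is the same math as in loop():

// Sketch add-on (not in the original): feed one 7-segment pattern through the
// trained weights and decode the rounded outputs as a 4-bit binary number.
int classifyDigit( const byte In[] ) {
  for( i = 0 ; i < HiddenNodes ; i++ ) {        // hidden layer, same math as in loop()
    Accum = HiddenWeights[InputNodes][i] ;      // bias weight
    for( j = 0 ; j < InputNodes ; j++ ) {
      Accum += In[j] * HiddenWeights[j][i] ;
    }
    Hidden[i] = 1.0/(1.0 + exp(-Accum)) ;
  }
  for( i = 0 ; i < OutputNodes ; i++ ) {        // output layer
    Accum = OutputWeights[HiddenNodes][i] ;     // bias weight
    for( j = 0 ; j < HiddenNodes ; j++ ) {
      Accum += Hidden[j] * OutputWeights[j][i] ;
    }
    Output[i] = 1.0/(1.0 + exp(-Accum)) ;
  }
  int digit = 0;                                // assemble bits, MSB first (matches Target[])
  for( i = 0 ; i < OutputNodes ; i++ ) {
    digit = (digit << 1) | (Output[i] >= 0.5 ? 1 : 0);
  }
  return digit;                                 // 0..9 expected; >9 means no clean match
}

Calling classifyDigit(Input[4]) after the error has dropped below Success should return 4; a result above 9 would mean the rounded output bits do not match any trained digit code.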