#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <stdint.h>
#include <stdbool.h>

#include "cleanbench.h"
#include "randnum.h"

/********************************
** BACK PROPAGATION NEURAL NET **
*********************************
** This code is a modified version of the code
** that was submitted to BYTE Magazine by
** Maureen Caudill.  It accompanied an article
** that I CANNOT NOW RECALL.
** The author's original heading/comment was
** as follows:
**
**  Backpropagation Network
**  Written by Maureen Caudill
**  in Think C 4.0 on a Macintosh
**
**  (c) Maureen Caudill 1988-1991
**  This network will accept 5x7 input patterns
**  and produce 8 bit output patterns.
**  The source code may be copied or modified without restriction,
**  but no fee may be charged for its use.
**
** ++++++++++++++
** I have modified the code so that it will work
** on systems other than a Macintosh -- RG
*/

/*
** LOOP_MAX
**
** This constant sets the max number of loops through the neural
** net that the system will attempt before giving up.  This
** is not a critical constant.  You can alter it if your system
** has sufficient horsepower.
*/
#define LOOP_MAX 500000L

#define ERR -1
#define MAXPATS 10	/* max number of patterns in data file */
#define IN_X_SIZE 5	/* number of neurodes/row of input layer */
#define IN_Y_SIZE 7	/* number of neurodes/col of input layer */
#define IN_SIZE 35	/* equals IN_X_SIZE*IN_Y_SIZE */
#define MID_SIZE 8	/* number of neurodes in middle layer */
#define OUT_SIZE 8	/* number of neurodes in output layer */
#define MARGIN 0.1	/* how near to 1,0 do we have to come to stop? */
#define BETA 0.09	/* beta learning constant */
#define ALPHA 0.09	/* momentum term constant */
#define STOP 0.1	/* when worst_error less than STOP, training is done */

double mid_wts[MID_SIZE][IN_SIZE];		/* middle layer weights */
double out_wts[OUT_SIZE][MID_SIZE];		/* output layer weights */
double mid_out[MID_SIZE];			/* middle layer output */
double out_out[OUT_SIZE];			/* output layer output */
double mid_error[MID_SIZE];			/* middle layer errors */
double out_error[OUT_SIZE];			/* output layer errors */
double mid_wt_change[MID_SIZE][IN_SIZE];	/* storage for last wt change */
double out_wt_change[OUT_SIZE][MID_SIZE];	/* storage for last wt change */
double in_pats[MAXPATS][IN_SIZE];		/* input patterns */
double out_pats[MAXPATS][OUT_SIZE];		/* desired output patterns */
double tot_out_error[MAXPATS];			/* measure of whether net is done */
double out_wt_cum_change[OUT_SIZE][MID_SIZE];	/* accumulated wt changes */
double mid_wt_cum_change[MID_SIZE][IN_SIZE];	/* accumulated wt changes */

double worst_error;		/* worst error each pass through the data */
double average_error;		/* average error each pass through the data */
double avg_out_error[MAXPATS];	/* average error each pattern */

int iteration_count;	/* number of passes thru network so far */
int numpats;		/* number of patterns in data file */
int numpasses;		/* number of training passes through data file */
int learned;		/* flag--if true, network has learned all patterns */

static clock_t DoNNetIteration(unsigned long nloops);
static void do_mid_forward(int patt);
static void do_out_forward();
static void do_forward_pass(int patt);
static void do_out_error(int patt);
static void worst_pass_error();
static void do_mid_error();
static void adjust_out_wts();
static void adjust_mid_wts(int patt);
static void do_back_pass(int patt);
static void move_wt_changes();
static int check_out_error();
static void zero_changes();
static void randomize_wts();
static int read_data_file();

/***********
** DoNNET **
************
** Perform the neural net benchmark.
** Note that this benchmark is one of the few that
** requires an input file.  That file is "NNET.DAT" and
** should be in the local directory (from which the
** benchmark program is launched).
*/
double DoNNET(void)
{
	clock_t total_time = 0;
	int iterations = 0;
	static bool is_adjusted = false;
	static int loops = 0;

	/*
	** Init random number generator.
	** NOTE: It is important that the random number generator
	** be re-initialized for every pass through this test.
	** The NNET algorithm uses the random number generator
	** to initialize the net.  Results are sensitive to
	** the initial neural net state.
	*/
	randnum((int32_t)3);

	/*
	** Read in the input and output patterns.  We'll do this
	** only once here at the beginning.  These values don't
	** change once loaded.
	*/
	if (read_data_file() != 0) {
		exit(1);
	}

	/*
	** See if we need to perform self adjustment loop.
	*/
	if (is_adjusted == false) {
		is_adjusted = true;

		/*
		** Do self-adjustment.  This involves initializing the
		** # of loops and increasing the loop count until we
		** get a number of loops that we can use.
		*/
		do {
			randnum((int32_t)3);
			++loops;
		} while ((DoNNetIteration(loops) <= MINIMUM_TICKS) && (loops < LOOP_MAX));
	}

	do {
		randnum((int32_t)3);	/* Gotta do this for Neural Net */
		total_time += DoNNetIteration(loops);
		iterations += loops;
	} while (total_time < MINIMUM_SECONDS * CLOCKS_PER_SEC);

	return (double)(iterations * CLOCKS_PER_SEC) / total_time;
}

/********************
** DoNNetIteration **
*********************
** Do a single iteration of the neural net benchmark.
** By iteration, we mean a "learning" pass.
*/
static clock_t DoNNetIteration(unsigned long nloops)
{
	clock_t start, stop;
	int patt;

	/*
	** Run nloops learning cycles.  Notice that, counted with
	** the learning cycle is the weight randomization and
	** zeroing of changes.  This should reduce clock jitter,
	** since we don't have to stop and start the clock for
	** each iteration.
	*/
	start = clock();
	while (nloops--) {
		randomize_wts();
		zero_changes();
		iteration_count = 1;
		learned = false;
		numpasses = 0;
		while (!learned) {
			for (patt = 0; patt < numpats; patt++) {
				worst_error = 0.0;	/* reset this every pass through data */
				move_wt_changes();	/* move last pass's wt changes to momentum array */
				do_forward_pass(patt);
				do_back_pass(patt);
				iteration_count++;
			}
			numpasses++;
			learned = check_out_error();
		}
	}
	stop = clock();

	return stop - start;
}

/*************************
** do_mid_forward(patt) **
**************************
** Process the middle layer's forward pass.
** The activation of each middle layer neurode is the weighted
** sum of the inputs from the input pattern, with the sigmoid
** function applied to that sum.
**/
static void do_mid_forward(int patt)
{
	double sum;
	int neurode, i;

	for (neurode = 0; neurode < MID_SIZE; neurode++) {
		sum = 0.0;
		for (i = 0; i < IN_SIZE; i++) {
			/* compute weighted sum of input signals */
			sum += mid_wts[neurode][i] * in_pats[patt][i];
		}
		/* apply sigmoid function f(x) = 1/(1+exp(-x)) to weighted sum */
		sum = 1.0 / (1.0 + exp(-sum));
		mid_out[neurode] = sum;
	}
	return;
}

/*********************
** do_out_forward() **
**********************
** Process the forward pass through the output layer.
** The activation of each output layer neurode is the weighted
** sum of the middle layer outputs, modified by the sigmoid
** function.
**/
static void do_out_forward()
{
	double sum;
	int neurode, i;

	for (neurode = 0; neurode < OUT_SIZE; neurode++) {
		sum = 0.0;
		for (i = 0; i < MID_SIZE; i++) {
			/* compute weighted sum of middle layer outputs */
			sum += out_wts[neurode][i] * mid_out[i];
		}
		/* apply sigmoid function f(x) = 1/(1+exp(-x)) to weighted sum */
		sum = 1.0 / (1.0 + exp(-sum));
		out_out[neurode] = sum;
	}
	return;
}

/**********************
** do_forward_pass() **
***********************
** Control function for the forward pass through the network.
** NOTE: the middle layer outputs become the input to the
** output layer.
**/
static void do_forward_pass(int patt)
{
	do_mid_forward(patt);	/* process forward pass, middle layer */
	do_out_forward();	/* process forward pass, output layer */
	return;
}

/************************
** do_out_error(patt) **
*************************
** Compute the error for the output layer neurodes.
** This is simply Desired - Actual.
**/
static void do_out_error(int patt)
{
	int neurode;
	double error, tot_error, sum;

	tot_error = 0.0;
	sum = 0.0;
	for (neurode = 0; neurode < OUT_SIZE; neurode++) {
		out_error[neurode] = out_pats[patt][neurode] - out_out[neurode];

		/*
		** While we're here, also compute the magnitude of the
		** total error and the worst error in this pass.
		** We use these to decide whether we are done yet.
		*/
		error = out_error[neurode];
		if (error < 0.0) {
			sum += -error;
			if (-error > tot_error)
				tot_error = -error;	/* worst error this pattern */
		} else {
			sum += error;
			if (error > tot_error)
				tot_error = error;	/* worst error this pattern */
		}
	}
	avg_out_error[patt] = sum / OUT_SIZE;
	tot_out_error[patt] = tot_error;
	return;
}

/***********************
** worst_pass_error() **
************************
** Find the worst and average error in the pass and save it.
**/
static void worst_pass_error()
{
	double error, sum;
	int i;

	error = 0.0;
	sum = 0.0;
	for (i = 0; i < numpats; i++) {
		if (tot_out_error[i] > error)
			error = tot_out_error[i];
		sum += avg_out_error[i];
	}
	worst_error = error;
	average_error = sum / numpats;
	return;
}

/*******************
** do_mid_error() **
********************
** Compute the error for the middle layer neurodes.
** This is based on the output errors computed above.
** Note that the derivative of the sigmoid f(x) is
**	f'(x) = f(x)(1 - f(x))
** Recall that f(x) is merely the output of the middle
** layer neurode on the forward pass.
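**
** As a worked form of the above (using only the arrays defined in
** this file), the error computed for middle-layer neurode j is
**	mid_error[j] = mid_out[j] * (1 - mid_out[j]) * SUM_k( out_wts[k][j] * out_error[k] )
** i.e. each output error propagated back through its weight to j,
** scaled by the sigmoid derivative at j's output.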
**/
static void do_mid_error()
{
	double sum;
	int neurode, i;

	for (neurode = 0; neurode < MID_SIZE; neurode++) {
		sum = 0.0;
		for (i = 0; i < OUT_SIZE; i++)
			sum += out_wts[i][neurode] * out_error[i];

		/* apply the sigmoid derivative f'(x) = f(x)(1 - f(x)) */
		mid_error[neurode] = mid_out[neurode] * (1.0 - mid_out[neurode]) * sum;
	}
	return;
}

/*********************
** adjust_out_wts() **
**********************
** Adjust the weights of the output layer using the delta
** rule with a momentum term.
**/
static void adjust_out_wts()
{
	int weight, neurode;
	double learn, delta, alph;

	learn = BETA;
	alph = ALPHA;
	for (neurode = 0; neurode < OUT_SIZE; neurode++) {
		for (weight = 0; weight < MID_SIZE; weight++) {
			/* standard delta rule */
			delta = learn * out_error[neurode] * mid_out[weight];
			/* now the momentum term */
			delta += alph * out_wt_change[neurode][weight];
			out_wts[neurode][weight] += delta;
			/* track this pass's cum wt changes for next pass's momentum */
			out_wt_cum_change[neurode][weight] += delta;
		}
	}
	return;
}

/*********************
** adjust_mid_wts() **
**********************
** Adjust the weights of the middle layer using the delta
** rule with a momentum term.  The inputs to the middle
** layer are the values of the current input pattern.
**/
static void adjust_mid_wts(int patt)
{
	int weight, neurode;
	double learn, delta, alph;

	learn = BETA;
	alph = ALPHA;
	for (neurode = 0; neurode < MID_SIZE; neurode++) {
		for (weight = 0; weight < IN_SIZE; weight++) {
			/* first the basic delta rule */
			delta = learn * mid_error[neurode] * in_pats[patt][weight];
			/* with the momentum term */
			delta += alph * mid_wt_change[neurode][weight];
			mid_wts[neurode][weight] += delta;
			/* track this pass's cum wt changes for next pass's momentum */
			mid_wt_cum_change[neurode][weight] += delta;
		}
	}
	return;
}

/*******************
** do_back_pass() **
********************
** Process the backward-propagation pass through the network.
**/
static void do_back_pass(int patt)
{
	do_out_error(patt);
	do_mid_error();
	adjust_out_wts();
	adjust_mid_wts(patt);
	return;
}

/**********************
** move_wt_changes() **
***********************
** Move the weight changes accumulated last pass into the
** wt-change arrays for use by the momentum term in this pass.
** Also zero out the accumulating arrays after the move.
**/
static void move_wt_changes()
{
	int i, j;

	for (i = 0; i < MID_SIZE; i++) {
		for (j = 0; j < IN_SIZE; j++) {
			mid_wt_change[i][j] = mid_wt_cum_change[i][j];
			mid_wt_cum_change[i][j] = 0.0;	/* zero for next pass's accumulation */
		}
	}
	for (i = 0; i < OUT_SIZE; i++) {
		for (j = 0; j < MID_SIZE; j++) {
			out_wt_change[i][j] = out_wt_cum_change[i][j];
			out_wt_cum_change[i][j] = 0.0;	/* zero for next pass's accumulation */
		}
	}
	return;
}

/**********************
** check_out_error() **
***********************
** Check each pattern's worst output error against STOP.  If
** every pattern is below STOP, the network has learned
** acceptably well.  If any pattern's error has grown to 16.0
** or more, the network is diverging and ERR is returned.
**/
static int check_out_error()
{
	int result, i, error;

	result = true;
	error = false;
	worst_pass_error();	/* identify the worst error in this pass */

	for (i = 0; i < numpats; i++) {
		if (tot_out_error[i] >= STOP) result = false;
		if (tot_out_error[i] >= 16.0) error = true;
	}

	if (error) result = ERR;

#ifdef DEBUG
/*	printf("\n Error this pass thru data:   Worst: %8.3f; Average: %8.3f",
		worst_error, average_error); */
/*	fprintf(outfile,
		"\n Error this pass thru data:   Worst: %8.3f; Average: %8.3f",
		worst_error, average_error); */
#endif

	return (result);
}

/*******************
** zero_changes() **
********************
** Zero out all the wt change arrays.
**/
static void zero_changes()
{
	int i, j;

	for (i = 0; i < MID_SIZE; i++) {
		for (j = 0; j < IN_SIZE; j++) {
			mid_wt_change[i][j] = 0.0;
			mid_wt_cum_change[i][j] = 0.0;
		}
	}
	for (i = 0; i < OUT_SIZE; i++) {
		for (j = 0; j < MID_SIZE; j++) {
			out_wt_change[i][j] = 0.0;
			out_wt_cum_change[i][j] = 0.0;
		}
	}
	return;
}

/********************
** randomize_wts() **
*********************
** Initialize the weights in the middle and output layers to
** small random values in the range -0.25..+0.25.
** NOTE: abs_randwc() from randnum.h is assumed to return a
** non-negative random value less than its argument, as in the
** original benchmark's random number package.
**/
static void randomize_wts()
{
	int neurode, i;
	double value;

	for (neurode = 0; neurode < MID_SIZE; neurode++) {
		for (i = 0; i < IN_SIZE; i++) {
			value = (double)abs_randwc((int32_t)100000L);
			value = value / 100000.0 - 0.5;
			mid_wts[neurode][i] = value / 2;
		}
	}
	for (neurode = 0; neurode < OUT_SIZE; neurode++) {
		for (i = 0; i < MID_SIZE; i++) {
			value = (double)abs_randwc((int32_t)100000L);
			value = value / 100000.0 - 0.5;
			out_wts[neurode][i] = value / 2;
		}
	}
	return;
}

/*********************
** read_data_file() **
**********************
** Read the input and desired output patterns from "NNET.DAT".
** The file holds, in order: a line with the input/output geometry,
** a line with the number of patterns, then for each pattern
** IN_Y_SIZE rows of IN_X_SIZE input values followed by one row of
** OUT_SIZE desired output values (all 0s and 1s).
** Returns 0 on success, ERR on failure.
**/
static int read_data_file()
{
	FILE *infile;
	int patt, element, i, row;
	int vals_read;
	int xinsize, yinsize, youtsize;
	int val1, val2, val3, val4, val5, val6, val7, val8;

	infile = fopen("NNET.DAT", "r");
	if (infile == NULL) {
		printf("Error opening neural net data file NNET.DAT.\n");
		return (ERR);
	}

	vals_read = fscanf(infile, "%d %d %d", &xinsize, &yinsize, &youtsize);
	if (vals_read != 3) {
		printf("Error reading geometry line of NNET.DAT.\n");
		fclose(infile);
		return (ERR);
	}
	vals_read = fscanf(infile, "%d", &numpats);
	if (vals_read != 1) {
		printf("Error reading pattern count from NNET.DAT.\n");
		fclose(infile);
		return (ERR);
	}
	if (numpats > MAXPATS)
		numpats = MAXPATS;

	for (patt = 0; patt < numpats; patt++) {
		/* read the 5x7 input pattern, one row at a time */
		for (row = 0; row < IN_Y_SIZE; row++) {
			vals_read = fscanf(infile, "%d %d %d %d %d",
				&val1, &val2, &val3, &val4, &val5);
			if (vals_read != 5) {
				printf("Error reading input pattern from NNET.DAT.\n");
				fclose(infile);
				return (ERR);
			}
			element = row * IN_X_SIZE;
			in_pats[patt][element] = (double) val1; element++;
			in_pats[patt][element] = (double) val2; element++;
			in_pats[patt][element] = (double) val3; element++;
			in_pats[patt][element] = (double) val4; element++;
			in_pats[patt][element] = (double) val5; element++;
		}

		/* clamp the 0/1 inputs to the range 0.1..0.9 */
		for (i = 0; i < IN_SIZE; i++) {
			if (in_pats[patt][i] >= 0.9) in_pats[patt][i] = 0.9;
			if (in_pats[patt][i] <= 0.1) in_pats[patt][i] = 0.1;
		}

		/* read the 8-element desired output pattern */
		element = 0;
		vals_read = fscanf(infile, "%d %d %d %d %d %d %d %d",
			&val1, &val2, &val3, &val4,
			&val5, &val6, &val7, &val8);
		out_pats[patt][element] = (double) val1; element++;
		out_pats[patt][element] = (double) val2; element++;
		out_pats[patt][element] = (double) val3; element++;
		out_pats[patt][element] = (double) val4; element++;
		out_pats[patt][element] = (double) val5; element++;
		out_pats[patt][element] = (double) val6; element++;
		out_pats[patt][element] = (double) val7; element++;
		out_pats[patt][element] = (double) val8; element++;
	}
	fclose(infile);
	return (0);
}
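
/*
** Illustrative standalone driver -- a sketch, not part of the benchmark
** harness.  It compiles only if NNET_STANDALONE is defined, a hypothetical
** macro introduced here for illustration.  It assumes cleanbench.h supplies
** MINIMUM_TICKS/MINIMUM_SECONDS, that randnum.h supplies randnum() and
** abs_randwc(), and that NNET.DAT is in the current directory; it simply
** prints the iterations-per-second score that DoNNET() returns.
*/
#ifdef NNET_STANDALONE
int main(void)
{
	double score;

	score = DoNNET();	/* run the self-adjusting benchmark loop */
	printf("Neural net score: %f iterations/second\n", score);
	return 0;
}
#endif /* NNET_STANDALONE */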