summaryrefslogtreecommitdiff
path: root/neural.c
diff options
context:
space:
mode:
authorMatt Turner <mattst88@gmail.com>2008-11-11 23:00:38 +0000
committerMatt Turner <mattst88@gmail.com>2008-11-11 23:00:38 +0000
commit5a7f0d2e7b4265153ccc70051bdae8b851617ede (patch)
treeb29e974f32a1ddba669359100bb6748c72cbfd1e /neural.c
parentbef5cb4c61e44ee8784f233eb2ec230c776dbda6 (diff)
Remove stupid datatypes. Begin code cleanup
git-svn-id: svn://mattst88.com/svn/cleanbench/trunk@5 0d43b9a7-5ab2-4d7b-af9d-f64450cef757
Diffstat (limited to 'neural.c')
-rw-r--r--neural.c16
1 file changed, 8 insertions, 8 deletions
diff --git a/neural.c b/neural.c
index 0a1bced..04d49fe 100644
--- a/neural.c
+++ b/neural.c
@@ -50,7 +50,7 @@ void DoNNET(void)
{
NNetStruct *locnnetstruct; /* Local ptr to global data */
char *errorcontext;
-ulong accumtime;
+unsigned long accumtime;
double iterations;
/*
@@ -72,7 +72,7 @@ errorcontext="CPU:NNET";
** the initial neural net state.
*/
/* randnum(3L); */
-randnum((int32)3);
+randnum((int32_t)3);
/*
** Read in the input and output patterns. We'll do this
@@ -97,7 +97,7 @@ if(locnnetstruct->adjust==0)
locnnetstruct->loops<MAXNNETLOOPS;
locnnetstruct->loops++)
{ /*randnum(3L); */
- randnum((int32)3);
+ randnum((int32_t)3);
if(DoNNetIteration(locnnetstruct->loops)
>global_min_ticks) break;
}
@@ -111,7 +111,7 @@ iterations=(double)0.0;
do {
/* randnum(3L); */ /* Gotta do this for Neural Net */
- randnum((int32)3); /* Gotta do this for Neural Net */
+ randnum((int32_t)3); /* Gotta do this for Neural Net */
accumtime+=DoNNetIteration(locnnetstruct->loops);
iterations+=(double)locnnetstruct->loops;
} while(TicksToSecs(accumtime)<locnnetstruct->request_secs);
@@ -135,9 +135,9 @@ return;
** Do a single iteration of the neural net benchmark.
** By iteration, we mean a "learning" pass.
*/
-static ulong DoNNetIteration(ulong nloops)
+static unsigned long DoNNetIteration(unsigned long nloops)
{
-ulong elapsed; /* Elapsed time */
+unsigned long elapsed; /* Elapsed time */
int patt;
/*
@@ -602,7 +602,7 @@ for (neurode = 0; neurode<MID_SIZE; neurode++)
for(i=0; i<IN_SIZE; i++)
{
/* value=(double)abs_randwc(100000L); */
- value=(double)abs_randwc((int32)100000);
+ value=(double)abs_randwc((int32_t)100000);
value=value/(double)100000.0 - (double) 0.5;
mid_wts[neurode][i] = value/2;
}
@@ -612,7 +612,7 @@ for (neurode=0; neurode<OUT_SIZE; neurode++)
for(i=0; i<MID_SIZE; i++)
{
/* value=(double)abs_randwc(100000L); */
- value=(double)abs_randwc((int32)100000);
+ value=(double)abs_randwc((int32_t)100000);
value=value/(double)10000.0 - (double) 0.5;
out_wts[neurode][i] = value/2;
}