Diffstat (limited to 'cleanbench.c')
-rw-r--r--   cleanbench.c   190
1 file changed, 75 insertions, 115 deletions
diff --git a/cleanbench.c b/cleanbench.c
index 6792bce..c5245f2 100644
--- a/cleanbench.c
+++ b/cleanbench.c
@@ -10,31 +10,19 @@
#include "cleanbench.h"
#include "randnum.h"
-void DoNumSort(void);
-void DoStringSort(void);
-void DoBitops(void);
-void DoEmFloat(void);
-void DoFourier(void);
-void DoAssign(void);
-void DoIDEA(void);
-void DoHuffman(void);
-void DoNNET(void);
-void DoLU(void);
+double DoNumSort(void);
+double DoStringSort(void);
+double DoBitops(void);
+double DoEmFloat(void);
+double DoFourier(void);
+double DoAssign(void);
+double DoIDEA(void);
+double DoHuffman(void);
+double DoNNET(void);
+double DoLU(void);
-double getNumSortScore(void);
-double getStringSortScore(void);
-double getBitfieldScore(void);
-double getEmFloatScore(void);
-double getFourierScore(void);
-double getAssignmentScore(void);
-double getIDEAScore(void);
-double getHuffmanScore(void);
-double getNNETScore(void);
-double getLinearScore(void);
-
-static int bench_with_confidence(int fid, double *mean, double *stdev, unsigned long *numtries);
-static int calc_confidence(double scores[], int num_scores, double *c_half_interval,double *smean, double *sdev);
-static double getScore(int fid);
+static int bench_with_confidence(int benchmark, double* average, double* std_dev, int* runs);
+static int calc_confidence(double scores[], int num_scores, double *c_half_interval, double* average, double* std_dev);
#define NUMTESTS 10
@@ -109,7 +97,7 @@ main(int argc, char *argv[])
double fpindex = 1.0; /* Old Floating-point index */
double average; /* Average of benchmark results */
double std_dev; /* Standard deviation of benchmark results */
- unsigned long runs; /* # of runs */
+ int runs; /* # of runs */
int benchmark = 0;
puts( "TEST : Iterations/sec. : Old Index : New Index\n"
@@ -142,7 +130,7 @@ main(int argc, char *argv[])
"INTEGER INDEX : %.3f\n"
"FLOATING-POINT INDEX: %.3f\n"
"Baseline (MSDOS) : Pentium 90, 256 KB L2-cache, Watcom compiler 10.0\n"
- "==============================LINUX DATA BELOW===============================\n",
+ "===========================LINUX BENCHMARK RESULTS===========================\n",
pow(intindex, .142857), pow(fpindex, .33333));
hardware();
#include "sysinfo.c"
@@ -169,73 +157,67 @@ main(int argc, char *argv[])
** along. We simply do more runs and hope to get a big enough sample
** size so that things stabilize. Uwe F. Mayer
**
-** Return true; false on failure. Returns mean
+** Return true on success; false on failure. Returns average
** and standard deviation through argument list if successful.
*/
-static int bench_with_confidence(int benchmark,
- double *mean, /* Mean of scores */
- double *stdev, /* Standard deviation */
- unsigned long *numtries) /* # of attempts */
+static int
+bench_with_confidence(int benchmark, double* average, double* std_dev, int* runs)
{
-void (*funcpointer[])(void) =
-{ DoNumSort,
- DoEmFloat,
- DoIDEA,
- DoHuffman,
- DoStringSort,
- DoBitops,
- DoAssign,
- DoFourier,
- DoNNET,
- DoLU };
-
-double myscores[30]; /* Need at least 5 scores, use at most 30 */
-double c_half_interval; /* Confidence half interval */
-int i; /* Index */
-/* double newscore; */ /* For improving confidence interval */
+ double (*funcpointer[])(void) = {
+ DoNumSort,
+ DoEmFloat,
+ DoIDEA,
+ DoHuffman,
+ DoStringSort,
+ DoBitops,
+ DoAssign,
+ DoFourier,
+ DoNNET,
+ DoLU
+ };
-/*
-** Get first 5 scores. Then begin confidence testing.
-*/
-for (i=0;i<5;i++)
-{ (*funcpointer[benchmark])();
- myscores[i]=getScore(benchmark);
-}
-*numtries=5; /* Show 5 attempts */
+ double myscores[30]; /* Need at least 5 scores, use at most 30 */
+ double c_half_interval; /* Confidence half interval */
+ int i; /* Index */
-/*
-** The system allows a maximum of 30 tries before it gives
-** up. Since we've done 5 already, we'll allow 25 more.
-*/
+ /*
+ ** Get first 5 scores. Then begin confidence testing.
+ */
+ for (i = 0; i < 5; i++) {
+ myscores[i] = (*funcpointer[benchmark])();
+ }
+ *runs = 5; /* Show 5 attempts */
-/*
-** Enter loop to test for confidence criteria.
-*/
-while(1)
-{
/*
- ** Calculate confidence. Should always return true
+ ** The system allows a maximum of 30 tries before it gives
+ ** up. Since we've done 5 already, we'll allow 25 more.
*/
- if (0!=calc_confidence(myscores,
- *numtries,
- &c_half_interval,
- mean,
- stdev)) return false;
/*
- ** Is the length of the half interval 5% or less of mean?
- ** If so, we can go home. Otherwise, we have to continue.
+ ** Enter loop to test for confidence criteria.
*/
- if(c_half_interval/ (*mean) <= (double)0.05)
- break;
+ while(1) {
+ /* Calculate confidence; this call should never fail here */
+ if (0!=calc_confidence(myscores,
+ *runs,
+ &c_half_interval,
+ average,
+ std_dev)) return false;
- /* We now simply add a new test run and hope that the runs
- finally stabilize, Uwe F. Mayer */
- if(*numtries==30) return false;
- (*funcpointer[benchmark])();
- myscores[*numtries]=getScore(benchmark);
- *numtries+=1;
-}
+ /*
+ ** Is the length of the half interval 5% or less of average?
+ ** If so, we can go home. Otherwise, we have to continue.
+ */
+ if (c_half_interval / (*average) <= 0.05) {
+ break;
+ }
+
+ /* We now simply add a new test run and hope that the runs
+ finally stabilize, Uwe F. Mayer */
+ if(*runs == 30) return false;
+ myscores[*runs] = (*funcpointer[benchmark])();
+ *runs += 1;
+ }
return true;
}
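
The hunk above is the heart of the adaptive sampling scheme: take at least five runs, then keep adding runs (up to thirty) until the 95% confidence half-interval shrinks to 5% of the average or better. The following standalone sketch shows that loop in isolation; run_benchmark() and its canned scores are hypothetical stand-ins for the Do*() benchmark functions, and the student_t table holds the standard two-sided 95% critical values.

    /* Minimal, self-contained sketch of the confidence-driven sampling loop
    ** used by bench_with_confidence().  run_benchmark() and its canned
    ** scores are placeholders for illustration only, not part of cleanbench. */
    #include <math.h>
    #include <stdio.h>

    /* Two-sided 95% critical values of the Student-t distribution,
    ** indexed by degrees of freedom (index 0 is unused). */
    static const double student_t[30] = {
        0.0,   12.706, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262,
        2.228, 2.201,  2.179, 2.160, 2.145, 2.131, 2.120, 2.110, 2.101, 2.093,
        2.086, 2.080,  2.074, 2.069, 2.064, 2.060, 2.056, 2.052, 2.048, 2.045
    };

    static double run_benchmark(int run)    /* placeholder for a Do*() call */
    {
        static const double canned[] = { 102.0, 98.5, 101.2, 99.8, 100.4, 100.9 };
        return canned[run % 6];
    }

    int main(void)
    {
        double scores[30];
        int runs = 0;

        /* Always take at least 5 samples before testing for convergence. */
        while (runs < 5) {
            scores[runs] = run_benchmark(runs);
            runs++;
        }

        for (;;) {
            double average = 0.0, std_dev = 0.0, half;
            int i;

            for (i = 0; i < runs; i++)
                average += scores[i];
            average /= runs;

            for (i = 0; i < runs; i++)
                std_dev += (scores[i] - average) * (scores[i] - average);
            std_dev = sqrt(std_dev / (runs - 1));

            /* 95% confidence half-interval, runs-1 degrees of freedom. */
            half = student_t[runs - 1] * std_dev / sqrt((double)runs);

            if (half / average <= 0.05) {       /* half-interval within 5% of average */
                printf("converged after %d runs: %.2f +/- %.2f\n", runs, average, half);
                return 0;
            }
            if (runs == 30) {                   /* give up, as cleanbench does */
                printf("no convergence after 30 runs\n");
                return 1;
            }
            scores[runs] = run_benchmark(runs); /* add one more run and retry */
            runs++;
        }
    }
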
@@ -244,7 +226,7 @@ while(1)
** calc_confidence **
*********************
** Given a set of numtries scores, calculate the confidence
-** half-interval. We'll also return the sample mean and sample
+** half-interval. We'll also return the sample average and sample
** standard deviation.
** NOTE: This routine presumes a confidence of 95% and
** a confidence coefficient of .95
@@ -253,8 +235,8 @@ while(1)
static int calc_confidence(double scores[], /* Array of scores */
int num_scores, /* number of scores in array */
double *c_half_interval, /* Confidence half-int */
- double *smean, /* Standard mean */
- double *sdev) /* Sample stand dev */
+   double *average, /* Sample average */
+   double *std_dev) /* Sample standard deviation */
{
/* Here is a list of the student-t distribution up to 29 degrees of
freedom. The value at 0 is bogus, as there is no value for zero
@@ -271,21 +253,21 @@ if ((num_scores<2) || (num_scores>30)) {
return true;
}
/*
-** First calculate mean.
+** First calculate average.
*/
-*smean=(double)0.0;
+*average=(double)0.0;
for(i=0;i<num_scores;i++){
- *smean+=scores[i];
+ *average+=scores[i];
}
-*smean/=(double)num_scores;
+*average/=(double)num_scores;
/* Get standard deviation */
-*sdev=(double)0.0;
+*std_dev=(double)0.0;
for(i=0;i<num_scores;i++) {
- *sdev+=(scores[i]-(*smean))*(scores[i]-(*smean));
+ *std_dev+=(scores[i]-(*average))*(scores[i]-(*average));
}
-*sdev/=(double)(num_scores-1);
-*sdev=sqrt(*sdev);
+*std_dev/=(double)(num_scores-1);
+*std_dev=sqrt(*std_dev);
/* Now calculate the length of the confidence half-interval. For a
** confidence level of 95% our confidence coefficient gives us a
@@ -293,28 +275,6 @@ for(i=0;i<num_scores;i++) {
** with num_scores-1 degrees of freedom, and dividing by sqrt(number of
** observations). See any introduction to statistics.
*/
-*c_half_interval=student_t[num_scores-1] * (*sdev) / sqrt((double)num_scores);
+*c_half_interval=student_t[num_scores-1] * (*std_dev) / sqrt((double)num_scores);
return false;
}
-
-/*************
-** getScore **
-**************
-** Return the score for a particular benchmark.
-*/
-static double getScore(int benchmark) {
- double (*getScore[])(void) = {
- getNumSortScore,
- getEmFloatScore,
- getIDEAScore,
- getHuffmanScore,
- getStringSortScore,
- getBitfieldScore,
- getAssignmentScore,
- getFourierScore,
- getNNETScore,
- getLinearScore
- };
-
- return (*getScore[benchmark])();
-}
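
As a concrete check of the half-interval formula used in calc_confidence, here is a worked example with made-up scores (not output from the benchmark):

    scores          = { 102.0, 98.5, 101.2, 99.8, 100.4 }    (num_scores = 5)
    average         = 501.9 / 5 = 100.38
    std_dev         = sqrt((1.62^2 + 1.88^2 + 0.82^2 + 0.58^2 + 0.02^2) / 4)
                    = sqrt(7.168 / 4) ~ 1.34
    c_half_interval = student_t[4] * std_dev / sqrt(5)
                    = 2.776 * 1.34 / 2.236 ~ 1.66

where 2.776 is the 95% Student-t critical value for 4 degrees of freedom. Since 1.66 / 100.38 is roughly 1.7%, well under the 5% threshold, bench_with_confidence would accept these five runs without adding further iterations.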