Initial version of the run_all.py script; also cleans up bench.py (drops unused imports, adds a --to-file option, and reports compile/run times in seconds)
diff --git a/scripts/bench/bench.py b/scripts/bench/bench.py
index ddfc03b..9118965 100755
--- a/scripts/bench/bench.py
+++ b/scripts/bench/bench.py
@@ -2,11 +2,8 @@
 
 import os
 import sys
-import random
 import pprint
-import string
 import argparse
-import multiprocessing
 import urllib
 from datetime import datetime
 import shutil
@@ -30,6 +27,7 @@
     parser.add_argument("--checks",     type=int, default=1,    help = "number of asserts per test case")
     parser.add_argument("--asserts",    choices=['normal', 'binary', 'fast'], default="normal",
                                                                 help = "<doctest> type of assert used - Catch: only normal")
+    parser.add_argument("--to-file",    action = "store_true",  help = "dumps the result to a file named result.txt")
 
 parser = argparse.ArgumentParser()
 subparsers = parser.add_subparsers()
@@ -200,8 +198,7 @@
 os.system('cmake --build .' + the_config)
 end = datetime.now()
 
-print("Time for compiling (+ linking): " + str(end - start))
-print(str(end - start))
+print("Time running compiler (+ linker) in seconds: " + str((end - start).total_seconds()))
 
 # ==============================================================================
 # == RUN PROJECT ===============================================================
@@ -217,8 +214,7 @@
         os.system('./bench')
     end = datetime.now()
 
-    print("Time running the tests: " + str(end - start))
-    print(str(end - start))
+    print("Time running the tests in seconds: " + str((end - start).total_seconds()))
 
 # leave folder
 os.chdir("../");
diff --git a/scripts/bench/run_all.py b/scripts/bench/run_all.py
index cae92a9..8dcee7f 100644
--- a/scripts/bench/run_all.py
+++ b/scripts/bench/run_all.py
@@ -2,28 +2,43 @@
 
 import os
 import sys
-#import pprint
 import json
+import subprocess
 
 with open('tests.json') as data_file:    
     data = json.load(data_file)
 
-#pprint.pprint(data)
+def runBench(prog):
+    result = subprocess.Popen(prog, stdout = subprocess.PIPE).communicate()[0]
+    result = result.splitlines()
+    for line in result:
+        if line.startswith("Time running "):
+            return str(line.rsplit(' ', 1)[-1])
+    return ""
+
+#print(runBench("python bench.py compile --msvc  --debug --files 200 --tests 0"))
+
+call = 'python ./bench.py'
+the_os = 'linux'
+if os.name == "nt":
+    call = 'python bench.py'
+    the_os = 'windows'
 
 f = open('results.txt', 'w')
-if os.name == "nt":
-    call = 'bench.py'
-    #f.write('header cost')
-    for test in ['header', 'asserts']:
-        print('************** ' + test)
-        for framework in ['doctest', 'catch']:
-            print('== ' + framework)
-            for config in data['compilers']['windows']:
-                #print(config)
-                for curr in data[test]:
-                    if curr[0] == framework or curr[0] == "any":
-                        print(call + config + curr[1] + (' --catch' if framework == 'catch' else ''))
-
+for test in ['header', 'asserts']:
+    print('************** ' + test)
+    for framework in ['doctest', 'catch']:
+        print('== ' + framework)
+        for config in data['compilers'][the_os]:
+            #print(config)
+            for curr in data[test]:
+                if curr[0] == framework or curr[0] == "any":
+                    command = call + config + curr[1] + (' --catch' if framework == 'catch' else '')
+                    print(command)
+                    res = runBench(command)
+                    print(res)
+                    f.write(res + " ")
+            f.write("\n")
 
 f.close()