diff --git a/api.php b/api.php
index cd39ba3222eb3c1e15f56140afbaa3325c6fe30a..934de5b8bab7fbfe9a931495ae67654082f91052 100644
--- a/api.php
+++ b/api.php
@@ -380,7 +380,7 @@ foreach ($testCases as $test) {
         );
         array_push($testResults, $result);
 
-    } else if ($arg_command === $s_command_blackBoxTest || $arg_command === $s_command_justRunTest) {
+    } else if ($arg_command === $s_command_blackBoxTest || $arg_command === $s_command_justRunTest || $arg_command === $s_command_compareFilesTest) {
 
         if ($isDebug) {
             $time_pre = microtime(true);
@@ -496,7 +496,6 @@ foreach ($testCases as $test) {
 
         # was just a test...
         #output("t" . $min_timeout . " m" . $min_memoryLimit . " d" . $min_diskSpaceLimit . " dd: " . $arg_DiskSpaceLimit);
-        require_once './do_blackBoxTest_func.php';
 
         if ($isDebug) {
             $time_post = microtime(true);
@@ -523,19 +522,41 @@ foreach ($testCases as $test) {
                 $showTestRunnerDebugOutput = $config['showTestRunnerDebugOutput'];
             }
 
-            try {
-                $_result = do_blackBoxTest($arg_mainFileNameWithExtension, $test, $fullWorkingDirPath,
-                    $min_timeout, $min_memoryLimit, $min_diskSpaceLimit,
-                    $compileCmd, $execCmd, $sourceFileExtensions, $needCompilation,
-                    $maxLinesToRead, $maxErrLinesToRead, $maxLinesToWrite,
-                    ($arg_command === $s_command_justRunTest),
-                    $requestDistinctionString,
-                    $showTestRunnerDebugOutput
-                );
-            } catch (Exception $e) {
-                //critical error, abort execution (all other tests are likely to fail)
-                output($status_code_InternalServerError, "error executing black box test: " . $e->getMessage());
-                goto handleCleanup;
+            if ($arg_command === $s_command_compareFilesTest) {
+
+                require_once './do_compareFilesTest_func.php';
+
+                try {
+                    $_result = do_compareFilesTest($arg_mainFileNameWithExtension, $test, $fullWorkingDirPath,
+                        $min_timeout, $min_memoryLimit, $min_diskSpaceLimit,
+                        $compileCmd, $execCmd, $sourceFileExtensions, $needCompilation,
+                        $maxLinesToRead, $maxErrLinesToRead, $maxLinesToWrite,
+                        $requestDistinctionString,
+                        $showTestRunnerDebugOutput
+                    );
+                } catch (Exception $e) {
+                    //critical error, abort execution (all other tests are likely to fail)
+                    output($status_code_InternalServerError, "error executing compare files test: " . $e->getMessage());
+                    goto handleCleanup;
+                }
+            }
+            else {
+                require_once './do_blackBoxTest_func.php';
+
+                try {
+                    $_result = do_blackBoxTest($arg_mainFileNameWithExtension, $test, $fullWorkingDirPath,
+                        $min_timeout, $min_memoryLimit, $min_diskSpaceLimit,
+                        $compileCmd, $execCmd, $sourceFileExtensions, $needCompilation,
+                        $maxLinesToRead, $maxErrLinesToRead, $maxLinesToWrite,
+                        ($arg_command === $s_command_justRunTest),
+                        $requestDistinctionString,
+                        $showTestRunnerDebugOutput
+                    );
+                } catch (Exception $e) {
+                    //critical error, abort execution (all other tests are likely to fail)
+                    output($status_code_InternalServerError, "error executing black box test: " . $e->getMessage());
+                    goto handleCleanup;
+                }
             }
 
         }
diff --git a/constants.php b/constants.php
index c1913df275892928a5691da27bad8116c732e06e..f84beaba30a75e6589a4d7889e78bdeb05e0d2d4 100644
--- a/constants.php
+++ b/constants.php
@@ -3,7 +3,7 @@
 # variables
 
 //use this to know which version we published
-$versionString = '1.0.1';
+$versionString = '1.1.1';
 $rootDirNameToUseString = 'work';
 
 $isDebug = false; //logs debug to STDOUT
@@ -74,6 +74,7 @@ $default_not_executed_user_program_exit_code = 0;
 #vars for the file array array[$index] -> tuple[$s_fileName] -> fileName, tuple[$s_fileContent] -> fileContent
 $s_fileName = 'fileName';
 $s_fileContent = 'fileContent';
+$s_fileHash = "hash";
 
 #config vars
 $jsonConfigFile = 'config.json';
@@ -87,9 +88,10 @@ $s_arg_maxNumberOfTestsWithOneRequest = 'maxNumberOfTestsWithOneRequest';
 # the supported test types
 $s_command_compile = 'compileTest';
 $s_command_blackBoxTest = 'blackBoxTest';
+$s_command_compareFilesTest = 'compareFilesTest';
 $s_command_regexTest = 'regexTest';
 $s_command_justRunTest = 'justRunTest';
-$s_supportedCommands = [$s_command_compile, $s_command_blackBoxTest, $s_command_regexTest, $s_command_justRunTest];
+$s_supportedCommands = [$s_command_compile, $s_command_blackBoxTest, $s_command_compareFilesTest, $s_command_regexTest, $s_command_justRunTest];
 
 
 # db table columns
diff --git a/do_blackBoxTest_func.php b/do_blackBoxTest_func.php
index 165bb4df256ebd3fe0121a0e71fad9f6f7da0af1..5688ae93875405ebe60e8f19a6a44fe0a2b5bad1 100644
--- a/do_blackBoxTest_func.php
+++ b/do_blackBoxTest_func.php
@@ -157,16 +157,8 @@ function do_blackBoxTest($mainFileNameWithExtension, $test, $fullWorkingDirPath,
     }
 
 
-    # clear created assets to avoid side effects
-    if (deleteTestAssets($allAssets, $fullWorkingDirPath) === FALSE) {
-        # this no not important because the whole dir will be removed after the test...
-        # but could lead to side effects...
-        return array(
-            $s_return_val => 100,
-            $s_output => 'could not remove asset(s)',
-            $s_user_program_exit_code => 0
-        );
-    }
+    # the test assets are removed when the dir gets removed
+    # also the test runner could have renamed/added/deleted some files
 
     //extract the program exit code from the protocol if any/possible else pretend 0
     $userProgramExitCode = 0;
diff --git a/do_compareFilesTest_func.php b/do_compareFilesTest_func.php
new file mode 100644
index 0000000000000000000000000000000000000000..ca5020693b58e0a044cde3d1b296e9e0feb2dbf8
--- /dev/null
+++ b/do_compareFilesTest_func.php
@@ -0,0 +1,196 @@
+<?php
+
+
+/**
+ * runs the given compare files test against the given files
+ * @param string $mainFileNameWithExtension the main file to compile (with extension)
+ * @param $test
+ * @param $fullWorkingDirPath
+ * @param int|string $timeout the timeout in MS  (can be string from db or int from default values)
+ * @param int|string $memoryLimit the memory limit in kb  (can be string from db or int from default values)
+ * @param int|string $diskSpaceLimit the max disk space the program can write to (can be string from db or int from default values)
+ * @param string $compileCmd the command to execute to compile the file(s)
+ * @param string $execCmd the command to execute to run the test
+ * @param $sourceFileExtensions array the list with the file extensions from the source files for the p language
+ * @param $needCompilation bool true: should compile sources, false: not (this assumes the sources are already compiled)
+ * @param $maxLinesToRead int the max lines to read from the user program (passed to the runner)
+ * @param $maxErrLinesToRead int the max lines to read from the user program (from stderr) (passed to the runner)
+ * @param $maxLinesToWrite int the max lines to write the user program (passed to the runner)
+ * @param $uniqueSessionId string the unique session id for the test runner to handle sandbox file system management
+ * @param $showTestRunnerDebugOutput 1: show internal debug output from the test runner (included in the test response)
+ * @return array the result
+ * @internal param array $allFiles all files to copy in the dir to use
+ * Format: array[int] => {fileName: string, fileContent: string}
+ */
+function do_compareFilesTest($mainFileNameWithExtension, $test, $fullWorkingDirPath,
+                             $timeout, $memoryLimit, $diskSpaceLimit,
+                             $compileCmd, $execCmd, $sourceFileExtensions, $needCompilation,
+                             $maxLinesToRead, $maxErrLinesToRead, $maxLinesToWrite,
+                             $uniqueSessionId,
+                             $showTestRunnerDebugOutput
+
+
+)
+{
+
+    global $isDebug;
+    global $config;
+    global $s_command_compareFilesTest;
+    global $s_fullWorkingDirPath;
+    global $status_code_FileSystemIssue;
+    global $s_test_id;
+    global $s_test_content;
+    global $s_passed;
+    global $s_hasCompiled;
+    global $s_programExitCode;
+    global $s_testResultCode;
+    global $s_protocol;
+    global $responseNewLineString;
+    global $s_test_allAssets;
+    global $s_return_val;
+    global $s_user_program_exit_code;
+    global $s_output;
+    global $s_fileName;
+    global $s_fileHash;
+
+
+    $command_to_execute_string = $s_command_compareFilesTest;
+
+
+    # compile is done in the runner
+    $commandToExecute = $execCmd;
+    $output = [];
+    $return_var = -1;
+
+    $testContent = $test[$s_test_content];
+    $allAssets = $test[$s_test_allAssets];
+
+    # create the assets for the current test
+    if (createTestAssets($allAssets, $fullWorkingDirPath) === FALSE) {
+        //error message is in createFiles function
+        return array(
+            $s_return_val => 100,
+            $s_output => 'could not create asset(s)',
+            $s_user_program_exit_code => 0
+        );
+    }
+
+    $fileHashInfo = "";
+    for ($i = 0; $i < count($allAssets); $i++) {
+        $file = $allAssets[$i];
+
+        if ($i === count($allAssets) - 1) {
+            $fileHashInfo = $fileHashInfo . $file[$s_fileName] . ":" . $file[$s_fileHash];
+            continue;
+        }
+
+        $fileHashInfo = $fileHashInfo . $file[$s_fileName] . ":" . $file[$s_fileHash] . " ";
+    }
+
+    $longCmd = $config['runner']
+        . ' ' . $command_to_execute_string                           # arg[0] the test method
+        . ' "' . $fullWorkingDirPath . '"'                           # arg[1] dir path
+        . ' ' . $mainFileNameWithExtension                           # arg[2] file path
+        . ' "' . $compileCmd . '"'                                   # arg[3] command to execute # currently ignored because we call compile in php...
+        . ' "' . $commandToExecute . '"'                             # arg[4] command to execute the test
+        . ' ' . $timeout                                             # arg[5] the timeout
+        . ' ' . $memoryLimit                                         # arg[6] the memory limit
+        . ' ' . $diskSpaceLimit                                      # arg[7] the disk limit
+        . ' "' . implode(',', $sourceFileExtensions) . '"'     # arg[8] the source file extensions
+        . ' ' . ($needCompilation ? 'true' : 'false')                # arg[9] true: compile program new, false: not
+        . ' "' . $maxLinesToRead . '"'                               # arg[10] max lines to read from the user program (inclusive)
+        . ' "' . $maxErrLinesToRead . '"'                            # arg[11] max lines to read from the user program stderr (inclusive)
+        . ' "' . $maxLinesToWrite . '"'                              # arg[12]  max lines to write to the user program (inclusive)
+        . ' "' . $uniqueSessionId . '"'                              # arg[13] a unique session id
+        . ' "' . ($showTestRunnerDebugOutput === TRUE ? 1 : 0) . '"' # arg[14] showTestRunnerDebugOutput
+        . ' "' . $fileHashInfo . '"'                                  # arg[15] file:hash list
+
+    ;
+
+    if ($isDebug) {
+        debug("using runner: " . $config['runner']);
+        debug("full command: " . $longCmd);
+    }
+
+    # the test content is passed in as stdin (because might be too long for a console argument
+    # e.g. windows its 32bit ca. 2.000 kb...
+
+    $pipesDescriptor = array(
+        0 => array('pipe', 'r'), # stdin is a pipe that the child will read from
+        1 => array('pipe', 'w'),  # stdout is a pipe that the child will write to
+        2 => array("pipe", "w")   # stderr
+    );
+
+
+    if ($isDebug) {
+        $time_pre = microtime(true);
+    }
+
+
+    $env = $config['environmentVarsParsed'];
+
+    # without bypass_shell it won't work on windows
+    $process = proc_open($longCmd, $pipesDescriptor, $pipes, $fullWorkingDirPath, $env, array('bypass_shell' => TRUE));
+
+//    $state = proc_get_status($process);
+//    warn('open pid: ' . $state['pid']);
+
+    if (is_resource($process)) {
+        fwrite($pipes[0], $testContent);
+        fclose($pipes[0]);
+
+
+        $output = stream_get_contents($pipes[1]);
+        fclose($pipes[1]);
+
+        $errorOutput = stream_get_contents($pipes[2]);
+        fclose($pipes[2]);
+
+        $return_var = proc_close($process);
+    }
+
+    # we cannot use exec because this does no let us write to the stdin of the process
+    #exec($longCmd, $output, $return_var);
+
+    if ($isDebug) {
+        $time_post = microtime(true);
+        $exec_time = ($time_post - $time_pre) * 1000; //in ms
+        debug("time to run the compare-files-test: " . $exec_time);
+    }
+
+    if ($isDebug && (isset($errorOutput) && trim($errorOutput) !== '')) {
+        debug("error during execution of the (compare files) test runner: " . $errorOutput);
+    }
+
+
+    # the test assets are removed when the dir gets removed
+    # also the test runner could have renamed/added/deleted some files
+
+    //extract the program exit code from the protocol if any/possible else pretend 0
+    $userProgramExitCode = 0;
+    //the test runner uses \n
+    $maybeExitOutput = substr($output, 0, strpos($output, "\n"));
+
+    if (isset($maybeExitOutput)) {
+        $exitCodeOutputPrefix = 'exit:';
+
+        $checkString = substr($maybeExitOutput, 0, strlen($exitCodeOutputPrefix));
+
+        //user output as > as start
+        if ($checkString === $exitCodeOutputPrefix) {
+            $exitCodeString = substr($maybeExitOutput, strlen($exitCodeOutputPrefix));
+
+            $userProgramExitCode = intval($exitCodeString, 10);
+        }
+    }
+
+    //$output
+
+    #file_put_contents($fullWorkingDirPath . 'xyz.txt', $output);
+
+    return array(
+        $s_return_val => $return_var,
+        $s_output => $output,
+        $s_user_program_exit_code => $userProgramExitCode
+    );
+}
diff --git a/helpers.php b/helpers.php
index 67bdf7ef00ce608091671cc83bf518c0eca30a7b..c468ce9a95e77eadf7a0da74268bd5be910d57b7 100644
--- a/helpers.php
+++ b/helpers.php
@@ -399,34 +399,6 @@ function createTestAssets($allAssets, $fullWorkingDirPath)
     return TRUE;
 }
 
-/**
- * @param $allAssets array all tests
- * Format: array[int] => {fileName: string, fileContent: byte[]}
- * @param $fullWorkingDirPath string the path (dir) where to put the files
- * @return bool true: all ok, false: some error
- */
-function deleteTestAssets($allAssets, $fullWorkingDirPath) {
-
-    # do not output something here because we use this in the test case loop
-    global $s_fileName;
-    global $s_fileContent;
-    global $status_code_FileSystemIssue;
-
-    # now create all files for the test
-    $count = 0;
-    foreach ($allAssets as $file) {
-
-
-        if (unlink($fullWorkingDirPath . DIRECTORY_SEPARATOR . $file[$s_fileName]) === FALSE) {
-        #output($status_code_FileSystemIssue, "could not delete file nr. " . $count);
-        return FALSE;
-        }
-
-        $count++;
-    }
-    return TRUE;
-}
-
 /**
  * removes the given dir with all content
  * taken from http://stackoverflow.com/questions/3349753/delete-directory-with-files-in-it
diff --git a/ipaccess.json b/ipaccess.json
deleted file mode 100644
index a7d0a91cd34c051fb4c6c9a6a664ce1f2bf9e4ac..0000000000000000000000000000000000000000
--- a/ipaccess.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
-    {
-      "ip": "*"
-    },
-    {
-        "ip": "192.168.30.56"
-    },
-    {
-        "ip": "127.0.0.1"
-    },
-    {
-        "ip": "::1"
-    }
-]
\ No newline at end of file
diff --git a/readme.md b/readme.md
index 78ce575d12dd8e7a7613624183e3e42ac119abc6..5700b0c6c6e038a5216e29546b31936aa3001d1f 100644
--- a/readme.md
+++ b/readme.md
@@ -45,8 +45,12 @@ The request is a json object so the request should be from mime type application
   * **id** (int) is the id of the test to identify the test (and the related result)
   * **content** (string/multi line string) the test content to use (depends on the used command) 
   * **assets** (array) the files (assets) for the test
-    * **fileName** (string) the file name for the asset
+    * **fileName** (string) the file name for the asset. 
+      * **note** that the file name must not contain the following characters: ` ` (whitespace), `:`
+      * also all files need to have an extension (x.y)  
     * **fileContent** (string) the content of the asset (base64 encoded)
+    * **hash** (string/null) the md5 hash of the content (can be null if not calculated)
+      * but the test runner **will not accept null** as hash (for now)
 * **timeoutInMs**: (int) the timeout in ms to wait after terminating the users program
 * **memoryLimitInKb**: (int) the max memory in kb the users program can use 
 * **maxDiskSpaceInKb**: (int) the max disk space in kb the users program can write to
@@ -148,6 +152,17 @@ other fields
 
 **For explanation about the different test types see below**
 
+### Example
+```json
+{
+    "passed": true,
+    "hasCompiled": true,
+    "programExitCode": 0,
+    "testResultCode": 0,
+    "protocol": "can be multiline \n\r string"
+}
+```
+
 ## Protocol Output Compile Test
 empty  
 Or  
@@ -199,22 +214,49 @@ all other lines (meaning different by the first character):
 * '**EOT**' indicates that the test ended here (written after the program terminated)  
   this is useful when some output is received after the test has finished
 * '**error:**' output from the error stream (from the user program)
-* '**expected:**' indicates the expected output of the user program (normally followed after a **>** when the output was wrong)
+* '**expected:**' indicates the expected output of the user program (this normally followed after a **>** when the output was wrong)
 
 OR   
 when a compile error occurred then the output of the compiler
 
+## Protocol Output Compare files Test
 
-### Example
-```json
-{
-    "passed": true,
-    "hasCompiled": true,
-    "programExitCode": 0,
-    "testResultCode": 0,
-    "protocol": "can be multiline \n\r string"
-}
 ```
+exit:[X]
+EOT
+ccompare [fileName1WithExtension] [fileName2WithExtension] (ignoreCase) (ignoreLeadingWhitespaces) (ignoreTrailingWhitespaces) (ignoreAllEmptyOrWhitespacesOnlyLines)
+c[result]
+```
+
+
+where `[X]` is the user program exit code (must be the first line)
+
+where `EOT` indicates the end of the user program (usually before all `ccompares`)
+
+where `[result]` is one of the following strings
+
+- `ok` the files were equal line by line with the given options
+- `notfound1` the file 1 was not found (probably a test error)
+- `notfound2` the file 2 was not found
+- `expected` indicates the expected line (line from file1) when the lines mismatched 
+- `actual` indicates the actual line (line from file2) when the lines mismatched 
+
+when a line mismatched we have the following output
+
+```
+ccompare [fileName1WithExtension] [fileName2WithExtension] (ignoreCase) (ignoreLeadingWhitespaces) (ignoreTrailingWhitespaces) (ignoreAllEmptyOrWhitespacesOnlyLines)
+cactual [line nr] [line]
+cexpected [line nr] [line]
+```
+
+if the user file ended before the expected file then the output is
+
+```
+ccompare [fileName1WithExtension] [fileName2WithExtension] (ignoreCase) (ignoreLeadingWhitespaces) (ignoreTrailingWhitespaces) (ignoreAllEmptyOrWhitespacesOnlyLines)
+cactual [line nr] eof
+cexpected [line nr] [line]
+```
+
 
 # Test inputs
 
@@ -492,6 +534,49 @@ arg[2] = the main file name to use
 arg[3] = "true" to enable debug output (this will leak internal paths!!) or "false" to disable debug output.
 The debug output will be normal lines starting with " [Test-Runner]"
 
+## Compare Files Test
+
+
+arg[1] = the absolute directory path containing all files (including the main file)
+
+arg[2] = the main file name to use
+
+arg[3] = the compile command to execute
+
+arg[4] = the execute command to run the test case
+
+arg[5] = time limit (int) in MS
+time limit = time the program can run (hard limit) in MS e.g. 1000
+
+arg[6] = memory limit (int) in KB
+memory limit = the max memory the program can use in KB .e.g. 1000 kb
+
+arg[7] = disk limit (int) in KB
+disk limit = the max disk space the test can write to
+
+arg[8] = contains a list of extensions of the source files for the p lang (differentiate between source files and assets). This is a "," separated list
+e.g. "cpp, h"
+
+arg[9] = **true** to let the runner compile the sources, **false** to skip compilation (when running multiple tests skipping increases performance)
+
+arg[10] = max lines to read from the user program (inclusive) (int) e.g. 100  
+negative or 0 is treated as unlimited
+
+arg[11] = max lines to read from the user program stderr (inclusive) (int) e.g. 100  
+negative or 0 is treated as unlimited
+
+arg[12] = max lines to write to the user program (inclusive) (int) e.g. 100  
+negative or 0 is treated as unlimited
+
+arg[13] = a unique session id (e.g. used with firejail to manage overlay fs names)
+
+arg[14] = **"true"** to enable debug output (this will leak internal paths!!) or **"false"** to disable debug output. 
+The debug output will be normal lines starting with " [Test-Runner]"
+
+arg[15] = "file1:hash1 file2:hash2" a list of `file:hash` pairs separated with a single whitespace.   
+These are the files for the test (the assets) with the pre calculated hash or `null` (if not pre calculated).
+The test runner decides if the hash needs to be calculated or not (only files that should be compared need to be hashed).
+
 
 ## For all tests
 
@@ -564,7 +649,7 @@ The output from the user program is read and added to the protocol but nothing i
 
 #### Example
 ```
-# 2 Arg2 "Arg mit Leerzeichen"
+$ 2 Arg2 "Arg mit Leerzeichen"
 
 <Eingabe 1
 >Ausgabe 1
@@ -576,6 +661,53 @@ The output from the user program is read and added to the protocol but nothing i
 >Ausgabe 4
 ```
 
+
+## Compare files Test
+
+All black-box-test commands are supported here.
+ 
+**But the compare files test lines must be the last in the test content (after all black-box-test commands).
+This is because they are executed after the user program has finished.**
+
+a line has the following pattern
+
+```
+ccompare [fileName1WithExtension] [fileName2WithExtension] (1) (2) (4) (8)
+```
+
+the file names can be separated with `/` for directories but **must not** include the text `..`
+ else the test is ignored (with an error message) 
+ 
+- options within `()` are optional if they are not set they are `false`
+- the order of the options is arbitrary
+- whitespaces are mandatory
+
+The files can have the same name!
+
+the files are compared line by line. a line might be separated by `\n` or `\r\n`
+
+**options**
+
+- `1`: ignoreCase: while comparing lines the case is ignored (e.g. all to lower case)
+- `2`: ignoreLeadingWhitespaces: while comparing we ignore leading whitespace characters (whitespace and tab) in the user file line
+- `4`: ignoreTrailingWhitespaces: while comparing we ignore trailing whitespace characters (whitespace and tab) in the user file line
+- `8`: ignoreAllEmptyOrWhitespacesOnlyLines: while comparing we ignore empty or whitespace-only lines (whitespace and tab) in the user file
+
+
+all other lines are ignored
+
+note that only the file 1 lines are compared with the user file (file 2). Thus the user file can have more lines, they are ignored
+
+
+Because the files can have the same name the test runner must rename the `file 1` (from the tutor) to some unique identifier (that cannot be guessed).
+
+However, the user could explore the files in the directory, find the renamed `file 1`, and overwrite it with empty content — then the test would always be successful...
+To prevent this we calculate a hash (md5) of the file, and after the user program finishes we check if the md5 still matches; otherwise we fail the test with an error message
+
+To speed things up we get the md5 hash for the files in the request. It's only necessary to calculate the md5 hash for files in the compare command.
+Other files are not required to be checked (for now).
+
+
 ### Regex Test
 The content of a regex test a `rule` on each line (empty lines are ignored).
 
@@ -635,9 +767,12 @@ The Runner needs to return result code (exit code) to indicate the result of the
 * **3** - timeout hit / some read thread interrupted
 * **4** - memory limit hit
 * **5** - disk limit hit
+* **6** - compare files line mismatched
 * **50** - compile error
 * **51** - test content read timeout
 * **52** - exit mismatched, **note** that output mismatched has a higher priority (output mismatched is displayed of we have both errors), this is only generated if the test contained a check for the exit code
+* **53** - error during compare files test
+* **54** - some asset test file was changed or deleted by the user program (hash changed), the test protocol will contain some more information about the file
 
 * **100** - some error occurred before or after executing the users program
 * **101** - unknown test method (or unsupported)