-rw-r--r--  .gitignore                                        2
-rw-r--r--  TODO                                              3
-rw-r--r--  docs/env_variables.txt                            4
-rwxr-xr-x  qemu_mode/build_qemu_support.sh                   1
-rw-r--r--  qemu_mode/libcompcov/Makefile                    42
-rw-r--r--  qemu_mode/libcompcov/README.compcov              33
-rw-r--r--  qemu_mode/libcompcov/compcovtest.cc              63
-rw-r--r--  qemu_mode/libcompcov/libcompcov.so.c            279
-rw-r--r--  qemu_mode/libcompcov/pmparser.h                 280
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-inl.h             11
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-translate-inl.h  125
-rw-r--r--  qemu_mode/patches/afl-qemu-tcg-inl.h            149
-rw-r--r--  qemu_mode/patches/afl-qemu-translate-inl.h       11
-rw-r--r--  qemu_mode/patches/i386-translate.diff            33
14 files changed, 1025 insertions(+), 11 deletions(-)
diff --git a/.gitignore b/.gitignore
index 70acb8da..0b8b2513 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+*.o
+*.so
 .gitignore
 afl-analyze
 afl-as
diff --git a/TODO b/TODO
index 2c5d05a5..6631350c 100644
--- a/TODO
+++ b/TODO
@@ -23,6 +23,9 @@ gcc_plugin:
  - laf-intel
  - neverZero
 
+qemu_mode:
+ - Instrument only comparisons with immediate values by default when using compcov
+
 unit testing / or large testcase campaign
 
 
diff --git a/docs/env_variables.txt b/docs/env_variables.txt
index 1703a947..36fdc369 100644
--- a/docs/env_variables.txt
+++ b/docs/env_variables.txt
@@ -244,6 +244,10 @@ The QEMU wrapper used to instrument binary-only code supports several settings:
 
   - Setting AFL_INST_LIBS causes the translator to also instrument the code
     inside any dynamically linked libraries (notably including glibc).
+  
+  - Setting AFL_QEMU_COMPCOV enables CompareCoverage tracing of all cmp and
+    sub instructions in x86 and x86_64. Support for other architectures and
+    comparison functions (memcmp/strcmp et al.) is planned.
 
   - The underlying QEMU binary will recognize any standard "user space
     emulation" variables (e.g., QEMU_STACK_SIZE), but there should be no
diff --git a/qemu_mode/build_qemu_support.sh b/qemu_mode/build_qemu_support.sh
index 1e952f4e..78ad5680 100755
--- a/qemu_mode/build_qemu_support.sh
+++ b/qemu_mode/build_qemu_support.sh
@@ -133,6 +133,7 @@ patch -p1 <../patches/cpu-exec.diff || exit 1
 patch -p1 <../patches/syscall.diff || exit 1
 patch -p1 <../patches/translate-all.diff || exit 1
 patch -p1 <../patches/tcg.diff || exit 1
+patch -p1 <../patches/i386-translate.diff || exit 1
 
 echo "[+] Patching done."
 
diff --git a/qemu_mode/libcompcov/Makefile b/qemu_mode/libcompcov/Makefile
new file mode 100644
index 00000000..c984588b
--- /dev/null
+++ b/qemu_mode/libcompcov/Makefile
@@ -0,0 +1,42 @@
+#
+# american fuzzy lop - libcompcov
+# --------------------------------
+#
+# Written by Andrea Fioraldi <andreafioraldi@gmail.com>
+#
+# Copyright 2019 Andrea Fioraldi. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+
+PREFIX      ?= /usr/local
+HELPER_PATH  = $(PREFIX)/lib/afl
+
+VERSION     = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2)
+
+CFLAGS      ?= -O3 -funroll-loops
+CFLAGS      += -Wall -Wno-unused-result -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign
+LDFLAGS     += -ldl
+
+all: libcompcov.so compcovtest
+
+libcompcov.so: libcompcov.so.c ../../config.h
+	$(CC) $(CFLAGS) -shared -fPIC $< -o $@ $(LDFLAGS)
+
+.NOTPARALLEL: clean
+
+clean:
+	rm -f *.o *.so *~ a.out core core.[1-9][0-9]*
+	rm -f libcompcov.so compcovtest
+
+compcovtest:	compcovtest.cc
+	$(CXX) $< -o $@ 
+
+install: all
+	install -m 755 libcompcov.so $${DESTDIR}$(HELPER_PATH)
+	install -m 644 README.compcov $${DESTDIR}$(HELPER_PATH)
+
diff --git a/qemu_mode/libcompcov/README.compcov b/qemu_mode/libcompcov/README.compcov
new file mode 100644
index 00000000..2a4a0ee5
--- /dev/null
+++ b/qemu_mode/libcompcov/README.compcov
@@ -0,0 +1,33 @@
+================================================================
+strcmp() / memcmp() CompareCoverage library for AFLplusplus-QEMU
+================================================================
+
+  Written by Andrea Fioraldi <andreafioraldi@gmail.com>
+
+This Linux-only companion library allows you to instrument strcmp(), memcmp(),
+and related functions to log the CompareCoverage of these libcalls.
+
+Use this with caution. While it can greatly speed up bypassing hard branch
+conditions, it can also waste time and take up unnecessary space in the
+shared memory when logging coverage for functions that do not process
+input-related data.
+
+To use the library, you *need* to make sure that your fuzzing target is
+linked dynamically and makes use of strcmp(), memcmp(), and related functions.
+Optimized binaries are an issue: those functions are often inlined, and in
+that case this module cannot log the coverage.
+
+If you have the source code of the fuzzing target, you should not use this
+library and QEMU but instead build it with afl-clang-fast and laf-intel.
+
+To use this library make sure to preload it with AFL_PRELOAD.
+
+  export AFL_PRELOAD=/path/to/libcompcov.so
+  export AFL_QEMU_COMPCOV=1
+  
+  afl-fuzz -Q -i input -o output <your options> -- <target args>
+
+The library makes use of https://github.com/ouadev/proc_maps_parser and so it
+is Linux-specific. However, this is not a strict dependency; other UNIX
+operating systems can be supported by simply replacing the code related to
+the /proc/self/maps parsing.
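
For readers skimming the README before the implementation below, here is a
minimal sketch of the interposition idea (assumptions: a private stand-in map
instead of AFL's shared memory, and no caller-address bounds check; the real
code follows in libcompcov.so.c in this commit):

  /* Sketch of a preloaded strcmp() wrapper that turns string comparisons
     into byte-granular coverage. fake_map stands in for AFL's SHM region;
     the shipped library resolves the real strcmp the same way, via
     dlsym(RTLD_NEXT, ...). */
  #define _GNU_SOURCE
  #include <dlfcn.h>
  #include <stddef.h>
  #include <stdint.h>

  static unsigned char fake_map[65536];

  int strcmp(const char *s1, const char *s2) {
    static int (*real_strcmp)(const char *, const char *);
    if (!real_strcmp)
      real_strcmp = (int (*)(const char *, const char *))dlsym(RTLD_NEXT, "strcmp");

    /* Hash the caller's address into a map slot. */
    uintptr_t loc = (uintptr_t)__builtin_return_address(0);
    loc = ((loc >> 4) ^ (loc << 8)) & (sizeof(fake_map) - 1);

    /* One counter per matching prefix byte, capped at 32 bytes. */
    for (size_t i = 0; i < 32 && s1[i] && s1[i] == s2[i]; i++)
      fake_map[(loc + i) % sizeof(fake_map)]++;

    return real_strcmp(s1, s2);
  }

Built with cc -shared -fPIC -ldl and loaded through AFL_PRELOAD, this is
essentially what the library below does, plus the /proc/self/maps bounds check
and the strncmp/strcasecmp/strncasecmp/memcmp variants.
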
diff --git a/qemu_mode/libcompcov/compcovtest.cc b/qemu_mode/libcompcov/compcovtest.cc
new file mode 100644
index 00000000..fd1fda00
--- /dev/null
+++ b/qemu_mode/libcompcov/compcovtest.cc
@@ -0,0 +1,63 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Author: Mateusz Jurczyk (mjurczyk@google.com)
+//
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// solution: echo -ne 'The quick brown fox jumps over the lazy dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest
+
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+int main() {
+  char buffer[44] = { /* zero padding */ };
+  fread(buffer, 1, sizeof(buffer) - 1, stdin);
+
+  if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 ||
+      strncmp(&buffer[20], "jumps over ", 11) != 0 ||
+      strcmp(&buffer[31], "the lazy dog") != 0) {
+    return 1;
+  }
+
+  uint64_t x = 0;
+  fread(&x, sizeof(x), 1, stdin);
+  if (x != 0xCAFEBABECAFEBABE) {
+    return 2;
+  }
+
+  uint32_t y = 0;
+  fread(&y, sizeof(y), 1, stdin);
+  if (y != 0xDEADC0DE) {
+    return 3;
+  }
+
+  uint16_t z = 0;
+  fread(&z, sizeof(z), 1, stdin);
+
+  switch (z) {
+    case 0xBEEF:
+      break;
+
+    default:
+      return 4;
+  }
+
+  printf("Puzzle solved, congrats!\n");
+  abort();
+  return 0;
+}
diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c
new file mode 100644
index 00000000..582230db
--- /dev/null
+++ b/qemu_mode/libcompcov/libcompcov.so.c
@@ -0,0 +1,279 @@
+/*
+
+   american fuzzy lop++ - strcmp() / memcmp() CompareCoverage library
+   ------------------------------------------------------------------
+
+   Written and maintained by Andrea Fioraldi <andreafioraldi@gmail.com>
+
+   Copyright 2019 Andrea Fioraldi. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This Linux-only companion library allows you to instrument strcmp(),
+   memcmp(), and related functions to get compare coverage.
+   See README.compcov for more info.
+
+ */
+
+#define _GNU_SOURCE
+#include <dlfcn.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/shm.h>
+
+#include "../../types.h"
+#include "../../config.h"
+
+#include "pmparser.h"
+
+#ifndef __linux__
+#  error "Sorry, this library is Linux-specific for now!"
+#endif /* !__linux__ */
+
+/* Change this value to tune the compare coverage */
+
+#define MAX_CMP_LENGTH 32
+
+static void *__compcov_code_start,
+            *__compcov_code_end;
+
+static u8 *__compcov_afl_map;
+
+static int (*__libc_strcmp)(const char*, const char*);
+static int (*__libc_strncmp)(const char*, const char*, size_t);
+static int (*__libc_strcasecmp)(const char*, const char*);
+static int (*__libc_strncasecmp)(const char*, const char*, size_t);
+static int (*__libc_memcmp)(const void*, const void*, size_t);
+
+static int debug_fd = -1;
+
+
+static size_t __strlen2(const char *s1, const char *s2, size_t max_length) {
+  // from https://github.com/googleprojectzero/CompareCoverage
+  
+  size_t len = 0;
+  for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) { }
+  return len;
+}
+
+/* Identify the binary boundaries in the memory mapping */
+
+static void __compcov_load(void) {
+  
+  __libc_strcmp = dlsym(RTLD_NEXT, "strcmp");
+  __libc_strncmp = dlsym(RTLD_NEXT, "strncmp");
+  __libc_strcasecmp = dlsym(RTLD_NEXT, "strcasecmp");
+  __libc_strncasecmp = dlsym(RTLD_NEXT, "strncasecmp");
+  __libc_memcmp = dlsym(RTLD_NEXT, "memcmp");
+  
+  char *id_str = getenv(SHM_ENV_VAR);
+  int shm_id;
+
+  if (id_str) {
+
+    shm_id = atoi(id_str);
+    __compcov_afl_map = shmat(shm_id, NULL, 0);
+
+    if (__compcov_afl_map == (void*)-1) exit(1);
+  } else {
+  
+    __compcov_afl_map = calloc(1, MAP_SIZE);
+  }
+
+  if (getenv("AFL_INST_LIBS")) {
+  
+    __compcov_code_start = (void*)0;
+    __compcov_code_end = (void*)-1;
+    return;
+  }
+
+  char* bin_name = getenv("AFL_COMPCOV_BINNAME");
+
+  procmaps_iterator* maps = pmparser_parse(-1);
+  procmaps_struct* maps_tmp = NULL;
+
+  while ((maps_tmp = pmparser_next(maps)) != NULL) {
+  
+    /* If AFL_COMPCOV_BINNAME is not set pick the first executable segment */
+    if (!bin_name || strstr(maps_tmp->pathname, bin_name) != NULL) {
+    
+      if (maps_tmp->is_x) {
+        if (!__compcov_code_start)
+            __compcov_code_start = maps_tmp->addr_start;
+        if (!__compcov_code_end)
+            __compcov_code_end = maps_tmp->addr_end;
+      }
+    }
+  }
+
+  pmparser_free(maps);
+}
+
+
+static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) {
+
+  size_t i;
+  
+  if (debug_fd != -1) {
+    char debugbuf[4096];
+    snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, v0 == NULL ? "(null)" : (char*)v0, v1 == NULL ? "(null)" : (char*)v1, n);
+    write(debug_fd, debugbuf, strlen(debugbuf));
+  }
+  
+  for (i = 0; i < n && v0[i] == v1[i]; ++i) {
+  
+    __compcov_afl_map[cur_loc +i]++;
+  }
+}
+
+/* Check whether an address falls within the target's executable code range. */
+
+static u8 __compcov_is_in_bound(const void* ptr) {
+
+  return ptr >= __compcov_code_start && ptr < __compcov_code_end;
+}
+
+
+/* Replacements for strcmp(), memcmp(), and so on. Note that these will be used
+   only if the target is compiled with -fno-builtin and linked dynamically. */
+
+#undef strcmp
+
+int strcmp(const char* str1, const char* str2) {
+
+  void* retaddr = __builtin_return_address(0);
+  
+  if (__compcov_is_in_bound(retaddr)) {
+
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
+    
+    if (n <= MAX_CMP_LENGTH) {
+    
+      u64 cur_loc = (u64)retaddr;
+      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc &= MAP_SIZE - 1;
+      
+      __compcov_trace(cur_loc, str1, str2, n);
+    }
+  }
+
+  return __libc_strcmp(str1, str2);
+}
+
+
+#undef strncmp
+
+int strncmp(const char* str1, const char* str2, size_t len) {
+
+  void* retaddr = __builtin_return_address(0);
+  
+  if (__compcov_is_in_bound(retaddr)) {
+
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
+    n = MIN(n, len);
+    
+    if (n <= MAX_CMP_LENGTH) {
+    
+      u64 cur_loc = (u64)retaddr;
+      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc &= MAP_SIZE - 1;
+      
+      __compcov_trace(cur_loc, str1, str2, n);
+    }
+  }
+  
+  return __libc_strncmp(str1, str2, len);
+}
+
+
+#undef strcasecmp
+
+int strcasecmp(const char* str1, const char* str2) {
+
+  void* retaddr = __builtin_return_address(0);
+  
+  if (__compcov_is_in_bound(retaddr)) {
+    /* Fallback to strcmp, maybe improve in future */
+
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
+    
+    if (n <= MAX_CMP_LENGTH) {
+    
+      u64 cur_loc = (u64)retaddr;
+      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc &= MAP_SIZE - 1;
+      
+      __compcov_trace(cur_loc, str1, str2, n);
+    }
+  }
+
+  return __libc_strcasecmp(str1, str2);
+}
+
+
+#undef strncasecmp
+
+int strncasecmp(const char* str1, const char* str2, size_t len) {
+
+  void* retaddr = __builtin_return_address(0);
+  
+  if (__compcov_is_in_bound(retaddr)) {
+    /* Fallback to strncmp, maybe improve in future */
+
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
+    n = MIN(n, len);
+    
+    if (n <= MAX_CMP_LENGTH) {
+    
+      u64 cur_loc = (u64)retaddr;
+      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc &= MAP_SIZE - 1;
+      
+      __compcov_trace(cur_loc, str1, str2, n);
+    }
+  }
+
+  return __libc_strncasecmp(str1, str2, len);
+}
+
+
+#undef memcmp
+
+int memcmp(const void* mem1, const void* mem2, size_t len) {
+
+  void* retaddr = __builtin_return_address(0);
+  
+  if (__compcov_is_in_bound(retaddr)) {
+
+    size_t n = len;
+    
+    if (n <= MAX_CMP_LENGTH) {
+    
+      u64 cur_loc = (u64)retaddr;
+      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc &= MAP_SIZE - 1;
+      
+      __compcov_trace(cur_loc, mem1, mem2, n);
+    }
+  }
+
+  return __libc_memcmp(mem1, mem2, len);
+}
+
+/* Constructor: open the debug log (if requested) and initialize the library. */
+
+__attribute__((constructor)) void __compcov_init(void) {
+
+  if (getenv("AFL_QEMU_COMPCOV_DEBUG") != NULL)
+    debug_fd = open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644);
+
+  __compcov_load();
+}
+
+
diff --git a/qemu_mode/libcompcov/pmparser.h b/qemu_mode/libcompcov/pmparser.h
new file mode 100644
index 00000000..34d0cd50
--- /dev/null
+++ b/qemu_mode/libcompcov/pmparser.h
@@ -0,0 +1,280 @@
+/*
+ @Author	: ouadimjamal@gmail.com
+ @date		: December 2015
+
+Permission to use, copy, modify, distribute, and sell this software and its
+documentation for any purpose is hereby granted without fee, provided that
+the above copyright notice appear in all copies and that both that
+copyright notice and this permission notice appear in supporting
+documentation.  No representations are made about the suitability of this
+software for any purpose.  It is provided "as is" without express or
+implied warranty.
+
+ */
+
+#ifndef H_PMPARSER
+#define H_PMPARSER
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/limits.h>
+
+//maximum line length in a procmaps file
+#define PROCMAPS_LINE_MAX_LENGTH  (PATH_MAX + 100) 
+/**
+ * procmaps_struct
+ * @desc hold all the information about an area in the process's  VM
+ */
+typedef struct procmaps_struct{
+	void* addr_start; 	//< start address of the area
+	void* addr_end; 	//< end address
+	unsigned long length; //< size of the range
+
+	char perm[5];		//< permissions rwxp
+	short is_r;			//< perm rewritten as short flags
+	short is_w;
+	short is_x;
+	short is_p;
+
+	long offset;	//< offset
+	char dev[12];	//< dev major:minor
+	int inode;		//< inode of the file that backs the area
+
+	char pathname[600];		//< the path of the file that backs the area
+	//chained list
+	struct procmaps_struct* next;		//< next node in the chained list
+} procmaps_struct;
+
+/**
+ * procmaps_iterator
+ * @desc holds iterating information
+ */
+typedef struct procmaps_iterator{
+	procmaps_struct* head;
+	procmaps_struct* current;
+} procmaps_iterator;
+/**
+ * pmparser_parse
+ * @param pid the process id whose memory map is to be parsed; the current process if pid<0
+ * @return an iterator over all the nodes
+ */
+procmaps_iterator* pmparser_parse(int pid);
+
+/**
+ * pmparser_next
+ * @description move between areas
+ * @param p_procmaps_it the iterator to move on step in the chained list
+ * @return a procmaps structure filled with information about this VM area
+ */
+procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it);
+/**
+ * pmparser_free
+ * @description should be called at the end to free the resources
+ * @param p_procmaps_it the iterator structure returned by pmparser_parse
+ */
+void pmparser_free(procmaps_iterator* p_procmaps_it);
+
+/**
+ * _pmparser_split_line
+ * @description internal usage
+ */
+void _pmparser_split_line(char*buf,char*addr1,char*addr2,char*perm, char* offset, char* device,char*inode,char* pathname);
+
+/**
+ * pmparser_print
+ * @param map the head of the list
+ * @param order the order of the area to print, -1 to print everything
+ */
+void pmparser_print(procmaps_struct* map,int order);
+
+
+/**
+ * global variables
+ */
+//procmaps_struct* g_last_head=NULL;
+//procmaps_struct* g_current=NULL;
+
+
+procmaps_iterator* pmparser_parse(int pid){
+	procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator));
+	char maps_path[500];
+	if(pid>=0 ){
+		sprintf(maps_path,"/proc/%d/maps",pid);
+	}else{
+		sprintf(maps_path,"/proc/self/maps");
+	}
+	FILE* file=fopen(maps_path,"r");
+	if(!file){
+		fprintf(stderr,"pmparser : cannot open the memory maps, %s\n",strerror(errno));
+		return NULL;
+	}
+	int ind=0;char buf[PROCMAPS_LINE_MAX_LENGTH];
+	//int c;
+	procmaps_struct* list_maps=NULL;
+	procmaps_struct* tmp;
+	procmaps_struct* current_node=list_maps;
+	char addr1[20],addr2[20], perm[8], offset[20], dev[10],inode[30],pathname[PATH_MAX];
+	while( fgets(buf,PROCMAPS_LINE_MAX_LENGTH,file) != NULL ){
+		//fgets() returning NULL ends the loop at EOF (feof() alone would re-process the last line)
+		//allocate a node
+		tmp=(procmaps_struct*)malloc(sizeof(procmaps_struct));
+		//fill the node
+		_pmparser_split_line(buf,addr1,addr2,perm,offset, dev,inode,pathname);
+		//printf("#%s",buf);
+		//printf("%s-%s %s %s %s %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname);
+		//addr_start & addr_end
+		//unsigned long l_addr_start;
+		sscanf(addr1,"%lx",(long unsigned *)&tmp->addr_start );
+		sscanf(addr2,"%lx",(long unsigned *)&tmp->addr_end );
+		//size
+		tmp->length=(unsigned long)(tmp->addr_end-tmp->addr_start);
+		//perm
+		strcpy(tmp->perm,perm);
+		tmp->is_r=(perm[0]=='r');
+		tmp->is_w=(perm[1]=='w');
+		tmp->is_x=(perm[2]=='x');
+		tmp->is_p=(perm[3]=='p');
+
+		//offset
+		sscanf(offset,"%lx",&tmp->offset );
+		//device
+		strcpy(tmp->dev,dev);
+		//inode
+		tmp->inode=atoi(inode);
+		//pathname
+		strcpy(tmp->pathname,pathname);
+		tmp->next=NULL;
+		//attach the node
+		if(ind==0){
+			list_maps=tmp;
+			list_maps->next=NULL;
+			current_node=list_maps;
+		}
+		current_node->next=tmp;
+		current_node=tmp;
+		ind++;
+		//printf("%s",buf);
+	}
+
+	//close file
+	fclose(file);
+
+
+	//g_last_head=list_maps;
+	maps_it->head = list_maps;
+	maps_it->current =  list_maps;
+	return maps_it;
+}
+
+
+procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it){
+	if(p_procmaps_it->current == NULL)
+		return NULL;
+	procmaps_struct* p_current = p_procmaps_it->current;
+	p_procmaps_it->current = p_procmaps_it->current->next;
+	return p_current;
+	/*
+	if(g_current==NULL){
+		g_current=g_last_head;
+	}else
+		g_current=g_current->next;
+
+	return g_current;
+	*/
+}
+
+
+
+void pmparser_free(procmaps_iterator* p_procmaps_it){
+	procmaps_struct* maps_list = p_procmaps_it->head;
+	if(maps_list==NULL) return ;
+	procmaps_struct* act=maps_list;
+	procmaps_struct* nxt=act->next;
+	while(act!=NULL){
+		free(act);
+		act=nxt;
+		if(nxt!=NULL)
+			nxt=nxt->next;
+	}
+
+}
+
+
+void _pmparser_split_line(
+		char*buf,char*addr1,char*addr2,
+		char*perm,char* offset,char* device,char*inode,
+		char* pathname){
+	//
+	int orig=0;
+	int i=0;
+	//addr1
+	while(buf[i]!='-'){
+		addr1[i-orig]=buf[i];
+		i++;
+	}
+	addr1[i]='\0';
+	i++;
+	//addr2
+	orig=i;
+	while(buf[i]!='\t' && buf[i]!=' '){
+		addr2[i-orig]=buf[i];
+		i++;
+	}
+	addr2[i-orig]='\0';
+
+	//perm
+	while(buf[i]=='\t' || buf[i]==' ')
+		i++;
+	orig=i;
+	while(buf[i]!='\t' && buf[i]!=' '){
+		perm[i-orig]=buf[i];
+		i++;
+	}
+	perm[i-orig]='\0';
+	//offset
+	while(buf[i]=='\t' || buf[i]==' ')
+		i++;
+	orig=i;
+	while(buf[i]!='\t' && buf[i]!=' '){
+		offset[i-orig]=buf[i];
+		i++;
+	}
+	offset[i-orig]='\0';
+	//dev
+	while(buf[i]=='\t' || buf[i]==' ')
+		i++;
+	orig=i;
+	while(buf[i]!='\t' && buf[i]!=' '){
+		device[i-orig]=buf[i];
+		i++;
+	}
+	device[i-orig]='\0';
+	//inode
+	while(buf[i]=='\t' || buf[i]==' ')
+		i++;
+	orig=i;
+	while(buf[i]!='\t' && buf[i]!=' '){
+		inode[i-orig]=buf[i];
+		i++;
+	}
+	inode[i-orig]='\0';
+	//pathname
+	pathname[0]='\0';
+	while(buf[i]=='\t' || buf[i]==' ')
+		i++;
+	orig=i;
+	while(buf[i]!='\t' && buf[i]!=' ' && buf[i]!='\n'){
+		pathname[i-orig]=buf[i];
+		i++;
+	}
+	pathname[i-orig]='\0';
+
+}
+
+
+#endif
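
As a quick orientation for this vendored header, a minimal consumer of its
iterator API looks like the following (a usage sketch only; __compcov_load()
in libcompcov.so.c above uses the same three calls):

  /* Usage sketch for pmparser: find the first executable mapping of the
     current process, then release the list. */
  #include <stdio.h>
  #include "pmparser.h"

  int main(void) {
    procmaps_iterator *maps = pmparser_parse(-1);  /* pid < 0 => /proc/self/maps */
    if (!maps) return 1;

    procmaps_struct *m;
    while ((m = pmparser_next(maps)) != NULL) {
      if (m->is_x) {                               /* executable segment */
        printf("code: %p-%p %s\n", m->addr_start, m->addr_end, m->pathname);
        break;
      }
    }

    pmparser_free(maps);
    return 0;
  }
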
diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h
index 851478a0..03951fea 100644
--- a/qemu_mode/patches/afl-qemu-cpu-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-inl.h
@@ -9,7 +9,8 @@
 
    TCG instrumentation and block chaining support by Andrea Biondo
                                       <andrea.biondo965@gmail.com>
-   QEMU 3.1.0 port and thread-safety by Andrea Fioraldi
+
+   QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi
                                       <andreafioraldi@gmail.com>
 
    Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
@@ -65,6 +66,8 @@ abi_ulong afl_entry_point, /* ELF entry point (_start) */
           afl_start_code,  /* .text start pointer      */
           afl_end_code;    /* .text end pointer        */
 
+u8 afl_enable_compcov;
+
 /* Set in the child process in forkserver mode: */
 
 static int forkserver_installed = 0;
@@ -147,7 +150,6 @@ static void afl_setup(void) {
 
     if (inst_r) afl_area_ptr[0] = 1;
 
-
   }
 
   if (getenv("AFL_INST_LIBS")) {
@@ -156,6 +158,11 @@ static void afl_setup(void) {
     afl_end_code   = (abi_ulong)-1;
 
   }
+  
+  if (getenv("AFL_QEMU_COMPCOV")) {
+
+    afl_enable_compcov = 1;
+  }
 
   /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm
      not entirely sure what is the cause. This disables that
diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
new file mode 100644
index 00000000..0ca89c98
--- /dev/null
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -0,0 +1,125 @@
+/*
+   american fuzzy lop - high-performance binary-only instrumentation
+   -----------------------------------------------------------------
+
+   Written by Andrew Griffiths <agriffiths@google.com> and
+              Michal Zalewski <lcamtuf@google.com>
+
+   Idea & design very much by Andrew Griffiths.
+
+   TCG instrumentation and block chaining support by Andrea Biondo
+                                      <andrea.biondo965@gmail.com>
+   
+   QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi
+                                      <andreafioraldi@gmail.com>
+
+   Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This code is a shim patched into the separately-distributed source
+   code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality
+   to implement AFL-style instrumentation and to take care of the remaining
+   parts of the AFL fork server logic.
+
+   The resulting QEMU binary is essentially a standalone instrumentation
+   tool; for an example of how to leverage it for other purposes, you can
+   have a look at afl-showmap.c.
+
+ */
+
+#include "../../config.h"
+#include "tcg.h"
+#include "tcg-op.h"
+
+/* Declared in afl-qemu-cpu-inl.h */
+extern unsigned char *afl_area_ptr;
+extern unsigned int afl_inst_rms;
+extern abi_ulong afl_start_code, afl_end_code;
+extern u8 afl_enable_compcov;
+
+void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
+                                  TCGv_i64 arg1, TCGv_i64 arg2);
+
+static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
+                               target_ulong arg2) {
+
+  if ((arg1 & 0xff) == (arg2 & 0xff)) {
+    afl_area_ptr[cur_loc]++;
+  }
+}
+
+static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
+                               target_ulong arg2) {
+
+  if ((arg1 & 0xff) == (arg2 & 0xff)) {
+    afl_area_ptr[cur_loc]++;
+    if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
+      afl_area_ptr[cur_loc +1]++;
+      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
+        afl_area_ptr[cur_loc +2]++;
+      }
+    }
+  }
+}
+
+static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1,
+                               target_ulong arg2) {
+
+  if ((arg1 & 0xff) == (arg2 & 0xff)) {
+    afl_area_ptr[cur_loc]++;
+    if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
+      afl_area_ptr[cur_loc +1]++;
+      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
+        afl_area_ptr[cur_loc +2]++;
+        if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
+          afl_area_ptr[cur_loc +3]++;
+          if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
+            afl_area_ptr[cur_loc +4]++;
+            if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
+              afl_area_ptr[cur_loc +5]++;
+              if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
+                afl_area_ptr[cur_loc +6]++;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+
+static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
+                            TCGMemOp ot) {
+
+  void *func;
+  
+  if (!afl_enable_compcov || cur_loc > afl_end_code || cur_loc < afl_start_code)
+    return;
+
+  switch (ot) {
+    case MO_64:
+      func = &afl_compcov_log_64;
+      break;
+    case MO_32: 
+      func = &afl_compcov_log_32;
+      break;
+    case MO_16:
+      func = &afl_compcov_log_16;
+      break;
+    default:
+      return;
+  }
+  
+  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+  cur_loc &= MAP_SIZE - 1;
+  
+  if (cur_loc >= afl_inst_rms) return;
+  
+  tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2);
+}
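
The three unrolled helpers above all compute the same quantity: how many
low-order bytes of the two compared operands already agree, with one hit
counter per matching byte. A loop-based equivalent (a sketch for clarity, not
the patched code) would be:

  /* Sketch: generalized form of afl_compcov_log_{16,32,64}. size is the
     operand width in bytes; for an N-byte comparison, up to N-1 counters
     starting at cur_loc are incremented while the low-byte prefix matches. */
  #include <stdint.h>

  static void compcov_log_generic(unsigned char *area, uint64_t cur_loc,
                                  uint64_t arg1, uint64_t arg2, int size) {
    for (int i = 0; i < size - 1; i++) {
      uint64_t mask = (1ULL << ((i + 1) * 8)) - 1;   /* low i+1 bytes */
      if ((arg1 & mask) != (arg2 & mask)) break;
      area[cur_loc + i]++;
    }
  }

The unrolled versions presumably keep the TCG helper cheap on the hot path;
the behavior is identical.
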
diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
index fab3d9e3..ff90d1b9 100644
--- a/qemu_mode/patches/afl-qemu-tcg-inl.h
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -9,7 +9,8 @@
 
    TCG instrumentation and block chaining support by Andrea Biondo
                                       <andrea.biondo965@gmail.com>
-   QEMU 3.1.0 port and thread-safety by Andrea Fioraldi
+
+   QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi
                                       <andreafioraldi@gmail.com>
 
    Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
@@ -42,10 +43,10 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
     unsigned sizemask, flags;
     TCGOp *op;
 
-    TCGTemp *arg = tcgv_ptr_temp( tcg_const_tl(cur_loc) );
+    TCGTemp *arg = tcgv_i64_temp( tcg_const_tl(cur_loc) );
 
     flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(ptr, 1);
+    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
 
 #if defined(__sparc__) && !defined(__arch64__) \
     && !defined(CONFIG_TCG_INTERPRETER)
@@ -151,7 +152,7 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
         /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
            Note that describing these as TCGv_i64 eliminates an unnecessary
            zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
+        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
         tcg_temp_free_i64(retl);
         tcg_temp_free_i64(reth);
     }
@@ -163,3 +164,143 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
 #endif /* TCG_TARGET_EXTEND_ARGS */
 }
 
+void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+    int i, real_args, nb_rets, pi;
+    unsigned sizemask, flags;
+    TCGOp *op;
+
+    int nargs = 3;
+    TCGTemp *args_buf[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ),
+                             tcgv_i64_temp(arg1), tcgv_i64_temp(arg2) };
+    TCGTemp **args = args_buf;
+
+    flags = 0;
+    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) |
+               dh_sizemask(i64, 2) | dh_sizemask(i64, 3);
+
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+    /* We have 64-bit values in one register, but need to pass as two
+       separate parameters.  Split them.  */
+    int orig_sizemask = sizemask;
+    int orig_nargs = nargs;
+    TCGv_i64 retl, reth;
+    TCGTemp *split_args[MAX_OPC_PARAM];
+
+    retl = NULL;
+    reth = NULL;
+    if (sizemask != 0) {
+        for (i = real_args = 0; i < nargs; ++i) {
+            int is_64bit = sizemask & (1 << (i+1)*2);
+            if (is_64bit) {
+                TCGv_i64 orig = temp_tcgv_i64(args[i]);
+                TCGv_i32 h = tcg_temp_new_i32();
+                TCGv_i32 l = tcg_temp_new_i32();
+                tcg_gen_extr_i64_i32(l, h, orig);
+                split_args[real_args++] = tcgv_i32_temp(h);
+                split_args[real_args++] = tcgv_i32_temp(l);
+            } else {
+                split_args[real_args++] = args[i];
+            }
+        }
+        nargs = real_args;
+        args = split_args;
+        sizemask = 0;
+    }
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+    for (i = 0; i < nargs; ++i) {
+        int is_64bit = sizemask & (1 << (i+1)*2);
+        int is_signed = sizemask & (2 << (i+1)*2);
+        if (!is_64bit) {
+            TCGv_i64 temp = tcg_temp_new_i64();
+            TCGv_i64 orig = temp_tcgv_i64(args[i]);
+            if (is_signed) {
+                tcg_gen_ext32s_i64(temp, orig);
+            } else {
+                tcg_gen_ext32u_i64(temp, orig);
+            }
+            args[i] = tcgv_i64_temp(temp);
+        }
+    }
+#endif /* TCG_TARGET_EXTEND_ARGS */
+
+    op = tcg_emit_op(INDEX_op_call);
+
+    pi = 0;
+    nb_rets = 0;
+    TCGOP_CALLO(op) = nb_rets;
+
+    real_args = 0;
+    for (i = 0; i < nargs; i++) {
+        int is_64bit = sizemask & (1 << (i+1)*2);
+        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+            /* some targets want aligned 64 bit args */
+            if (real_args & 1) {
+                op->args[pi++] = TCG_CALL_DUMMY_ARG;
+                real_args++;
+            }
+#endif
+           /* If stack grows up, then we will be placing successive
+              arguments at lower addresses, which means we need to
+              reverse the order compared to how we would normally
+              treat either big or little-endian.  For those arguments
+              that will wind up in registers, this still works for
+              HPPA (the only current STACK_GROWSUP target) since the
+              argument registers are *also* allocated in decreasing
+              order.  If another such target is added, this logic may
+              have to get more complicated to differentiate between
+              stack arguments and register arguments.  */
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+            op->args[pi++] = temp_arg(args[i] + 1);
+            op->args[pi++] = temp_arg(args[i]);
+#else
+            op->args[pi++] = temp_arg(args[i]);
+            op->args[pi++] = temp_arg(args[i] + 1);
+#endif
+            real_args += 2;
+            continue;
+        }
+
+        op->args[pi++] = temp_arg(args[i]);
+        real_args++;
+    }
+    op->args[pi++] = (uintptr_t)func;
+    op->args[pi++] = flags;
+    TCGOP_CALLI(op) = real_args;
+
+    /* Make sure the fields didn't overflow.  */
+    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+    /* Free all of the parts we allocated above.  */
+    for (i = real_args = 0; i < orig_nargs; ++i) {
+        int is_64bit = orig_sizemask & (1 << (i+1)*2);
+        if (is_64bit) {
+            tcg_temp_free_internal(args[real_args++]);
+            tcg_temp_free_internal(args[real_args++]);
+        } else {
+            real_args++;
+        }
+    }
+    if (orig_sizemask & 1) {
+        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+           Note that describing these as TCGv_i64 eliminates an unnecessary
+           zero-extension that tcg_gen_concat_i32_i64 would create.  */
+        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+        tcg_temp_free_i64(retl);
+        tcg_temp_free_i64(reth);
+    }
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+    for (i = 0; i < nargs; ++i) {
+        int is_64bit = sizemask & (1 << (i+1)*2);
+        if (!is_64bit) {
+            tcg_temp_free_internal(args[i]);
+        }
+    }
+#endif /* TCG_TARGET_EXTEND_ARGS */
+}
+
diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h
index 74c827f5..bfb2897e 100644
--- a/qemu_mode/patches/afl-qemu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-translate-inl.h
@@ -9,7 +9,8 @@
 
    TCG instrumentation and block chaining support by Andrea Biondo
                                       <andrea.biondo965@gmail.com>
-   QEMU 3.1.0 port and thread-safety by Andrea Fioraldi
+
+   QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi
                                       <andreafioraldi@gmail.com>
 
    Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
@@ -41,12 +42,12 @@ extern abi_ulong afl_start_code, afl_end_code;
 
 void tcg_gen_afl_maybe_log_call(target_ulong cur_loc);
 
-void afl_maybe_log(void* cur_loc) { 
+void afl_maybe_log(target_ulong cur_loc) { 
 
   static __thread abi_ulong prev_loc;
 
-  afl_area_ptr[(abi_ulong)cur_loc ^ prev_loc]++;
-  prev_loc = (abi_ulong)cur_loc >> 1;
+  afl_area_ptr[cur_loc ^ prev_loc]++;
+  prev_loc = cur_loc >> 1;
 
 }
 
@@ -59,7 +60,7 @@ static void afl_gen_trace(target_ulong cur_loc) {
   if (cur_loc > afl_end_code || cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of static dummy buffer
     return;
 
-  /* Looks like QEMU always maps to fixed locations, so ASAN is not a
+  /* Looks like QEMU always maps to fixed locations, so ASLR is not a
      concern. Phew. But instruction addresses may be aligned. Let's mangle
      the value to get something quasi-uniform. */
 
diff --git a/qemu_mode/patches/i386-translate.diff b/qemu_mode/patches/i386-translate.diff
new file mode 100644
index 00000000..0bc48828
--- /dev/null
+++ b/qemu_mode/patches/i386-translate.diff
@@ -0,0 +1,33 @@
+diff --git a/target/i386/translate.c b/target/i386/translate.c
+index 0dd5fbe4..b95d341e 100644
+--- a/target/i386/translate.c
++++ b/target/i386/translate.c
+@@ -32,6 +32,8 @@
+ #include "trace-tcg.h"
+ #include "exec/log.h"
+ 
++#include "../patches/afl-qemu-cpu-translate-inl.h"
++
+ #define PREFIX_REPZ   0x01
+ #define PREFIX_REPNZ  0x02
+ #define PREFIX_LOCK   0x04
+@@ -1343,9 +1345,11 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
+             tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
+                                         s1->mem_index, ot | MO_LE);
+             tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
++            afl_gen_compcov(s1->pc, s1->cc_srcT, s1->T1, ot);
+         } else {
+             tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
+             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
++            afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot);
+             gen_op_st_rm_T0_A0(s1, ot, d);
+         }
+         gen_op_update2_cc(s1);
+@@ -1389,6 +1393,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
+         tcg_gen_mov_tl(cpu_cc_src, s1->T1);
+         tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
+         tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
++        afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot);
+         set_cc_op(s1, CC_OP_SUBB + ot);
+         break;
+     }