-rw-r--r--  include/afl-fuzz.h                           |   21
-rw-r--r--  include/cmplog.h                             |   17
-rw-r--r--  include/debug.h                              |   24
-rw-r--r--  include/types.h                              |   43
-rw-r--r--  instrumentation/afl-compiler-rt.o.c          |   89
-rw-r--r--  instrumentation/cmplog-instructions-pass.cc  |  586
-rw-r--r--  src/afl-cc.c                                 |   49
-rw-r--r--  src/afl-fuzz-init.c                          |   38
-rw-r--r--  src/afl-fuzz-one.c                           |   81
-rw-r--r--  src/afl-fuzz-queue.c                         |    1
-rw-r--r--  src/afl-fuzz-redqueen.c                      | 1437
-rw-r--r--  src/afl-fuzz-state.c                         |    1
-rw-r--r--  src/afl-fuzz.c                               |   60
13 files changed, 2055 insertions(+), 392 deletions(-)
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index 988a907d..8a2122dc 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -145,12 +145,22 @@ extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
extern s32
interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
+struct tainted {
+
+ u32 pos;
+ u32 len;
+ struct tainted *next;
+ struct tainted *prev;
+
+};
+
struct queue_entry {
u8 *fname; /* File name for the test case */
u32 len; /* Input length */
- u8 cal_failed; /* Calibration failed? */
+ u8 colorized, /* Do not run redqueen stage again */
+ cal_failed; /* Calibration failed? */
bool trim_done, /* Trimmed? */
was_fuzzed, /* historical, but needed for MOpt */
passed_det, /* Deterministic stages passed? */
@@ -158,7 +168,6 @@ struct queue_entry {
var_behavior, /* Variable behavior? */
favored, /* Currently favored? */
fs_redundant, /* Marked as redundant in the fs? */
- fully_colorized, /* Do not run redqueen stage again */
is_ascii, /* Is the input just ascii text? */
disabled; /* Is disabled from fuzz selection */
@@ -183,7 +192,11 @@ struct queue_entry {
u8 *testcase_buf; /* The testcase buffer, if loaded. */
- struct queue_entry *next; /* Next element, if any */
+ u8 * cmplog_colorinput; /* the result buf of colorization */
+ struct tainted *taint; /* Taint information from CmpLog */
+
+ struct queue_entry *mother, /* queue entry this is based on */
+ *next; /* Next element, if any */
};
@@ -636,6 +649,8 @@ typedef struct afl_state {
/* cmplog forkserver ids */
s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
u32 cmplog_prev_timed_out;
+ u32 cmplog_max_filesize;
+ u32 cmplog_lvl;
struct afl_pass_stat *pass_stats;
struct cmp_map * orig_cmp_map;
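The new struct tainted list hangs off each queue entry and records which byte ranges of the input influence logged comparisons. A minimal sketch of walking it, assuming only the definitions added above (u32 comes from types.h):

/* Sketch: total number of tainted bytes recorded for a queue entry. */
static u32 count_tainted_bytes(struct tainted *t) {

  u32 total = 0;

  while (t) {

    total += t->len;                      /* node covers [pos, pos + len) */
    t = t->next;

  }

  return total;

}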
diff --git a/include/cmplog.h b/include/cmplog.h
index bf557785..6392c503 100644
--- a/include/cmplog.h
+++ b/include/cmplog.h
@@ -30,8 +30,10 @@
#include "config.h"
+#define CMPLOG_LVL_MAX 3
+
#define CMP_MAP_W 65536
-#define CMP_MAP_H 256
+#define CMP_MAP_H 32
#define CMP_MAP_RTN_H (CMP_MAP_H / 4)
#define SHAPE_BYTES(x) (x + 1)
@@ -41,13 +43,12 @@
struct cmp_header {
- unsigned hits : 20;
-
- unsigned cnt : 20;
- unsigned id : 16;
-
- unsigned shape : 5; // from 0 to 31
+ unsigned hits : 24;
+ unsigned id : 24;
+ unsigned shape : 5;
unsigned type : 1;
+ unsigned attribute : 4;
+ unsigned reserved : 6;
} __attribute__((packed));
@@ -55,6 +56,8 @@ struct cmp_operands {
u64 v0;
u64 v1;
+ u64 v0_128;
+ u64 v1_128;
};
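The reworked bitfields should still sum to 64 bits (24 + 24 + 5 + 1 + 4 + 6), so the packed header stays at 8 bytes while each cmp_operands slot grows to 32 bytes; together with CMP_MAP_H dropping from 256 to 32, the operand area of the shared map shrinks from 256 MiB to 64 MiB. A compile-time sanity check, assuming only the definitions above and a C11 compiler:

#include <assert.h>

/* Sketch: catch accidental layout changes of the shared-memory structs. */
static_assert(sizeof(struct cmp_header) == 8,
              "hits/id/shape/type/attribute/reserved must pack into 64 bits");
static_assert(sizeof(struct cmp_operands) == 32,
              "v0/v1 plus the new v0_128/v1_128 high halves");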
diff --git a/include/debug.h b/include/debug.h
index ef5b195b..fc1f39cb 100644
--- a/include/debug.h
+++ b/include/debug.h
@@ -295,8 +295,8 @@ static inline const char *colorfilter(const char *x) {
\
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \
"\n[-] PROGRAM ABORT : " cRST x); \
- SAYF(cLRD "\n Location : " cRST "%s(), %s:%d\n\n", __func__, \
- __FILE__, __LINE__); \
+ SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", __func__, \
+ __FILE__, (u32)__LINE__); \
exit(1); \
\
} while (0)
@@ -308,8 +308,8 @@ static inline const char *colorfilter(const char *x) {
\
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \
"\n[-] PROGRAM ABORT : " cRST x); \
- SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%d\n\n", __func__, \
- __FILE__, __LINE__); \
+ SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", __func__, \
+ __FILE__, (u32)__LINE__); \
abort(); \
\
} while (0)
@@ -322,8 +322,8 @@ static inline const char *colorfilter(const char *x) {
fflush(stdout); \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \
"\n[-] SYSTEM ERROR : " cRST x); \
- SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%d\n", __func__, \
- __FILE__, __LINE__); \
+ SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", __func__, \
+ __FILE__, (u32)__LINE__); \
SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \
exit(1); \
\
@@ -344,12 +344,12 @@ static inline const char *colorfilter(const char *x) {
/* Show a prefixed debug output. */
-#define DEBUGF(x...) \
- do { \
- \
- SAYF(cMGN "[D] " cBRI "DEBUG: " cRST x); \
- SAYF(cRST ""); \
- \
+#define DEBUGF(x...) \
+ do { \
+ \
+ fprintf(stderr, cMGN "[D] " cBRI "DEBUG: " cRST x); \
+ fprintf(stderr, cRST ""); \
+ \
} while (0)
/* Error-checking versions of read() and write() that call RPFATAL() as
diff --git a/include/types.h b/include/types.h
index 3e3bc953..d5c31597 100644
--- a/include/types.h
+++ b/include/types.h
@@ -26,9 +26,11 @@
#include <stdint.h>
#include <stdlib.h>
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef unsigned __int128 uint128_t;
+typedef uint128_t u128;
/* Extended forkserver option values */
@@ -57,10 +59,12 @@ typedef uint32_t u32;
typedef unsigned long long u64;
-typedef int8_t s8;
-typedef int16_t s16;
-typedef int32_t s32;
-typedef int64_t s64;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef __int128 int128_t;
+typedef int128_t s128;
#ifndef MIN
#define MIN(a, b) \
@@ -114,6 +118,31 @@ typedef int64_t s64;
\
})
+// It is impossible to define 128 bit constants, so ...
+#define SWAPN(_x, _l) \
+ ({ \
+ \
+ u128 _res = (_x), _ret; \
+ char *d = (char *)&_ret, *s = (char *)&_res; \
+ int i; \
+ for (i = 0; i < 16; i++) \
+ d[15 - i] = s[i]; \
+ u32 sr = 128U - ((_l) << 3U); \
+ (_ret >>= sr); \
+ (u128) _ret; \
+ \
+ })
+
+#define SWAPNN(_x, _y, _l) \
+ ({ \
+ \
+ char *d = (char *)(_x), *s = (char *)(_y); \
+ u32 i, l = (_l)-1; \
+ for (i = 0; i <= l; i++) \
+ d[l - i] = s[i]; \
+ \
+ })
+
#ifdef AFL_LLVM_PASS
#if defined(__linux__) || !defined(__ANDROID__)
#define AFL_SR(s) (srandom(s))
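SWAPN reverses all 16 bytes of a u128 and then shifts the result back down, so only the low _l bytes end up byte-swapped; SWAPNN does the same in-place on a byte buffer. A small usage sketch (the values are illustrative only):

/* Sketch: byte-order helpers for widths without a native SWAP macro. */
static void swapn_demo(void) {

  u128 x = 0x112233;                       /* three significant bytes        */
  u128 y = SWAPN(x, 3);                    /* y == 0x332211 (bytes reversed) */

  u8 dst[3], src[3] = {0x11, 0x22, 0x33};
  SWAPNN(dst, src, 3);                     /* dst == {0x33, 0x22, 0x11}      */

  (void)y;
  (void)dst;

}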
diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c
index b735d8df..5d75af78 100644
--- a/instrumentation/afl-compiler-rt.o.c
+++ b/instrumentation/afl-compiler-rt.o.c
@@ -161,7 +161,7 @@ void send_forkserver_error(int error) {
u32 status;
if (!error || error > 0xffff) return;
status = (FS_OPT_ERROR | FS_OPT_SET_ERROR(error));
- if (write(FORKSRV_FD + 1, (char *)&status, 4) != 4) return;
+ if (write(FORKSRV_FD + 1, (char *)&status, 4) != 4) { return; }
}
@@ -544,11 +544,11 @@ static void __afl_start_snapshots(void) {
if (__afl_dictionary_len && __afl_dictionary) status |= FS_OPT_AUTODICT;
memcpy(tmp, &status, 4);
- if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;
+ if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }
if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) {
- if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
+ if (read(FORKSRV_FD, &was_killed, 4) != 4) { _exit(1); }
if (getenv("AFL_DEBUG")) {
@@ -1207,7 +1207,9 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
///// CmpLog instrumentation
-void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) {
+void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) {
+
+ // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n", arg1, arg2, attr);
if (unlikely(!__afl_cmp_map)) return;
@@ -1216,6 +1218,7 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) {
k &= CMP_MAP_W - 1;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+ __afl_cmp_map->headers[k].attribute = attr;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
@@ -1230,7 +1233,7 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) {
}
-void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) {
+void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2, uint8_t attr) {
if (unlikely(!__afl_cmp_map)) return;
@@ -1239,6 +1242,7 @@ void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) {
k &= CMP_MAP_W - 1;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+ __afl_cmp_map->headers[k].attribute = attr;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
@@ -1251,7 +1255,9 @@ void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) {
}
-void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) {
+void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2, uint8_t attr) {
+
+ // fprintf(stderr, "hook4 arg0=%x arg1=%x attr=%u\n", arg1, arg2, attr);
if (unlikely(!__afl_cmp_map)) return;
@@ -1260,6 +1266,7 @@ void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) {
k &= CMP_MAP_W - 1;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+ __afl_cmp_map->headers[k].attribute = attr;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
@@ -1272,7 +1279,9 @@ void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) {
}
-void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) {
+void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2, uint8_t attr) {
+
+ // fprintf(stderr, "hook8 arg0=%lx arg1=%lx attr=%u\n", arg1, arg2, attr);
if (unlikely(!__afl_cmp_map)) return;
@@ -1281,6 +1290,7 @@ void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) {
k &= CMP_MAP_W - 1;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+ __afl_cmp_map->headers[k].attribute = attr;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
@@ -1293,16 +1303,77 @@ void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) {
}
+// support for u24 to u120 via llvm _ExtInt(). size is in bytes minus 1
+void __cmplog_ins_hookN(uint128_t arg1, uint128_t arg2, uint8_t attr,
+ uint8_t size) {
+
+ // fprintf(stderr, "hookN arg0=%llx:%llx arg1=%llx:%llx bytes=%u attr=%u\n",
+ // (u64)(arg1 >> 64), (u64)arg1, (u64)(arg2 >> 64), (u64)arg2, size + 1,
+ // attr);
+
+ if (unlikely(!__afl_cmp_map)) return;
+
+ uintptr_t k = (uintptr_t)__builtin_return_address(0);
+ k = (k >> 4) ^ (k << 8);
+ k &= CMP_MAP_W - 1;
+
+ __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+ __afl_cmp_map->headers[k].attribute = attr;
+
+ u32 hits = __afl_cmp_map->headers[k].hits;
+ __afl_cmp_map->headers[k].hits = hits + 1;
+
+ __afl_cmp_map->headers[k].shape = size;
+
+ hits &= CMP_MAP_H - 1;
+ __afl_cmp_map->log[k][hits].v0 = (u64)arg1;
+ __afl_cmp_map->log[k][hits].v1 = (u64)arg2;
+
+ if (size > 7) {
+
+ __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64);
+ __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64);
+
+ }
+
+}
+
+void __cmplog_ins_hook16(uint128_t arg1, uint128_t arg2, uint8_t attr) {
+
+ if (unlikely(!__afl_cmp_map)) return;
+
+ uintptr_t k = (uintptr_t)__builtin_return_address(0);
+ k = (k >> 4) ^ (k << 8);
+ k &= CMP_MAP_W - 1;
+
+ __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+ __afl_cmp_map->headers[k].attribute = attr;
+
+ u32 hits = __afl_cmp_map->headers[k].hits;
+ __afl_cmp_map->headers[k].hits = hits + 1;
+
+ __afl_cmp_map->headers[k].shape = 15;
+
+ hits &= CMP_MAP_H - 1;
+ __afl_cmp_map->log[k][hits].v0 = (u64)arg1;
+ __afl_cmp_map->log[k][hits].v1 = (u64)arg2;
+ __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64);
+ __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64);
+
+}
+
#if defined(__APPLE__)
#pragma weak __sanitizer_cov_trace_const_cmp1 = __cmplog_ins_hook1
#pragma weak __sanitizer_cov_trace_const_cmp2 = __cmplog_ins_hook2
#pragma weak __sanitizer_cov_trace_const_cmp4 = __cmplog_ins_hook4
#pragma weak __sanitizer_cov_trace_const_cmp8 = __cmplog_ins_hook8
+ #pragma weak __sanitizer_cov_trace_const_cmp16 = __cmplog_ins_hook16
#pragma weak __sanitizer_cov_trace_cmp1 = __cmplog_ins_hook1
#pragma weak __sanitizer_cov_trace_cmp2 = __cmplog_ins_hook2
#pragma weak __sanitizer_cov_trace_cmp4 = __cmplog_ins_hook4
#pragma weak __sanitizer_cov_trace_cmp8 = __cmplog_ins_hook8
+ #pragma weak __sanitizer_cov_trace_cmp16 = __cmplog_ins_hook16
#else
void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
__attribute__((alias("__cmplog_ins_hook1")));
@@ -1312,6 +1383,8 @@ void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
__attribute__((alias("__cmplog_ins_hook4")));
void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
__attribute__((alias("__cmplog_ins_hook8")));
+void __sanitizer_cov_trace_const_cmp16(uint128_t arg1, uint128_t arg2)
+ __attribute__((alias("__cmplog_ins_hook16")));
void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
__attribute__((alias("__cmplog_ins_hook1")));
@@ -1321,6 +1394,8 @@ void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
__attribute__((alias("__cmplog_ins_hook4")));
void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
__attribute__((alias("__cmplog_ins_hook8")));
+void __sanitizer_cov_trace_cmp16(uint128_t arg1, uint128_t arg2)
+ __attribute__((alias("__cmplog_ins_hook16")));
#endif /* defined(__APPLE__) */
void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) {
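Operands wider than 64 bits are split across v0/v0_128 and v1/v1_128 in the log slot. A consumer-side sketch of putting them back together, assuming the cmp_operands layout from include/cmplog.h above (read_wide_operands itself is a hypothetical helper):

/* Sketch: rebuild a full operand pair from one cmp log slot. */
static void read_wide_operands(struct cmp_operands *o, u8 shape, u128 *v0,
                               u128 *v1) {

  *v0 = o->v0;
  *v1 = o->v1;

  if (shape > 7) {                         /* shape = size in bytes minus 1 */

    *v0 |= ((u128)o->v0_128) << 64;
    *v1 |= ((u128)o->v1_128) << 64;

  }

}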
diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc
index 3499ccf0..a74fb6c8 100644
--- a/instrumentation/cmplog-instructions-pass.cc
+++ b/instrumentation/cmplog-instructions-pass.cc
@@ -85,9 +85,25 @@ class CmpLogInstructions : public ModulePass {
char CmpLogInstructions::ID = 0;
+template <class Iterator>
+Iterator Unique(Iterator first, Iterator last) {
+
+ while (first != last) {
+
+ Iterator next(first);
+ last = std::remove(++next, last, *first);
+ first = next;
+
+ }
+
+ return last;
+
+}
+
bool CmpLogInstructions::hookInstrs(Module &M) {
std::vector<Instruction *> icomps;
+ std::vector<SwitchInst *> switches;
LLVMContext & C = M.getContext();
Type * VoidTy = Type::getVoidTy(C);
@@ -95,13 +111,15 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
IntegerType *Int16Ty = IntegerType::getInt16Ty(C);
IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
IntegerType *Int64Ty = IntegerType::getInt64Ty(C);
+ IntegerType *Int128Ty = IntegerType::getInt128Ty(C);
#if LLVM_VERSION_MAJOR < 9
Constant *
#else
FunctionCallee
#endif
- c1 = M.getOrInsertFunction("__cmplog_ins_hook1", VoidTy, Int8Ty, Int8Ty
+ c1 = M.getOrInsertFunction("__cmplog_ins_hook1", VoidTy, Int8Ty, Int8Ty,
+ Int8Ty
#if LLVM_VERSION_MAJOR < 5
,
NULL
@@ -118,7 +136,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
#else
FunctionCallee
#endif
- c2 = M.getOrInsertFunction("__cmplog_ins_hook2", VoidTy, Int16Ty, Int16Ty
+ c2 = M.getOrInsertFunction("__cmplog_ins_hook2", VoidTy, Int16Ty, Int16Ty,
+ Int8Ty
#if LLVM_VERSION_MAJOR < 5
,
NULL
@@ -135,7 +154,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
#else
FunctionCallee
#endif
- c4 = M.getOrInsertFunction("__cmplog_ins_hook4", VoidTy, Int32Ty, Int32Ty
+ c4 = M.getOrInsertFunction("__cmplog_ins_hook4", VoidTy, Int32Ty, Int32Ty,
+ Int8Ty
#if LLVM_VERSION_MAJOR < 5
,
NULL
@@ -152,7 +172,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
#else
FunctionCallee
#endif
- c8 = M.getOrInsertFunction("__cmplog_ins_hook8", VoidTy, Int64Ty, Int64Ty
+ c8 = M.getOrInsertFunction("__cmplog_ins_hook8", VoidTy, Int64Ty, Int64Ty,
+ Int8Ty
#if LLVM_VERSION_MAJOR < 5
,
NULL
@@ -164,6 +185,42 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
FunctionCallee cmplogHookIns8 = c8;
#endif
+#if LLVM_VERSION_MAJOR < 9
+ Constant *
+#else
+ FunctionCallee
+#endif
+ c16 = M.getOrInsertFunction("__cmplog_ins_hook16", VoidTy, Int128Ty,
+ Int128Ty, Int8Ty
+#if LLVM_VERSION_MAJOR < 5
+ ,
+ NULL
+#endif
+ );
+#if LLVM_VERSION_MAJOR < 9
+ Function *cmplogHookIns16 = cast<Function>(c16);
+#else
+ FunctionCallee cmplogHookIns16 = c16;
+#endif
+
+#if LLVM_VERSION_MAJOR < 9
+ Constant *
+#else
+ FunctionCallee
+#endif
+ cN = M.getOrInsertFunction("__cmplog_ins_hookN", VoidTy, Int128Ty,
+ Int128Ty, Int8Ty, Int8Ty
+#if LLVM_VERSION_MAJOR < 5
+ ,
+ NULL
+#endif
+ );
+#if LLVM_VERSION_MAJOR < 9
+ Function *cmplogHookInsN = cast<Function>(cN);
+#else
+ FunctionCallee cmplogHookInsN = cN;
+#endif
+
/* iterate over all functions, bbs and instruction and add suitable calls */
for (auto &F : M) {
@@ -174,35 +231,16 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
for (auto &IN : BB) {
CmpInst *selectcmpInst = nullptr;
-
if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
- if (selectcmpInst->getPredicate() == CmpInst::ICMP_EQ ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_NE ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_UGT ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_SGT ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_ULT ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_SLT ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_UGE ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_SGE ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_ULE ||
- selectcmpInst->getPredicate() == CmpInst::ICMP_SLE ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_OGE ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_UGE ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_OLE ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_ULE ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_OGT ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_UGT ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_OLT ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_ULT ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_UEQ ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_OEQ ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_UNE ||
- selectcmpInst->getPredicate() == CmpInst::FCMP_ONE) {
-
- icomps.push_back(selectcmpInst);
+ icomps.push_back(selectcmpInst);
- }
+ }
+
+ SwitchInst *switchInst = nullptr;
+ if ((switchInst = dyn_cast<SwitchInst>(BB.getTerminator()))) {
+
+ if (switchInst->getNumCases() > 1) { switches.push_back(switchInst); }
}
@@ -212,101 +250,473 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
}
- if (!icomps.size()) return false;
- // if (!be_quiet) errs() << "Hooking " << icomps.size() << " cmp
- // instructions\n";
+ // unique the collected switches
+ switches.erase(Unique(switches.begin(), switches.end()), switches.end());
+
+ // Instrument switch values for cmplog
+ if (switches.size()) {
+
+ if (!be_quiet)
+ errs() << "Hooking " << switches.size() << " switch instructions\n";
- for (auto &selectcmpInst : icomps) {
+ for (auto &SI : switches) {
- IRBuilder<> IRB(selectcmpInst->getParent());
- IRB.SetInsertPoint(selectcmpInst);
+ Value * Val = SI->getCondition();
+ unsigned int max_size = Val->getType()->getIntegerBitWidth(), cast_size;
+ unsigned char do_cast = 0;
- auto op0 = selectcmpInst->getOperand(0);
- auto op1 = selectcmpInst->getOperand(1);
+ if (!SI->getNumCases() || max_size <= 8) {
- IntegerType * intTyOp0 = NULL;
- IntegerType * intTyOp1 = NULL;
- unsigned max_size = 0;
- std::vector<Value *> args;
+ // if (!be_quiet) errs() << "skip trivial switch..\n";
+ continue;
- if (selectcmpInst->getOpcode() == Instruction::FCmp) {
+ }
+
+ IRBuilder<> IRB(SI->getParent());
+ IRB.SetInsertPoint(SI);
+
+ if (max_size % 8) {
+
+ max_size = (((max_size / 8) + 1) * 8);
+ do_cast = 1;
+
+ }
+
+ if (max_size > 128) {
+
+ if (!be_quiet) {
+
+ fprintf(stderr,
+ "Cannot handle this switch bit size: %u (truncating)\n",
+ max_size);
+
+ }
+
+ max_size = 128;
+ do_cast = 1;
+
+ }
+
+ // do we need to cast?
+ switch (max_size) {
+
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ cast_size = max_size;
+ break;
+ default:
+ cast_size = 128;
+ do_cast = 1;
+
+ }
+
+ Value *CompareTo = Val;
+
+ if (do_cast) {
+
+ ConstantInt *cint = dyn_cast<ConstantInt>(Val);
+ if (cint) {
+
+ uint64_t val = cint->getZExtValue();
+ // fprintf(stderr, "ConstantInt: %lu\n", val);
+ switch (cast_size) {
+
+ case 8:
+ CompareTo = ConstantInt::get(Int8Ty, val);
+ break;
+ case 16:
+ CompareTo = ConstantInt::get(Int16Ty, val);
+ break;
+ case 32:
+ CompareTo = ConstantInt::get(Int32Ty, val);
+ break;
+ case 64:
+ CompareTo = ConstantInt::get(Int64Ty, val);
+ break;
+ case 128:
+ CompareTo = ConstantInt::get(Int128Ty, val);
+ break;
+
+ }
- auto ty0 = op0->getType();
- if (ty0->isHalfTy()
+ } else {
+
+ CompareTo = IRB.CreateBitCast(Val, IntegerType::get(C, cast_size));
+
+ }
+
+ }
+
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e;
+ ++i) {
+
+#if LLVM_VERSION_MAJOR < 5
+ ConstantInt *cint = i.getCaseValue();
+#else
+ ConstantInt *cint = i->getCaseValue();
+#endif
+
+ if (cint) {
+
+ std::vector<Value *> args;
+ args.push_back(CompareTo);
+
+ Value *new_param = cint;
+
+ if (do_cast) {
+
+ uint64_t val = cint->getZExtValue();
+ // fprintf(stderr, "ConstantInt: %lu\n", val);
+ switch (cast_size) {
+
+ case 8:
+ new_param = ConstantInt::get(Int8Ty, val);
+ break;
+ case 16:
+ new_param = ConstantInt::get(Int16Ty, val);
+ break;
+ case 32:
+ new_param = ConstantInt::get(Int32Ty, val);
+ break;
+ case 64:
+ new_param = ConstantInt::get(Int64Ty, val);
+ break;
+ case 128:
+ new_param = ConstantInt::get(Int128Ty, val);
+ break;
+
+ }
+
+ }
+
+ if (new_param) {
+
+ args.push_back(new_param);
+ ConstantInt *attribute = ConstantInt::get(Int8Ty, 1);
+ args.push_back(attribute);
+ if (cast_size != max_size) {
+
+ ConstantInt *bitsize =
+ ConstantInt::get(Int8Ty, (max_size / 8) - 1);
+ args.push_back(bitsize);
+
+ }
+
+ switch (cast_size) {
+
+ case 8:
+ IRB.CreateCall(cmplogHookIns1, args);
+ break;
+ case 16:
+ IRB.CreateCall(cmplogHookIns2, args);
+ break;
+ case 32:
+ IRB.CreateCall(cmplogHookIns4, args);
+ break;
+ case 64:
+ IRB.CreateCall(cmplogHookIns8, args);
+ break;
+ case 128:
+ if (max_size == 128) {
+
+ IRB.CreateCall(cmplogHookIns16, args);
+
+ } else {
+
+ IRB.CreateCall(cmplogHookInsN, args);
+
+ }
+
+ break;
+
+ }
+
+ }
+
+ }
+
+ }
+
+ }
+
+ }
+
+ if (icomps.size()) {
+
+ // if (!be_quiet) errs() << "Hooking " << icomps.size() <<
+ // " cmp instructions\n";
+
+ for (auto &selectcmpInst : icomps) {
+
+ IRBuilder<> IRB(selectcmpInst->getParent());
+ IRB.SetInsertPoint(selectcmpInst);
+
+ Value *op0 = selectcmpInst->getOperand(0);
+ Value *op1 = selectcmpInst->getOperand(1);
+
+ IntegerType * intTyOp0 = NULL;
+ IntegerType * intTyOp1 = NULL;
+ unsigned max_size = 0, cast_size = 0;
+ unsigned char attr = 0, do_cast = 0;
+ std::vector<Value *> args;
+
+ CmpInst *cmpInst = dyn_cast<CmpInst>(selectcmpInst);
+
+ if (!cmpInst) { continue; }
+
+ switch (cmpInst->getPredicate()) {
+
+ case CmpInst::ICMP_NE:
+ case CmpInst::FCMP_UNE:
+ case CmpInst::FCMP_ONE:
+ break;
+ case CmpInst::ICMP_EQ:
+ case CmpInst::FCMP_UEQ:
+ case CmpInst::FCMP_OEQ:
+ attr += 1;
+ break;
+ case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_SGT:
+ case CmpInst::FCMP_OGT:
+ case CmpInst::FCMP_UGT:
+ attr += 2;
+ break;
+ case CmpInst::ICMP_UGE:
+ case CmpInst::ICMP_SGE:
+ case CmpInst::FCMP_OGE:
+ case CmpInst::FCMP_UGE:
+ attr += 3;
+ break;
+ case CmpInst::ICMP_ULT:
+ case CmpInst::ICMP_SLT:
+ case CmpInst::FCMP_OLT:
+ case CmpInst::FCMP_ULT:
+ attr += 4;
+ break;
+ case CmpInst::ICMP_ULE:
+ case CmpInst::ICMP_SLE:
+ case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_ULE:
+ attr += 5;
+ break;
+ default:
+ break;
+
+ }
+
+ if (selectcmpInst->getOpcode() == Instruction::FCmp) {
+
+ auto ty0 = op0->getType();
+ if (ty0->isHalfTy()
#if LLVM_VERSION_MAJOR >= 11
- || ty0->isBFloatTy()
+ || ty0->isBFloatTy()
#endif
- )
- max_size = 16;
- else if (ty0->isFloatTy())
- max_size = 32;
- else if (ty0->isDoubleTy())
- max_size = 64;
+ )
+ max_size = 16;
+ else if (ty0->isFloatTy())
+ max_size = 32;
+ else if (ty0->isDoubleTy())
+ max_size = 64;
+ else if (ty0->isX86_FP80Ty())
+ max_size = 80;
+ else if (ty0->isFP128Ty() || ty0->isPPC_FP128Ty())
+ max_size = 128;
+
+ attr += 8;
+ do_cast = 1;
- if (max_size) {
+ } else {
- Value *V0 = IRB.CreateBitCast(op0, IntegerType::get(C, max_size));
- intTyOp0 = dyn_cast<IntegerType>(V0->getType());
- Value *V1 = IRB.CreateBitCast(op1, IntegerType::get(C, max_size));
- intTyOp1 = dyn_cast<IntegerType>(V1->getType());
+ intTyOp0 = dyn_cast<IntegerType>(op0->getType());
+ intTyOp1 = dyn_cast<IntegerType>(op1->getType());
if (intTyOp0 && intTyOp1) {
max_size = intTyOp0->getBitWidth() > intTyOp1->getBitWidth()
? intTyOp0->getBitWidth()
: intTyOp1->getBitWidth();
- args.push_back(V0);
- args.push_back(V1);
- } else {
+ }
+
+ }
+
+ if (!max_size) { continue; }
+
+ // _ExtInt() with bit widths that are not a multiple of 8
+ if (max_size % 8) {
+
+ max_size = (((max_size / 8) + 1) * 8);
+ do_cast = 1;
+
+ }
+
+ if (max_size > 128) {
+
+ if (!be_quiet) {
- max_size = 0;
+ fprintf(stderr,
+ "Cannot handle this compare bit size: %u (truncating)\n",
+ max_size);
}
+ max_size = 128;
+ do_cast = 1;
+
+ }
+
+ // do we need to cast?
+ switch (max_size) {
+
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ cast_size = max_size;
+ break;
+ default:
+ cast_size = 128;
+ do_cast = 1;
+
}
- } else {
+ if (do_cast) {
+
+ // LLVM optimizes away bitcasts of ConstantInt values, which would create
+ // illegal calls - so we have to work around this.
+
+ ConstantInt *cint = dyn_cast<ConstantInt>(op0);
+ if (cint) {
+
+ uint64_t val = cint->getZExtValue();
+ // fprintf(stderr, "ConstantInt: %lu\n", val);
+ ConstantInt *new_param = NULL;
+ switch (cast_size) {
+
+ case 8:
+ new_param = ConstantInt::get(Int8Ty, val);
+ break;
+ case 16:
+ new_param = ConstantInt::get(Int16Ty, val);
+ break;
+ case 32:
+ new_param = ConstantInt::get(Int32Ty, val);
+ break;
+ case 64:
+ new_param = ConstantInt::get(Int64Ty, val);
+ break;
+ case 128:
+ new_param = ConstantInt::get(Int128Ty, val);
+ break;
+
+ }
+
+ if (!new_param) { continue; }
+ args.push_back(new_param);
+
+ } else {
+
+ Value *V0 = IRB.CreateBitCast(op0, IntegerType::get(C, cast_size));
+ args.push_back(V0);
+
+ }
+
+ cint = dyn_cast<ConstantInt>(op1);
+ if (cint) {
+
+ uint64_t val = cint->getZExtValue();
+ ConstantInt *new_param = NULL;
+ switch (cast_size) {
+
+ case 8:
+ new_param = ConstantInt::get(Int8Ty, val);
+ break;
+ case 16:
+ new_param = ConstantInt::get(Int16Ty, val);
+ break;
+ case 32:
+ new_param = ConstantInt::get(Int32Ty, val);
+ break;
+ case 64:
+ new_param = ConstantInt::get(Int64Ty, val);
+ break;
+ case 128:
+ new_param = ConstantInt::get(Int128Ty, val);
+ break;
+
+ }
+
+ if (!new_param) { continue; }
+ args.push_back(new_param);
+
+ } else {
+
+ Value *V1 = IRB.CreateBitCast(op1, IntegerType::get(C, cast_size));
+ args.push_back(V1);
- intTyOp0 = dyn_cast<IntegerType>(op0->getType());
- intTyOp1 = dyn_cast<IntegerType>(op1->getType());
+ }
- if (intTyOp0 && intTyOp1) {
+ } else {
- max_size = intTyOp0->getBitWidth() > intTyOp1->getBitWidth()
- ? intTyOp0->getBitWidth()
- : intTyOp1->getBitWidth();
args.push_back(op0);
args.push_back(op1);
}
- }
+ ConstantInt *attribute = ConstantInt::get(Int8Ty, attr);
+ args.push_back(attribute);
+
+ if (cast_size != max_size) {
+
+ ConstantInt *bitsize = ConstantInt::get(Int8Ty, (max_size / 8) - 1);
+ args.push_back(bitsize);
+
+ }
+
+ // fprintf(stderr, "_ExtInt(%u) castTo %u with attr %u didcast %u\n",
+ // max_size, cast_size, attr, do_cast);
+
+ switch (cast_size) {
- if (max_size < 8 || max_size > 64 || !intTyOp0 || !intTyOp1) continue;
-
- switch (max_size) {
-
- case 8:
- IRB.CreateCall(cmplogHookIns1, args);
- break;
- case 16:
- IRB.CreateCall(cmplogHookIns2, args);
- break;
- case 32:
- IRB.CreateCall(cmplogHookIns4, args);
- break;
- case 64:
- IRB.CreateCall(cmplogHookIns8, args);
- break;
- default:
- break;
+ case 8:
+ IRB.CreateCall(cmplogHookIns1, args);
+ break;
+ case 16:
+ IRB.CreateCall(cmplogHookIns2, args);
+ break;
+ case 32:
+ IRB.CreateCall(cmplogHookIns4, args);
+ break;
+ case 64:
+ IRB.CreateCall(cmplogHookIns8, args);
+ break;
+ case 128:
+ if (max_size == 128) {
+
+ IRB.CreateCall(cmplogHookIns16, args);
+
+ } else {
+
+ IRB.CreateCall(cmplogHookInsN, args);
+
+ }
+
+ break;
+
+ }
}
}
- return true;
+ if (switches.size() || icomps.size())
+ return true;
+ else
+ return false;
}
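The pass now encodes the compare predicate into the new attribute operand: 0 = NE, 1 = EQ, 2 = GT, 3 = GE, 4 = LT, 5 = LE, with 8 added for floating-point compares (switch cases are logged as EQ). A hypothetical decoder on the consumer side:

/* Sketch: turn the attribute byte written by the pass back into an operator. */
static const char *cmplog_attr_str(u8 attr) {

  static const char *op[6] = {"!=", "==", ">", ">=", "<", "<="};

  if (attr > 13 || (attr & 7) > 5) { return "?"; }
  return op[attr & 7];                     /* bit 3 (value 8) marks an FP compare */

}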
diff --git a/src/afl-cc.c b/src/afl-cc.c
index 8fb42718..02c9c7c5 100644
--- a/src/afl-cc.c
+++ b/src/afl-cc.c
@@ -529,9 +529,9 @@ static void edit_params(u32 argc, char **argv, char **envp) {
cc_params[cc_par_cnt++] = alloc_printf(
"-Wl,-mllvm=-load=%s/cmplog-routines-pass.so", obj_path);
cc_params[cc_par_cnt++] = alloc_printf(
- "-Wl,-mllvm=-load=%s/split-switches-pass.so", obj_path);
- cc_params[cc_par_cnt++] = alloc_printf(
"-Wl,-mllvm=-load=%s/cmplog-instructions-pass.so", obj_path);
+ cc_params[cc_par_cnt++] = alloc_printf(
+ "-Wl,-mllvm=-load=%s/split-switches-pass.so", obj_path);
} else {
@@ -541,18 +541,18 @@ static void edit_params(u32 argc, char **argv, char **envp) {
cc_params[cc_par_cnt++] =
alloc_printf("%s/cmplog-routines-pass.so", obj_path);
- // reuse split switches from laf
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] =
- alloc_printf("%s/split-switches-pass.so", obj_path);
+ alloc_printf("%s/cmplog-instructions-pass.so", obj_path);
+ // reuse split switches from laf
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] =
- alloc_printf("%s/cmplog-instructions-pass.so", obj_path);
+ alloc_printf("%s/split-switches-pass.so", obj_path);
}
@@ -792,10 +792,8 @@ static void edit_params(u32 argc, char **argv, char **envp) {
}
-#if defined(USEMMAP)
- #if !defined(__HAIKU__)
+#if defined(USEMMAP) && !defined(__HAIKU__)
cc_params[cc_par_cnt++] = "-lrt";
- #endif
#endif
cc_params[cc_par_cnt++] = "-D__AFL_HAVE_MANUAL_CONTROL=1";
@@ -858,6 +856,7 @@ static void edit_params(u32 argc, char **argv, char **envp) {
cc_params[cc_par_cnt++] =
"-D__AFL_COVERAGE_DISCARD()=__afl_coverage_discard()";
cc_params[cc_par_cnt++] = "-D__AFL_COVERAGE_ABORT()=__afl_coverage_abort()";
+
cc_params[cc_par_cnt++] =
"-D__AFL_FUZZ_TESTCASE_BUF=(__afl_fuzz_ptr ? __afl_fuzz_ptr : "
"__afl_fuzz_alt_ptr)";
@@ -967,10 +966,8 @@ static void edit_params(u32 argc, char **argv, char **envp) {
alloc_printf("-Wl,--dynamic-list=%s/dynamic_list.txt", obj_path);
#endif
- #if defined(USEMMAP)
- #if !defined(__HAIKU__)
+ #if defined(USEMMAP) && !defined(__HAIKU__)
cc_params[cc_par_cnt++] = "-lrt";
- #endif
#endif
}
@@ -1278,7 +1275,6 @@ int main(int argc, char **argv, char **envp) {
}
- // this is a hidden option
if (strncasecmp(ptr2, "llvmnative", strlen("llvmnative")) == 0 ||
strncasecmp(ptr2, "llvm-native", strlen("llvm-native")) == 0) {
@@ -1349,29 +1345,28 @@ int main(int argc, char **argv, char **envp) {
if (strncasecmp(ptr2, "ngram", strlen("ngram")) == 0) {
- ptr2 += strlen("ngram");
- while (*ptr2 && (*ptr2 < '0' || *ptr2 > '9'))
- ptr2++;
+ u8 *ptr3 = ptr2 + strlen("ngram");
+ while (*ptr3 && (*ptr3 < '0' || *ptr3 > '9'))
+ ptr3++;
- if (!*ptr2) {
+ if (!*ptr3) {
- if ((ptr2 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL)
+ if ((ptr3 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL)
FATAL(
"you must set the NGRAM size with (e.g. for value 2) "
"AFL_LLVM_INSTRUMENT=ngram-2");
}
- ngram_size = atoi(ptr2);
+ ngram_size = atoi(ptr3);
if (ngram_size < 2 || ngram_size > NGRAM_SIZE_MAX)
FATAL(
"NGRAM instrumentation option must be between 2 and "
- "NGRAM_SIZE_MAX "
- "(%u)",
+ "NGRAM_SIZE_MAX (%u)",
NGRAM_SIZE_MAX);
instrument_opt_mode |= (INSTRUMENT_OPT_NGRAM);
- ptr2 = alloc_printf("%u", ngram_size);
- setenv("AFL_LLVM_NGRAM_SIZE", ptr2, 1);
+ u8 *ptr4 = alloc_printf("%u", ngram_size);
+ setenv("AFL_LLVM_NGRAM_SIZE", ptr4, 1);
}
@@ -1507,6 +1502,7 @@ int main(int argc, char **argv, char **envp) {
"((instrumentation/README.ngram.md)\n"
" INSTRIM: Dominator tree (for LLVM <= 6.0) "
"(instrumentation/README.instrim.md)\n\n");
+
#undef NATIVE_MSG
SAYF(
@@ -1641,16 +1637,15 @@ int main(int argc, char **argv, char **envp) {
if (have_lto)
SAYF("afl-cc LTO with ld=%s %s\n", AFL_REAL_LD, AFL_CLANG_FLTO);
if (have_llvm)
- SAYF("afl-cc LLVM version %d using binary path \"%s\".\n", LLVM_MAJOR,
+ SAYF("afl-cc LLVM version %d using the binary path \"%s\".\n", LLVM_MAJOR,
LLVM_BINDIR);
#endif
-#if defined(USEMMAP)
+#ifdef USEMMAP
#if !defined(__HAIKU__)
- cc_params[cc_par_cnt++] = "-lrt";
- SAYF("Compiled with shm_open support (adds -lrt when linking).\n");
- #else
SAYF("Compiled with shm_open support.\n");
+ #else
+ SAYF("Compiled with shm_open support (adds -lrt when linking).\n");
#endif
#else
SAYF("Compiled with shmat support.\n");
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index dbffa4f9..cbff6d7e 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -729,6 +729,30 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
add_to_queue(afl, fn2, st.st_size >= MAX_FILE ? MAX_FILE : st.st_size,
passed_det);
+ if (unlikely(afl->shm.cmplog_mode)) {
+
+ if (afl->cmplog_lvl == 1) {
+
+ if (!afl->cmplog_max_filesize ||
+ afl->cmplog_max_filesize < st.st_size) {
+
+ afl->cmplog_max_filesize = st.st_size;
+
+ }
+
+ } else if (afl->cmplog_lvl == 2) {
+
+ if (!afl->cmplog_max_filesize ||
+ afl->cmplog_max_filesize > st.st_size) {
+
+ afl->cmplog_max_filesize = st.st_size;
+
+ }
+
+ }
+
+ }
+
if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
@@ -756,6 +780,20 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
}
+ if (unlikely(afl->shm.cmplog_mode)) {
+
+ if (afl->cmplog_max_filesize < 1024) {
+
+ afl->cmplog_max_filesize = 1024;
+
+ } else {
+
+ afl->cmplog_max_filesize = (((afl->cmplog_max_filesize >> 10) + 1) << 10);
+
+ }
+
+ }
+
afl->last_path_time = 0;
afl->queued_at_start = afl->queued_paths;
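After the seeds are scanned, cmplog_max_filesize is rounded up to the next KiB with a floor of 1024 (at -l 1 the largest seed sets it, at -l 2 the smallest). A standalone sketch of that rounding, with worked values:

/* Sketch: the rounding applied to afl->cmplog_max_filesize above. */
static u32 round_up_to_kib(u32 size) {

  if (size < 1024) { return 1024; }
  return ((size >> 10) + 1) << 10;         /* 3000 -> 3072, 4096 -> 5120 */

}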
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index f9509e86..596bae22 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -165,7 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
/* See if one-byte adjustments to any byte could produce this result. */
- for (i = 0; i < blen; ++i) {
+ for (i = 0; (u8)i < blen; ++i) {
u8 a = old_val >> (8 * i), b = new_val >> (8 * i);
@@ -193,7 +193,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
diffs = 0;
- for (i = 0; i < blen / 2; ++i) {
+ for (i = 0; (u8)i < blen / 2; ++i) {
u16 a = old_val >> (16 * i), b = new_val >> (16 * i);
@@ -290,7 +290,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
/* See if two-byte insertions over old_val could give us new_val. */
- for (i = 0; (s32)i < blen - 1; ++i) {
+ for (i = 0; (u8)i < blen - 1; ++i) {
for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
@@ -545,14 +545,31 @@ u8 fuzz_one_original(afl_state_t *afl) {
else
orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
- if (unlikely(perf_score == 0)) { goto abandon_entry; }
+ if (unlikely(perf_score <= 0)) { goto abandon_entry; }
- if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) {
+ if (unlikely(afl->shm.cmplog_mode &&
+ afl->queue_cur->colorized < afl->cmplog_lvl &&
+ (u32)len <= afl->cmplog_max_filesize)) {
- if (input_to_state_stage(afl, in_buf, out_buf, len,
- afl->queue_cur->exec_cksum)) {
+ if (unlikely(len < 4)) {
- goto abandon_entry;
+ afl->queue_cur->colorized = 0xff;
+
+ } else {
+
+ if (afl->cmplog_lvl == 3 ||
+ (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) ||
+ !(afl->fsrv.total_execs % afl->queued_paths) ||
+ get_cur_time() - afl->last_path_time > 15000) {
+
+ if (input_to_state_stage(afl, in_buf, out_buf, len,
+ afl->queue_cur->exec_cksum)) {
+
+ goto abandon_entry;
+
+ }
+
+ }
}
@@ -2796,7 +2813,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
}
- s32 len, temp_len;
+ u32 len, temp_len;
u32 i;
u32 j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -2952,14 +2969,31 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
else
orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
- if (unlikely(perf_score == 0)) { goto abandon_entry; }
+ if (unlikely(perf_score <= 0)) { goto abandon_entry; }
- if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) {
+ if (unlikely(afl->shm.cmplog_mode &&
+ afl->queue_cur->colorized < afl->cmplog_lvl &&
+ (u32)len <= afl->cmplog_max_filesize)) {
- if (input_to_state_stage(afl, in_buf, out_buf, len,
- afl->queue_cur->exec_cksum)) {
+ if (unlikely(len < 4)) {
- goto abandon_entry;
+ afl->queue_cur->colorized = 0xff;
+
+ } else {
+
+ if (afl->cmplog_lvl == 3 ||
+ (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) ||
+ !(afl->fsrv.total_execs % afl->queued_paths) ||
+ get_cur_time() - afl->last_path_time > 15000) {
+
+ if (input_to_state_stage(afl, in_buf, out_buf, len,
+ afl->queue_cur->exec_cksum)) {
+
+ goto abandon_entry;
+
+ }
+
+ }
}
@@ -3315,7 +3349,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
orig_hit_cnt = new_hit_cnt;
- for (i = 0; (s32)i < len - 1; ++i) {
+ for (i = 0; i < len - 1; ++i) {
/* Let's consult the effector map... */
@@ -3357,7 +3391,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
orig_hit_cnt = new_hit_cnt;
- for (i = 0; (s32)i < len - 3; ++i) {
+ for (i = 0; i < len - 3; ++i) {
/* Let's consult the effector map... */
if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
@@ -3489,7 +3523,7 @@ skip_bitflip:
orig_hit_cnt = new_hit_cnt;
- for (i = 0; (s32)i < len - 1; ++i) {
+ for (i = 0; i < len - 1; ++i) {
u16 orig = *(u16 *)(out_buf + i);
@@ -3615,7 +3649,7 @@ skip_bitflip:
orig_hit_cnt = new_hit_cnt;
- for (i = 0; (s32)i < len - 3; ++i) {
+ for (i = 0; i < len - 3; ++i) {
u32 orig = *(u32 *)(out_buf + i);
@@ -3805,7 +3839,7 @@ skip_arith:
orig_hit_cnt = new_hit_cnt;
- for (i = 0; (s32)i < len - 1; ++i) {
+ for (i = 0; i < len - 1; ++i) {
u16 orig = *(u16 *)(out_buf + i);
@@ -3891,7 +3925,7 @@ skip_arith:
orig_hit_cnt = new_hit_cnt;
- for (i = 0; (s32)i < len - 3; ++i) {
+ for (i = 0; i < len - 3; ++i) {
u32 orig = *(u32 *)(out_buf + i);
@@ -4120,7 +4154,7 @@ skip_user_extras:
/* See the comment in the earlier code; extras are sorted by size. */
- if ((s32)(afl->a_extras[j].len) > (s32)(len - i) ||
+ if ((afl->a_extras[j].len) > (len - i) ||
!memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) ||
!memchr(eff_map + EFF_APOS(i), 1,
EFF_SPAN_ALEN(i, afl->a_extras[j].len))) {
@@ -4837,7 +4871,7 @@ pacemaker_fuzzing:
u32 copy_from, copy_to, copy_len;
copy_len = choose_block_len(afl, new_len - 1);
- if ((s32)copy_len > temp_len) copy_len = temp_len;
+ if (copy_len > temp_len) copy_len = temp_len;
copy_from = rand_below(afl, new_len - copy_len + 1);
copy_to = rand_below(afl, temp_len - copy_len + 1);
@@ -5033,8 +5067,7 @@ pacemaker_fuzzing:
the last differing byte. Bail out if the difference is just a single
byte or so. */
- locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff,
- &l_diff);
+ locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
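The redqueen/input-to-state stage is now gated on cmplog_lvl, the testcase length, and recent fuzzer activity; the same condition appears in both fuzz_one_original and mopt_common_fuzzing above. A condensed sketch of that decision, assuming the afl_state_t/queue_entry fields used in the hunks (should_run_i2s itself is hypothetical):

/* Sketch: when the input-to-state stage is (re-)entered for a queue entry. */
static u8 should_run_i2s(afl_state_t *afl, struct queue_entry *q, u32 len) {

  if (!afl->shm.cmplog_mode) { return 0; }
  if (q->colorized >= afl->cmplog_lvl) { return 0; }    /* done at this lvl */
  if (len > afl->cmplog_max_filesize) { return 0; }     /* too large        */
  if (len < 4) {

    q->colorized = 0xff;                                /* never retry      */
    return 0;

  }

  return afl->cmplog_lvl == 3 ||                        /* -l 3: always     */
         (afl->cmplog_lvl == 2 && q->tc_ref) ||         /* -l 2: top rated  */
         !(afl->fsrv.total_execs % afl->queued_paths) ||
         get_cur_time() - afl->last_path_time > 15000;  /* 15s without finds */

}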
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 66938635..aec57a6e 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -433,6 +433,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
q->passed_det = passed_det;
q->trace_mini = NULL;
q->testcase_buf = NULL;
+ q->mother = afl->queue_cur;
#ifdef INTROSPECTION
q->bitsmap_size = afl->bitsmap_size;
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 28585afe..955a9232 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -28,6 +28,8 @@
#include "afl-fuzz.h"
#include "cmplog.h"
+//#define _DEBUG
+
///// Colorization
struct range {
@@ -35,6 +37,8 @@ struct range {
u32 start;
u32 end;
struct range *next;
+ struct range *prev;
+ u8 ok;
};
@@ -44,6 +48,8 @@ static struct range *add_range(struct range *ranges, u32 start, u32 end) {
r->start = start;
r->end = end;
r->next = ranges;
+ r->ok = 0;
+ if (likely(ranges)) ranges->prev = r;
return r;
}
@@ -51,45 +57,61 @@ static struct range *add_range(struct range *ranges, u32 start, u32 end) {
static struct range *pop_biggest_range(struct range **ranges) {
struct range *r = *ranges;
- struct range *prev = NULL;
struct range *rmax = NULL;
- struct range *prev_rmax = NULL;
u32 max_size = 0;
while (r) {
- u32 s = r->end - r->start;
- if (s >= max_size) {
+ if (!r->ok) {
+
+ u32 s = 1 + r->end - r->start;
+
+ if (s >= max_size) {
+
+ max_size = s;
+ rmax = r;
- max_size = s;
- prev_rmax = prev;
- rmax = r;
+ }
}
- prev = r;
r = r->next;
}
- if (rmax) {
+ return rmax;
- if (prev_rmax) {
+}
- prev_rmax->next = rmax->next;
+#ifdef _DEBUG
+// static int logging = 0;
+static void dump(char *txt, u8 *buf, u32 len) {
- } else {
+ u32 i;
+ fprintf(stderr, "DUMP %s %llx ", txt, hash64(buf, len, 0));
+ for (i = 0; i < len; i++)
+ fprintf(stderr, "%02x", buf[i]);
+ fprintf(stderr, "\n");
- *ranges = rmax->next;
+}
- }
+static void dump_file(char *path, char *name, u32 counter, u8 *buf, u32 len) {
- }
+ char fn[4096];
+ if (!path) path = ".";
+ snprintf(fn, sizeof(fn), "%s/%s%d", path, name, counter);
+ int fd = open(fn, O_RDWR | O_CREAT | O_TRUNC, 0644);
+ if (fd >= 0) {
- return rmax;
+ write(fd, buf, len);
+ close(fd);
+
+ }
}
+#endif
+
static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) {
if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; }
@@ -99,107 +121,270 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) {
}
-static void xor_replace(u8 *buf, u32 len) {
+/* replace every byte with a different value but stay within the same character class */
+static void type_replace(afl_state_t *afl, u8 *buf, u32 len) {
u32 i;
+ u8 c;
for (i = 0; i < len; ++i) {
- buf[i] ^= 0xff;
+ // won't help for UTF or non-Latin charsets
+ do {
+
+ switch (buf[i]) {
+
+ case 'A' ... 'F':
+ c = 'A' + rand_below(afl, 1 + 'F' - 'A');
+ break;
+ case 'a' ... 'f':
+ c = 'a' + rand_below(afl, 1 + 'f' - 'a');
+ break;
+ case '0':
+ c = '1';
+ break;
+ case '1':
+ c = '0';
+ break;
+ case '2' ... '9':
+ c = '2' + rand_below(afl, 1 + '9' - '2');
+ break;
+ case 'G' ... 'Z':
+ c = 'G' + rand_below(afl, 1 + 'Z' - 'G');
+ break;
+ case 'g' ... 'z':
+ c = 'g' + rand_below(afl, 1 + 'z' - 'g');
+ break;
+ case '!' ... '*':
+ c = '!' + rand_below(afl, 1 + '*' - '!');
+ break;
+ case ',' ... '.':
+ c = ',' + rand_below(afl, 1 + '.' - ',');
+ break;
+ case ':' ... '@':
+ c = ':' + rand_below(afl, 1 + '@' - ':');
+ break;
+ case '[' ... '`':
+ c = '[' + rand_below(afl, 1 + '`' - '[');
+ break;
+ case '{' ... '~':
+ c = '{' + rand_below(afl, 1 + '~' - '{');
+ break;
+ case '+':
+ c = '/';
+ break;
+ case '/':
+ c = '+';
+ break;
+ case ' ':
+ c = '\t';
+ break;
+ case '\t':
+ c = ' ';
+ break;
+ /*
+ case '\r':
+ case '\n':
+ // nothing ...
+ break;
+ */
+ default:
+ c = (buf[i] ^ 0xff);
+
+ }
+
+ } while (c == buf[i]);
+
+ buf[i] = c;
}
}
-static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) {
+static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum,
+ struct tainted **taints) {
- struct range *ranges = add_range(NULL, 0, len);
- u8 * backup = ck_alloc_nozero(len);
+ struct range * ranges = add_range(NULL, 0, len - 1), *rng;
+ struct tainted *taint = NULL;
+ u8 * backup = ck_alloc_nozero(len);
+ u8 * changed = ck_alloc_nozero(len);
u64 orig_hit_cnt, new_hit_cnt;
orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
afl->stage_name = "colorization";
afl->stage_short = "colorization";
- afl->stage_max = 1000;
+ afl->stage_max = (len << 1);
- struct range *rng = NULL;
afl->stage_cur = 0;
+ memcpy(backup, buf, len);
+ memcpy(changed, buf, len);
+ type_replace(afl, changed, len);
+
while ((rng = pop_biggest_range(&ranges)) != NULL &&
afl->stage_cur < afl->stage_max) {
- u32 s = rng->end - rng->start;
+ u32 s = 1 + rng->end - rng->start;
+
+ memcpy(buf + rng->start, changed + rng->start, s);
- if (s != 0) {
+ u64 cksum;
+ u64 start_us = get_cur_time_us();
+ if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) {
- /* Range not empty */
+ goto checksum_fail;
- memcpy(backup, buf + rng->start, s);
- xor_replace(buf + rng->start, s);
+ }
+
+ u64 stop_us = get_cur_time_us();
+
+ /* Discard if the mutations change the path or if they are too
+ detrimental to speed - though how could the same path have a much
+ different speed ... */
+ if (cksum != exec_cksum ||
+ (unlikely(stop_us - start_us > 3 * afl->queue_cur->exec_us) &&
+ likely(!afl->fixed_seed))) {
+
+ memcpy(buf + rng->start, backup + rng->start, s);
- u64 cksum;
- u64 start_us = get_cur_time_us();
- if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) {
+ if (s > 1) { // to not add 0 size ranges
- goto checksum_fail;
+ ranges = add_range(ranges, rng->start, rng->start - 1 + s / 2);
+ ranges = add_range(ranges, rng->start + s / 2, rng->end);
}
- u64 stop_us = get_cur_time_us();
+ if (ranges == rng) {
+
+ ranges = rng->next;
+ if (ranges) { ranges->prev = NULL; }
+
+ } else if (rng->next) {
+
+ rng->prev->next = rng->next;
+ rng->next->prev = rng->prev;
- /* Discard if the mutations change the paths or if it is too decremental
- in speed */
- if (cksum != exec_cksum ||
- ((stop_us - start_us > 2 * afl->queue_cur->exec_us) &&
- likely(!afl->fixed_seed))) {
+ } else {
- ranges = add_range(ranges, rng->start, rng->start + s / 2);
- ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end);
- memcpy(buf + rng->start, backup, s);
+ if (rng->prev) { rng->prev->next = NULL; }
}
+ free(rng);
+
+ } else {
+
+ rng->ok = 1;
+
}
- ck_free(rng);
- rng = NULL;
++afl->stage_cur;
}
- if (afl->stage_cur < afl->stage_max) { afl->queue_cur->fully_colorized = 1; }
+ rng = ranges;
+ while (rng) {
- new_hit_cnt = afl->queued_paths + afl->unique_crashes;
- afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
- afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur;
- ck_free(backup);
+ rng = rng->next;
- ck_free(rng);
- rng = NULL;
+ }
- while (ranges) {
+ u32 i = 1;
+ u32 positions = 0;
+ while (i) {
+ restart:
+ i = 0;
+ struct range *r = NULL;
+ u32 pos = (u32)-1;
rng = ranges;
- ranges = rng->next;
- ck_free(rng);
- rng = NULL;
- }
+ while (rng) {
- return 0;
+ if (rng->ok == 1 && rng->start < pos) {
-checksum_fail:
- if (rng) { ck_free(rng); }
- ck_free(backup);
+ if (taint && taint->pos + taint->len == rng->start) {
+
+ taint->len += (1 + rng->end - rng->start);
+ positions += (1 + rng->end - rng->start);
+ rng->ok = 2;
+ goto restart;
+
+ } else {
+
+ r = rng;
+ pos = rng->start;
+
+ }
+
+ }
+
+ rng = rng->next;
+
+ }
+
+ if (r) {
+
+ struct tainted *t = ck_alloc_nozero(sizeof(struct tainted));
+ t->pos = r->start;
+ t->len = 1 + r->end - r->start;
+ positions += (1 + r->end - r->start);
+ if (likely(taint)) { taint->prev = t; }
+ t->next = taint;
+ t->prev = NULL;
+ taint = t;
+ r->ok = 2;
+ i = 1;
+ }
+
+ }
+
+ *taints = taint;
+
+ /* temporary: clean ranges */
while (ranges) {
rng = ranges;
ranges = rng->next;
ck_free(rng);
- rng = NULL;
}
+ new_hit_cnt = afl->queued_paths + afl->unique_crashes;
+
+#ifdef _DEBUG
+ /*
+ char fn[4096];
+ snprintf(fn, sizeof(fn), "%s/introspection_color.txt", afl->out_dir);
+ FILE *f = fopen(fn, "a");
+ if (f) {
+
+ */
+ FILE *f = stderr;
+ fprintf(f,
+ "Colorization: fname=%s len=%u result=%u execs=%u found=%llu "
+ "taint=%u\n",
+ afl->queue_cur->fname, len, afl->queue_cur->colorized, afl->stage_cur,
+ new_hit_cnt - orig_hit_cnt, positions);
+/*
+ fclose(f);
+
+ }
+
+*/
+#endif
+
+ afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
+ afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur;
+ ck_free(backup);
+ ck_free(changed);
+
+ return 0;
+
+checksum_fail:
+ ck_free(backup);
+ ck_free(changed);
+
return 1;
}
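Colorization works divide-and-conquer: the largest unresolved range gets its bytes replaced from the type_replace'd copy; if the execution path (or the speed) changes, the bytes are restored and the range is split in half, otherwise it is marked ok and later merged into the taint list. A condensed sketch of that per-range decision (the real loop above additionally unlinks and frees the rejected range):

/* Sketch: split-or-keep step of the colorization loop. */
static struct range *split_or_keep(struct range *ranges, struct range *rng,
                                   u8 path_changed) {

  if (path_changed) {

    u32 s = 1 + rng->end - rng->start;

    if (s > 1) {                           /* do not add zero-size ranges */

      ranges = add_range(ranges, rng->start, rng->start - 1 + s / 2);
      ranges = add_range(ranges, rng->start + s / 2, rng->end);

    }

  } else {

    rng->ok = 1;                           /* recolorable without changing the path */

  }

  return ranges;

}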
@@ -212,12 +397,19 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) {
orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
+#ifdef _DEBUG
+ dump("DATA", buf, len);
+#endif
+
if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; }
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
if (unlikely(new_hit_cnt != orig_hit_cnt)) {
+#ifdef _DEBUG
+ fprintf(stderr, "NEW FIND\n");
+#endif
*status = 1;
} else {
@@ -278,11 +470,33 @@ static int strntoull(const char *str, size_t sz, char **end, int base,
}
static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
- u64 pattern, u64 repl, u64 o_pattern, u32 idx,
- u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse,
- u8 *status) {
-
- if (!buf) { FATAL("BUG: buf was NULL. Please report this.\n"); }
+ u64 pattern, u64 repl, u64 o_pattern,
+ u64 changed_val, u8 attr, u32 idx, u32 taint_len,
+ u8 *orig_buf, u8 *buf, u8 *cbuf, u32 len,
+ u8 do_reverse, u8 lvl, u8 *status) {
+
+ // (void)(changed_val); // TODO
+ // we can use the information in changed_val to see if there is a
+ // computable i2s transformation.
+ // if (pattern != o_pattern && repl != changed_val) {
+
+ // u64 in_diff = pattern - o_pattern, out_diff = repl - changed_val;
+ // if (in_diff != out_diff) {
+
+ // switch(in_diff) {
+
+ // detect uppercase <-> lowercase, base64, hex encoding, etc.:
+ // repl = reverse_transform(TYPE, pattern);
+ // }
+ // }
+ // }
+ // not 100% but would have a chance to be detected
+
+ // fprintf(stderr,
+ // "Encode: %llx->%llx into %llx(<-%llx) at pos=%u "
+ // "taint_len=%u shape=%u attr=%u\n",
+ // o_pattern, pattern, repl, changed_val, idx, taint_len,
+ // h->shape + 1, attr);
u64 *buf_64 = (u64 *)&buf[idx];
u32 *buf_32 = (u32 *)&buf[idx];
@@ -293,76 +507,215 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u16 *o_buf_16 = (u16 *)&orig_buf[idx];
u8 * o_buf_8 = &orig_buf[idx];
- u32 its_len = len - idx;
- // *status = 0;
+ u32 its_len = MIN(len - idx, taint_len);
u8 * endptr;
u8 use_num = 0, use_unum = 0;
unsigned long long unum;
long long num;
- if (afl->queue_cur->is_ascii) {
+ // reverse atoi()/strntoll()/strntoull() is expensive, so we only do it at lvl 3
+ if (lvl & 4) {
- endptr = buf_8;
- if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) {
+ if (afl->queue_cur->is_ascii) {
- if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum))
- use_unum = 1;
+ endptr = buf_8;
+ if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) {
- } else
+ if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum))
+ use_unum = 1;
- use_num = 1;
+ } else
- }
+ use_num = 1;
- if (use_num && (u64)num == pattern) {
+ }
- size_t old_len = endptr - buf_8;
- size_t num_len = snprintf(NULL, 0, "%lld", num);
+#ifdef _DEBUG
+ if (idx == 0)
+ fprintf(stderr, "ASCII is=%u use_num=%u use_unum=%u idx=%u %llx==%llx\n",
+ afl->queue_cur->is_ascii, use_num, use_unum, idx, num, pattern);
+#endif
- u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len);
- if (unlikely(!new_buf)) { PFATAL("alloc"); }
- memcpy(new_buf, buf, idx);
+ // num is likely not pattern as atoi("AAA") will be zero...
+ if (use_num && ((u64)num == pattern || !num)) {
- snprintf(new_buf + idx, num_len, "%lld", num);
- memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len);
+ u8 tmp_buf[32];
+ size_t num_len = snprintf(tmp_buf, sizeof(tmp_buf), "%lld", repl);
+ size_t old_len = endptr - buf_8;
- if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; }
+ u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len);
+ if (unlikely(!new_buf)) { PFATAL("alloc"); }
- } else if (use_unum && unum == pattern) {
+ memcpy(new_buf, buf, idx);
+ memcpy(new_buf + idx, tmp_buf, num_len);
+ memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len);
- size_t old_len = endptr - buf_8;
- size_t num_len = snprintf(NULL, 0, "%llu", unum);
+ if (new_buf[idx + num_len] >= '0' && new_buf[idx + num_len] <= '9') {
+
+ new_buf[idx + num_len] = ' ';
+
+ }
- u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len);
- if (unlikely(!new_buf)) { PFATAL("alloc"); }
- memcpy(new_buf, buf, idx);
+ if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; }
- snprintf(new_buf + idx, num_len, "%llu", unum);
- memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len);
+ } else if (use_unum && (unum == pattern || !unum)) {
- if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; }
+ u8 tmp_buf[32];
+ size_t num_len = snprintf(tmp_buf, sizeof(tmp_buf), "%llu", repl);
+ size_t old_len = endptr - buf_8;
+
+ u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len);
+ if (unlikely(!new_buf)) { PFATAL("alloc"); }
+
+ memcpy(new_buf, buf, idx);
+ memcpy(new_buf + idx, tmp_buf, num_len);
+ memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len);
+
+ if (new_buf[idx + num_len] >= '0' && new_buf[idx + num_len] <= '9') {
+
+ new_buf[idx + num_len] = ' ';
+
+ }
+
+ if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; }
+
+ }
}
- if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) {
+ // we only allow this for ascii2integer (above)
+ if (unlikely(pattern == o_pattern)) { return 0; }
- if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == o_pattern) {
+ if ((lvl & 1) || ((lvl & 2) && (attr >= 8 && attr <= 15)) || attr >= 16) {
- *buf_64 = repl;
- if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
- *buf_64 = pattern;
+ if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) {
+
+ // if (its_len >= 8 && (attr == 0 || attr >= 8))
+ // fprintf(stderr,
+ // "TestU64: %u>=4 %x==%llx"
+ // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n",
+ // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr,
+ // repl, changed_val);
+
+ // if this is an fcmp (attr & 8 == 8) then do not compare the patterns -
+ // due to a bug in llvm dynamic float bitcasts do not work :(
+ // the value 16 means this is a +- 1.0 test case
+ if (its_len >= 8 &&
+ ((*buf_64 == pattern && *o_buf_64 == o_pattern) || attr >= 16)) {
+
+ u64 tmp_64 = *buf_64;
+ *buf_64 = repl;
+ if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
+ if (*status == 1) { memcpy(cbuf + idx, buf_64, 8); }
+ *buf_64 = tmp_64;
+
+ // fprintf(stderr, "Status=%u\n", *status);
+
+ }
+
+ // reverse encoding
+ if (do_reverse && *status != 1) {
+
+ if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl),
+ SWAP64(o_pattern), SWAP64(changed_val),
+ attr, idx, taint_len, orig_buf, buf,
+ cbuf, len, 0, lvl, status))) {
+
+ return 1;
+
+ }
+
+ }
}
- // reverse encoding
- if (do_reverse && *status != 1) {
+ if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) {
- if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl),
- SWAP64(o_pattern), idx, orig_buf, buf,
- len, 0, status))) {
+ // if (its_len >= 4 && (attr <= 1 || attr >= 8))
+ // fprintf(stderr,
+ // "TestU32: %u>=4 %x==%llx"
+ // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n",
+ // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr,
+ // repl, changed_val);
- return 1;
+ if (its_len >= 4 &&
+ ((*buf_32 == (u32)pattern && *o_buf_32 == (u32)o_pattern) ||
+ attr >= 16)) {
+
+ u32 tmp_32 = *buf_32;
+ *buf_32 = (u32)repl;
+ if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
+ if (*status == 1) { memcpy(cbuf + idx, buf_32, 4); }
+ *buf_32 = tmp_32;
+
+ // fprintf(stderr, "Status=%u\n", *status);
+
+ }
+
+ // reverse encoding
+ if (do_reverse && *status != 1) {
+
+ if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl),
+ SWAP32(o_pattern), SWAP32(changed_val),
+ attr, idx, taint_len, orig_buf, buf,
+ cbuf, len, 0, lvl, status))) {
+
+ return 1;
+
+ }
+
+ }
+
+ }
+
+ if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) {
+
+ if (its_len >= 2 &&
+ ((*buf_16 == (u16)pattern && *o_buf_16 == (u16)o_pattern) ||
+ attr >= 16)) {
+
+ u16 tmp_16 = *buf_16;
+ *buf_16 = (u16)repl;
+ if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
+ if (*status == 1) { memcpy(cbuf + idx, buf_16, 2); }
+ *buf_16 = tmp_16;
+
+ }
+
+ // reverse encoding
+ if (do_reverse && *status != 1) {
+
+ if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl),
+ SWAP16(o_pattern), SWAP16(changed_val),
+ attr, idx, taint_len, orig_buf, buf,
+ cbuf, len, 0, lvl, status))) {
+
+ return 1;
+
+ }
+
+ }
+
+ }
+
+ if (*status != 1) { // u8
+
+ // if (its_len >= 1 && (attr <= 1 || attr >= 8))
+ // fprintf(stderr,
+ // "TestU8: %u>=1 %x==%x %x==%x (idx=%u attr=%u) <= %x<-%x\n",
+ // its_len, *buf_8, pattern, *o_buf_8, o_pattern, idx, attr,
+ // repl, changed_val);
+
+ if (its_len >= 1 &&
+ ((*buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) ||
+ attr >= 16)) {
+
+ u8 tmp_8 = *buf_8;
+ *buf_8 = (u8)repl;
+ if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
+ if (*status == 1) { cbuf[idx] = *buf_8; }
+ *buf_8 = tmp_8;
}
@@ -370,49 +723,205 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
- if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) {
+ // here we add and subtract 1 from the value, but only if it is not an
+ // == or != comparison
+ // Bits: 1 = Equal, 2 = Greater, 3 = Lesser, 4 = Float
- if (its_len >= 4 && *buf_32 == (u32)pattern &&
- *o_buf_32 == (u32)o_pattern) {
+ if (lvl < 4) { return 0; }
- *buf_32 = (u32)repl;
- if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
- *buf_32 = pattern;
+ if (attr >= 8 && attr < 16) { // lesser/greater FP comparison
+
+ u64 repl_new;
+ if (SHAPE_BYTES(h->shape) == 4 && its_len >= 4) {
+
+ float *f = (float *)&repl;
+ float g = *f;
+ g += 1.0;
+ u32 *r = (u32 *)&g;
+ repl_new = (u32)*r;
+
+ } else if (SHAPE_BYTES(h->shape) == 8 && its_len >= 8) {
+
+ double *f = (double *)&repl;
+ double g = *f;
+ g += 1.0;
+
+ u64 *r = (u64 *)&g;
+ repl_new = *r;
+
+ } else {
+
+ return 0;
}
- // reverse encoding
- if (do_reverse && *status != 1) {
+ changed_val = repl_new;
+
+ if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern,
+ changed_val, 16, idx, taint_len, orig_buf,
+ buf, cbuf, len, 1, lvl, status))) {
+
+ return 1;
+
+ }
+
+ if (SHAPE_BYTES(h->shape) == 4) {
+
+ float *f = (float *)&repl;
+ float g = *f;
+ g -= 1.0;
+ u32 *r = (u32 *)&g;
+ repl_new = (u32)*r;
+
+ } else if (SHAPE_BYTES(h->shape) == 8) {
+
+ double *f = (double *)&repl;
+ double g = *f;
+ g -= 1.0;
+ u64 *r = (u64 *)&g;
+ repl_new = *r;
+
+ } else {
+
+ return 0;
+
+ }
+
+ changed_val = repl_new;
+
+ if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern,
+ changed_val, 16, idx, taint_len, orig_buf,
+ buf, cbuf, len, 1, lvl, status))) {
+
+ return 1;
+
+ }
+
+ // transform double to float, llvm likes to do that internally ...
+ if (SHAPE_BYTES(h->shape) == 8 && its_len >= 4) {
+
+ double *f = (double *)&repl;
+ float g = (float)*f;
+ repl_new = 0;
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ memcpy((char *)&repl_new, (char *)&g, 4);
+#else
+ memcpy(((char *)&repl_new) + 4, (char *)&g, 4);
+#endif
+ changed_val = repl_new;
+ h->shape = 3; // modify shape: 3 == 4 bytes (float)
- if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl),
- SWAP32(o_pattern), idx, orig_buf, buf,
- len, 0, status))) {
+ // fprintf(stderr, "DOUBLE2FLOAT %llx\n", repl_new);
+ if (unlikely(cmp_extend_encoding(
+ afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx,
+ taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) {
+
+ h->shape = 7;
return 1;
}
+ h->shape = 7; // restore the original shape: 7 == 8 bytes (double)
+
+ }
+
+ } else if (attr > 1 && attr < 8) { // lesser/greater integer comparison
+
+ u64 repl_new;
+
+ repl_new = repl + 1;
+ changed_val = repl_new;
+ if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern,
+ changed_val, 32, idx, taint_len, orig_buf,
+ buf, cbuf, len, 1, lvl, status))) {
+
+ return 1;
+
+ }
+
+ repl_new = repl - 1;
+ changed_val = repl_new;
+ if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern,
+ changed_val, 32, idx, taint_len, orig_buf,
+ buf, cbuf, len, 1, lvl, status))) {
+
+ return 1;
+
}
}
- if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) {
+ return 0;
- if (its_len >= 2 && *buf_16 == (u16)pattern &&
- *o_buf_16 == (u16)o_pattern) {
+}
- *buf_16 = (u16)repl;
+static u8 cmp_extend_encoding128(afl_state_t *afl, struct cmp_header *h,
+ u128 pattern, u128 repl, u128 o_pattern,
+ u128 changed_val, u8 attr, u32 idx,
+ u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf,
+ u32 len, u8 do_reverse, u8 lvl, u8 *status) {
+
+ u128 *buf_128 = (u128 *)&buf[idx];
+ u64 * buf0 = (u64 *)&buf[idx];
+ u64 * buf1 = (u64 *)(buf + idx + 8);
+ u128 *o_buf_128 = (u128 *)&orig_buf[idx];
+ u32 its_len = MIN(len - idx, taint_len);
+ u64 v10 = (u64)repl;
+ u64 v11 = (u64)(repl >> 64);
+
+ // if this is an fcmp (attr & 8) then do not compare the patterns -
+ // due to a bug in llvm, dynamic float bitcasts do not work :(
+ // an attr value of 16 means this is a +- 1.0 test case
+ if (its_len >= 16) {
+
+#ifdef _DEBUG
+ fprintf(stderr, "TestU128: %u>=16 (idx=%u attr=%u) (%u)\n", its_len, idx,
+ attr, do_reverse);
+ u64 v00 = (u64)pattern;
+ u64 v01 = pattern >> 64;
+ u64 ov00 = (u64)o_pattern;
+ u64 ov01 = o_pattern >> 64;
+ u64 ov10 = (u64)changed_val;
+ u64 ov11 = changed_val >> 64;
+ u64 b00 = (u64)*buf_128;
+ u64 b01 = *buf_128 >> 64;
+ u64 ob00 = (u64)*o_buf_128;
+ u64 ob01 = *o_buf_128 >> 64;
+ fprintf(stderr,
+ "TestU128: %llx:%llx==%llx:%llx"
+ " %llx:%llx==%llx:%llx <= %llx:%llx<-%llx:%llx\n",
+ b01, b00, v01, v00, ob01, ob00, ov01, ov00, v11, v10, ov11, ov10);
+#endif
+
+ if (*buf_128 == pattern && *o_buf_128 == o_pattern) {
+
+ u128 tmp_128 = *buf_128;
+ // *buf_128 = repl; <- this crashes
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ *buf0 = v10;
+ *buf1 = v11;
+#else
+ *buf1 = v10;
+ *buf0 = v11;
+#endif
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
- *buf_16 = (u16)pattern;
+ if (*status == 1) { memcpy(cbuf + idx, buf_128, 16); }
+ *buf_128 = tmp_128;
+
+#ifdef _DEBUG
+ fprintf(stderr, "Status=%u\n", *status);
+#endif
}
// reverse encoding
if (do_reverse && *status != 1) {
- if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl),
- SWAP16(o_pattern), idx, orig_buf, buf,
- len, 0, status))) {
+ if (unlikely(cmp_extend_encoding128(
+ afl, h, SWAPN(pattern, 128), SWAPN(repl, 128),
+ SWAPN(o_pattern, 128), SWAPN(changed_val, 128), attr, idx,
+ taint_len, orig_buf, buf, cbuf, len, 0, lvl, status))) {
return 1;
@@ -422,14 +931,82 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
- /* avoid CodeQL warning on unsigned overflow */
- if (/* SHAPE_BYTES(h->shape) >= 1 && */ *status != 1) {
+ return 0;
- if (its_len >= 1 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) {
+}
- *buf_8 = (u8)repl;
+// uh, a pointer read through (long double *) reads 12 bytes, not 10 ...
+// so let's make this complicated.
+static u8 cmp_extend_encoding_ld(afl_state_t *afl, struct cmp_header *h,
+ u8 *pattern, u8 *repl, u8 *o_pattern,
+ u8 *changed_val, u8 attr, u32 idx,
+ u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf,
+ u32 len, u8 do_reverse, u8 lvl, u8 *status) {
+
+ u8 *buf_ld = &buf[idx], *o_buf_ld = &orig_buf[idx], backup[10];
+ u32 its_len = MIN(len - idx, taint_len);
+
+ if (its_len >= 10) {
+
+#ifdef _DEBUG
+ fprintf(stderr, "TestUld: %u>=10 (len=%u idx=%u attr=%u) (%u)\n", its_len,
+ len, idx, attr, do_reverse);
+ fprintf(stderr, "TestUld: ");
+ u32 i;
+ for (i = 0; i < 10; i++)
+ fprintf(stderr, "%02x", pattern[i]);
+ fprintf(stderr, "==");
+ for (i = 0; i < 10; i++)
+ fprintf(stderr, "%02x", buf_ld[i]);
+ fprintf(stderr, " ");
+ for (i = 0; i < 10; i++)
+ fprintf(stderr, "%02x", o_pattern[i]);
+ fprintf(stderr, "==");
+ for (i = 0; i < 10; i++)
+ fprintf(stderr, "%02x", o_buf_ld[i]);
+ fprintf(stderr, " <= ");
+ for (i = 0; i < 10; i++)
+ fprintf(stderr, "%02x", repl[i]);
+ fprintf(stderr, "==");
+ for (i = 0; i < 10; i++)
+ fprintf(stderr, "%02x", changed_val[i]);
+ fprintf(stderr, "\n");
+#endif
+
+ if (!memcmp(pattern, buf_ld, 10) && !memcmp(o_pattern, o_buf_ld, 10)) {
+
+ // if this is an fcmp (attr & 8) then do not compare the patterns -
+ // due to a bug in llvm, dynamic float bitcasts do not work :(
+ // an attr value of 16 means this is a +- 1.0 test case
+
+ memcpy(backup, buf_ld, 10);
+ memcpy(buf_ld, repl, 10);
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
- *buf_8 = (u8)pattern;
+ if (*status == 1) { memcpy(cbuf + idx, repl, 10); }
+ memcpy(buf_ld, backup, 10);
+
+#ifdef _DEBUG
+ fprintf(stderr, "Status=%u\n", *status);
+#endif
+
+ }
+
+ }
+
+ // reverse encoding
+ if (do_reverse && *status != 1) {
+
+ u8 sp[10], sr[10], osp[10], osr[10];
+ SWAPNN(sp, pattern, 10);
+ SWAPNN(sr, repl, 10);
+ SWAPNN(osp, o_pattern, 10);
+ SWAPNN(osr, changed_val, 10);
+
+ if (unlikely(cmp_extend_encoding_ld(afl, h, sp, sr, osp, osr, attr, idx,
+ taint_len, orig_buf, buf, cbuf, len, 0,
+ lvl, status))) {
+
+ return 1;
}
@@ -445,10 +1022,6 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
u32 k;
u8 cons_ff = 0, cons_0 = 0;
-
- if (shape > sizeof(v))
- FATAL("shape is greater than %zu, please report!", sizeof(v));
-
for (k = 0; k < shape; ++k) {
if (b[k] == 0) {
@@ -457,7 +1030,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
} else if (b[k] == 0xff) {
++cons_ff;
} else {
@@ -493,28 +1066,126 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
}
-static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
+static void try_to_add_to_dict128(afl_state_t *afl, u128 v) {
- struct cmp_header *h = &afl->shm.cmp_map->headers[key];
- u32 i, j, idx;
+ u8 *b = (u8 *)&v;
- u32 loggeds = h->hits;
+ u32 k;
+ u8 cons_ff = 0, cons_0 = 0;
+ for (k = 0; k < 16; ++k) {
+
+ if (b[k] == 0) {
+
+ ++cons_0;
+
+ } else if (b[k] == 0xff) {
+
+ ++cons_ff;
+
+ } else {
+
+ cons_0 = cons_ff = 0;
+
+ }
+
+ // too many uninteresting values? try adding 2 64-bit values
+ if (cons_0 > 6 || cons_ff > 6) {
+
+ u64 v64 = (u64)v;
+ try_to_add_to_dict(afl, v64, 8);
+ v64 = (u64)(v >> 64);
+ try_to_add_to_dict(afl, v64, 8);
+
+ return;
+
+ }
+
+ }
+
+ maybe_add_auto(afl, (u8 *)&v, 16);
+ u128 rev = SWAPN(v, 128);
+ maybe_add_auto(afl, (u8 *)&rev, 16);
+
+}
+
+static void try_to_add_to_dictN(afl_state_t *afl, u128 v, u8 size) {
+
+ u8 *b = (u8 *)&v;
+
+ u32 k;
+ u8 cons_ff = 0, cons_0 = 0;
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ for (k = 0; k < size; ++k) {
+
+#else
+ for (k = 16 - size; k < 16; ++k) {
+
+#endif
+ if (b[k] == 0) {
+
+ ++cons_0;
+
+ } else if (b[k] == 0xff) {
+
+ ++cons_ff;
+
+ } else {
+
+ cons_0 = cons_ff = 0;
+
+ }
+
+ }
+
+ maybe_add_auto(afl, (u8 *)&v, size);
+ u128 rev = SWAPN(v, size);
+ maybe_add_auto(afl, (u8 *)&rev, size);
+
+}
+
+static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
+ u32 len, u32 lvl, struct tainted *taint) {
+
+ struct cmp_header *h = &afl->shm.cmp_map->headers[key];
+ struct tainted * t;
+ u32 i, j, idx, taint_len;
+ u32 have_taint = 1, is_128 = 0, is_n = 0, is_ld = 0;
+ u32 loggeds = h->hits;
if (h->hits > CMP_MAP_H) { loggeds = CMP_MAP_H; }
u8 status = 0;
- // opt not in the paper
- u32 fails;
- u8 found_one = 0;
+ u8 found_one = 0;
/* loop cmps are useless, detect and ignore them */
- u64 s_v0, s_v1;
- u8 s_v0_fixed = 1, s_v1_fixed = 1;
- u8 s_v0_inc = 1, s_v1_inc = 1;
- u8 s_v0_dec = 1, s_v1_dec = 1;
+ u128 s128_v0 = 0, s128_v1 = 0, orig_s128_v0 = 0, orig_s128_v1 = 0;
+ long double ld0, ld1, o_ld0, o_ld1;
+ u64 s_v0, s_v1;
+ u8 s_v0_fixed = 1, s_v1_fixed = 1;
+ u8 s_v0_inc = 1, s_v1_inc = 1;
+ u8 s_v0_dec = 1, s_v1_dec = 1;
- for (i = 0; i < loggeds; ++i) {
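+ // classify the operand width: 1/2/4/8 bytes take the scalar path, 16
+ // bytes is handled as u128, 10 bytes with the float attribute as long
+ // double, and any other width as a generic N-byte value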
+ switch (SHAPE_BYTES(h->shape)) {
+
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ case 16:
+ is_128 = 1;
+ break;
+ case 10:
+ if (h->attribute & 8) { is_ld = 1; }
+ // fall through
+ default:
+ is_n = 1;
- fails = 0;
+ }
+
+ // skip floating point compares if we are only running at cmplog level 1
+ if ((h->attribute & 8) && lvl < 2) return 0;
+
+ for (i = 0; i < loggeds; ++i) {
struct cmp_operands *o = &afl->shm.cmp_map->log[key][i];
@@ -551,55 +1222,242 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
}
- for (idx = 0; idx < len && fails < 8; ++idx) {
+#ifdef _DEBUG
+ fprintf(stderr, "Handling: %llx->%llx vs %llx->%llx attr=%u shape=%u\n",
+ orig_o->v0, o->v0, orig_o->v1, o->v1, h->attribute,
+ SHAPE_BYTES(h->shape));
+#endif
+
+ if (taint) {
+
+ t = taint;
+
+ while (t->next) {
+
+ t = t->next;
+
+ }
+
+ } else {
+
+ have_taint = 0;
+ t = NULL;
+
+ }
+
+ if (unlikely(is_128 || is_n)) {
+
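+ // reassemble the full 128-bit operands from the two 64-bit halves that
+ // the cmplog runtime logs in v0/v0_128 and v1/v1_128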
+ s128_v0 = ((u128)o->v0) + (((u128)o->v0_128) << 64);
+ s128_v1 = ((u128)o->v1) + (((u128)o->v1_128) << 64);
+ orig_s128_v0 = ((u128)orig_o->v0) + (((u128)orig_o->v0_128) << 64);
+ orig_s128_v1 = ((u128)orig_o->v1) + (((u128)orig_o->v1_128) << 64);
+
+ if (is_ld) {
+
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ memcpy((char *)&ld0, (char *)&s128_v0, sizeof(long double));
+ memcpy((char *)&ld1, (char *)&s128_v1, sizeof(long double));
+ memcpy((char *)&o_ld0, (char *)&orig_s128_v0, sizeof(long double));
+ memcpy((char *)&o_ld1, (char *)&orig_s128_v1, sizeof(long double));
+#else
+ memcpy((char *)&ld0, (char *)(&s128_v0) + 6, sizeof(long double));
+ memcpy((char *)&ld1, (char *)(&s128_v1) + 6, sizeof(long double));
+ memcpy((char *)&o_ld0, (char *)(&orig_s128_v0) + 6,
+ sizeof(long double));
+ memcpy((char *)&o_ld1, (char *)(&orig_s128_v1) + 6,
+ sizeof(long double));
+#endif
+
+ }
+
+ }
+
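+ // walk the input; with taint information only offsets inside a tainted
+ // region are tried, and taint_len caps how many bytes a replacement may
+ // span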
+ for (idx = 0; idx < len; ++idx) {
+
+ if (have_taint) {
+
+ if (!t || idx < t->pos) {
+
+ continue;
+
+ } else {
+
+ taint_len = t->pos + t->len - idx;
+
+ if (idx == t->pos + t->len - 1) { t = t->prev; }
+
+ }
+
+ } else {
+
+ taint_len = len - idx;
+
+ }
status = 0;
- if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, orig_o->v0, idx,
- orig_buf, buf, len, 1, &status))) {
- return 1;
+ if (is_ld) { // long double special case
+
+ if (ld0 != o_ld0 && o_ld1 != o_ld0) {
+
+ if (unlikely(cmp_extend_encoding_ld(
+ afl, h, (u8 *)&ld0, (u8 *)&ld1, (u8 *)&o_ld0, (u8 *)&o_ld1,
+ h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1,
+ lvl, &status))) {
+
+ return 1;
+
+ }
+
+ }
+
+ if (status == 1) {
+
+ found_one = 1;
+ break;
+
+ }
+
+ if (ld1 != o_ld1 && o_ld0 != o_ld1) {
+
+ if (unlikely(cmp_extend_encoding_ld(
+ afl, h, (u8 *)&ld1, (u8 *)&ld0, (u8 *)&o_ld1, (u8 *)&o_ld0,
+ h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1,
+ lvl, &status))) {
+
+ return 1;
+
+ }
+
+ }
+
+ if (status == 1) {
+
+ found_one = 1;
+ break;
+
+ }
+
+ }
+
+ if (is_128) { // u128 special case
+
+ if (s128_v0 != orig_s128_v0 && orig_s128_v0 != orig_s128_v1) {
+
+ if (unlikely(cmp_extend_encoding128(
+ afl, h, s128_v0, s128_v1, orig_s128_v0, orig_s128_v1,
+ h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1,
+ lvl, &status))) {
+
+ return 1;
+
+ }
+
+ }
+
+ if (status == 1) {
+
+ found_one = 1;
+ break;
+
+ }
+
+ if (s128_v1 != orig_s128_v1 && orig_s128_v1 != orig_s128_v0) {
+
+ if (unlikely(cmp_extend_encoding128(
+ afl, h, s128_v1, s128_v0, orig_s128_v1, orig_s128_v0,
+ h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1,
+ lvl, &status))) {
+
+ return 1;
+
+ }
+
+ }
+
+ if (status == 1) {
+
+ found_one = 1;
+ break;
+
+ }
}
- if (status == 2) {
+ // even for u128 and long double we run cmp_extend_encoding(), because if
+ // we got here their own special trials failed and the value might just be
+ // a cast from e.g. u64 to u128 of the input data.
- ++fails;
+ if ((o->v0 != orig_o->v0 || lvl >= 4) && orig_o->v0 != orig_o->v1) {
- } else if (status == 1) {
+ if (unlikely(cmp_extend_encoding(
+ afl, h, o->v0, o->v1, orig_o->v0, orig_o->v1, h->attribute, idx,
+ taint_len, orig_buf, buf, cbuf, len, 1, lvl, &status))) {
+ return 1;
+
+ }
+
+ }
+
+ if (status == 1) {
+
+ found_one = 1;
break;
}
status = 0;
- if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, orig_o->v1, idx,
- orig_buf, buf, len, 1, &status))) {
+ if ((o->v1 != orig_o->v1 || lvl >= 4) && orig_o->v0 != orig_o->v1) {
- return 1;
+ if (unlikely(cmp_extend_encoding(
+ afl, h, o->v1, o->v0, orig_o->v1, orig_o->v0, h->attribute, idx,
+ taint_len, orig_buf, buf, cbuf, len, 1, lvl, &status))) {
- }
+ return 1;
- if (status == 2) {
+ }
- ++fails;
+ }
- } else if (status == 1) {
+ if (status == 1) {
+ found_one = 1;
break;
}
}
- if (status == 1) { found_one = 1; }
+#ifdef _DEBUG
+ fprintf(stderr,
+ "END: %llx->%llx vs %llx->%llx attr=%u i=%u found=%u is128=%u "
+ "isN=%u size=%u\n",
+ orig_o->v0, o->v0, orig_o->v1, o->v1, h->attribute, i, found_one,
+ is_128, is_n, SHAPE_BYTES(h->shape));
+#endif
// If failed, add to dictionary
- if (fails == 8) {
+ if (!found_one) {
if (afl->pass_stats[key].total == 0) {
- try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape));
- try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape));
+ if (unlikely(is_128)) {
+
+ try_to_add_to_dict128(afl, s128_v0);
+ try_to_add_to_dict128(afl, s128_v1);
+
+ } else if (unlikely(is_n)) {
+
+ try_to_add_to_dictN(afl, s128_v0, SHAPE_BYTES(h->shape));
+ try_to_add_to_dictN(afl, s128_v1, SHAPE_BYTES(h->shape));
+
+ } else {
+
+ try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape));
+ try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape));
+
+ }
}
@@ -630,20 +1488,19 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
}
static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
- u8 *o_pattern, u32 idx, u8 *orig_buf, u8 *buf,
- u32 len, u8 *status) {
+ u8 *o_pattern, u32 idx, u32 taint_len,
+ u8 *orig_buf, u8 *buf, u8 *cbuf, u32 len,
+ u8 *status) {
u32 i;
u32 its_len = MIN((u32)32, len - idx);
-
+ its_len = MIN(its_len, taint_len);
u8 save[32];
memcpy(save, &buf[idx], its_len);
- *status = 0;
-
for (i = 0; i < its_len; ++i) {
- if (pattern[i] != buf[idx + i] || o_pattern[i] != orig_buf[idx + i] ||
+ if ((pattern[i] != buf[idx + i] && o_pattern[i] != orig_buf[idx + i]) ||
*status == 1) {
break;
@@ -654,6 +1511,8 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
+ if (*status == 1) { memcpy(cbuf + idx, &buf[idx], i); }
+
}
memcpy(&buf[idx], save, i);
@@ -661,23 +1520,21 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
-static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
+static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
+ u32 len, struct tainted *taint) {
+ struct tainted * t;
struct cmp_header *h = &afl->shm.cmp_map->headers[key];
- u32 i, j, idx;
+ u32 i, j, idx, have_taint = 1, taint_len;
u32 loggeds = h->hits;
if (h->hits > CMP_MAP_RTN_H) { loggeds = CMP_MAP_RTN_H; }
u8 status = 0;
- // opt not in the paper
- // u32 fails = 0;
u8 found_one = 0;
for (i = 0; i < loggeds; ++i) {
- u32 fails = 0;
-
struct cmpfn_operands *o =
&((struct cmpfn_operands *)afl->shm.cmp_map->log[key])[i];
@@ -696,50 +1553,84 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
}
- for (idx = 0; idx < len && fails < 8; ++idx) {
+ if (taint) {
- if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx,
- orig_buf, buf, len, &status))) {
+ t = taint;
+ while (t->next) {
- return 1;
+ t = t->next;
}
- if (status == 2) {
+ } else {
- ++fails;
+ have_taint = 0;
+ t = NULL;
- } else if (status == 1) {
+ }
- break;
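+ // the same taint windowing as in cmp_fuzz(): restrict idx and taint_len
+ // to the tainted regions of the input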
+ for (idx = 0; idx < len; ++idx) {
+
+ if (have_taint) {
+
+ if (!t || idx < t->pos) {
+
+ continue;
+
+ } else {
+
+ taint_len = t->pos + t->len - idx;
+
+ if (idx == t->pos + t->len - 1) { t = t->prev; }
+
+ }
+
+ } else {
+
+ taint_len = len - idx;
}
- if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx,
- orig_buf, buf, len, &status))) {
+ status = 0;
+
+ if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx,
+ taint_len, orig_buf, buf, cbuf, len,
+ &status))) {
return 1;
}
- if (status == 2) {
+ if (status == 1) {
+
+ found_one = 1;
+ break;
+
+ }
+
+ status = 0;
+
+ if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx,
+ taint_len, orig_buf, buf, cbuf, len,
+ &status))) {
- ++fails;
+ return 1;
- } else if (status == 1) {
+ }
+ if (status == 1) {
+
+ found_one = 1;
break;
}
}
- if (status == 1) { found_one = 1; }
-
// If failed, add to dictionary
- if (fails == 8) {
+ if (!found_one) {
- if (afl->pass_stats[key].total == 0) {
+ if (unlikely(!afl->pass_stats[key].total)) {
maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape));
maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape));
@@ -791,7 +1682,44 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
memcpy(afl->orig_cmp_map, afl->shm.cmp_map, sizeof(struct cmp_map));
- if (unlikely(colorization(afl, buf, len, exec_cksum))) { return 1; }
+ struct tainted *taint = NULL;
+
+ if (!afl->queue_cur->taint || !afl->queue_cur->cmplog_colorinput) {
+
+ if (unlikely(colorization(afl, buf, len, exec_cksum, &taint))) { return 1; }
+
+ // no taint? still try, and create a dummy entry so colorization is not run again
+ if (!taint) {
+
+ taint = ck_alloc(sizeof(struct tainted));
+ taint->len = len;
+
+ }
+
+ } else {
+
+ buf = afl->queue_cur->cmplog_colorinput;
+ taint = afl->queue_cur->taint;
+ // re-run the target to regenerate the cmplog information
+ if (unlikely(common_fuzz_cmplog_stuff(afl, buf, len))) { return 1; }
+
+ }
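+
+ // the colorized input and its taint map are cached on the queue entry in
+ // exit_its below, so later (more intensive) cmplog levels can reuse them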
+
+#ifdef _DEBUG
+ dump("ORIG", orig_buf, len);
+ dump("NEW ", buf, len);
+#endif
+
+ struct tainted *t = taint;
+
+ while (t) {
+
+#ifdef _DEBUG
+ fprintf(stderr, "T: pos=%u len=%u\n", t->pos, t->len);
+#endif
+ t = t->next;
+
+ }
// do it manually, forkserver clear only afl->fsrv.trace_bits
memset(afl->shm.cmp_map->headers, 0, sizeof(afl->shm.cmp_map->headers));
@@ -807,15 +1735,38 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
afl->stage_max = 0;
afl->stage_cur = 0;
+ u32 lvl;
+ u32 cmplog_done = afl->queue_cur->colorized;
+ u32 cmplog_lvl = afl->cmplog_lvl;
+ if (!cmplog_done) {
+
+ lvl = 1;
+
+ } else {
+
+ lvl = 0;
+
+ }
+
+ if (cmplog_lvl >= 2 && cmplog_done < 2) { lvl += 2; }
+ if (cmplog_lvl >= 3 && cmplog_done < 3) { lvl += 4; }
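+ // lvl acts as a bitmap: 1 = first redqueen pass for this queue entry,
+ // 2 = also process floating point compares, 4 = also try the +-1 and
+ // transformed variants of the compared values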
+
+ u8 *cbuf = afl_realloc((void **)&afl->in_scratch_buf, len + 128);
+ memcpy(cbuf, orig_buf, len);
+ u8 *virgin_backup = afl_realloc((void **)&afl->ex_buf, afl->shm.map_size);
+ memcpy(virgin_backup, afl->virgin_bits, afl->shm.map_size);
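+ // cbuf accumulates every byte replacement that yielded new coverage; the
+ // virgin bits are backed up so that in exit_its the combined cbuf input
+ // can be evaluated against the pre-redqueen coverage state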
+
u32 k;
for (k = 0; k < CMP_MAP_W; ++k) {
if (!afl->shm.cmp_map->headers[k].hits) { continue; }
- if (afl->pass_stats[k].total &&
- (rand_below(afl, afl->pass_stats[k].total) >=
- afl->pass_stats[k].faileds ||
- afl->pass_stats[k].total == 0xff)) {
+ if (afl->pass_stats[k].faileds == 0xff ||
+ afl->pass_stats[k].total == 0xff) {
+
+#ifdef _DEBUG
+ fprintf(stderr, "DISABLED %u\n", k);
+#endif
afl->shm.cmp_map->headers[k].hits = 0; // ignore this cmp
@@ -841,11 +1792,19 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) {
- if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, len))) { goto exit_its; }
+ if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, cbuf, len, lvl, taint))) {
+
+ goto exit_its;
+
+ }
} else {
- if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, len))) { goto exit_its; }
+ if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, cbuf, len, taint))) {
+
+ goto exit_its;
+
+ }
}
@@ -854,12 +1813,86 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
r = 0;
exit_its:
+
+ afl->queue_cur->colorized = afl->cmplog_lvl;
+ if (afl->cmplog_lvl == CMPLOG_LVL_MAX) {
+
+ ck_free(afl->queue_cur->cmplog_colorinput);
+ t = taint;
+ while (taint) {
+
+ t = taint->next;
+ ck_free(taint);
+ taint = t;
+
+ }
+
+ afl->queue_cur->taint = NULL;
+
+ } else {
+
+ if (!afl->queue_cur->taint) { afl->queue_cur->taint = taint; }
+
+ if (!afl->queue_cur->cmplog_colorinput) {
+
+ afl->queue_cur->cmplog_colorinput = ck_alloc_nozero(len);
+ memcpy(afl->queue_cur->cmplog_colorinput, buf, len);
+ memcpy(buf, orig_buf, len);
+
+ }
+
+ }
+
+ // copy the current virgin bits so we can recover the information
+ u8 *virgin_save = afl_realloc((void **)&afl->eff_buf, afl->shm.map_size);
+ memcpy(virgin_save, afl->virgin_bits, afl->shm.map_size);
+ // reset virgin bits to the backup previous to redqueen
+ memcpy(afl->virgin_bits, virgin_backup, afl->shm.map_size);
+
+ u8 status = 0;
+ its_fuzz(afl, cbuf, len, &status);
+
+ // now combine with the saved virgin bits
+#ifdef WORD_SIZE_64
+ u64 *v = (u64 *)afl->virgin_bits;
+ u64 *s = (u64 *)virgin_save;
+ u32 i;
+ for (i = 0; i < (afl->shm.map_size >> 3); i++) {
+
+ v[i] &= s[i];
+
+ }
+
+#else
+ u32 *v = (u32 *)afl->virgin_bits;
+ u32 *s = (u32 *)virgin_save;
+ u32 i;
+ for (i = 0; i < (afl->shm.map_size >> 2); i++) {
+
+ v[i] &= s[i];
+
+ }
+
+#endif
+
+#ifdef _DEBUG
+ dump("COMB", cbuf, len);
+ if (status == 1) {
+
+ fprintf(stderr, "NEW COMBINED\n");
+
+ } else {
+
+ fprintf(stderr, "NO new combined\n");
+
+ }
+
+#endif
+
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
afl->stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt;
afl->stage_cycles[STAGE_ITS] += afl->fsrv.total_execs - orig_execs;
- memcpy(buf, orig_buf, len);
-
return r;
}
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index 60c9684c..8423a3d1 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -102,6 +102,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->stats_update_freq = 1;
afl->stats_avg_exec = 0;
afl->skip_deterministic = 1;
+ afl->cmplog_lvl = 1;
#ifndef NO_SPLICING
afl->use_splicing = 1;
#endif
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index bb2674f0..1e914ca6 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -77,13 +77,8 @@ static void at_exit() {
}
int kill_signal = SIGKILL;
-
/* AFL_KILL_SIGNAL should already be a valid int at this point */
- if (getenv("AFL_KILL_SIGNAL")) {
-
- kill_signal = atoi(getenv("AFL_KILL_SIGNAL"));
-
- }
+ if ((ptr = getenv("AFL_KILL_SIGNAL"))) { kill_signal = atoi(ptr); }
if (pid1 > 0) { kill(pid1, kill_signal); }
if (pid2 > 0) { kill(pid2, kill_signal); }
@@ -103,13 +98,14 @@ static void usage(u8 *argv0, int more_help) {
"Execution control settings:\n"
" -p schedule - power schedules compute a seed's performance score:\n"
- " <fast(default), rare, exploit, seek, mmopt, coe, "
- "explore,\n"
- " lin, quad> -- see docs/power_schedules.md\n"
+ " fast(default), explore, exploit, seek, rare, mmopt, "
+ "coe, lin\n"
+ " quad -- see docs/power_schedules.md\n"
" -f file - location read by the fuzzed program (default: stdin "
"or @@)\n"
" -t msec - timeout for each run (auto-scaled, 50-%u ms)\n"
- " -m megs - memory limit for child process (%u MB, 0 = no limit)\n"
+ " -m megs - memory limit for child process (%u MB, 0 = no limit "
+ "[default])\n"
" -Q - use binary-only instrumentation (QEMU mode)\n"
" -U - use unicorn-based instrumentation (Unicorn mode)\n"
" -W - use qemu-based instrumentation with Wine (Wine "
@@ -125,7 +121,9 @@ static void usage(u8 *argv0, int more_help) {
" See docs/README.MOpt.md\n"
" -c program - enable CmpLog by specifying a binary compiled for "
"it.\n"
- " if using QEMU, just use -c 0.\n\n"
+ " if using QEMU, just use -c 0.\n"
+ " -l cmplog_level - set the complexity/intensivity of CmpLog.\n"
+ " Values: 1 (default), 2 (intensive) and 3 (heavy)\n\n"
"Fuzzing behavior settings:\n"
" -Z - sequential queue selection instead of weighted "
@@ -337,7 +335,6 @@ int main(int argc, char **argv_orig, char **envp) {
if (get_afl_env("AFL_DEBUG")) { debug = afl->debug = 1; }
- // map_size = get_map_size();
afl_state_init(afl, map_size);
afl->debug = debug;
afl_fsrv_init(&afl->fsrv);
@@ -358,7 +355,8 @@ int main(int argc, char **argv_orig, char **envp) {
while ((opt = getopt(
argc, argv,
- "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:Z")) > 0) {
+ "+b:B:c:CdDe:E:hi:I:f:F:l:L:m:M:nNo:p:P:RQs:S:t:T:UV:Wx:Z")) >
+ 0) {
switch (opt) {
@@ -787,6 +785,26 @@ int main(int argc, char **argv_orig, char **envp) {
} break;
+ case 'l': {
+
+ afl->cmplog_lvl = atoi(optarg);
+ if (afl->cmplog_lvl < 1 || afl->cmplog_lvl > CMPLOG_LVL_MAX) {
+
+ FATAL(
+ "Bad complog level value, accepted values are 1 (default), 2 and "
+ "%u.",
+ CMPLOG_LVL_MAX);
+
+ }
+
+ if (afl->cmplog_lvl == CMPLOG_LVL_MAX) {
+
+ afl->cmplog_max_filesize = MAX_FILE;
+
+ }
+
+ } break;
+
case 'L': { /* MOpt mode */
if (afl->limit_time_sig) { FATAL("Multiple -L options not supported"); }
@@ -1635,6 +1653,14 @@ int main(int argc, char **argv_orig, char **envp) {
if (afl->use_splicing) {
++afl->cycles_wo_finds;
+
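+ // a full queue cycle without finds: raise the cmplog file size cap
+ // (shifted up by 4 bits each time) until it reaches MAX_FILE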
+ if (unlikely(afl->shm.cmplog_mode &&
+ afl->cmplog_max_filesize < MAX_FILE)) {
+
+ afl->cmplog_max_filesize <<= 4;
+
+ }
+
switch (afl->expand_havoc) {
case 0:
@@ -1652,6 +1678,7 @@ int main(int argc, char **argv_orig, char **envp) {
}
afl->expand_havoc = 2;
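+ // still no new finds: automatically escalate cmplog to the intensive level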
+ if (afl->cmplog_lvl < 2) afl->cmplog_lvl = 2;
break;
case 2:
// if (!have_p) afl->schedule = EXPLOIT;
@@ -1665,11 +1692,14 @@ int main(int argc, char **argv_orig, char **envp) {
afl->expand_havoc = 4;
break;
case 4:
- // if not in sync mode, enable deterministic mode?
- // if (!afl->sync_id) afl->skip_deterministic = 0;
afl->expand_havoc = 5;
+ if (afl->cmplog_lvl < 3) afl->cmplog_lvl = 3;
break;
case 5:
+ // if not in sync mode, enable deterministic mode?
+ if (!afl->sync_id) afl->skip_deterministic = 0;
+ afl->expand_havoc = 6;
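+ // fall through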
+ case 6:
// nothing else currently
break;