Diffstat (limited to 'instrumentation')
-rw-r--r-- | instrumentation/README.ctx.md       | 22
-rw-r--r-- | instrumentation/afl-llvm-pass.so.cc | 18
2 files changed, 29 insertions, 11 deletions
diff --git a/instrumentation/README.ctx.md b/instrumentation/README.ctx.md
index caf2c09a..ffcce0a9 100644
--- a/instrumentation/README.ctx.md
+++ b/instrumentation/README.ctx.md
@@ -4,14 +4,19 @@
 
 This is an LLVM-based implementation of the context sensitive branch coverage.
 
-Basically every function gets its own ID and that ID is combined with the
-edges of the called functions.
+Basically every function gets its own ID and, every time an edge is logged,
+all the IDs in the callstack are hashed and combined with the edge transition
+hash to augment the classic edge coverage with information about the
+calling context.
 
 So if both function A and function B call a function C, the coverage
 collected in C will be different.
 
 In math the coverage is collected as follows:
-`map[current_location_ID ^ previous_location_ID >> 1 ^ previous_callee_ID] += 1`
+`map[current_location_ID ^ previous_location_ID >> 1 ^ hash_callstack_IDs] += 1`
+
+The callstack hash is produced by XOR-ing the function IDs to avoid explosion
+with recursive functions.
 
 ## Usage
 
@@ -20,3 +25,14 @@ Set the `AFL_LLVM_INSTRUMENT=CTX` or `AFL_LLVM_CTX=1` environment variable.
 It is highly recommended to increase the MAP_SIZE_POW2 definition in
 config.h to at least 18 and maybe up to 20 for this as otherwise too many
 map collisions occur.
+
+## Caller Branch Coverage
+
+If the context sensitive coverage introduces too many collisions, becoming
+detrimental, the user can choose to augment edge coverage with just the
+called function ID instead of the entire callstack hash.
+
+In math the coverage is collected as follows:
+`map[current_location_ID ^ previous_location_ID >> 1 ^ previous_callee_ID] += 1`
+
+Set the `AFL_LLVM_INSTRUMENT=CALLER` or `AFL_LLVM_CALLER=1` environment variable.
diff --git a/instrumentation/afl-llvm-pass.so.cc b/instrumentation/afl-llvm-pass.so.cc
index 87267e35..d06d3201 100644
--- a/instrumentation/afl-llvm-pass.so.cc
+++ b/instrumentation/afl-llvm-pass.so.cc
@@ -84,7 +84,7 @@ class AFLCoverage : public ModulePass {
   uint32_t ngram_size = 0;
   uint32_t map_size = MAP_SIZE;
   uint32_t function_minimum_size = 1;
-  char *   ctx_str = NULL, *skip_nozero = NULL;
+  char *   ctx_str = NULL, *caller_str = NULL, *skip_nozero = NULL;
 
 };
 
@@ -187,6 +187,7 @@ bool AFLCoverage::runOnModule(Module &M) {
   char *ngram_size_str = getenv("AFL_LLVM_NGRAM_SIZE");
   if (!ngram_size_str) ngram_size_str = getenv("AFL_NGRAM_SIZE");
   ctx_str = getenv("AFL_LLVM_CTX");
+  caller_str = getenv("AFL_LLVM_CALLER");
 
 #ifdef AFL_HAVE_VECTOR_INTRINSICS
   /* Decide previous location vector size (must be a power of two) */
@@ -240,7 +241,7 @@ bool AFLCoverage::runOnModule(Module &M) {
   GlobalVariable *AFLPrevLoc;
   GlobalVariable *AFLContext = NULL;
 
-  if (ctx_str)
+  if (ctx_str || caller_str)
 #if defined(__ANDROID__) || defined(__HAIKU__)
     AFLContext = new GlobalVariable(
         M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_ctx");
@@ -318,7 +319,7 @@ bool AFLCoverage::runOnModule(Module &M) {
       IRBuilder<> IRB(&(*IP));
 
       // Context sensitive coverage
-      if (ctx_str && &BB == &F.getEntryBlock()) {
+      if ((ctx_str || caller_str) && &BB == &F.getEntryBlock()) {
 
         // load the context ID of the previous function and write to to a local
        // variable on the stack
@@ -354,8 +355,9 @@ bool AFLCoverage::runOnModule(Module &M) {
         // if yes we store a context ID for this function in the global var
         if (has_calls) {
 
-          Value *NewCtx = IRB.CreateXor(
-              PrevCtx, ConstantInt::get(Int32Ty, AFL_R(map_size)));
+          Value *NewCtx = ConstantInt::get(Int32Ty, AFL_R(map_size));
+          if (ctx_str)
+            NewCtx = IRB.CreateXor(PrevCtx, NewCtx);
           StoreInst * StoreCtx = IRB.CreateStore(NewCtx, AFLContext);
           StoreCtx->setMetadata(M.getMDKindID("nosanitize"),
                                 MDNode::get(C, None));
@@ -412,7 +414,7 @@ bool AFLCoverage::runOnModule(Module &M) {
 
         // in CTX mode we have to restore the original context for the caller -
         // she might be calling other functions which need the correct CTX
-        if (ctx_str && has_calls) {
+        if ((ctx_str || caller_str) && has_calls) {
 
           Instruction *Inst = BB.getTerminator();
           if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst)) {
@@ -459,7 +461,7 @@ bool AFLCoverage::runOnModule(Module &M) {
 #endif
         PrevLocTrans = PrevLoc;
 
-      if (ctx_str)
+      if (ctx_str || caller_str)
         PrevLocTrans =
             IRB.CreateZExt(IRB.CreateXor(PrevLocTrans, PrevCtx), Int32Ty);
       else
@@ -546,7 +548,7 @@ bool AFLCoverage::runOnModule(Module &M) {
       // in CTX mode we have to restore the original context for the caller -
      // she might be calling other functions which need the correct CTX.
       // Currently this is only needed for the Ubuntu clang-6.0 bug
-      if (ctx_str && has_calls) {
+      if ((ctx_str || caller_str) && has_calls) {
 
         Instruction *Inst = BB.getTerminator();
         if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst)) {
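For reference, here is a minimal C sketch of the map index computation that the README text and this patch describe. It is an illustration only, not the afl++ runtime: the real pass emits this arithmetic inline as LLVM IR, and `cov_index`, `MAP_SIZE`, and the sample IDs below are assumptions made for the example.

```c
#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE (1U << 18) /* assumes MAP_SIZE_POW2 raised to 18, as the README advises */

/* Illustrative only. `ctx` is the XOR of all callstack IDs in CTX mode,
   or just the calling function's ID in CALLER mode. */
static uint32_t cov_index(uint32_t cur_loc, uint32_t prev_loc, uint32_t ctx) {

  return (cur_loc ^ (prev_loc >> 1) ^ ctx) % MAP_SIZE;

}

int main(void) {

  uint32_t id_A = 0x1111, id_B = 0x2222; /* hypothetical per-function random IDs */
  uint32_t cur = 0x3333, prev = 0x4444;  /* one edge inside a shared function C */

  /* The same edge in C maps to different entries depending on who called it. */
  printf("C reached via A -> map[%u]\n", (unsigned)cov_index(cur, prev, id_A));
  printf("C reached via B -> map[%u]\n", (unsigned)cov_index(cur, prev, id_B));
  return 0;

}
```

Both modes save `__afl_prev_ctx` on function entry and restore it before returning; the difference, visible in the hunk at line 355 above, is that CTX XORs the new random function ID into the saved context while CALLER overwrites the context with that ID alone.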