Diffstat (limited to 'src/afl-fuzz-run.c'):

 src/afl-fuzz-run.c | 174 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 129 insertions(+), 45 deletions(-)
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index ac4fb4a9..b62db1ea 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -5,12 +5,12 @@
    Originally written by Michal Zalewski
 
    Now maintained by Marc Heuse <mh@mh-sec.de>,
-                        Heiko Eißfeldt <heiko.eissfeldt@hexco.de> and
+                        Heiko Eissfeldt <heiko.eissfeldt@hexco.de> and
                         Andrea Fioraldi <andreafioraldi@gmail.com> and
                         Dominik Maier <mail@dmnk.co>
 
    Copyright 2016, 2017 Google Inc. All rights reserved.
-   Copyright 2019-2023 AFLplusplus Project. All rights reserved.
+   Copyright 2019-2024 AFLplusplus Project. All rights reserved.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -60,6 +60,23 @@ fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, u32 timeout) {
 
   fsrv_run_result_t res = afl_fsrv_run_target(fsrv, timeout, &afl->stop_soon);
 
+  /* If a post_run() function is defined in a custom mutator, it will be
+     called after each execution of the target program. */
+
+  if (unlikely(afl->custom_mutators_count)) {
+
+    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
+
+      if (unlikely(el->afl_custom_post_run)) {
+
+        el->afl_custom_post_run(el->data);
+
+      }
+
+    });
+
+  }
+
 #ifdef PROFILING
   clock_gettime(CLOCK_REALTIME, &spec);
   time_spent_start = (spec.tv_sec * 1000000000) + spec.tv_nsec;
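
A custom mutator opts into this new hook simply by exporting
afl_custom_post_run(). A minimal sketch of such a mutator (the
my_mutator_t struct and its run counter are illustrative, not part of
the API):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct my_mutator {
      uint64_t runs; /* illustrative per-execution counter */
    } my_mutator_t;

    void *afl_custom_init(void *afl, unsigned int seed) {
      (void)afl;
      (void)seed;
      return calloc(1, sizeof(my_mutator_t));
    }

    /* Called by fuzz_run_target() above after every execution. */
    void afl_custom_post_run(void *data) {
      ((my_mutator_t *)data)->runs++;
    }

    void afl_custom_deinit(void *data) {
      free(data);
    }

Compiled as a shared object and loaded via AFL_CUSTOM_MUTATOR_LIBRARY,
this is all the LIST_FOREACH above needs to find the hook.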
@@ -152,20 +169,16 @@ write_to_testcase(afl_state_t *afl, void **mem, u32 len, u32 fix) {
 
     }
 
-    if (unlikely(afl->custom_mutators_count)) {
-
-      LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
-
-        if (el->afl_custom_fuzz_send) {
+    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
 
-          el->afl_custom_fuzz_send(el->data, *mem, new_size);
-          sent = 1;
+      if (el->afl_custom_fuzz_send) {
 
-        }
+        el->afl_custom_fuzz_send(el->data, *mem, new_size);
+        sent = 1;
 
-      });
+      }
 
-    }
+    });
 
     if (likely(!sent)) {
 
@@ -186,7 +199,7 @@ write_to_testcase(afl_state_t *afl, void **mem, u32 len, u32 fix) {
 
     }
 
-  } else {
+  } else {                                   /* !afl->custom_mutators_count */
 
     if (unlikely(len < afl->min_length && !fix)) {
 
@@ -198,27 +211,8 @@ write_to_testcase(afl_state_t *afl, void **mem, u32 len, u32 fix) {
 
     }
 
-    if (unlikely(afl->custom_mutators_count)) {
-
-      LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
-
-        if (el->afl_custom_fuzz_send) {
-
-          el->afl_custom_fuzz_send(el->data, *mem, len);
-          sent = 1;
-
-        }
-
-      });
-
-    }
-
-    if (likely(!sent)) {
-
-      /* boring uncustom. */
-      afl_fsrv_write_to_testcase(&afl->fsrv, *mem, len);
-
-    }
+    /* boring uncustom. */
+    afl_fsrv_write_to_testcase(&afl->fsrv, *mem, len);
 
   }
 
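
The refactor above only reshuffles control flow: the sent flag and the
afl_custom_fuzz_send() hook keep their documented semantics. A sketch of
a mutator-side implementation that delivers the testcase over a socket
(the my_mutator_t wrapper and its sock_fd field are assumptions of the
example, presumed connected in afl_custom_init()):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/socket.h>

    typedef struct my_mutator {
      int sock_fd; /* assumed: connected during afl_custom_init() */
    } my_mutator_t;

    /* Deliver the testcase ourselves instead of letting AFL++ write
       the input file. */
    void afl_custom_fuzz_send(void *data, const uint8_t *buf,
                              size_t buf_size) {
      my_mutator_t *m = (my_mutator_t *)data;
      send(m->sock_fd, buf, buf_size, 0);
    }

Note that once any hook sets sent = 1, afl_fsrv_write_to_testcase() is
skipped entirely, so delivery becomes the mutator's responsibility.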
@@ -415,6 +409,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
   u32 use_tmout = afl->fsrv.exec_tmout;
   u8 *old_sn = afl->stage_name;
 
+  u64 calibration_start_us = get_cur_time_us();
   if (unlikely(afl->shm.cmplog_mode)) { q->exec_cksum = 0; }
 
   /* Be a bit more generous about timeouts when resuming sessions, or when
@@ -510,6 +505,10 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
     fault = fuzz_run_target(afl, &afl->fsrv, use_tmout);
 
+    // update the time spent in calibration after each execution, as those may
+    // be slow
+    update_calibration_time(afl, &calibration_start_us);
+
     /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
        we want to bail out quickly. */
 
@@ -607,6 +606,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
   }
 
   q->exec_us = diff_us / afl->stage_max;
+  if (unlikely(!q->exec_us)) { q->exec_us = 1; }
+
   q->bitmap_size = count_bytes(afl, afl->fsrv.trace_bits);
   q->handicap = handicap;
   q->cal_failed = 0;
@@ -656,6 +657,7 @@ abort_calibration:
 
   if (!first_run) { show_stats(afl); }
 
+  update_calibration_time(afl, &calibration_start_us);
   return fault;
 
 }
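
The update_calibration_time() calls (and the update_sync_time() /
update_trim_time() siblings below) are defined elsewhere in this change
set. Judging from how each is invoked repeatedly with a pointer to the
start timestamp, they presumably accumulate elapsed microseconds and
reset the start, along these lines (the calibration_time_us field name
is an assumption):

    static inline void update_calibration_time(afl_state_t *afl,
                                               u64         *start_us) {
      u64 cur = get_cur_time_us();
      afl->calibration_time_us += cur - *start_us; /* assumed counter */
      *start_us = cur; /* reset so repeated calls don't double-count */
    }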
@@ -675,11 +677,16 @@ void sync_fuzzers(afl_state_t *afl) {
   afl->stage_max = afl->stage_cur = 0;
   afl->cur_depth = 0;
 
+  u64 sync_start_us = get_cur_time_us();
   /* Look at the entries created for every other fuzzer in the sync directory.
    */
 
   while ((sd_ent = readdir(sd))) {
 
+    // since sync can take substantial amounts of time, update time spent every
+    // iteration
+    update_sync_time(afl, &sync_start_us);
+
     u8  qd_synced_path[PATH_MAX], qd_path[PATH_MAX];
     u32 min_accept = 0, next_min_accept = 0;
 
@@ -766,6 +773,8 @@ void sync_fuzzers(afl_state_t *afl) {
     afl->stage_cur = 0;
     afl->stage_max = 0;
 
+    show_stats(afl);
+
     /* For every file queued by this fuzzer, parse ID and see if we have
        looked at it before; exec a test case if not. */
 
@@ -817,15 +826,15 @@ void sync_fuzzers(afl_state_t *afl) {
         /* See what happens. We rely on save_if_interesting() to catch major
            errors and save the test case. */
 
-        (void)write_to_testcase(afl, (void **)&mem, st.st_size, 1);
+        u32 new_len = write_to_testcase(afl, (void **)&mem, st.st_size, 1);
 
         fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
 
         if (afl->stop_soon) { goto close_sync; }
 
         afl->syncing_party = sd_ent->d_name;
-        afl->queued_imported +=
-            save_if_interesting(afl, mem, st.st_size, fault);
+        afl->queued_imported += save_if_interesting(afl, mem, new_len, fault);
+        show_stats(afl);
         afl->syncing_party = 0;
 
         munmap(mem, st.st_size);
@@ -867,6 +876,9 @@ void sync_fuzzers(afl_state_t *afl) {
 
   if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0);
 
+  // add the time spent in sync one last time
+  update_sync_time(afl, &sync_start_us);
+
   afl->last_sync_time = get_cur_time();
   afl->last_sync_cycle = afl->queue_cycle;
 
@@ -878,8 +890,9 @@ void sync_fuzzers(afl_state_t *afl) {
 
 u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
 
+  u8  needs_write = 0, fault = 0;
   u32 orig_len = q->len;
-
+  u64 trim_start_us = get_cur_time_us();
   /* Custom mutator trimmer */
   if (afl->custom_mutators_count) {
 
@@ -903,11 +916,15 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
 
     }
 
-    if (custom_trimmed) return trimmed_case;
+    if (custom_trimmed) {
+
+      fault = trimmed_case;
+      goto abort_trimming;
+
+    }
 
   }
 
-  u8  needs_write = 0, fault = 0;
   u32 trim_exec = 0;
   u32 remove_len;
   u32 len_p2;
@@ -918,7 +935,12 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
      detected, it will still work to some extent, so we don't check for
      this. */
 
-  if (q->len < 5) { return 0; }
+  if (unlikely(q->len < 5)) {
+
+    fault = 0;
+    goto abort_trimming;
+
+  }
 
   afl->stage_name = afl->stage_name_buf;
   afl->bytes_trim_in += q->len;
@@ -952,6 +974,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
 
       fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
 
+      update_trim_time(afl, &trim_start_us);
+
       if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
 
       /* Note that we don't keep track of crashes or hangs here; maybe TODO?
@@ -978,7 +1002,6 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
 
         /* Let's save a clean trace, which will be needed by
            update_bitmap_score once we're done with the trimming stuff. */
-
         if (!needs_write) {
 
           needs_write = 1;
@@ -993,7 +1016,6 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
       }
 
       /* Since this can be slow, update the screen every now and then. */
-
       if (!(trim_exec++ % afl->stats_update_freq)) { show_stats(afl); }
       ++afl->stage_cur;
 
@@ -1008,6 +1030,68 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
 
   if (needs_write) {
 
+    // run afl_custom_post_process
+
+    if (unlikely(afl->custom_mutators_count) &&
+        likely(!afl->afl_env.afl_post_process_keep_original)) {
+
+      ssize_t new_size = q->len;
+      u8     *new_mem = in_buf;
+      u8     *new_buf = NULL;
+
+      LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
+
+        if (el->afl_custom_post_process) {
+
+          new_size = el->afl_custom_post_process(el->data, new_mem, new_size,
+                                                 &new_buf);
+
+          if (unlikely(!new_buf || new_size <= 0)) {
+
+            new_size = 0;
+            new_buf = new_mem;
+
+          } else {
+
+            new_mem = new_buf;
+
+          }
+
+        }
+
+      });
+
+      if (unlikely(!new_size)) {
+
+        new_size = q->len;
+        new_mem = in_buf;
+
+      }
+
+      if (unlikely(new_size < afl->min_length)) {
+
+        new_size = afl->min_length;
+
+      } else if (unlikely(new_size > afl->max_length)) {
+
+        new_size = afl->max_length;
+
+      }
+
+      q->len = new_size;
+
+      if (new_mem != in_buf && new_mem != NULL) {
+
+        new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), new_size);
+        if (unlikely(!new_buf)) { PFATAL("alloc"); }
+        memcpy(new_buf, new_mem, new_size);
+
+        in_buf = new_buf;
+
+      }
+
+    }
+
     s32 fd;
 
     if (unlikely(afl->no_unlink)) {
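
This block mirrors the post-process handling in write_to_testcase():
before the trimmed case is written back, every afl_custom_post_process()
hook gets a chance to restore format invariants, with the result clamped
to [min_length, max_length] and a fallback to the original buffer when a
hook fails. For reference, a sketch of such a hook that keeps a
hypothetical 4-byte length header consistent:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical format: u32 body length (host endianness), then
       the body itself. */
    size_t afl_custom_post_process(void *data, uint8_t *buf,
                                   size_t buf_size, uint8_t **out_buf) {
      (void)data;
      if (buf_size >= 4) {
        uint32_t body_len = (uint32_t)(buf_size - 4);
        memcpy(buf, &body_len, 4); /* in-place fix-up, no realloc */
      }
      *out_buf = buf; /* returning the input buffer is permitted */
      return buf_size;
    }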
@@ -1045,8 +1129,9 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
   }
 
 abort_trimming:
-
   afl->bytes_trim_out += q->len;
+  update_trim_time(afl, &trim_start_us);
+
   return fault;
 
 }
@@ -1110,4 +1195,3 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {
   return 0;
 
 }
-