author     van Hauser <vh@thc.org>  2020-07-08 11:16:39 +0200
committer  van Hauser <vh@thc.org>  2020-07-08 11:16:39 +0200
commit     83790d65afb52a055d093451a50ce55690a25002 (patch)
tree       e8bb9f04e8f90453f05f19b015025655c8950cd3 /src/afl-fuzz-init.c
parent     70bd0f799de1a907627b28ad80a447634299839b (diff)
download   afl++-83790d65afb52a055d093451a50ce55690a25002.tar.gz
eliminate race condition for cpu affinity on -M/-S
Diffstat (limited to 'src/afl-fuzz-init.c')
-rw-r--r--  src/afl-fuzz-init.c | 97
1 file changed, 85 insertions, 12 deletions
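
This commit serializes CPU core selection across parallel -M/-S instances: the first fuzzer to start creates a lock file .affinity_lock in the sync directory with O_CREAT|O_EXCL, any other instance that finds the file waits until it disappears, and the lock is removed once a core has been picked (or on any early return). The sketch below shows that lock-file pattern in isolation; it is a simplified illustration under assumed names (the sync directory path and helper functions are placeholders), not the exact afl-fuzz code in the diff that follows.

/* Minimal sketch of the lock-file scheme this commit introduces.
   Illustrative only: the sync directory and helper names are placeholders. */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void acquire_affinity_lock(const char *sync_dir, char *lockfile,
                                  size_t len) {

  int fd, first = 1;

  snprintf(lockfile, len, "%s/.affinity_lock", sync_dir);

  /* O_CREAT|O_EXCL makes creation atomic: only one process can hold the
     lock at a time, so the core selection that follows runs serialized. */
  while ((fd = open(lockfile, O_RDWR | O_CREAT | O_EXCL, 0600)) < 0) {

    if (errno != EEXIST) { perror("open lockfile"); exit(1); }
    if (first) { fprintf(stderr, "lock file present, waiting ...\n"); first = 0; }
    usleep(1000);

  }

  close(fd);

}

static void release_affinity_lock(const char *lockfile) {

  unlink(lockfile);                    /* let the next instance proceed */

}

int main(void) {

  char lockfile[PATH_MAX];

  acquire_affinity_lock(".", lockfile, sizeof(lockfile));
  /* ... scan for a free CPU core and bind to it here ... */
  release_affinity_lock(lockfile);
  return 0;

}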
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index a2e849dc..e51b4729 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -36,14 +36,11 @@ void bind_to_free_cpu(afl_state_t *afl) {
   #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
   cpu_set_t c;
   #elif defined(__NetBSD__)
-  cpuset_t *         c;
+  cpuset_t *c;
   #elif defined(__sun)
-  psetid_t            c;
+  psetid_t c;
   #endif
 
-  u8  cpu_used[4096] = {0};
-  u32 i;
-
   if (afl->cpu_core_count < 2) { return; }
 
   if (afl->afl_env.afl_no_affinity) {
@@ -53,13 +50,46 @@ void bind_to_free_cpu(afl_state_t *afl) {
 
   }
 
+  u8  cpu_used[4096] = {0}, lockfile[PATH_MAX] = "";
+  u32 i;
+
+  if (afl->sync_id) {
+
+    s32 lockfd, first = 1;
+
+    snprintf(lockfile, sizeof(lockfile), "%s/.affinity_lock", afl->sync_dir);
+    setenv(CPU_AFFINITY_ENV_VAR, lockfile, 1);
+
+    do {
+
+      if ((lockfd = open(lockfile, O_RDWR | O_CREAT | O_EXCL, 0600)) < 0) {
+
+        if (first) {
+
+          WARNF("CPU affinity lock file present, waiting ...");
+          first = 0;
+
+        }
+
+        usleep(1000);
+
+      }
+
+    } while (lockfd < 0);
+
+    close(lockfd);
+
+  }
+
   #if defined(__linux__)
+
   DIR *          d;
   struct dirent *de;
   d = opendir("/proc");
 
   if (!d) {
 
+    if (lockfile[0]) unlink(lockfile);
     WARNF("Unable to access /proc - can't scan for free CPU cores.");
     return;
 
@@ -67,11 +97,6 @@ void bind_to_free_cpu(afl_state_t *afl) {
 
   ACTF("Checking CPU core loadout...");
 
-  /* Introduce some jitter, in case multiple AFL tasks are doing the same
-     thing at the same time... */
-
-  usleep(R(1000) * 250);
-
   /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
      Flag all processes bound to a specific CPU using cpu_used[]. This will
      fail for some exotic binding setups, but is likely good enough in almost
@@ -114,20 +139,29 @@ void bind_to_free_cpu(afl_state_t *afl) {
   }
 
   closedir(d);
+
   #elif defined(__FreeBSD__) || defined(__DragonFly__)
+
   struct kinfo_proc *procs;
   size_t             nprocs;
   size_t             proccount;
   int                s_name[] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL};
   size_t             s_name_l = sizeof(s_name) / sizeof(s_name[0]);
 
-  if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) return;
+  if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) {
+
+    if (lockfile[0]) unlink(lockfile);
+    return;
+
+  }
+
   proccount = nprocs / sizeof(*procs);
   nprocs = nprocs * 4 / 3;
 
   procs = ck_alloc(nprocs);
   if (sysctl(s_name, s_name_l, procs, &nprocs, NULL, 0) != 0) {
 
+    if (lockfile[0]) unlink(lockfile);
     ck_free(procs);
     return;
 
@@ -136,6 +170,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
   for (i = 0; i < proccount; i++) {
 
     #if defined(__FreeBSD__)
+
     if (!strcmp(procs[i].ki_comm, "idle")) continue;
 
     // fix when ki_oncpu = -1
@@ -145,16 +180,21 @@ void bind_to_free_cpu(afl_state_t *afl) {
 
     if (oncpu != -1 && oncpu < sizeof(cpu_used) && procs[i].ki_pctcpu > 60)
       cpu_used[oncpu] = 1;
+
     #elif defined(__DragonFly__)
+
     if (procs[i].kp_lwp.kl_cpuid < sizeof(cpu_used) &&
         procs[i].kp_lwp.kl_pctcpu > 10)
       cpu_used[procs[i].kp_lwp.kl_cpuid] = 1;
+
     #endif
 
   }
 
   ck_free(procs);
+
   #elif defined(__NetBSD__)
+
   struct kinfo_proc2 *procs;
   size_t              nprocs;
   size_t              proccount;
@@ -163,13 +203,20 @@ void bind_to_free_cpu(afl_state_t *afl) {
       CTL_KERN, KERN_PROC2, KERN_PROC_ALL, 0, sizeof(struct kinfo_proc2), 0};
   size_t s_name_l = sizeof(s_name) / sizeof(s_name[0]);
 
-  if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) return;
+  if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) {
+
+    if (lockfile[0]) unlink(lockfile);
+    return;
+
+  }
+
   proccount = nprocs / sizeof(struct kinfo_proc2);
   procs = ck_alloc(nprocs * sizeof(struct kinfo_proc2));
   s_name[5] = proccount;
 
   if (sysctl(s_name, s_name_l, procs, &nprocs, NULL, 0) != 0) {
 
+    if (lockfile[0]) unlink(lockfile);
     ck_free(procs);
     return;
 
@@ -183,7 +230,9 @@ void bind_to_free_cpu(afl_state_t *afl) {
   }
 
   ck_free(procs);
+
   #elif defined(__sun)
+
   kstat_named_t *n;
   kstat_ctl_t *  m;
   kstat_t *      k;
@@ -198,6 +247,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
 
   if (!k) {
 
+    if (lockfile[0]) unlink(lockfile);
     kstat_close(m);
     return;
 
@@ -205,6 +255,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
 
   if (kstat_read(m, k, NULL)) {
 
+    if (lockfile[0]) unlink(lockfile);
     kstat_close(m);
     return;
 
@@ -220,6 +271,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
     k = kstat_lookup(m, "cpu_stat", i, NULL);
     if (kstat_read(m, k, &cs)) {
 
+      if (lockfile[0]) unlink(lockfile);
       kstat_close(m);
       return;
 
@@ -233,6 +285,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
   }
 
   kstat_close(m);
+
   #else
     #warning \
         "For this platform we do not have free CPU binding code yet. If possible, please supply a PR to https://github.com/AFLplusplus/AFLplusplus"
@@ -241,7 +294,9 @@ void bind_to_free_cpu(afl_state_t *afl) {
   size_t cpu_start = 0;
 
   try:
+
   #if !defined(__ANDROID__)
+
     for (i = cpu_start; i < afl->cpu_core_count; i++) {
 
       if (!cpu_used[i]) { break; }
@@ -251,6 +306,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
   if (i == afl->cpu_core_count) {
 
   #else
+
     for (i = afl->cpu_core_count - cpu_start - 1; i > -1; i--)
       if (!cpu_used[i]) break;
   if (i == -1) {
@@ -274,18 +330,25 @@ void bind_to_free_cpu(afl_state_t *afl) {
   afl->cpu_aff = i;
 
   #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
+
   CPU_ZERO(&c);
   CPU_SET(i, &c);
+
   #elif defined(__NetBSD__)
+
   c = cpuset_create();
   if (c == NULL) PFATAL("cpuset_create failed");
   cpuset_set(i, c);
+
   #elif defined(__sun)
+
 pset_create(&c);
 if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
+
   #endif
 
   #if defined(__linux__)
+
   if (sched_setaffinity(0, sizeof(c), &c)) {
 
     if (cpu_start == afl->cpu_core_count) {
@@ -302,6 +365,7 @@ if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
   }
 
   #elif defined(__FreeBSD__) || defined(__DragonFly__)
+
   if (pthread_setaffinity_np(pthread_self(), sizeof(c), &c)) {
 
     if (cpu_start == afl->cpu_core_count)
@@ -314,6 +378,7 @@ if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
   }
 
   #elif defined(__NetBSD__)
+
 if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c)) {
 
   if (cpu_start == afl->cpu_core_count)
@@ -326,7 +391,9 @@ if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c)) {
 }
 
 cpuset_destroy(c);
+
   #elif defined(__sun)
+
 if (pset_bind(c, P_PID, getpid(), NULL)) {
 
   if (cpu_start == afl->cpu_core_count)
@@ -339,11 +406,17 @@ if (pset_bind(c, P_PID, getpid(), NULL)) {
 }
 
 pset_destroy(c);
+
   #else
+
   // this will need something for other platforms
   // TODO: Solaris/Illumos has processor_bind ... might worth a try
+
   #endif
 
+  if (lockfile[0]) unlink(lockfile);
+  // we leave the environment variable to ensure a cleanup for other processes
+
 }
 
 #endif                                                     /* HAVE_AFFINITY */
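
The unlink at the end of the diff covers the normal path, and the comment beside it notes that the environment variable set earlier (CPU_AFFINITY_ENV_VAR, pointing at the lock file) is deliberately left in place so a later cleanup can still remove a stale lock. As an illustration only, and assuming an exit handler elsewhere consults that variable (this diff does not show such a handler, and the macro value below is a placeholder), that cleanup could look like this:

#include <stdlib.h>
#include <unistd.h>

#ifndef CPU_AFFINITY_ENV_VAR
  #define CPU_AFFINITY_ENV_VAR "AFL_CPU_AFFINITY_LOCK"   /* placeholder value */
#endif

/* Hypothetical exit-time cleanup: if the variable still points at a lock
   file, remove it so other -M/-S instances are not blocked forever. */
static void affinity_lock_cleanup(void) {

  char *lockfile = getenv(CPU_AFFINITY_ENV_VAR);
  if (lockfile && lockfile[0]) { unlink(lockfile); }

}

int main(void) {

  atexit(affinity_lock_cleanup);
  /* ... bind to a free CPU, run the fuzzing loop, etc. ... */
  return 0;

}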