about summary refs log tree commit diff
path: root/frida_mode/src/prefetch.c
diff options
context:
space:
mode:
Diffstat (limited to 'frida_mode/src/prefetch.c')
-rw-r--r--  frida_mode/src/prefetch.c  121
1 files changed, 121 insertions, 0 deletions
diff --git a/frida_mode/src/prefetch.c b/frida_mode/src/prefetch.c
new file mode 100644
index 00000000..64633c1c
--- /dev/null
+++ b/frida_mode/src/prefetch.c
@@ -0,0 +1,121 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "frida-gum.h"
#include "prefetch.h"
#include "debug.h"
+
/* NOTE(review): TRUST is not referenced in this file — presumably consumed
 * elsewhere (e.g. stalker trust threshold); confirm before removing. */
#define TRUST 0

/* Fixed byte size of the shared-memory IPC region; prefetch_init() asserts
 * that sizeof(prefetch_data_t) matches this exactly. */
#define PREFETCH_SIZE 65536

/* Number of block addresses that fit after the leading count field:
 * (65536 - sizeof(size_t)) / sizeof(void *) == 8191 on LP64. */
#define PREFETCH_ENTRIES ((PREFETCH_SIZE - sizeof(size_t)) / sizeof(void *))

/* Layout of the shared-memory region: a fill count followed by the recorded
 * block addresses. The child appends (prefetch_write), the parent drains and
 * resets count (prefetch_read). */
typedef struct {

  size_t count;
  void * entry[PREFETCH_ENTRIES];

} prefetch_data_t;

/* Attachment of the SysV shared-memory segment; NULL until prefetch_init()
 * runs (or forever, if prefetching is disabled via the environment). */
static prefetch_data_t *prefetch_data = NULL;

/* SysV shared-memory segment id; -1 until created by prefetch_init(). */
static int prefetch_shm_id = -1;
+
+/*
+ * We do this from the transformer since we need one anyway for coverage, this
+ * saves the need to use an event sink.
+ */
+void prefetch_write(void *addr) {
+
+  /* Bail if we aren't initialized */
+  if (prefetch_data == NULL) return;
+
+  /*
+   * Our shared memory IPC is large enough for about 1000 entries, we can fine
+   * tune this if we need to. But if we have more new blocks that this in a
+   * single run then we ignore them and we'll pick them up next time.
+   */
+  if (prefetch_data->count >= PREFETCH_ENTRIES) return;
+
+  /*
+   * Write the block address to the SHM IPC and increment the number of entries.
+   */
+
+  prefetch_data->entry[prefetch_data->count] = addr;
+  prefetch_data->count++;
+
+}
+
+/*
+ * Read the IPC region one block at the time and prefetch it
+ */
+void prefetch_read(GumStalker *stalker) {
+
+  if (prefetch_data == NULL) return;
+
+  for (size_t i = 0; i < prefetch_data->count; i++) {
+
+    void *addr = prefetch_data->entry[i];
+    gum_stalker_prefetch(stalker, addr, 1);
+
+  }
+
+  /*
+   * Reset the entry count to indicate we have finished with it and it can be
+   * refilled by the child.
+   */
+  prefetch_data->count = 0;
+
+}
+
+void prefetch_init() {
+
+  g_assert_cmpint(sizeof(prefetch_data_t), ==, PREFETCH_SIZE);
+  gboolean prefetch = (getenv("AFL_FRIDA_INST_NO_PREFETCH") == NULL);
+
+  OKF("Instrumentation - prefetch [%c]", prefetch ? 'X' : ' ');
+
+  if (!prefetch) { return; }
+  /*
+   * Make our shared memory, we can attach before we fork, just like AFL does
+   * with the coverage bitmap region and fork will take care of ensuring both
+   * the parent and child see the same consistent memory region.
+   */
+  prefetch_shm_id =
+      shmget(IPC_PRIVATE, sizeof(prefetch_data_t), IPC_CREAT | IPC_EXCL | 0600);
+  if (prefetch_shm_id < 0) {
+
+    FATAL("prefetch_shm_id < 0 - errno: %d\n", errno);
+
+  }
+
+  prefetch_data = shmat(prefetch_shm_id, NULL, 0);
+  g_assert(prefetch_data != MAP_FAILED);
+
+  /*
+   * Configure the shared memory region to be removed once the process dies.
+   */
+  if (shmctl(prefetch_shm_id, IPC_RMID, NULL) < 0) {
+
+    FATAL("shmctl (IPC_RMID) < 0 - errno: %d\n", errno);
+
+  }
+
+  /* Clear it, not sure it's necessary, just seems like good practice */
+  memset(prefetch_data, '\0', sizeof(prefetch_data_t));
+
+}
+
/*
 * Dummy function whose address is handed to gum_stalker_activate() as the
 * activation target. noinline is essential: the function must exist as a
 * real, callable symbol so Stalker can recognize the call to it.
 */
__attribute__((noinline)) static void prefetch_activation() {

  /* Empty asm barrier so the optimizer cannot elide the function body or
   * the call made to it from prefetch_start(). */
  asm volatile("");

}
+
/*
 * Arm the stalker and then trigger it. gum_stalker_activate() registers
 * prefetch_activation as the point at which stalking begins; the direct
 * call immediately afterwards crosses that point, so everything executed
 * from here on runs under Stalker. The order of these two calls is
 * load-bearing and must not change.
 */
void prefetch_start(GumStalker *stalker) {

  gum_stalker_activate(stalker, prefetch_activation);
  prefetch_activation();

}
+