about summary refs log tree commit diff
path: root/qemu_mode/patches/afl-qemu-tcg-inl.h
blob: d53a1ccf7bbda276b0dd8330bc4ee55e1359cbe5 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
/*
   american fuzzy lop++ - high-performance binary-only instrumentation
   -------------------------------------------------------------------

   Originally written by Andrew Griffiths <agriffiths@google.com> and
                         Michal Zalewski <lcamtuf@google.com>

   TCG instrumentation and block chaining support by Andrea Biondo
                                      <andrea.biondo965@gmail.com>

   QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero
   counters by Andrea Fioraldi <andreafioraldi@gmail.com>

   Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This code is a shim patched into the separately-distributed source
   code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality
   to implement AFL-style instrumentation and to take care of the remaining
   parts of the AFL fork server logic.

   The resulting QEMU binary is essentially a standalone instrumentation
   tool; for an example of how to leverage it for other purposes, you can
   have a look at afl-showmap.c.

 */

void afl_maybe_log(void *cur_loc);

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {

  /* Emit a TCG "call" op that invokes afl_maybe_log(cur_loc) from generated
     code.  This is QEMU's tcg_gen_callN() specialized to a single i64
     argument and no return value. */

  int      i, real_args, pi;
  unsigned sizemask, flags;
  TCGOp *  op;

  /* One argument: cur_loc materialized as a constant temporary.  nargs and
     args must stay mutable: the 32-bit sparc path below rewrites them after
     splitting the 64-bit value into two 32-bit halves (the previous version
     assigned to undeclared nargs/args there and then emitted the unsplit
     temp, so it could not compile or work on that host). */
  int       nargs = 1;
  TCGTemp * call_args[1] = {tcgv_i64_temp(tcg_const_tl(cur_loc))};
  TCGTemp **args = call_args;

  flags = 0;
  /* void return; argument 1 is 64-bit. */
  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);

#if defined(__sparc__) && !defined(__arch64__) && \
    !defined(CONFIG_TCG_INTERPRETER)
  /* We have 64-bit values in one register, but need to pass as two
     separate parameters.  Split them.  */
  int      orig_sizemask = sizemask;
  int      orig_nargs = nargs;
  TCGv_i64 retl, reth;
  TCGTemp *split_args[MAX_OPC_PARAM];

  retl = NULL;
  reth = NULL;
  if (sizemask != 0) {

    for (i = real_args = 0; i < nargs; ++i) {

      int is_64bit = sizemask & (1 << (i + 1) * 2);
      if (is_64bit) {

        TCGv_i64 orig = temp_tcgv_i64(args[i]);
        TCGv_i32 h = tcg_temp_new_i32();
        TCGv_i32 l = tcg_temp_new_i32();
        tcg_gen_extr_i64_i32(l, h, orig);
        split_args[real_args++] = tcgv_i32_temp(h);
        split_args[real_args++] = tcgv_i32_temp(l);

      } else {

        split_args[real_args++] = args[i];

      }

    }

    nargs = real_args;
    args = split_args;
    sizemask = 0;

  }

#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
  /* Extend 32-bit arguments to 64 bits; a no-op here since the only
     argument is already 64-bit. */
  for (i = 0; i < nargs; ++i) {

    int is_64bit = sizemask & (1 << (i + 1) * 2);
    int is_signed = sizemask & (2 << (i + 1) * 2);
    if (!is_64bit) {

      TCGv_i64 temp = tcg_temp_new_i64();
      TCGv_i64 orig = temp_tcgv_i64(args[i]);
      if (is_signed) {

        tcg_gen_ext32s_i64(temp, orig);

      } else {

        tcg_gen_ext32u_i64(temp, orig);

      }

      args[i] = tcgv_i64_temp(temp);

    }

  }

#endif /* TCG_TARGET_EXTEND_ARGS */

  op = tcg_emit_op(INDEX_op_call);

  pi = 0;

  /* No return value. */
  TCGOP_CALLO(op) = 0;

  real_args = 0;
  for (i = 0; i < nargs; i++) {

    int is_64bit = sizemask & (1 << (i + 1) * 2);
    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {

#ifdef TCG_TARGET_CALL_ALIGN_ARGS
      /* some targets want aligned 64 bit args */
      if (real_args & 1) {

        op->args[pi++] = TCG_CALL_DUMMY_ARG;
        real_args++;

      }

#endif
      /* If stack grows up, then we will be placing successive
         arguments at lower addresses, which means we need to
         reverse the order compared to how we would normally
         treat either big or little-endian.  For those arguments
         that will wind up in registers, this still works for
         HPPA (the only current STACK_GROWSUP target) since the
         argument registers are *also* allocated in decreasing
         order.  If another such target is added, this logic may
         have to get more complicated to differentiate between
         stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
      op->args[pi++] = temp_arg(args[i] + 1);
      op->args[pi++] = temp_arg(args[i]);
#else
      op->args[pi++] = temp_arg(args[i]);
      op->args[pi++] = temp_arg(args[i] + 1);
#endif
      real_args += 2;
      continue;

    }

    op->args[pi++] = temp_arg(args[i]);
    real_args++;

  }

  op->args[pi++] = (uintptr_t)&afl_maybe_log;
  op->args[pi++] = flags;
  TCGOP_CALLI(op) = real_args;

  /* Make sure the fields didn't overflow.  */
  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) && \
    !defined(CONFIG_TCG_INTERPRETER)
  /* Free all of the parts we allocated above.  */
  for (i = real_args = 0; i < orig_nargs; ++i) {

    int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
    if (is_64bit) {

      tcg_temp_free_internal(args[real_args++]);
      tcg_temp_free_internal(args[real_args++]);

    } else {

      real_args++;

    }

  }

  if (orig_sizemask & 1) {

    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
       Note that describing these as TCGv_i64 eliminates an unnecessary
       zero-extension that tcg_gen_concat_i32_i64 would create.  */
    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
    tcg_temp_free_i64(retl);
    tcg_temp_free_i64(reth);

  }

#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
  /* Free the extension temporaries (none here: the sole argument is
     64-bit, so no temp was created above). */
  for (i = 0; i < nargs; ++i) {

    int is_64bit = sizemask & (1 << (i + 1) * 2);
    if (!is_64bit) { tcg_temp_free_internal(args[i]); }

  }

#endif /* TCG_TARGET_EXTEND_ARGS */

}

/* Emit a TCG "call" op that invokes func(cur_loc, arg1, arg2) from generated
   code.  Used for CompareCoverage: func receives the current location plus
   the two comparison operands.  This is QEMU's tcg_gen_callN() specialized
   to three i64 arguments and no return value. */
void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
                                  TCGv_i64 arg1, TCGv_i64 arg2) {

  int      i, real_args, nb_rets, pi;
  unsigned sizemask, flags;
  TCGOp *  op;

  /* Three arguments: cur_loc (materialized as a constant temp) plus the two
     comparison operands.  nargs and args must stay mutable: the 32-bit
     sparc path below rewrites both after splitting each 64-bit value into
     two 32-bit halves (with the previous "const int nargs" and array-typed
     "args", those assignments were hard compile errors on that host). */
  int       nargs = 3;
  TCGTemp * call_args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)),
                            tcgv_i64_temp(arg1), tcgv_i64_temp(arg2)};
  TCGTemp **args = call_args;

  flags = 0;
  /* void return; arguments 1..3 are all 64-bit. */
  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) |
             dh_sizemask(i64, 3);

#if defined(__sparc__) && !defined(__arch64__) && \
    !defined(CONFIG_TCG_INTERPRETER)
  /* We have 64-bit values in one register, but need to pass as two
     separate parameters.  Split them.  */
  int      orig_sizemask = sizemask;
  int      orig_nargs = nargs;
  TCGv_i64 retl, reth;
  TCGTemp *split_args[MAX_OPC_PARAM];

  retl = NULL;
  reth = NULL;
  if (sizemask != 0) {

    for (i = real_args = 0; i < nargs; ++i) {

      int is_64bit = sizemask & (1 << (i + 1) * 2);
      if (is_64bit) {

        TCGv_i64 orig = temp_tcgv_i64(args[i]);
        TCGv_i32 h = tcg_temp_new_i32();
        TCGv_i32 l = tcg_temp_new_i32();
        tcg_gen_extr_i64_i32(l, h, orig);
        split_args[real_args++] = tcgv_i32_temp(h);
        split_args[real_args++] = tcgv_i32_temp(l);

      } else {

        split_args[real_args++] = args[i];

      }

    }

    nargs = real_args;
    args = split_args;
    sizemask = 0;

  }

#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
  /* Extend 32-bit arguments to 64 bits; a no-op here since every argument
     is already 64-bit. */
  for (i = 0; i < nargs; ++i) {

    int is_64bit = sizemask & (1 << (i + 1) * 2);
    int is_signed = sizemask & (2 << (i + 1) * 2);
    if (!is_64bit) {

      TCGv_i64 temp = tcg_temp_new_i64();
      TCGv_i64 orig = temp_tcgv_i64(args[i]);
      if (is_signed) {

        tcg_gen_ext32s_i64(temp, orig);

      } else {

        tcg_gen_ext32u_i64(temp, orig);

      }

      args[i] = tcgv_i64_temp(temp);

    }

  }

#endif /* TCG_TARGET_EXTEND_ARGS */

  op = tcg_emit_op(INDEX_op_call);

  pi = 0;
  /* No return value. */
  nb_rets = 0;
  TCGOP_CALLO(op) = nb_rets;

  real_args = 0;
  for (i = 0; i < nargs; i++) {

    int is_64bit = sizemask & (1 << (i + 1) * 2);
    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {

#ifdef TCG_TARGET_CALL_ALIGN_ARGS
      /* some targets want aligned 64 bit args */
      if (real_args & 1) {

        op->args[pi++] = TCG_CALL_DUMMY_ARG;
        real_args++;

      }

#endif
      /* If stack grows up, then we will be placing successive
         arguments at lower addresses, which means we need to
         reverse the order compared to how we would normally
         treat either big or little-endian.  For those arguments
         that will wind up in registers, this still works for
         HPPA (the only current STACK_GROWSUP target) since the
         argument registers are *also* allocated in decreasing
         order.  If another such target is added, this logic may
         have to get more complicated to differentiate between
         stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
      op->args[pi++] = temp_arg(args[i] + 1);
      op->args[pi++] = temp_arg(args[i]);
#else
      op->args[pi++] = temp_arg(args[i]);
      op->args[pi++] = temp_arg(args[i] + 1);
#endif
      real_args += 2;
      continue;

    }

    op->args[pi++] = temp_arg(args[i]);
    real_args++;

  }

  op->args[pi++] = (uintptr_t)func;
  op->args[pi++] = flags;
  TCGOP_CALLI(op) = real_args;

  /* Make sure the fields didn't overflow.  */
  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) && \
    !defined(CONFIG_TCG_INTERPRETER)
  /* Free all of the parts we allocated above.  */
  for (i = real_args = 0; i < orig_nargs; ++i) {

    int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
    if (is_64bit) {

      tcg_temp_free_internal(args[real_args++]);
      tcg_temp_free_internal(args[real_args++]);

    } else {

      real_args++;

    }

  }

  if (orig_sizemask & 1) {

    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
       Note that describing these as TCGv_i64 eliminates an unnecessary
       zero-extension that tcg_gen_concat_i32_i64 would create.  */
    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
    tcg_temp_free_i64(retl);
    tcg_temp_free_i64(reth);

  }

#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
  /* Free the extension temporaries (none here: all arguments are 64-bit,
     so no temp was created above). */
  for (i = 0; i < nargs; ++i) {

    int is_64bit = sizemask & (1 << (i + 1) * 2);
    if (!is_64bit) { tcg_temp_free_internal(args[i]); }

  }

#endif /* TCG_TARGET_EXTEND_ARGS */

}