/*
   american fuzzy lop - high-performance binary-only instrumentation
   -----------------------------------------------------------------

   Written by Andrew Griffiths <agriffiths@google.com> and
              Michal Zalewski <lcamtuf@google.com>

   Idea & design very much by Andrew Griffiths.

   TCG instrumentation and block chaining support by Andrea Biondo
                                      <andrea.biondo965@gmail.com>
   QEMU 3.1.0 port and thread-safety by Andrea Fioraldi
                                      <andreafioraldi@gmail.com>

   Copyright 2015, 2016, 2017 Google Inc. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This code is a shim patched into the separately-distributed source
   code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality
   to implement AFL-style instrumentation and to take care of the remaining
   parts of the AFL fork server logic.

   The resulting QEMU binary is essentially a standalone instrumentation
   tool; for an example of how to leverage it for other purposes, you can
   have a look at afl-showmap.c.

 */
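
/* The helper itself is implemented in afl-qemu-cpu-inl.h.  For reference,
   a minimal sketch of the classic AFL edge-coverage update it performs;
   the names (afl_area_ptr, prev_loc) and the placement of the cur_loc
   hashing are assumptions based on upstream AFL, not code from this file:

     static __thread abi_ulong prev_loc;

     void afl_maybe_log(void *cur_loc_ptr) {
         abi_ulong cur_loc = (abi_ulong)cur_loc_ptr;
         if (!afl_area_ptr) return;            // SHM not mapped: not fuzzing
         afl_area_ptr[cur_loc ^ prev_loc]++;   // bump counter for this edge
         prev_loc = cur_loc >> 1;              // keep A->B distinct from B->A
     }
*/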

void afl_maybe_log(void* cur_loc);

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
{
    int real_args, pi;
    int is_64bit;   /* shared by the host-specific paths below; declaring it
                       once avoids redeclarations at function scope */
    unsigned sizemask, flags;
    TCGOp *op;

    /* Materialize cur_loc as a constant TCG temp; it reaches the helper as
       a single pointer-sized value. */
    TCGTemp *arg = tcgv_ptr_temp( tcg_const_tl(cur_loc) );

    flags = 0;
    /* dh_sizemask() packs two bits per slot: bit 2n marks slot n as a
       64-bit value, bit 2n+1 as signed.  Slot 0 is the (void) return
       value, slot 1 the pointer-sized argument; the is_64bit/is_signed
       tests below read those bits. */
    sizemask = dh_sizemask(void, 0) | dh_sizemask(ptr, 1);

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    TCGTemp *split_args[MAX_OPC_PARAM];
    /* nargs and args mirror the locals of tcg_gen_callN(), from which this
       function was adapted; without declarations here the assignments
       below would not compile. */
    int nargs = 0;
    TCGTemp **args = split_args;

    if (sizemask != 0) {
        real_args = 0;
        is_64bit = sizemask & (1 << 2);
        if (is_64bit) {
            TCGv_i64 orig = temp_tcgv_i64(arg);
            TCGv_i32 h = tcg_temp_new_i32();
            TCGv_i32 l = tcg_temp_new_i32();
            tcg_gen_extr_i64_i32(l, h, orig);
            split_args[real_args++] = tcgv_i32_temp(h);
            split_args[real_args++] = tcgv_i32_temp(l);
        } else {
            split_args[real_args++] = arg;
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    is_64bit = sizemask & (1 << 2);
    int is_signed = sizemask & (2 << 2);
    if (!is_64bit) {
        TCGv_i64 temp = tcg_temp_new_i64();
        TCGv_i64 orig = temp_tcgv_i64(arg);
        if (is_signed) {
            tcg_gen_ext32s_i64(temp, orig);
        } else {
            tcg_gen_ext32u_i64(temp, orig);
        }
        arg = tcgv_i64_temp(temp);
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    /* Build the call op by hand, the way tcg_gen_callN() does.  TCGOP_CALLO
       is the output (return-value) operand count: zero, since
       afl_maybe_log() returns void. */
    op = tcg_emit_op(INDEX_op_call);

    pi = 0;

    TCGOP_CALLO(op) = 0;

    real_args = 0;
    is_64bit = sizemask & (1 << 2);
    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        /* some targets want aligned 64 bit args */
        if (real_args & 1) {
            op->args[pi++] = TCG_CALL_DUMMY_ARG;
            real_args++;
        }
#endif
       /* If stack grows up, then we will be placing successive
          arguments at lower addresses, which means we need to
          reverse the order compared to how we would normally
          treat either big or little-endian.  For those arguments
          that will wind up in registers, this still works for
          HPPA (the only current STACK_GROWSUP target) since the
          argument registers are *also* allocated in decreasing
          order.  If another such target is added, this logic may
          have to get more complicated to differentiate between
          stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
        op->args[pi++] = temp_arg(arg + 1);
        op->args[pi++] = temp_arg(arg);
#else
        op->args[pi++] = temp_arg(arg);
        op->args[pi++] = temp_arg(arg + 1);
#endif
        real_args += 2;
    } else {
        /* Mirror tcg_gen_callN(): its `continue` keeps a 64-bit argument
           from also being emitted below as a single temp. */
        op->args[pi++] = temp_arg(arg);
        real_args++;
    }

    /* Trailing call-op metadata: the helper address, then the call flags;
       TCGOP_CALLI records how many input arguments were emitted. */
    op->args[pi++] = (uintptr_t)&afl_maybe_log;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    real_args = 0;
    is_64bit = orig_sizemask & (1 << 2);
    if (is_64bit) {
        tcg_temp_free_internal(args[real_args++]);
        tcg_temp_free_internal(args[real_args++]);
    } else {
        real_args++;
    }
    /* tcg_gen_callN() reassembles a 64-bit return value from two 32-bit
       pieces at this point, but afl_maybe_log() returns void, so bit 0 of
       orig_sizemask is never set and there is no return value to handle. */
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    is_64bit = sizemask & (1 << 2);
    if (!is_64bit) {
        tcg_temp_free_internal(arg);
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
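
/* Illustrative use (hypothetical function name; in these patches the real
   call site is added to QEMU's translator by the accompanying patch
   files): emit the logging call while translating a block, keyed on a
   value derived from the block's guest PC.

     static void example_gen_block_trace(target_ulong pc)
     {
         // The PC is typically scrambled and masked down to a coverage-map
         // index before being handed to the logging helper.
         tcg_gen_afl_maybe_log_call(pc);
     }
*/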