#include "instrument.h"

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "honggfuzz.h"
#include "libcommon/common.h"
#include "libcommon/log.h"
#include "libcommon/util.h"

int hfuzz_module_instrument = 0;

static bool guards_initialized = false;

/*
 * We require SSE4.2 with x86-(32|64) for the 'popcnt' instruction, as it's much
 * faster than the software emulation in gcc/clang
 */
#if defined(__x86_64__) || defined(__i386__)
#define ATTRIBUTE_X86_REQUIRE_SSE42 __attribute__((__target__("sse4.2")))
#else
#define ATTRIBUTE_X86_REQUIRE_SSE42
#endif /* defined(__x86_64__) || defined(__i386__) */

static feedback_t bbMapFb;
feedback_t* feedback = &bbMapFb;
uint32_t my_thread_no = 0;

__attribute__((constructor)) static void mapBB(void) {
    char* my_thread_no_str = getenv(_HF_THREAD_NO_ENV);
    if (my_thread_no_str == NULL) {
        return;
    }
    my_thread_no = atoi(my_thread_no_str);

    if (my_thread_no >= _HF_THREAD_MAX) {
        LOG_F("Received (via envvar) my_thread_no >= _HF_THREAD_MAX (%" PRIu32 " >= %d)\n",
            my_thread_no, _HF_THREAD_MAX);
    }
    struct stat st;
    if (fstat(_HF_BITMAP_FD, &st) == -1) {
        return;
    }
    if (st.st_size != sizeof(feedback_t)) {
        LOG_F(
            "Size of the feedback structure mismatch: st.st_size != sizeof(feedback_t) (%zu != "
            "%zu). Recompile your fuzzed binaries with the newest honggfuzz sources (libhfuzz.a)\n",
            (size_t)st.st_size, sizeof(feedback_t));
    }
    if ((feedback = mmap(NULL, sizeof(feedback_t), PROT_READ | PROT_WRITE, MAP_SHARED,
             _HF_BITMAP_FD, 0)) == MAP_FAILED) {
        PLOG_F("mmap of the feedback structure");
    }
    feedback->pidFeedbackPc[my_thread_no] = 0U;
    feedback->pidFeedbackEdge[my_thread_no] = 0U;
    feedback->pidFeedbackCmp[my_thread_no] = 0U;
}

/*
 * -finstrument-functions
 */
ATTRIBUTE_X86_REQUIRE_SSE42 void __cyg_profile_func_enter(void* func, void* caller) {
    register size_t pos =
        (((uintptr_t)func << 12) | ((uintptr_t)caller & 0xFFF)) & _HF_PERF_BITMAP_BITSZ_MASK;
    register uint8_t prev = ATOMIC_BTS(feedback->bbMapPc, pos);
    if (!prev) {
        ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
    }
}

ATTRIBUTE_X86_REQUIRE_SSE42 void __cyg_profile_func_exit(void* func UNUSED, void* caller UNUSED) {
    return;
}

/*
 * -fsanitize-coverage=trace-pc
 */
ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_pc(void) {
    register uintptr_t ret = (uintptr_t)__builtin_return_address(0) & _HF_PERF_BITMAP_BITSZ_MASK;
    register uint8_t prev = ATOMIC_BTS(feedback->bbMapPc, ret);
    if (!prev) {
        ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
    }
}

/*
 * -fsanitize-coverage=trace-cmp
 */
ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) {
    uintptr_t pos = (uintptr_t)__builtin_return_address(0) % _HF_PERF_BITMAP_SIZE_16M;
    register uint8_t v = ((sizeof(Arg1) * 8) - __builtin_popcount(Arg1 ^ Arg2));
    uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
    if (prev < v) {
        ATOMIC_SET(feedback->bbMapCmp[pos], v);
        ATOMIC_POST_ADD(feedback->pidFeedbackCmp[my_thread_no], v - prev);
    }
}
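/*
 * Worked example of the scoring above (an illustration, not part of the code
 * path): for Arg1 == 0xF0 and Arg2 == 0xF1, Arg1 ^ Arg2 == 0x01, its popcount
 * is 1, so v == 8 - 1 == 7 -- seven of the eight bits already match. The map
 * keeps the best score seen per call site, so an input that brings one more
 * bit towards equality raises pidFeedbackCmp and gets kept by the fuzzer.
 */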
ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) {
    uintptr_t pos = (uintptr_t)__builtin_return_address(0) % _HF_PERF_BITMAP_SIZE_16M;
    register uint8_t v = ((sizeof(Arg1) * 8) - __builtin_popcount(Arg1 ^ Arg2));
    uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
    if (prev < v) {
        ATOMIC_SET(feedback->bbMapCmp[pos], v);
        ATOMIC_POST_ADD(feedback->pidFeedbackCmp[my_thread_no], v - prev);
    }
}

ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) {
    uintptr_t pos = (uintptr_t)__builtin_return_address(0) % _HF_PERF_BITMAP_SIZE_16M;
    register uint8_t v = ((sizeof(Arg1) * 8) - __builtin_popcount(Arg1 ^ Arg2));
    uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
    if (prev < v) {
        ATOMIC_SET(feedback->bbMapCmp[pos], v);
        ATOMIC_POST_ADD(feedback->pidFeedbackCmp[my_thread_no], v - prev);
    }
}

ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) {
    uintptr_t pos = (uintptr_t)__builtin_return_address(0) % _HF_PERF_BITMAP_SIZE_16M;
    register uint8_t v = ((sizeof(Arg1) * 8) - __builtin_popcountll(Arg1 ^ Arg2));
    uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
    if (prev < v) {
        ATOMIC_SET(feedback->bbMapCmp[pos], v);
        ATOMIC_POST_ADD(feedback->pidFeedbackCmp[my_thread_no], v - prev);
    }
}

/*
 * Const versions of trace_cmp; we don't apply any special handling to them (for now)
 */
void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2)
    __attribute__((alias("__sanitizer_cov_trace_cmp1")));
void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2)
    __attribute__((alias("__sanitizer_cov_trace_cmp2")));
void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2)
    __attribute__((alias("__sanitizer_cov_trace_cmp4")));
void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2)
    __attribute__((alias("__sanitizer_cov_trace_cmp8")));

/*
 * Cases[0] is the number of case values
 * Cases[1] is the width of Val in bits
 * Cases[2..] are the case values themselves
 */
ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t* Cases) {
    for (uint64_t i = 0; i < Cases[0]; i++) {
        uintptr_t pos = ((uintptr_t)__builtin_return_address(0) + i) % _HF_PERF_BITMAP_SIZE_16M;
        uint8_t v = (uint8_t)Cases[1] - __builtin_popcountll(Val ^ Cases[i + 2]);
        uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
        if (prev < v) {
            ATOMIC_SET(feedback->bbMapCmp[pos], v);
            ATOMIC_POST_ADD(feedback->pidFeedbackCmp[my_thread_no], v - prev);
        }
    }
}

ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_cmp(
    uint64_t SizeAndType, uint64_t Arg1, uint64_t Arg2) {
    uint64_t CmpSize = (SizeAndType >> 32) / 8;
    switch (CmpSize) {
        case (sizeof(uint8_t)):
            __sanitizer_cov_trace_cmp1(Arg1, Arg2);
            return;
        case (sizeof(uint16_t)):
            __sanitizer_cov_trace_cmp2(Arg1, Arg2);
            return;
        case (sizeof(uint32_t)):
            __sanitizer_cov_trace_cmp4(Arg1, Arg2);
            return;
        case (sizeof(uint64_t)):
            __sanitizer_cov_trace_cmp8(Arg1, Arg2);
            return;
    }
}
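/*
 * Example of the SizeAndType encoding handled above (a sketch based on the
 * older sanitizer-coverage trace-cmp ABI, where the upper 32 bits carry the
 * operand width in bits): a 32-bit comparison arrives with
 * (SizeAndType >> 32) == 32, so CmpSize == 32 / 8 == 4 and the call is routed
 * to __sanitizer_cov_trace_cmp4(). Widths with no matching case fall through
 * the switch and are ignored.
 */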
/*
 * -fsanitize-coverage=indirect-calls
 */
ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_pc_indir(uintptr_t callee) {
    register size_t pos1 = (uintptr_t)__builtin_return_address(0) << 12;
    register size_t pos2 = callee & 0xFFF;
    register size_t pos = (pos1 | pos2) & _HF_PERF_BITMAP_BITSZ_MASK;

    register uint8_t prev = ATOMIC_BTS(feedback->bbMapPc, pos);
    if (!prev) {
        ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
    }
}

ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_indir_call16(
    void* callee, void* callee_cache16[] UNUSED) {
    register size_t pos1 = (uintptr_t)__builtin_return_address(0) << 12;
    register size_t pos2 = (uintptr_t)callee & 0xFFF;
    register size_t pos = (pos1 | pos2) & _HF_PERF_BITMAP_BITSZ_MASK;

    register uint8_t prev = ATOMIC_BTS(feedback->bbMapPc, pos);
    if (!prev) {
        ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
    }
}

/*
 * -fsanitize-coverage=trace-pc-guard
 */
ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_pc_guard_init(
    uint32_t* start, uint32_t* stop) {
    guards_initialized = true;
    static uint32_t n = 1U;
    for (uint32_t* x = start; x < stop; x++, n++) {
        if (n >= _HF_PC_GUARD_MAX) {
            LOG_F("This process has too many PC guards: %td\n", stop - start);
        }
        /* If the corresponding PC was already hit, map this specific guard as
         * non-interesting (0) */
        *x = ATOMIC_GET(feedback->pcGuardMap[n]) ? 0U : n;
    }
}

ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {
#if defined(__ANDROID__)
    // ANDROID: Bionic invokes routines that Honggfuzz wraps before either
    // *SAN or Honggfuzz has initialized. Check whether Honggfuzz has
    // initialized -- if not, force *SAN to initialize (otherwise _strcmp()
    // will crash, as it is *SAN-instrumented).
    //
    // Defer all trace_pc_guard activity until trace_pc_guard_init is
    // invoked via sancov.module_ctor in the normal course of things.
    if (!guards_initialized) {
        void __asan_init(void) __attribute__((weak));
        if (__asan_init) {
            __asan_init();
        }
        void __msan_init(void) __attribute__((weak));
        if (__msan_init) {
            __msan_init();
        }
        void __ubsan_init(void) __attribute__((weak));
        if (__ubsan_init) {
            __ubsan_init();
        }
        void __tsan_init(void) __attribute__((weak));
        if (__tsan_init) {
            __tsan_init();
        }
        return;
    }
#endif /* defined(__ANDROID__) */
    if (*guard == 0U) {
        return;
    }
    bool prev = ATOMIC_XCHG(feedback->pcGuardMap[*guard], true);
    if (prev == false) {
        ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackEdge[my_thread_no]);
    }
    *guard = 0U;
}

void instrumentUpdateCmpMap(uintptr_t addr, unsigned int n) {
    uintptr_t pos = addr % _HF_PERF_BITMAP_SIZE_16M;
    uint8_t v = n > 254 ? 254 : n;
    uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
    if (prev < v) {
        ATOMIC_SET(feedback->bbMapCmp[pos], v);
        ATOMIC_POST_ADD(feedback->pidFeedbackCmp[my_thread_no], v - prev);
    }
}
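/*
 * Usage sketch for instrumentUpdateCmpMap() (hypothetical caller, compiled
 * out): interceptors for comparison routines such as strcmp()/memcmp() can
 * feed partial-match progress into the cmp map by passing their return
 * address and the number of matching bytes. The wrapper below only
 * illustrates that pattern; it is not code that exists in this file.
 */
#if 0
static int hypothetical_strcmp_hook(const char* s1, const char* s2) {
    /* Count the length of the common prefix */
    unsigned int matched = 0;
    while (s1[matched] != '\0' && s1[matched] == s2[matched]) {
        matched++;
    }
    /* Score this call site by how close the strings are to being equal */
    instrumentUpdateCmpMap((uintptr_t)__builtin_return_address(0), matched);
    return (unsigned char)s1[matched] - (unsigned char)s2[matched];
}
#endif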