/*---------------------------------------------------------------*/
/*--- begin                                 guest_x86_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-x86 directory. */

#ifndef __VEX_GUEST_X86_DEFS_H
#define __VEX_GUEST_X86_DEFS_H


/*---------------------------------------------------------*/
/*--- x86 to IR conversion                              ---*/
/*---------------------------------------------------------*/

/* Convert one x86 insn to IR.  See the type DisOneInstrFn in
   bb_to_IR.h. */
extern
DisResult disInstr_X86 ( IRSB*        irbb,
                         Bool         (*resteerOkFn) ( void*, Addr64 ),
                         Bool         resteerCisOk,
                         void*        callback_opaque,
                         UChar*       guest_code,
                         Long         delta,
                         Addr64       guest_IP,
                         VexArch      guest_arch,
                         VexArchInfo* archinfo,
                         VexAbiInfo*  abiinfo,
                         Bool         host_bigendian );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_x86_spechelper ( HChar*   function_name,
                               IRExpr** args,
                               IRStmt** precedingStmts,
                               Int      n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description.
*/
extern
Bool guest_x86_state_requires_precise_mem_exns ( Int, Int );

extern
VexGuestLayout x86guest_layout;


/*---------------------------------------------------------*/
/*--- x86 guest helpers                                 ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern UInt  x86g_calculate_eflags_all (
                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep
             );

VEX_REGPARM(3)
extern UInt  x86g_calculate_eflags_c (
                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep
             );

extern UInt  x86g_calculate_condition (
                UInt/*X86Condcode*/ cond,
                UInt cc_op,
                UInt cc_dep1, UInt cc_dep2, UInt cc_ndep
             );

extern UInt  x86g_calculate_FXAM ( UInt tag, ULong dbl );

extern ULong x86g_calculate_RCR (
                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz
             );
extern ULong x86g_calculate_RCL (
                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz
             );

extern UInt  x86g_calculate_daa_das_aaa_aas ( UInt AX_and_flags, UInt opcode );

extern UInt  x86g_calculate_aad_aam ( UInt AX_and_flags, UInt opcode );

extern ULong x86g_check_fldcw ( UInt fpucw );

extern UInt  x86g_create_fpucw ( UInt fpround );

extern ULong x86g_check_ldmxcsr ( UInt mxcsr );

extern UInt  x86g_create_mxcsr ( UInt sseround );


/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_x86.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
extern
ULong x86g_use_seg_selector ( HWord ldt, HWord gdt,
                              UInt seg_selector, UInt virtual_addr );
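
/* Illustrative sketch only, not part of this interface: one plausible
   way for a caller to decode the result, assuming the convention
   documented above (any set bit in the upper 32 bits of the result
   signals failure, otherwise the lower 32 bits carry the linear
   address).  The function name and out-parameter are hypothetical. */
static inline Bool example_seg_translate ( HWord ldt, HWord gdt,
                                           UInt seg_selector,
                                           UInt virtual_addr,
                                           /*OUT*/UInt* linear_addr )
{
   ULong r = x86g_use_seg_selector(ldt, gdt, seg_selector, virtual_addr);
   if ((r >> 32) != 0)
      return False;           /* translation failed */
   *linear_addr = (UInt)r;    /* low 32 bits hold the linear address */
   return True;
}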

extern ULong x86g_calculate_mmx_pmaddwd  ( ULong, ULong );
extern ULong x86g_calculate_mmx_psadbw   ( ULong, ULong );
extern UInt  x86g_calculate_mmx_pmovmskb ( ULong );
extern UInt  x86g_calculate_sse_pmovmskb ( ULong w64hi, ULong w64lo );


/* --- DIRTY HELPERS --- */

extern ULong x86g_dirtyhelper_loadF80le  ( UInt );

extern void  x86g_dirtyhelper_storeF80le ( UInt, ULong );

extern void  x86g_dirtyhelper_CPUID_sse0 ( VexGuestX86State* );
extern void  x86g_dirtyhelper_CPUID_sse1 ( VexGuestX86State* );
extern void  x86g_dirtyhelper_CPUID_sse2 ( VexGuestX86State* );

extern void  x86g_dirtyhelper_FINIT ( VexGuestX86State* );

extern void  x86g_dirtyhelper_FXSAVE ( VexGuestX86State*, HWord );
extern void  x86g_dirtyhelper_FSAVE  ( VexGuestX86State*, HWord );
extern void  x86g_dirtyhelper_FSTENV ( VexGuestX86State*, HWord );

extern ULong x86g_dirtyhelper_RDTSC ( void );

extern UInt  x86g_dirtyhelper_IN  ( UInt portno, UInt sz/*1,2 or 4*/ );
extern void  x86g_dirtyhelper_OUT ( UInt portno, UInt data,
                                    UInt sz/*1,2 or 4*/ );

extern void  x86g_dirtyhelper_SxDT ( void* address,
                                     UInt op /* 0 or 1 */ );

extern VexEmWarn
             x86g_dirtyhelper_FXRSTOR ( VexGuestX86State*, HWord );

extern VexEmWarn
             x86g_dirtyhelper_FRSTOR ( VexGuestX86State*, HWord );

extern VexEmWarn
             x86g_dirtyhelper_FLDENV ( VexGuestX86State*, HWord );


/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* eflags masks */
#define X86G_CC_SHIFT_O   11
#define X86G_CC_SHIFT_S   7
#define X86G_CC_SHIFT_Z   6
#define X86G_CC_SHIFT_A   4
#define X86G_CC_SHIFT_C   0
#define X86G_CC_SHIFT_P   2

#define X86G_CC_MASK_O    (1 << X86G_CC_SHIFT_O)
#define X86G_CC_MASK_S    (1 << X86G_CC_SHIFT_S)
#define X86G_CC_MASK_Z    (1 << X86G_CC_SHIFT_Z)
#define X86G_CC_MASK_A    (1 << X86G_CC_SHIFT_A)
#define X86G_CC_MASK_C    (1 << X86G_CC_SHIFT_C)
#define X86G_CC_MASK_P    (1 << X86G_CC_SHIFT_P)
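
/* Illustrative sketch only: flag values computed by
   x86g_calculate_eflags_all are laid out using the same bit positions
   as the real %EFLAGS, so individual flags can be picked out with the
   masks above.  The helper name below is hypothetical. */
static inline UInt example_extract_ZF ( UInt cc_op, UInt cc_dep1,
                                        UInt cc_dep2, UInt cc_ndep )
{
   UInt ef = x86g_calculate_eflags_all(cc_op, cc_dep1, cc_dep2, cc_ndep);
   return (ef & X86G_CC_MASK_Z) ? 1 : 0;   /* 1 iff the Z flag is set */
}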

/* FPU flag masks */
#define X86G_FC_SHIFT_C3   14
#define X86G_FC_SHIFT_C2   10
#define X86G_FC_SHIFT_C1   9
#define X86G_FC_SHIFT_C0   8

#define X86G_FC_MASK_C3    (1 << X86G_FC_SHIFT_C3)
#define X86G_FC_MASK_C2    (1 << X86G_FC_SHIFT_C2)
#define X86G_FC_MASK_C1    (1 << X86G_FC_SHIFT_C1)
#define X86G_FC_MASK_C0    (1 << X86G_FC_SHIFT_C0)


/* %EFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (x86guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_eflags, calculate_eflags_c) the
       IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-
                                         result             unused

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave S Z A P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
enum {
   X86G_CC_OP_COPY=0, /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                      /* just copy DEP1 to output */

   X86G_CC_OP_ADDB,   /* 1 */
   X86G_CC_OP_ADDW,   /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_ADDL,   /* 3 */

   X86G_CC_OP_SUBB,   /* 4 */
   X86G_CC_OP_SUBW,   /* 5 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_SUBL,   /* 6 */

   X86G_CC_OP_ADCB,   /* 7 */
   X86G_CC_OP_ADCW,   /* 8 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
   X86G_CC_OP_ADCL,   /* 9 */

   X86G_CC_OP_SBBB,   /* 10 */
   X86G_CC_OP_SBBW,   /* 11 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
   X86G_CC_OP_SBBL,   /* 12 */

   X86G_CC_OP_LOGICB, /* 13 */
   X86G_CC_OP_LOGICW, /* 14 DEP1 = result, DEP2 = 0, NDEP = unused */
   X86G_CC_OP_LOGICL, /* 15 */

   X86G_CC_OP_INCB,   /* 16 */
   X86G_CC_OP_INCW,   /* 17 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
   X86G_CC_OP_INCL,   /* 18 */

   X86G_CC_OP_DECB,   /* 19 */
   X86G_CC_OP_DECW,   /* 20 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
   X86G_CC_OP_DECL,   /* 21 */

   X86G_CC_OP_SHLB,   /* 22 DEP1 = res, DEP2 = res', NDEP = unused */
   X86G_CC_OP_SHLW,   /* 23 where res' is like res but shifted one bit less */
   X86G_CC_OP_SHLL,   /* 24 */

   X86G_CC_OP_SHRB,   /* 25 DEP1 = res, DEP2 = res', NDEP = unused */
   X86G_CC_OP_SHRW,   /* 26 where res' is like res but shifted one bit less */
   X86G_CC_OP_SHRL,   /* 27 */

   X86G_CC_OP_ROLB,   /* 28 */
   X86G_CC_OP_ROLW,   /* 29 DEP1 = res, DEP2 = 0, NDEP = old flags */
   X86G_CC_OP_ROLL,   /* 30 */

   X86G_CC_OP_RORB,   /* 31 */
   X86G_CC_OP_RORW,   /* 32 DEP1 = res, DEP2 = 0, NDEP = old flags */
   X86G_CC_OP_RORL,   /* 33 */

   X86G_CC_OP_UMULB,  /* 34 */
   X86G_CC_OP_UMULW,  /* 35 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_UMULL,  /* 36 */

   X86G_CC_OP_SMULB,  /* 37 */
   X86G_CC_OP_SMULW,  /* 38 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_SMULL,  /* 39 */

   X86G_CC_OP_NUMBER
};
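
/* Illustrative sketch only: how the scheme above fits together for a
   32-bit add.  After "addl %ebx,%eax" the thunk would hold
   CC_OP = X86G_CC_OP_ADDL, CC_DEP1 = the old %eax (argL), CC_DEP2 =
   %ebx (argR), and CC_NDEP = 0 (unused, but still written).  If the
   flags are later needed they are recomputed on demand, in the spirit
   of the hypothetical helper below.  Note that for adc/sbb the
   evaluation functions recover the real second argument as
   DEP2 ^ NDEP, since DEP2 holds argR ^ oldCarry and NDEP holds
   oldCarry. */
static inline UInt example_flags_after_addl ( UInt argL, UInt argR )
{
   return x86g_calculate_eflags_all( X86G_CC_OP_ADDL, argL, argR, 0 );
}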

typedef
   enum {
      X86CondO      = 0,  /* overflow           */
      X86CondNO     = 1,  /* no overflow        */

      X86CondB      = 2,  /* below              */
      X86CondNB     = 3,  /* not below          */

      X86CondZ      = 4,  /* zero               */
      X86CondNZ     = 5,  /* not zero           */

      X86CondBE     = 6,  /* below or equal     */
      X86CondNBE    = 7,  /* not below or equal */

      X86CondS      = 8,  /* negative           */
      X86CondNS     = 9,  /* not negative       */

      X86CondP      = 10, /* parity even        */
      X86CondNP     = 11, /* not parity even    */

      X86CondL      = 12, /* less               */
      X86CondNL     = 13, /* not less           */

      X86CondLE     = 14, /* less or equal      */
      X86CondNLE    = 15, /* not less or equal  */

      X86CondAlways = 16  /* HACK */
   }
   X86Condcode;

#endif /* ndef __VEX_GUEST_X86_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   guest_x86_defs.h ---*/
/*---------------------------------------------------------------*/