/*---------------------------------------------------------------*/
/*--- begin                                 guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_emnote.h"              // VexEmNote
#include "libvex_guest_amd64.h"         // VexGuestAMD64State
#include "guest_generic_bb_to_IR.h"     // DisResult

/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                             ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr64 ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           UChar*       guest_code,
                           Long         delta,
                           Addr64       guest_IP,
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
                           Bool         host_bigendian,
                           Bool         sigill_diag );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr**     args,
                                 IRStmt**     precedingStmts,
                                 Int          n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int );

extern
VexGuestLayout amd64guest_layout;


/*---------------------------------------------------------*/
/*--- amd64 guest helpers                                ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern ULong amd64g_calculate_rflags_all ( 
                ULong cc_op, 
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
             );

extern ULong amd64g_calculate_rflags_c ( 
                ULong cc_op, 
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
             );

extern ULong amd64g_calculate_condition ( 
                ULong/*AMD64Condcode*/ cond, 
                ULong cc_op, 
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
             );

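/* An illustrative sketch (not part of the API): these are clean
   helpers, so they can be called directly from C.  Using the thunk
   conventions described later in this file (AMD64G_CC_OP_ADDQ and
   AMD64CondZ are defined below), the Z flag after a 64-bit ADD of
   1 and -1 could be recomputed as:

      ULong argL = 1ULL;
      ULong argR = (ULong)-1LL;
      ULong zf   = amd64g_calculate_condition(
                      AMD64CondZ, AMD64G_CC_OP_ADDQ,
                      argL, argR, 0 );
      // zf == 1, because 1 + (-1) wraps to zero
*/
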
extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR ( 
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz 
             );

extern ULong amd64g_calculate_RCL ( 
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz 
             );

extern ULong amd64g_calculate_pclmul ( ULong s1, ULong s2, ULong which );

extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );

extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern 
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt, 
//                                UInt seg_selector, UInt virtual_addr );

extern ULong amd64g_calculate_mmx_pmaddwd ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw  ( ULong, ULong );

extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );

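/* An illustrative sketch (buf and len are hypothetical names): these
   model the SSE4.2 CRC32 instruction (CRC-32C polynomial), which
   folds each new chunk into the value accumulated in the destination
   register, so a byte-wise checksum of a buffer chains the helper:

      ULong crc = initialCrc;   // whatever the register held beforehand
      for (ULong i = 0; i < len; i++)
         crc = amd64g_calc_crc32b(crc, buf[i]);
*/
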
extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo, 
                                   ULong dHi, ULong dLo, 
                                   ULong imm_and_return_control_bit );

extern ULong amd64g_calculate_pext ( ULong, ULong );
extern ULong amd64g_calculate_pdep ( ULong, ULong );

/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( ULong/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( ULong/*addr*/, ULong/*data*/ );

extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );

extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

extern void      amd64g_dirtyhelper_FXSAVE  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FXRSTOR ( VexGuestAMD64State*, HWord );

extern ULong amd64g_dirtyhelper_RDTSC ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );

extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data, 
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address, ULong op /* 0 or 1 */ );

/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte, so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  The 4th byte of the opcode is in
   the range 0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called only with opc_and_imm combinations which
   have actually been validated, and will assert otherwise.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx ( 
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );

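/* An illustrative sketch (st, imm8, gstOffL and gstOffR are
   hypothetical; the two XMM inputs must already sit at those guest
   state offsets): for an ISTRI variant (4th opcode byte 0x63), the
   return value would be unpacked as

      ULong res      = amd64g_dirtyhelper_PCMPxSTRx(
                          st, (0x63 << 8) | imm8,
                          gstOffL, gstOffR,
                          0, 0 );  // edxIN/eaxIN: only used by ESTRx
      ULong newFlags = res & 0xFFFF;          // new OSZACP bits
      ULong newECX   = (res >> 16) & 0xFFFF;  // new %ecx (xSTRI only)
*/
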
/* Implementation of the Intel AES instructions as described in
      Intel Advanced Vector Extensions
      Programming Reference
      MARCH 2008
      319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no accesses of memory,
   is a pure function.

   opc4 contains the 4th byte of the opcode.  The front end should
   only supply opcodes corresponding to
   AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC; the helper will assert
   otherwise.

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, and gstOffD is the guest state offset for the XMM
   register output.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.
*/
extern void amd64g_dirtyhelper_AES ( 
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );

/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no accesses of memory,
   is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the XMM
   register input and the XMM register output.  We never have to deal
   with the memory case since that is handled by pre-loading the
   relevant value into the fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST ( 
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );

//extern void amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//       amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//       amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );



/*---------------------------------------------------------*/
/*--- Condition code stuff                               ---*/
/*---------------------------------------------------------*/

/* rflags masks */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)

/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3   (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2   (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1   (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0   (1ULL << AMD64G_FC_SHIFT_C0)

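/* An illustrative sketch: individual %rflags bits can be picked out
   of any value returned by amd64g_calculate_rflags_all using these
   masks, e.g.

      ULong rf = amd64g_calculate_rflags_all(cc_op, dep1, dep2, ndep);
      Bool  zf = (rf & AMD64G_CC_MASK_Z) != 0;
      Bool  cf = (rf & AMD64G_CC_MASK_C) != 0;
*/
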

/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
      We want Memcheck to believe that the resulting flags are
      data-dependent on both CC_DEP1 and CC_DEP2, hence the
      name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
      sometimes needed.  We arrange things so that Memcheck does
      not believe the resulting flags are data-dependent on CC_NDEP
      ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_rflags_all, calculate_rflags_c)
       the IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGICB/W/L/Q).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again: definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one bit less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave A Z S P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/

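/* An illustrative sketch of the adc/sbb encoding just described: for
   a 64-bit ADC the thunk fields would be set up (in IR; shown here as
   plain C) as

      cc_op   = AMD64G_CC_OP_ADCQ;   // defined below
      cc_dep1 = argL;
      cc_dep2 = argR ^ oldC;         // oldC is 0 or 1
      cc_ndep = oldC;

   so that Memcheck sees a data dependency on oldC via DEP2, while the
   evaluation functions recover the real second argument with

      argR = cc_dep2 ^ cc_ndep;
*/
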
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_ANDN32,  /* 53 */
    AMD64G_CC_OP_ANDN64,  /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

    AMD64G_CC_OP_BLSI32,  /* 55 */
    AMD64G_CC_OP_BLSI64,  /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSMSK32,/* 57 */
    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSR32,  /* 59 */
    AMD64G_CC_OP_BLSR64,  /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_NUMBER
};

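/* An illustrative sketch: with AMD64G_CC_OP_COPY the evaluation
   functions simply hand DEP1 back, so flags saved into a thunk that
   way can be recovered directly (assuming only the OSZACP bits of
   DEP1 are meaningful):

      ULong oszacp = amd64g_calculate_rflags_all(
                        AMD64G_CC_OP_COPY, savedFlags, 0, 0 );
*/
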
typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* less               */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;

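/* An illustrative note: the encoding pairs each condition with its
   negation at (cond ^ 1), matching the hardware condition encoding,
   so e.g.

      amd64g_calculate_condition(AMD64CondZ,  op, dep1, dep2, ndep)
      amd64g_calculate_condition(AMD64CondNZ, op, dep1, dep2, ndep)

   always return complementary 0/1 values for any given thunk.
*/
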
#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/