
Lines Matching defs:imm8

7764          /* (sz==4): PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
9214 UInt imm8, Bool all_lanes, Int sz )
9216 if (imm8 >= 32) return False;
9219 the supplied imm8. */
9229 switch (imm8) {
9284 /* Don't forget to add test cases to VCMPSS_128_<imm8> in
9349 UInt imm8;
9359 imm8 = getUChar(delta+1);
9360 if (imm8 >= 8) return delta0; /* FAIL */
9361 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
9363 vassert(!preSwap); /* never needed for imm8 < 8 */
9368 imm8,
9373 imm8 = getUChar(delta+alen);
9374 if (imm8 >= 8) return delta0; /* FAIL */
9375 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
9377 vassert(!preSwap); /* never needed for imm8 < 8 */
9392 imm8,
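The findSSECmpOp calls above translate imm8 into an SSE compare predicate; these non-AVX CMPPS/CMPSS/CMPPD/CMPSD decoders reject imm8 >= 8, leaving only the eight legacy predicates (the AVX forms further down accept 0..31, which is why the preSwap flag exists at all). As an illustrative reference in plain C, not the IR the decoder actually emits, the scalar double-precision predicates behave roughly like this (cmp_pd_ref is a made-up name):

#include <math.h>
#include <stdbool.h>

/* Illustrative model of the eight legacy SSE compare predicates
   selected by imm8 & 7; a 'true' result stands for an all-ones lane. */
static bool cmp_pd_ref ( double d /* dest lane */, double s /* src lane */,
                         unsigned imm8 )
{
   bool unord = isnan(d) || isnan(s);     /* either operand is a NaN */
   switch (imm8 & 7) {
      case 0: return !unord && d == s;    /* CMPEQ    */
      case 1: return !unord && d <  s;    /* CMPLT    */
      case 2: return !unord && d <= s;    /* CMPLE    */
      case 3: return unord;               /* CMPUNORD */
      case 4: return unord || d != s;     /* CMPNEQ   */
      case 5: return unord || !(d < s);   /* CMPNLT   */
      case 6: return unord || !(d <= s);  /* CMPNLE   */
      case 7: return !unord;              /* CMPORD   */
   }
   return false; /* unreachable */
}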
9948 static IRTemp math_PALIGNR_XMM ( IRTemp sV, IRTemp dV, UInt imm8 )
9963 if (imm8 == 0) {
9967 else if (imm8 >= 1 && imm8 <= 7) {
9968 assign( rHi, dis_PALIGNR_XMM_helper(dLo, sHi, imm8) );
9969 assign( rLo, dis_PALIGNR_XMM_helper(sHi, sLo, imm8) );
9971 else if (imm8 == 8) {
9975 else if (imm8 >= 9 && imm8 <= 15) {
9976 assign( rHi, dis_PALIGNR_XMM_helper(dHi, dLo, imm8-8) );
9977 assign( rLo, dis_PALIGNR_XMM_helper(dLo, sHi, imm8-8) );
9979 else if (imm8 == 16) {
9983 else if (imm8 >= 17 && imm8 <= 23) {
9984 assign( rHi, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(imm8-16))) );
9985 assign( rLo, dis_PALIGNR_XMM_helper(dHi, dLo, imm8-16) );
9987 else if (imm8 == 24) {
9991 else if (imm8 >= 25 && imm8 <= 31) {
9993 assign( rLo, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(imm8-24))) );
9995 else if (imm8 >= 32 && imm8 <= 255) {
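math_PALIGNR_XMM above performs the byte-alignment shift by splitting each operand into 64-bit halves and enumerating every imm8 range, presumably so the IR can stick to fixed-distance 64-bit shifts. A byte-level model of the same operation (illustrative only; palignr_ref is not a VEX helper) is just a right shift of the 32-byte concatenation dst:src:

#include <stdint.h>
#include <string.h>

/* Illustrative PALIGNR model: concatenate dst:src (src in the low 16
   bytes), shift right by imm8 bytes, keep the low 16 bytes.
   imm8 >= 32 yields all zeroes, matching the last case above. */
static void palignr_ref ( uint8_t res[16], const uint8_t dst[16],
                          const uint8_t src[16], unsigned imm8 )
{
   uint8_t cat[32];
   memcpy(cat,      src, 16);   /* bits 127:0   of the concatenation */
   memcpy(cat + 16, dst, 16);   /* bits 255:128 of the concatenation */
   for (unsigned i = 0; i < 16; i++)
      res[i] = (i + imm8 < 32) ? cat[i + imm8] : 0;
}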
10070 80 /0 = addb $imm8, rm8
10072 82 /0 = addb $imm8, rm8
10093 OF BA /7 = btcw $imm8, rm16 and btcw $imm8, rm32
11092 static IRTemp math_SHUFPS_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11096 vassert(imm8 < 256);
11105 mkV128from32s( SELS((imm8>>6)&3), SELS((imm8>>4)&3),
11106 SELD((imm8>>2)&3), SELD((imm8>>0)&3) ) );
11116 static IRTemp math_SHUFPS_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11122 IRTemp rVhi = math_SHUFPS_128(sVhi, dVhi, imm8);
11123 IRTemp rVlo = math_SHUFPS_128(sVlo, dVlo, imm8);
11130 static IRTemp math_SHUFPD_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11147 SELS((imm8>>1)&1), SELD((imm8>>0)&1) ) );
11155 static IRTemp math_SHUFPD_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11161 IRTemp rVhi = math_SHUFPD_128(sVhi, dVhi, (imm8 >> 2) & 3);
11162 IRTemp rVlo = math_SHUFPD_128(sVlo, dVlo, imm8 & 3);
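math_SHUFPS_128 and math_SHUFPD_128 above read imm8 as packed lane selectors: the low result lanes come from the destination, the high lanes from the source. A minimal array model (shufps_ref and shufpd_ref are illustrative names, not VEX functions):

#include <stdint.h>

/* Illustrative SHUFPS selection: each 2-bit field of imm8 picks one
   dword, low two result lanes from dst, high two from src. */
static void shufps_ref ( uint32_t r[4], const uint32_t d[4],
                         const uint32_t s[4], unsigned imm8 )
{
   r[0] = d[(imm8 >> 0) & 3];
   r[1] = d[(imm8 >> 2) & 3];
   r[2] = s[(imm8 >> 4) & 3];
   r[3] = s[(imm8 >> 6) & 3];
}

/* SHUFPD is the 64-bit analogue, using only imm8 bits 1:0. */
static void shufpd_ref ( uint64_t r[2], const uint64_t d[2],
                         const uint64_t s[2], unsigned imm8 )
{
   r[0] = d[imm8 & 1];
   r[1] = s[(imm8 >> 1) & 1];
}

The 256-bit wrappers apply the same 128-bit routine per lane; as the calls above show, math_SHUFPD_256 hands imm8 bits 3:2 to the high lane and bits 1:0 to the low lane.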
11169 static IRTemp math_BLENDPD_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11174 switch( imm8 & 3 ) {
11193 static IRTemp math_BLENDPD_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11199 IRTemp rVhi = math_BLENDPD_128(sVhi, dVhi, (imm8 >> 2) & 3);
11200 IRTemp rVlo = math_BLENDPD_128(sVlo, dVlo, imm8 & 3);
11207 static IRTemp math_BLENDPS_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11214 assign( imm8_mask, mkV128( imm8_perms[ (imm8 & 15) ] ) );
11226 static IRTemp math_BLENDPS_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11232 IRTemp rVhi = math_BLENDPS_128(sVhi, dVhi, (imm8 >> 4) & 15);
11233 IRTemp rVlo = math_BLENDPS_128(sVlo, dVlo, imm8 & 15);
11240 static IRTemp math_PBLENDW_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11242 /* Make w be a 16-bit version of imm8, formed by duplicating each
11243 bit in imm8. */
11247 if (imm8 & (1 << i))
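math_BLENDPD_128, math_BLENDPS_128 and math_PBLENDW_128 above implement the same idea at qword, dword and word granularity: each imm8 bit chooses the source lane when set and the destination lane when clear. An illustrative word-level model (pblendw_ref is a made-up name):

#include <stdint.h>

/* Illustrative PBLENDW: bit i of imm8 selects src (set) or dst
   (clear) for 16-bit lane i.  BLENDPS/BLENDPD are the same idea on
   four dwords (imm8 & 15) and two qwords (imm8 & 3). */
static void pblendw_ref ( uint16_t r[8], const uint16_t d[8],
                          const uint16_t s[8], unsigned imm8 )
{
   for (unsigned i = 0; i < 8; i++)
      r[i] = (imm8 & (1u << i)) ? s[i] : d[i];
}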
11458 UInt imm8;
11469 imm8 = (UInt)getUChar(delta+1);
11473 imm8, nameXMMReg(rE), nameXMMReg(rG));
11477 imm8 = (UInt)getUChar(delta+alen);
11481 imm8, dis_buf, nameXMMReg(rG));
11492 assign(dVmut, mk64from16s( SEL((imm8>>6)&3), SEL((imm8>>4)&3),
11493 SEL((imm8>>2)&3), SEL((imm8>>0)&3) ));
11513 UInt imm8;
11523 imm8 = (UInt)getUChar(delta+1);
11526 imm8, nameYMMReg(rE), nameYMMReg(rG));
11530 imm8 = (UInt)getUChar(delta+alen);
11533 imm8, dis_buf, nameYMMReg(rG));
11540 assign( dVhi, mk64from16s( s[4 + ((imm8>>6)&3)], s[4 + ((imm8>>4)&3)],
11541 s[4 + ((imm8>>2)&3)], s[4 + ((imm8>>0)&3)] ) );
11542 assign( dVlo, mk64from16s( s[0 + ((imm8>>6)&3)], s[0 + ((imm8>>4)&3)],
11543 s[0 + ((imm8>>2)&3)], s[0 + ((imm8>>0)&3)] ) );
11560 UInt imm8;
11565 imm8 = getUChar(delta+1) & 7;
11568 imm8, nameXMMReg(rE), nameIReg32(rG));
11575 switch (imm8) {
12202 static IRTemp math_PINSRW_128 ( IRTemp v128, IRTemp u16, UInt imm8 )
12204 vassert(imm8 >= 0 && imm8 <= 7);
12212 mkU8(16 * (imm8 & 3))));
12213 if (imm8 < 4) {
12219 UShort mask = ~(3 << (imm8 * 2));
14483 Int imm8 = 0;
14492 imm8 = (Int)getUChar(delta+1);
14494 DIP("shufps $%d,%s,%s\n", imm8, nameXMMReg(rE), nameXMMReg(rG));
14498 imm8 = (Int)getUChar(delta+alen);
14500 DIP("shufps $%d,%s,%s\n", imm8, dis_buf, nameXMMReg(rG));
14502 IRTemp res = math_SHUFPS_128( sV, dV, imm8 );
18888 static IRTemp math_PINSRB_128 ( IRTemp v128, IRTemp u8, UInt imm8 )
18890 vassert(imm8 >= 0 && imm8 <= 15);
18898 mkU8(8 * (imm8 & 7))));
18899 if (imm8 < 8) {
18905 UShort mask = ~(1 << imm8);
18914 static IRTemp math_PINSRD_128 ( IRTemp v128, IRTemp u32, UInt imm8 )
18923 switch (imm8) {
18947 static IRTemp math_PINSRQ_128 ( IRTemp v128, IRTemp u64, UInt imm8 )
18953 if (imm8 == 0) {
18957 vassert(imm8 == 1);
18970 static IRTemp math_INSERTPS ( IRTemp dstV, IRTemp toInsertD, UInt imm8 )
18976 vassert(imm8 <= 255);
18977 dstDs[(imm8 >> 4) & 3] = toInsertD; /* "imm8_count_d" */
18979 UInt imm8_zmask = (imm8 & 15);
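The PINSRB/PINSRW/PINSRD/PINSRQ helpers above simply overwrite lane (imm8 & N) of the vector, whereas math_INSERTPS decodes three imm8 fields: bits 7:6 pick the source dword (register form only, as the decoder lines below show), bits 5:4 pick the destination dword, and bits 3:0 zero result dwords. A sketch of the INSERTPS part (insertps_ref is illustrative, not a VEX helper):

#include <stdint.h>

/* Illustrative INSERTPS: imm8 bits 5:4 ("count_d") pick the dest
   dword to overwrite, bits 3:0 ("zmask") then zero selected result
   dwords.  Bits 7:6 ("count_s") were already applied by the caller
   when choosing to_insert from a register source. */
static void insertps_ref ( uint32_t dst[4], uint32_t to_insert,
                           unsigned imm8 )
{
   dst[(imm8 >> 4) & 3] = to_insert;
   for (unsigned i = 0; i < 4; i++)
      if (imm8 & (1u << i))
         dst[i] = 0;
}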
19004 Int imm8;
19010 imm8 = (Int)getUChar(delta+1);
19013 imm8 = (Int)getUChar(delta+alen);
19015 switch ( (imm8 >> 2) & 3 ) {
19023 binop( Iop_Shr32, mkexpr(sel_lane), mkU8(((imm8 & 3)*8)) ) );
19030 DIP( "%spextrb $%d, %s,%s\n", mbV, imm8,
19037 imm8, nameXMMReg( gregOfRexRM(pfx, modrm) ), dis_buf );
19044 static IRTemp math_DPPD_128 ( IRTemp src_vec, IRTemp dst_vec, UInt imm8 )
19046 vassert(imm8 < 256);
19056 mkV128( imm8_perms[ ((imm8 >> 4) & 3) ] ) ) );
19067 mkV128( imm8_perms[ (imm8 & 3) ] ) ) );
19072 static IRTemp math_DPPS_128 ( IRTemp src_vec, IRTemp dst_vec, UInt imm8 )
19074 vassert(imm8 < 256);
19091 mkV128( imm8_perms[((imm8 >> 4)& 15)] ) ) );
19110 mkV128( imm8_perms[ (imm8 & 15) ] ) ) );
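math_DPPD_128 and math_DPPS_128 above use the imm8 high nibble (high two bits for DPPD) as a multiply mask and the low nibble (low two bits) as a broadcast mask. Ignoring the exact summation order and rounding that the generated IR performs, an illustrative DPPS model (dpps_ref is a made-up name):

/* Illustrative DPPS: imm8 bits 7:4 gate the per-lane products, the
   gated products are summed, and imm8 bits 3:0 select which result
   lanes receive the sum (the rest become 0.0f). */
static void dpps_ref ( float r[4], const float d[4], const float s[4],
                       unsigned imm8 )
{
   float sum = 0.0f;
   for (unsigned i = 0; i < 4; i++)
      if (imm8 & (1u << (4 + i)))
         sum += d[i] * s[i];
   for (unsigned i = 0; i < 4; i++)
      r[i] = (imm8 & (1u << i)) ? sum : 0.0f;
}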
19115 static IRTemp math_MPSADBW_128 ( IRTemp dst_vec, IRTemp src_vec, UInt imm8 )
19126 assign(src_maskV, mkV128( src_mask[ imm8 & 3 ] ));
19127 assign(dst_maskV, mkV128( dst_mask[ (imm8 >> 2) & 1 ] ));
19151 mkU64( 0x80 | (imm8 & 7) ));
19154 mkU64( 0x00 | (imm8 & 7) ));
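math_MPSADBW_128 above selects a 4-byte source block with imm8 bits 1:0 and the destination starting offset with imm8 bit 2, then forms eight sums of absolute byte differences. A plain-C reference (mpsadbw_ref is illustrative; note that the 256-bit decoder further down feeds imm8 >> 3 to the high lane, so each 128-bit lane consumes a 3-bit field):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative MPSADBW: result word i is the sum of four absolute
   byte differences between dst[d_off+i .. d_off+i+3] and the fixed
   source block src[s_off .. s_off+3]. */
static void mpsadbw_ref ( uint16_t r[8], const uint8_t d[16],
                          const uint8_t s[16], unsigned imm8 )
{
   unsigned s_off = 4 * (imm8 & 3);
   unsigned d_off = 4 * ((imm8 >> 2) & 1);
   for (unsigned i = 0; i < 8; i++) {
      unsigned sum = 0;
      for (unsigned k = 0; k < 4; k++)
         sum += (unsigned)abs((int)d[d_off + i + k] - (int)s[s_off + k]);
      r[i] = (uint16_t)sum;
   }
}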
19217 static IRTemp math_PCLMULQDQ( IRTemp dV, IRTemp sV, UInt imm8 )
19221 assign(t0, unop((imm8&1)? Iop_V128HIto64 : Iop_V128to64,
19223 assign(t1, unop((imm8&16) ? Iop_V128HIto64 : Iop_V128to64,
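math_PCLMULQDQ above uses imm8 bit 0 to pick the destination qword and bit 4 to pick the source qword, then multiplies the two operands carry-lessly, i.e. in GF(2)[x]. An illustrative reference for the 64x64 -> 128-bit carry-less product itself (clmul64_ref is not a VEX function):

#include <stdint.h>

/* Illustrative carry-less multiply: accumulate (a << i) with XOR for
   every set bit i of b; res[0] is the low qword, res[1] the high. */
static void clmul64_ref ( uint64_t res[2], uint64_t a, uint64_t b )
{
   res[0] = res[1] = 0;
   for (unsigned i = 0; i < 64; i++) {
      if (b & (1ULL << i)) {
         res[0] ^= a << i;
         if (i != 0)
            res[1] ^= a >> (64 - i);  /* bits shifted past qword 0 */
      }
   }
}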
19263 /* 66 0F 3A 08 /r ib = ROUNDPS imm8, xmm2/m128, xmm1 */
19333 /* 66 0F 3A 09 /r ib = ROUNDPD imm8, xmm2/m128, xmm1 */
19388 /* 66 0F 3A 0A /r ib = ROUNDSS imm8, xmm2/m32, xmm1
19389 66 0F 3A 0B /r ib = ROUNDSD imm8, xmm2/m64, xmm1
19441 /* 66 0F 3A 0C /r ib = BLENDPS xmm1, xmm2/m128, imm8
19445 Int imm8;
19454 imm8 = (Int)getUChar(delta+1);
19457 DIP( "blendps $%d, %s,%s\n", imm8,
19462 1/* imm8 is 1 byte after the amode */ );
19465 imm8 = (Int)getUChar(delta+alen);
19468 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
19472 mkexpr( math_BLENDPS_128( src_vec, dst_vec, imm8) ) );
19478 /* 66 0F 3A 0D /r ib = BLENDPD xmm1, xmm2/m128, imm8
19482 Int imm8;
19490 imm8 = (Int)getUChar(delta+1);
19493 DIP( "blendpd $%d, %s,%s\n", imm8,
19498 1/* imm8 is 1 byte after the amode */ );
19501 imm8 = (Int)getUChar(delta+alen);
19504 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
19508 mkexpr( math_BLENDPD_128( src_vec, dst_vec, imm8) ) );
19514 /* 66 0F 3A 0E /r ib = PBLENDW xmm1, xmm2/m128, imm8
19518 Int imm8;
19527 imm8 = (Int)getUChar(delta+1);
19530 DIP( "pblendw $%d, %s,%s\n", imm8,
19535 1/* imm8 is 1 byte after the amode */ );
19538 imm8 = (Int)getUChar(delta+alen);
19541 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
19545 mkexpr( math_PBLENDW_128( src_vec, dst_vec, imm8) ) );
19551 /* 66 0F 3A 14 /r ib = PEXTRB r/m16, xmm, imm8
19561 /* 66 0F 3A 15 /r ib = PEXTRW r/m16, xmm, imm8
19571 /* 66 no-REX.W 0F 3A 16 /r ib = PEXTRD reg/mem32, xmm2, imm8
19580 /* 66 REX.W 0F 3A 16 /r ib = PEXTRQ reg/mem64, xmm2, imm8
19592 /* 66 0F 3A 17 /r ib = EXTRACTPS reg/mem32, xmm2, imm8 Extract
19604 /* 66 0F 3A 20 /r ib = PINSRB xmm1, r32/m8, imm8
19607 Int imm8;
19613 imm8 = (Int)(getUChar(delta+1) & 0xF);
19616 DIP( "pinsrb $%d,%s,%s\n", imm8,
19620 imm8 = (Int)(getUChar(delta+alen) & 0xF);
19624 imm8, dis_buf, nameXMMReg(rG) );
19628 IRTemp res = math_PINSRB_128( src_vec, new8, imm8 );
19635 /* 66 0F 3A 21 /r ib = INSERTPS imm8, xmm2/m32, xmm1
19638 UInt imm8;
19651 imm8 = getUChar(delta+1);
19652 d2ins = dsE[(imm8 >> 6) & 3]; /* "imm8_count_s" */
19655 imm8, nameXMMReg(rE), nameXMMReg(rG) );
19659 imm8 = getUChar(delta+alen);
19662 imm8, dis_buf, nameXMMReg(rG) );
19668 putXMMReg( rG, mkexpr(math_INSERTPS( vG, d2ins, imm8 )) );
19674 /* 66 no-REX.W 0F 3A 22 /r ib = PINSRD xmm1, r/m32, imm8
19705 /* 66 REX.W 0F 3A 22 /r ib = PINSRQ xmm1, r/m64, imm8
19739 /* 66 0F 3A 40 /r ib = DPPS xmm1, xmm2/m128, imm8
19743 Int imm8;
19750 imm8 = (Int)getUChar(delta+1);
19754 imm8, nameXMMReg(rE), nameXMMReg(rG) );
19757 1/* imm8 is 1 byte after the amode */ );
19760 imm8 = (Int)getUChar(delta+alen);
19763 imm8, dis_buf, nameXMMReg(rG) );
19765 IRTemp res = math_DPPS_128( src_vec, dst_vec, imm8 );
19772 /* 66 0F 3A 41 /r ib = DPPD xmm1, xmm2/m128, imm8
19776 Int imm8;
19783 imm8 = (Int)getUChar(delta+1);
19787 imm8, nameXMMReg(rE), nameXMMReg(rG) );
19790 1/* imm8 is 1 byte after the amode */ );
19793 imm8 = (Int)getUChar(delta+alen);
19796 imm8, dis_buf, nameXMMReg(rG) );
19798 IRTemp res = math_DPPD_128( src_vec, dst_vec, imm8 );
19805 /* 66 0F 3A 42 /r ib = MPSADBW xmm1, xmm2/m128, imm8
19808 Int imm8;
19819 imm8 = (Int)getUChar(delta+1);
19822 DIP( "mpsadbw $%d, %s,%s\n", imm8,
19826 1/* imm8 is 1 byte after the amode */ );
19829 imm8 = (Int)getUChar(delta+alen);
19831 DIP( "mpsadbw $%d, %s,%s\n", imm8, dis_buf, nameXMMReg(rG) );
19834 putXMMReg( rG, mkexpr( math_MPSADBW_128(dst_vec, src_vec, imm8) ) );
19840 /* 66 0F 3A 44 /r ib = PCLMULQDQ xmm1, xmm2/m128, imm8
19846 Int imm8;
19856 imm8 = (Int)getUChar(delta+1);
19859 DIP( "pclmulqdq $%d, %s,%s\n", imm8,
19863 1/* imm8 is 1 byte after the amode */ );
19866 imm8 = (Int)getUChar(delta+alen);
19869 imm8, dis_buf, nameXMMReg(rG) );
19872 putXMMReg( rG, mkexpr( math_PCLMULQDQ(dvec, svec, imm8) ) );
19881 /* 66 0F 3A 63 /r ib = PCMPISTRI imm8, xmm2/m128, xmm1
19882 66 0F 3A 62 /r ib = PCMPISTRM imm8, xmm2/m128, xmm1
19883 66 0F 3A 61 /r ib = PCMPESTRI imm8, xmm2/m128, xmm1
19884 66 0F 3A 60 /r ib = PCMPESTRM imm8, xmm2/m128, xmm1
19897 /* 66 0F 3A DF /r ib = AESKEYGENASSIST imm8, xmm2/m128, xmm1 */
21134 case 0xCD: /* INT imm8 */
21328 case 0xE4: /* IN imm8, AL */
21335 case 0xE5: /* IN imm8, eAX */
21377 case 0xE6: /* OUT AL, imm8 */
21384 case 0xE7: /* OUT eAX, imm8 */
22137 case 0xA4: /* SHLDv imm8,Gv,Ev */
22164 case 0xAC: /* SHRDv imm8,Gv,Ev */
22490 case 0x73: /* PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
23474 UInt imm8;
23488 imm8 = getUChar(delta+1);
23489 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
23495 opname, imm8,
23499 imm8 = getUChar(delta+alen);
23500 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
23508 opname, imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
23584 UInt imm8;
23602 imm8 = getUChar(delta+1);
23603 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8,
23610 opname, imm8,
23614 imm8 = getUChar(delta+alen);
23615 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8,
23621 opname, imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
25941 /* VPSHUFD imm8, xmm2/m128, xmm1 = VEX.128.66.0F.WIG 70 /r ib */
25946 /* VPSHUFD imm8, ymm2/m256, ymm1 = VEX.256.66.0F.WIG 70 /r ib */
25951 /* VPSHUFLW imm8, xmm2/m128, xmm1 = VEX.128.F2.0F.WIG 70 /r ib */
25957 /* VPSHUFLW imm8, ymm2/m256, ymm1 = VEX.256.F2.0F.WIG 70 /r ib */
25962 /* VPSHUFHW imm8, xmm2/m128, xmm1 = VEX.128.F3.0F.WIG 70 /r ib */
25968 /* VPSHUFHW imm8, ymm2/m256, ymm1 = VEX.256.F3.0F.WIG 70 /r ib */
25976 /* VPSRLW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /2 ib */
25977 /* VPSRAW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /4 ib */
25978 /* VPSLLW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /6 ib */
26002 /* VPSRLW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /2 ib */
26003 /* VPSRAW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /4 ib */
26004 /* VPSLLW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /6 ib */
26031 /* VPSRLD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /2 ib */
26032 /* VPSRAD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /4 ib */
26033 /* VPSLLD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /6 ib */
26057 /* VPSRLD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /2 ib */
26058 /* VPSRAD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /4 ib */
26059 /* VPSLLD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /6 ib */
26086 /* VPSRLDQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /3 ib */
26087 /* VPSLLDQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /7 ib */
26088 /* VPSRLQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /2 ib */
26089 /* VPSLLQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /6 ib */
26127 /* VPSRLDQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /3 ib */
26128 /* VPSLLDQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /7 ib */
26129 /* VPSRLQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /2 ib */
26130 /* VPSLLQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /6 ib */
26590 Int imm8;
26594 imm8 = (Int)(getUChar(delta+1) & 7);
26598 DIP( "vpinsrw $%d,%s,%s\n", imm8,
26602 imm8 = (Int)(getUChar(delta+alen) & 7);
26606 imm8, dis_buf, nameXMMReg(rG) );
26611 IRTemp res_vec = math_PINSRW_128( src_vec, new16, imm8 );
26619 /* VPEXTRW imm8, xmm1, reg32 = VEX.128.66.0F.W0 C5 /r ib */
26631 /* VSHUFPS imm8, xmm3/m128, xmm2, xmm1, xmm2 */
26634 Int imm8 = 0;
26644 imm8 = (Int)getUChar(delta+1);
26647 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
26651 imm8 = (Int)getUChar(delta+alen);
26654 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
26656 IRTemp res = math_SHUFPS_128( eV, vV, imm8 );
26661 /* VSHUFPS imm8, ymm3/m256, ymm2, ymm1, ymm2 */
26664 Int imm8 = 0;
26674 imm8 = (Int)getUChar(delta+1);
26677 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
26681 imm8 = (Int)getUChar(delta+alen);
26684 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
26686 IRTemp res = math_SHUFPS_256( eV, vV, imm8 );
26691 /* VSHUFPD imm8, xmm3/m128, xmm2, xmm1, xmm2 */
26694 Int imm8 = 0;
26704 imm8 = (Int)getUChar(delta+1);
26707 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
26711 imm8 = (Int)getUChar(delta+alen);
26714 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
26716 IRTemp res = math_SHUFPD_128( eV, vV, imm8 );
26721 /* VSHUFPD imm8, ymm3/m256, ymm2, ymm1, ymm2 */
26724 Int imm8 = 0;
26734 imm8 = (Int)getUChar(delta+1);
26737 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
26741 imm8 = (Int)getUChar(delta+alen);
26744 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
26746 IRTemp res = math_SHUFPD_256( eV, vV, imm8 );
30369 static IRTemp math_VPERMILPS_128 ( IRTemp sV, UInt imm8 )
30371 vassert(imm8 < 256);
30378 assign(res, mkV128from32s( SEL((imm8 >> 6) & 3),
30379 SEL((imm8 >> 4) & 3),
30380 SEL((imm8 >> 2) & 3),
30381 SEL((imm8 >> 0) & 3) ));
30411 /* VPERMQ imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.W1 00 /r ib */
30412 /* VPERMPD imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.W1 01 /r ib */
30416 UInt imm8 = 0;
30423 imm8 = getUChar(delta);
30425 name, imm8, nameYMMReg(rE), nameYMMReg(rG));
30430 imm8 = getUChar(delta);
30432 name, imm8, dis_buf, nameYMMReg(rG));
30441 mkexpr(s[(imm8 >> 6) & 3]),
30442 mkexpr(s[(imm8 >> 4) & 3]),
30443 mkexpr(s[(imm8 >> 2) & 3]),
30444 mkexpr(s[(imm8 >> 0) & 3])));
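The VPERMQ/VPERMPD case above selects each result qword with a 2-bit field of imm8, lowest field for the lowest qword. An illustrative model (vpermq_ref is a made-up name):

#include <stdint.h>

/* Illustrative VPERMQ/VPERMPD: result qword i comes from source
   qword (imm8 >> 2*i) & 3. */
static void vpermq_ref ( uint64_t r[4], const uint64_t s[4],
                         unsigned imm8 )
{
   for (unsigned i = 0; i < 4; i++)
      r[i] = s[(imm8 >> (2 * i)) & 3];
}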
30451 /* VPBLENDD imm8, xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F3A.W0 02 /r ib */
30455 UInt imm8 = 0;
30466 imm8 = getUChar(delta);
30468 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
30473 imm8 = getUChar(delta);
30475 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
30486 putYMMRegLane32(rG, i, mkexpr((imm8 & (1<<i)) ? d[i] : s[i]));
30491 /* VPBLENDD imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F3A.W0 02 /r ib */
30495 UInt imm8 = 0;
30506 imm8 = getUChar(delta);
30508 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30513 imm8 = getUChar(delta);
30515 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30528 putYMMRegLane32(rG, i, mkexpr((imm8 & (1<<i)) ? d[i] : s[i]));
30535 /* VPERMILPS imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.WIG 04 /r ib */
30538 UInt imm8 = 0;
30544 imm8 = getUChar(delta);
30546 imm8, nameYMMReg(rE), nameYMMReg(rG));
30551 imm8 = getUChar(delta);
30553 imm8, dis_buf, nameYMMReg(rG));
30559 IRTemp dVhi = math_VPERMILPS_128( sVhi, imm8 );
30560 IRTemp dVlo = math_VPERMILPS_128( sVlo, imm8 );
30565 /* VPERMILPS imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG 04 /r ib */
30568 UInt imm8 = 0;
30574 imm8 = getUChar(delta);
30576 imm8, nameXMMReg(rE), nameXMMReg(rG));
30581 imm8 = getUChar(delta);
30583 imm8, dis_buf, nameXMMReg(rG));
30587 putYMMRegLoAndZU(rG, mkexpr ( math_VPERMILPS_128 ( sV, imm8 ) ) );
30593 /* VPERMILPD imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG 05 /r ib */
30596 UInt imm8 = 0;
30602 imm8 = getUChar(delta);
30604 imm8, nameXMMReg(rE), nameXMMReg(rG));
30609 imm8 = getUChar(delta);
30611 imm8, dis_buf, nameXMMReg(rG));
30621 mkexpr((imm8 & (1<<1)) ? s1 : s0),
30622 mkexpr((imm8 & (1<<0)) ? s1 : s0)));
30626 /* VPERMILPD imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.WIG 05 /r ib */
30629 UInt imm8 = 0;
30635 imm8 = getUChar(delta);
30637 imm8, nameYMMReg(rE), nameYMMReg(rG));
30642 imm8 = getUChar(delta);
30644 imm8, dis_buf, nameYMMReg(rG));
30653 mkexpr((imm8 & (1<<3)) ? s3 : s2),
30654 mkexpr((imm8 & (1<<2)) ? s3 : s2),
30655 mkexpr((imm8 & (1<<1)) ? s1 : s0),
30656 mkexpr((imm8 & (1<<0)) ? s1 : s0)));
30663 /* VPERM2F128 imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.66.0F3A.W0 06 /r ib */
30667 UInt imm8 = 0;
30679 imm8 = getUChar(delta);
30681 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30687 imm8 = getUChar(delta);
30689 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30698 putYMMRegLane128(rG, 0, mkexpr(SEL((imm8 >> 0) & 3)));
30699 putYMMRegLane128(rG, 1, mkexpr(SEL((imm8 >> 4) & 3)));
30701 if (imm8 & (1<<3)) putYMMRegLane128(rG, 0, mkV128(0));
30702 if (imm8 & (1<<7)) putYMMRegLane128(rG, 1, mkV128(0));
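VPERM2F128 above treats the two 256-bit inputs as four candidate 128-bit lanes (low and high of the first source, then low and high of the second): imm8 bits 1:0 and 5:4 pick the low and high result lanes, and bits 3 and 7 force the corresponding lane to zero. An illustrative model (Lane128 and vperm2_ref are made-up names; v is the register source, e the reg/mem source):

#include <stdint.h>

typedef struct { uint64_t w[2]; } Lane128;

/* Illustrative VPERM2F128/VPERM2I128 lane selection. */
static void vperm2_ref ( Lane128 r[2], const Lane128 v[2],
                         const Lane128 e[2], unsigned imm8 )
{
   const Lane128 zero    = { { 0, 0 } };
   const Lane128 cand[4] = { v[0], v[1], e[0], e[1] };
   r[0] = (imm8 & (1u << 3)) ? zero : cand[(imm8 >> 0) & 3];
   r[1] = (imm8 & (1u << 7)) ? zero : cand[(imm8 >> 4) & 3];
}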
30709 /* VROUNDPS imm8, xmm2/m128, xmm1 */
30757 /* VROUNDPS imm8, ymm2/m256, ymm1 */
30814 /* VROUNDPD imm8, xmm2/m128, xmm1 */
30858 /* VROUNDPD imm8, ymm2/m256, ymm1 */
30908 /* VROUNDSS imm8, xmm3/m32, xmm2, xmm1 */
30910 /* VROUNDSD imm8, xmm3/m64, xmm2, xmm1 */
30965 /* VBLENDPS imm8, ymm3/m256, ymm2, ymm1 */
30969 UInt imm8;
30978 imm8 = getUChar(delta);
30980 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30985 imm8 = getUChar(delta);
30987 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30992 mkexpr( math_BLENDPS_256( sE, sV, imm8) ) );
30996 /* VBLENDPS imm8, xmm3/m128, xmm2, xmm1 */
31000 UInt imm8;
31009 imm8 = getUChar(delta);
31011 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
31016 imm8 = getUChar(delta);
31018 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
31023 mkexpr( math_BLENDPS_128( sE, sV, imm8) ) );
31030 /* VBLENDPD imm8, ymm3/m256, ymm2, ymm1 */
31034 UInt imm8;
31043 imm8 = getUChar(delta);
31045 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
31050 imm8 = getUChar(delta);
31052 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
31057 mkexpr( math_BLENDPD_256( sE, sV, imm8) ) );
31061 /* VBLENDPD imm8, xmm3/m128, xmm2, xmm1 */
31065 UInt imm8;
31074 imm8 = getUChar(delta);
31076 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
31081 imm8 = getUChar(delta);
31083 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
31088 mkexpr( math_BLENDPD_128( sE, sV, imm8) ) );
31095 /* VPBLENDW imm8, xmm3/m128, xmm2, xmm1 */
31099 UInt imm8;
31108 imm8 = getUChar(delta);
31110 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
31115 imm8 = getUChar(delta);
31117 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
31122 mkexpr( math_PBLENDW_128( sE, sV, imm8) ) );
31126 /* VPBLENDW imm8, ymm3/m256, ymm2, ymm1 */
31130 UInt imm8;
31141 imm8 = getUChar(delta);
31143 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
31148 imm8
31150 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
31157 mkexpr( math_PBLENDW_128( sEhi, sVhi, imm8) ),
31158 mkexpr( math_PBLENDW_128( sElo, sVlo, imm8) ) ) );
31165 /* VPALIGNR imm8, xmm3/m128, xmm2, xmm1 */
31173 UInt imm8;
31180 imm8 = getUChar(delta+1);
31182 DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameXMMReg(rE),
31187 imm8 = getUChar(delta+alen);
31189 DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
31193 IRTemp res = math_PALIGNR_XMM( sV, dV, imm8 );
31198 /* VPALIGNR imm8, ymm3/m256, ymm2, ymm1 */
31208 UInt imm8;
31215 imm8 = getUChar(delta+1);
31217 DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameYMMReg(rE),
31222 imm8 = getUChar(delta+alen);
31224 DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
31231 mkexpr( math_PALIGNR_XMM( sHi, dHi, imm8 ) ),
31232 mkexpr( math_PALIGNR_XMM( sLo, dLo, imm8 ) ) )
31240 /* VPEXTRB imm8, xmm2, reg/m8 = VEX.128.66.0F3A.W0 14 /r ib */
31249 /* VPEXTRW imm8, reg/m16, xmm2 */
31259 /* VPEXTRD imm8, r32/m32, xmm2 */
31275 /* VEXTRACTPS imm8, xmm1, r32/m32 = VEX.128.66.0F3A.WIG 17 /r ib */
31357 Int imm8;
31362 imm8 = (Int)(getUChar(delta+1) & 15);
31366 imm8, nameIReg32(rE), nameXMMReg(rV), nameXMMReg(rG) );
31369 imm8 = (Int)(getUChar(delta+alen) & 15);
31373 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31378 IRTemp res_vec = math_PINSRB_128( src_vec, src_u8, imm8 );
31386 /* VINSERTPS imm8, xmm3/m32, xmm2, xmm1
31392 UInt imm8;
31402 imm8 = getUChar(delta+1);
31403 d2ins = dsE[(imm8 >> 6) & 3]; /* "imm8_count_s" */
31406 imm8, nameXMMReg(rE), nameXMMReg(rG) );
31410 imm8 = getUChar(delta+alen);
31413 imm8, dis_buf, nameXMMReg(rG) );
31419 putYMMRegLoAndZU( rG, mkexpr(math_INSERTPS( vV, d2ins, imm8 )) );
31561 /* VDPPS imm8, xmm3/m128,xmm2,xmm1 = VEX.NDS.128.66.0F3A.WIG 40 /r ib */
31567 Int imm8;
31570 imm8 = (Int)getUChar(delta+1);
31574 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
31577 imm8 = (Int)getUChar(delta+alen);
31581 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31586 IRTemp res_vec = math_DPPS_128( src_vec, dst_vec, imm8 );
31591 /* VDPPS imm8, ymm3/m128,ymm2,ymm1 = VEX.NDS.256.66.0F3A.WIG 40 /r ib */
31597 Int imm8;
31600 imm8 = (Int)getUChar(delta+1);
31604 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG) );
31607 imm8 = (Int)getUChar(delta+alen);
31611 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG) );
31621 mkexpr( math_DPPS_128(s1, d1, imm8) ),
31622 mkexpr( math_DPPS_128(s0, d0, imm8) ) ) );
31629 /* VDPPD imm8, xmm3/m128,xmm2,xmm1 = VEX.NDS.128.66.0F3A.WIG 41 /r ib */
31635 Int imm8;
31638 imm8 = (Int)getUChar(delta+1);
31642 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
31645 imm8 = (Int)getUChar(delta+alen);
31649 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31654 IRTemp res_vec = math_DPPD_128( src_vec, dst_vec, imm8 );
31662 /* VMPSADBW imm8, xmm3/m128,xmm2,xmm1 */
31666 Int imm8;
31677 imm8 = (Int)getUChar(delta+1);
31680 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31684 1/* imm8 is 1 byte after the amode */ );
31686 imm8 = (Int)getUChar(delta+alen);
31688 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31693 src_vec, imm8) ) );
31697 /* VMPSADBW imm8, ymm3/m256,ymm2,ymm1 */
31701 Int imm8;
31714 imm8 = (Int)getUChar(delta+1);
31717 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31721 1/* imm8 is 1 byte after the amode */ );
31723 imm8 = (Int)getUChar(delta+alen);
31725 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31732 mkexpr( math_MPSADBW_128(dHi, sHi, imm8 >> 3) ),
31733 mkexpr( math_MPSADBW_128(dLo, sLo, imm8) ) ) );
31740 /* VPCLMULQDQ imm8, xmm3/m128,xmm2,xmm1 */
31742 /* 66 0F 3A 44 /r ib = PCLMULQDQ xmm1, xmm2/m128, imm8
31748 Int imm8;
31758 imm8 = (Int)getUChar(delta+1);
31761 DIP( "vpclmulqdq $%d, %s,%s,%s\n", imm8,
31765 1/* imm8 is 1 byte after the amode */ );
31767 imm8 = (Int)getUChar(delta+alen);
31770 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31773 putYMMRegLoAndZU( rG, mkexpr( math_PCLMULQDQ(dV, sV, imm8) ) );
31780 /* VPERM2I128 imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.66.0F3A.W0 46 /r ib */
31784 UInt imm8 = 0;
31796 imm8 = getUChar(delta);
31798 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
31804 imm8 = getUChar(delta);
31806 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
31815 putYMMRegLane128(rG, 0, mkexpr(SEL((imm8 >> 0) & 3)));
31816 putYMMRegLane128(rG, 1, mkexpr(SEL((imm8 >> 4) & 3)));
31818 if (imm8 & (1<<3)) putYMMRegLane128(rG, 0, mkV128(0));
31819 if (imm8 & (1<<7)) putYMMRegLane128(rG, 1, mkV128(0));
31892 /* VEX.128.66.0F3A.WIG 63 /r ib = VPCMPISTRI imm8, xmm2/m128, xmm1
31893 VEX.128.66.0F3A.WIG 62 /r ib = VPCMPISTRM imm8, xmm2/m128, xmm1
31894 VEX.128.66.0F3A.WIG 61 /r ib = VPCMPESTRI imm8, xmm2/m128, xmm1
31895 VEX.128.66.0F3A.WIG 60 /r ib = VPCMPESTRM imm8, xmm2/m128, xmm1
31923 /* VAESKEYGENASSIST imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG DF /r */
31931 /* RORX imm8, r/m32, r32a = VEX.LZ.F2.0F3A.W0 F0 /r /i */
31932 /* RORX imm8, r/m64, r64a = VEX.LZ.F2.0F3A.W1 F0 /r /i */
31938 UChar imm8;
31941 imm8 = getUChar(delta+1);
31943 DIP("rorx %d,%s,%s\n", imm8, nameIRegE(size,pfx,rm),
31948 imm8 = getUChar(delta+alen);
31950 DIP("rorx %d,%s,%s\n", imm8, dis_buf, nameIRegG(size,pfx,rm));
31953 imm8 &= 8*size-1;
31955 /* dst = (src >>u imm8) | (src << (size-imm8)) */
31957 imm8 == 0 ? mkexpr(src)
31960 mkU8(imm8) ),
31962 mkU8(8*size-imm8) ) ) );
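The RORX decode above masks imm8 to the operand width and materialises the rotate as a shift pair, special-casing imm8 == 0 to avoid a shift by the full width. A 64-bit reference of the same computation (rorx64_ref is an illustrative name):

#include <stdint.h>

/* Illustrative RORX (64-bit form): rotate right by imm8 with the
   count masked to 63; unlike ROR, no flags are written. */
static uint64_t rorx64_ref ( uint64_t src, unsigned imm8 )
{
   imm8 &= 63;
   return imm8 == 0 ? src
                    : (src >> imm8) | (src << (64 - imm8));
}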