
Lines Matching refs:imm8

7653          /* (sz==4): PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
9090 UInt imm8, Bool all_lanes, Int sz )
9092 if (imm8 >= 32) return False;
9095 the supplied imm8. */
9105 switch (imm8) {
9160 /* Don't forget to add test cases to VCMPSS_128_<imm8> in
9225 UInt imm8;
9235 imm8 = getUChar(delta+1);
9236 if (imm8 >= 8) return delta0; /* FAIL */
9237 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
9239 vassert(!preSwap); /* never needed for imm8 < 8 */
9244 imm8,
9249 imm8 = getUChar(delta+alen);
9250 if (imm8 >= 8) return delta0; /* FAIL */
9251 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
9253 vassert(!preSwap); /* never needed for imm8 < 8 */
9268 imm8,
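
The imm8 handed to findSSECmpOp selects the comparison predicate for CMPPS/CMPPD/CMPSS/CMPSD: legacy SSE only defines values 0..7, while the VCMP* forms extend the range to 31 (hence the imm8 >= 32 and imm8 >= 8 checks above). A minimal sketch of the legacy mapping, following the architectural encoding; the helper name is invented and not part of this file:

/* SSE compare predicate selected by imm8 (legacy range 0..7).  Values
   8..31 are AVX-only and may need the preSwap treatment noted above. */
static const char *sse_cmp_name ( unsigned imm8 )
{
   switch (imm8 & 7) {
      case 0: return "eq";     /* equal, ordered                */
      case 1: return "lt";     /* less-than, ordered            */
      case 2: return "le";     /* less-or-equal, ordered        */
      case 3: return "unord";  /* unordered (either is NaN)     */
      case 4: return "neq";    /* not-equal, unordered          */
      case 5: return "nlt";    /* not-less-than, unordered      */
      case 6: return "nle";    /* not-less-or-equal, unordered  */
      case 7: return "ord";    /* ordered (neither is NaN)      */
   }
   return "?";
}
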
9824 static IRTemp math_PALIGNR_XMM ( IRTemp sV, IRTemp dV, UInt imm8 )
9839 if (imm8 == 0) {
9843 else if (imm8 >= 1 && imm8 <= 7) {
9844 assign( rHi, dis_PALIGNR_XMM_helper(dLo, sHi, imm8) );
9845 assign( rLo, dis_PALIGNR_XMM_helper(sHi, sLo, imm8) );
9847 else if (imm8 == 8) {
9851 else if (imm8 >= 9 && imm8 <= 15) {
9852 assign( rHi, dis_PALIGNR_XMM_helper(dHi, dLo, imm8-8) );
9853 assign( rLo, dis_PALIGNR_XMM_helper(dLo, sHi, imm8-8) );
9855 else if (imm8 == 16) {
9859 else if (imm8 >= 17 && imm8 <= 23) {
9860 assign( rHi, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(imm8-16))) );
9861 assign( rLo, dis_PALIGNR_XMM_helper(dHi, dLo, imm8-16) );
9863 else if (imm8 == 24) {
9867 else if (imm8 >= 25 && imm8 <= 31) {
9869 assign( rLo, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(imm8-24))) );
9871 else if (imm8 >= 32 && imm8 <= 255) {
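
math_PALIGNR_XMM cases on imm8 ranges so it can work with 64-bit halves, but the architectural effect is a single byte-granular shift of the concatenated pair: the result is the low 16 bytes of (dst:src) >> (8*imm8), with dst as the high half and any imm8 >= 32 giving zero. A byte-array reference model, assuming little-endian lane order; not taken from this file:

#include <string.h>

/* PALIGNR reference: res = low 16 bytes of ((dst:src) >> 8*imm8).
   src supplies bytes 0..15 of the 32-byte concatenation, dst bytes 16..31. */
static void palignr_ref ( unsigned char res[16],
                          const unsigned char dst[16],
                          const unsigned char src[16], unsigned imm8 )
{
   unsigned char cat[32];
   memcpy(cat,      src, 16);   /* low half  */
   memcpy(cat + 16, dst, 16);   /* high half */
   for (unsigned i = 0; i < 16; i++)
      res[i] = (imm8 + i < 32) ? cat[imm8 + i] : 0;
}
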
9946 80 /0 = addb $imm8, rm8
9948 82 /0 = addb $imm8, rm8
9969 0F BA /7 = btcw $imm8, rm16 and btcw $imm8, rm32
10968 static IRTemp math_SHUFPS_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
10972 vassert(imm8 < 256);
10981 mkV128from32s( SELS((imm8>>6)&3), SELS((imm8>>4)&3),
10982 SELD((imm8>>2)&3), SELD((imm8>>0)&3) ) );
10992 static IRTemp math_SHUFPS_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
10998 IRTemp rVhi = math_SHUFPS_128(sVhi, dVhi, imm8);
10999 IRTemp rVlo = math_SHUFPS_128(sVlo, dVlo, imm8);
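
math_SHUFPS_128 builds the result from four 2-bit selectors: the two low result lanes come from dst, the two high ones from src. The 256-bit wrapper above simply applies the same imm8 to each 128-bit half. A per-lane reference model, following the architectural definition; the helper name is hypothetical:

/* SHUFPS reference on one 128-bit lane. */
static void shufps_ref ( float res[4], const float dst[4],
                         const float src[4], unsigned imm8 )
{
   res[0] = dst[(imm8 >> 0) & 3];
   res[1] = dst[(imm8 >> 2) & 3];
   res[2] = src[(imm8 >> 4) & 3];
   res[3] = src[(imm8 >> 6) & 3];
}
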
11006 static IRTemp math_SHUFPD_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11023 SELS((imm8>>1)&1), SELD((imm8>>0)&1) ) );
11031 static IRTemp math_SHUFPD_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11037 IRTemp rVhi = math_SHUFPD_128(sVhi, dVhi, (imm8 >> 2) & 3);
11038 IRTemp rVlo = math_SHUFPD_128(sVlo, dVlo, imm8 & 3);
11045 static IRTemp math_BLENDPD_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11050 switch( imm8 & 3 ) {
11069 static IRTemp math_BLENDPD_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11075 IRTemp rVhi = math_BLENDPD_128(sVhi, dVhi, (imm8 >> 2) & 3);
11076 IRTemp rVlo = math_BLENDPD_128(sVlo, dVlo, imm8 & 3);
11083 static IRTemp math_BLENDPS_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11090 assign( imm8_mask, mkV128( imm8_perms[ (imm8 & 15) ] ) );
11102 static IRTemp math_BLENDPS_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
11108 IRTemp rVhi = math_BLENDPS_128(sVhi, dVhi, (imm8 >> 4) & 15);
11109 IRTemp rVlo = math_BLENDPS_128(sVlo, dVlo, imm8 & 15);
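
The BLEND helpers treat imm8 as a per-lane select mask: bit i chooses src (set) or dst (clear) for lane i, with BLENDPD using 2 bits per 128-bit half and BLENDPS using 4, which is why the 256-bit wrappers above slice imm8 into (imm8 >> 2) & 3 and (imm8 >> 4) & 15 pieces. A sketch of the 128-bit BLENDPS case, with a hypothetical helper name:

/* BLENDPS reference: bit i of imm8 picks src[i] (1) or dst[i] (0). */
static void blendps_ref ( float res[4], const float dst[4],
                          const float src[4], unsigned imm8 )
{
   for (int i = 0; i < 4; i++)
      res[i] = (imm8 & (1u << i)) ? src[i] : dst[i];
}
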
11116 static IRTemp math_PBLENDW_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
11118 /* Make w be a 16-bit version of imm8, formed by duplicating each
11119 bit in imm8. */
11123 if (imm8 & (1 << i))
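
PBLENDW has one imm8 bit per 16-bit lane, but the V128 mask machinery is byte-granular, so each bit is duplicated into the two mask bits covering its lane. A standalone sketch of that expansion (hypothetical helper, not the file's loop verbatim):

/* Expand the 8-bit per-word mask into a 16-bit per-byte mask. */
static unsigned short pblendw_mask16 ( unsigned imm8 )
{
   unsigned short w = 0;
   for (int i = 0; i < 8; i++)
      if (imm8 & (1u << i))
         w |= (unsigned short)(3u << (2 * i));
   return w;
}
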
11334 UInt imm8;
11345 imm8 = (UInt)getUChar(delta+1);
11349 imm8, nameXMMReg(rE), nameXMMReg(rG));
11353 imm8 = (UInt)getUChar(delta+alen);
11357 imm8, dis_buf, nameXMMReg(rG));
11368 assign(dVmut, mk64from16s( SEL((imm8>>6)&3), SEL((imm8>>4)&3),
11369 SEL((imm8>>2)&3), SEL((imm8>>0)&3) ));
11389 UInt imm8;
11399 imm8 = (UInt)getUChar(delta+1);
11402 imm8, nameYMMReg(rE), nameYMMReg(rG));
11406 imm8 = (UInt)getUChar(delta+alen);
11409 imm8, dis_buf, nameYMMReg(rG));
11416 assign( dVhi, mk64from16s( s[4 + ((imm8>>6)&3)], s[4 + ((imm8>>4)&3)],
11417 s[4 + ((imm8>>2)&3)], s[4 + ((imm8>>0)&3)] ) );
11418 assign( dVlo, mk64from16s( s[0 + ((imm8>>6)&3)], s[0 + ((imm8>>4)&3)],
11419 s[0 + ((imm8>>2)&3)], s[0 + ((imm8>>0)&3)] ) );
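
The word shuffles around here (PSHUFLW/PSHUFHW and their wider forms) all follow the same pattern: four 2-bit imm8 fields each pick one 16-bit word of a 64-bit group, while the other group of the vector is copied through unchanged. A reference model for one group, with a hypothetical helper name:

/* PSHUFLW/PSHUFHW reference on one 64-bit group of four words. */
static void pshufw64_ref ( unsigned short res[4],
                           const unsigned short src[4], unsigned imm8 )
{
   for (int i = 0; i < 4; i++)
      res[i] = src[(imm8 >> (2 * i)) & 3];
}
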
11436 UInt imm8;
11441 imm8 = getUChar(delta+1) & 7;
11444 imm8, nameXMMReg(rE), nameIReg32(rG));
11451 switch (imm8) {
12078 static IRTemp math_PINSRW_128 ( IRTemp v128, IRTemp u16, UInt imm8 )
12080 vassert(imm8 >= 0 && imm8 <= 7);
12088 mkU8(16 * (imm8 & 3))));
12089 if (imm8 < 4) {
12095 UShort mask = ~(3 << (imm8 * 2));
14355 Int imm8 = 0;
14364 imm8 = (Int)getUChar(delta+1);
14366 DIP("shufps $%d,%s,%s\n", imm8, nameXMMReg(rE), nameXMMReg(rG));
14370 imm8 = (Int)getUChar(delta+alen);
14372 DIP("shufps $%d,%s,%s\n", imm8, dis_buf, nameXMMReg(rG));
14374 IRTemp res = math_SHUFPS_128( sV, dV, imm8 );
18751 static IRTemp math_PINSRB_128 ( IRTemp v128, IRTemp u8, UInt imm8 )
18753 vassert(imm8 >= 0 && imm8 <= 15);
18761 mkU8(8 * (imm8 & 7))));
18762 if (imm8 < 8) {
18768 UShort mask = ~(1 << imm8);
18777 static IRTemp math_PINSRD_128 ( IRTemp v128, IRTemp u32, UInt imm8 )
18786 switch (imm8) {
18810 static IRTemp math_PINSRQ_128 ( IRTemp v128, IRTemp u64, UInt imm8 )
18816 if (imm8 == 0) {
18820 vassert(imm8 == 1);
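
The PINSR family (math_PINSRW_128 above, math_PINSRB_128/math_PINSRD_128/math_PINSRQ_128 here) all reduce to "overwrite lane (imm8 mod lane-count) with the new value"; the IR builds that by shifting the value into position and merging under a lane mask such as ~(1 << imm8) or ~(3 << (imm8 * 2)). A one-line reference model for the byte case, with a hypothetical helper name:

/* PINSRB reference: byte lane (imm8 & 15) receives the new value. */
static void pinsrb_ref ( unsigned char v[16], unsigned char u8, unsigned imm8 )
{
   v[imm8 & 15] = u8;
}
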
18833 static IRTemp math_INSERTPS ( IRTemp dstV, IRTemp toInsertD, UInt imm8 )
18839 vassert(imm8 <= 255);
18840 dstDs[(imm8 >> 4) & 3] = toInsertD; /* "imm8_count_d" */
18842 UInt imm8_zmask = (imm8 & 15);
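
math_INSERTPS decodes three imm8 fields: bits 7:6 ("imm8_count_s") pick the source dword when the source is a register, bits 5:4 ("imm8_count_d") pick the destination dword to overwrite, and each set bit of the low nibble zeroes a result dword. In the memory forms of the decoders, the loaded 32 bits are used directly and the count_s field plays no part. A register-form reference model, following the architectural definition; the helper name is invented:

/* INSERTPS reference, register-source form. */
static void insertps_ref ( float dst[4], const float src[4], unsigned imm8 )
{
   dst[(imm8 >> 4) & 3] = src[(imm8 >> 6) & 3];
   for (int i = 0; i < 4; i++)
      if (imm8 & (1u << i))
         dst[i] = 0.0f;   /* zmask */
}
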
18867 Int imm8;
18873 imm8 = (Int)getUChar(delta+1);
18876 imm8 = (Int)getUChar(delta+alen);
18878 switch ( (imm8 >> 2) & 3 ) {
18886 binop( Iop_Shr32, mkexpr(sel_lane), mkU8(((imm8 & 3)*8)) ) );
18893 DIP( "%spextrb $%d, %s,%s\n", mbV, imm8,
18900 imm8, nameXMMReg( gregOfRexRM(pfx, modrm) ), dis_buf );
18907 static IRTemp math_DPPD_128 ( IRTemp src_vec, IRTemp dst_vec, UInt imm8 )
18909 vassert(imm8 < 256);
18919 mkV128( imm8_perms[ ((imm8 >> 4) & 3) ] ) ) );
18930 mkV128( imm8_perms[ (imm8 & 3) ] ) ) );
18935 static IRTemp math_DPPS_128 ( IRTemp src_vec, IRTemp dst_vec, UInt imm8 )
18937 vassert(imm8 < 256);
18954 mkV128( imm8_perms[((imm8 >> 4)& 15)] ) ) );
18973 mkV128( imm8_perms[ (imm8 & 15) ] ) ) );
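
math_DPPD_128 and math_DPPS_128 use imm8 as two masks: the high field selects which element-wise products enter the dot-product sum, the low field selects which result lanes receive that sum (the rest become zero). A DPPS reference model, following the architectural definition, with a hypothetical helper name; DPPD is the two-element analogue using imm8 bits 5:4 and 1:0:

/* DPPS reference. */
static void dpps_ref ( float res[4], const float dst[4],
                       const float src[4], unsigned imm8 )
{
   float sum = 0.0f;
   for (int i = 0; i < 4; i++)
      if (imm8 & (1u << (4 + i)))
         sum += dst[i] * src[i];
   for (int i = 0; i < 4; i++)
      res[i] = (imm8 & (1u << i)) ? sum : 0.0f;
}
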
18978 static IRTemp math_MPSADBW_128 ( IRTemp dst_vec, IRTemp src_vec, UInt imm8 )
18989 assign(src_maskV, mkV128( src_mask[ imm8 & 3 ] ));
18990 assign(dst_maskV, mkV128( dst_mask[ (imm8 >> 2) & 1 ] ));
19014 mkU64( 0x80 | (imm8 & 7) ));
19017 mkU64( 0x00 | (imm8 & 7) ));
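
In math_MPSADBW_128, imm8 bits 1:0 select a fixed 4-byte block of the source and bit 2 selects which 8-byte window of the destination slides over it; each 16-bit result is a sum of four absolute byte differences. The 256-bit decoder further down passes imm8 >> 3 for the upper lane, since the wide form keeps a second copy of these fields in bits 5:3. A reference model, following the architectural definition; the helper name is invented:

/* MPSADBW reference. */
static void mpsadbw_ref ( unsigned short res[8],
                          const unsigned char dst[16],
                          const unsigned char src[16], unsigned imm8 )
{
   unsigned src_off = (imm8 & 3) * 4;
   unsigned dst_off = ((imm8 >> 2) & 1) * 4;
   for (int i = 0; i < 8; i++) {
      unsigned sum = 0;
      for (int j = 0; j < 4; j++) {
         int d = (int)dst[dst_off + i + j] - (int)src[src_off + j];
         sum += (unsigned)(d < 0 ? -d : d);
      }
      res[i] = (unsigned short)sum;
   }
}
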
19080 static IRTemp math_PCLMULQDQ( IRTemp dV, IRTemp sV, UInt imm8 )
19084 assign(t0, unop((imm8&1)? Iop_V128HIto64 : Iop_V128to64,
19086 assign(t1, unop((imm8&16) ? Iop_V128HIto64 : Iop_V128to64,
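
math_PCLMULQDQ uses imm8 bit 0 to pick the low or high quadword of the first operand and bit 4 for the second operand, then carry-less multiplies the two selected 64-bit values into a 128-bit result. A reference model of the 64x64 carry-less multiply itself, with a hypothetical helper name:

/* GF(2) 64x64 -> 128-bit carry-less multiply: res[0] low, res[1] high. */
static void clmul64_ref ( unsigned long long a, unsigned long long b,
                          unsigned long long res[2] )
{
   res[0] = res[1] = 0;
   for (int i = 0; i < 64; i++) {
      if (b & (1ULL << i)) {
         res[0] ^= a << i;
         if (i != 0)
            res[1] ^= a >> (64 - i);
      }
   }
}
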
19126 /* 66 0F 3A 08 /r ib = ROUNDPS imm8, xmm2/m128, xmm1 */
19196 /* 66 0F 3A 09 /r ib = ROUNDPD imm8, xmm2/m128, xmm1 */
19251 /* 66 0F 3A 0A /r ib = ROUNDSS imm8, xmm2/m32, xmm1
19252 66 0F 3A 0B /r ib = ROUNDSD imm8, xmm2/m64, xmm1
19304 /* 66 0F 3A 0C /r ib = BLENDPS xmm1, xmm2/m128, imm8
19308 Int imm8;
19317 imm8 = (Int)getUChar(delta+1);
19320 DIP( "blendps $%d, %s,%s\n", imm8,
19325 1/* imm8 is 1 byte after the amode */ );
19328 imm8 = (Int)getUChar(delta+alen);
19331 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
19335 mkexpr( math_BLENDPS_128( src_vec, dst_vec, imm8) ) );
19341 /* 66 0F 3A 0D /r ib = BLENDPD xmm1, xmm2/m128, imm8
19345 Int imm8;
19353 imm8 = (Int)getUChar(delta+1);
19356 DIP( "blendpd $%d, %s,%s\n", imm8,
19361 1/* imm8 is 1 byte after the amode */ );
19364 imm8 = (Int)getUChar(delta+alen);
19367 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
19371 mkexpr( math_BLENDPD_128( src_vec, dst_vec, imm8) ) );
19377 /* 66 0F 3A 0E /r ib = PBLENDW xmm1, xmm2/m128, imm8
19381 Int imm8;
19390 imm8 = (Int)getUChar(delta+1);
19393 DIP( "pblendw $%d, %s,%s\n", imm8,
19398 1/* imm8 is 1 byte after the amode */ );
19401 imm8 = (Int)getUChar(delta+alen);
19404 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
19408 mkexpr( math_PBLENDW_128( src_vec, dst_vec, imm8) ) );
19414 /* 66 0F 3A 14 /r ib = PEXTRB r/m16, xmm, imm8
19424 /* 66 0F 3A 15 /r ib = PEXTRW r/m16, xmm, imm8
19434 /* 66 no-REX.W 0F 3A 16 /r ib = PEXTRD reg/mem32, xmm2, imm8
19443 /* 66 REX.W 0F 3A 16 /r ib = PEXTRQ reg/mem64, xmm2, imm8
19455 /* 66 0F 3A 17 /r ib = EXTRACTPS reg/mem32, xmm2, imm8 Extract
19467 /* 66 0F 3A 20 /r ib = PINSRB xmm1, r32/m8, imm8
19470 Int imm8;
19476 imm8 = (Int)(getUChar(delta+1) & 0xF);
19479 DIP( "pinsrb $%d,%s,%s\n", imm8,
19483 imm8 = (Int)(getUChar(delta+alen) & 0xF);
19487 imm8, dis_buf, nameXMMReg(rG) );
19491 IRTemp res = math_PINSRB_128( src_vec, new8, imm8 );
19498 /* 66 0F 3A 21 /r ib = INSERTPS imm8, xmm2/m32, xmm1
19501 UInt imm8;
19514 imm8 = getUChar(delta+1);
19515 d2ins = dsE[(imm8 >> 6) & 3]; /* "imm8_count_s" */
19518 imm8, nameXMMReg(rE), nameXMMReg(rG) );
19522 imm8 = getUChar(delta+alen);
19525 imm8, dis_buf, nameXMMReg(rG) );
19531 putXMMReg( rG, mkexpr(math_INSERTPS( vG, d2ins, imm8 )) );
19537 /* 66 no-REX.W 0F 3A 22 /r ib = PINSRD xmm1, r/m32, imm8
19568 /* 66 REX.W 0F 3A 22 /r ib = PINSRQ xmm1, r/m64, imm8
19602 /* 66 0F 3A 40 /r ib = DPPS xmm1, xmm2/m128, imm8
19606 Int imm8;
19613 imm8 = (Int)getUChar(delta+1);
19617 imm8, nameXMMReg(rE), nameXMMReg(rG) );
19620 1/* imm8 is 1 byte after the amode */ );
19623 imm8 = (Int)getUChar(delta+alen);
19626 imm8, dis_buf, nameXMMReg(rG) );
19628 IRTemp res = math_DPPS_128( src_vec, dst_vec, imm8 );
19635 /* 66 0F 3A 41 /r ib = DPPD xmm1, xmm2/m128, imm8
19639 Int imm8;
19646 imm8 = (Int)getUChar(delta+1);
19650 imm8, nameXMMReg(rE), nameXMMReg(rG) );
19653 1/* imm8 is 1 byte after the amode */ );
19656 imm8 = (Int)getUChar(delta+alen);
19659 imm8, dis_buf, nameXMMReg(rG) );
19661 IRTemp res = math_DPPD_128( src_vec, dst_vec, imm8 );
19668 /* 66 0F 3A 42 /r ib = MPSADBW xmm1, xmm2/m128, imm8
19671 Int imm8;
19682 imm8 = (Int)getUChar(delta+1);
19685 DIP( "mpsadbw $%d, %s,%s\n", imm8,
19689 1/* imm8 is 1 byte after the amode */ );
19692 imm8 = (Int)getUChar(delta+alen);
19694 DIP( "mpsadbw $%d, %s,%s\n", imm8, dis_buf, nameXMMReg(rG) );
19697 putXMMReg( rG, mkexpr( math_MPSADBW_128(dst_vec, src_vec, imm8) ) );
19703 /* 66 0F 3A 44 /r ib = PCLMULQDQ xmm1, xmm2/m128, imm8
19709 Int imm8;
19719 imm8 = (Int)getUChar(delta+1);
19722 DIP( "pclmulqdq $%d, %s,%s\n", imm8,
19726 1/* imm8 is 1 byte after the amode */ );
19729 imm8 = (Int)getUChar(delta+alen);
19732 imm8, dis_buf, nameXMMReg(rG) );
19735 putXMMReg( rG, mkexpr( math_PCLMULQDQ(dvec, svec, imm8) ) );
19744 /* 66 0F 3A 63 /r ib = PCMPISTRI imm8, xmm2/m128, xmm1
19745 66 0F 3A 62 /r ib = PCMPISTRM imm8, xmm2/m128, xmm1
19746 66 0F 3A 61 /r ib = PCMPESTRI imm8, xmm2/m128, xmm1
19747 66 0F 3A 60 /r ib = PCMPESTRM imm8, xmm2/m128, xmm1
19760 /* 66 0F 3A DF /r ib = AESKEYGENASSIST imm8, xmm2/m128, xmm1 */
20995 case 0xCD: /* INT imm8 */
21189 case 0xE4: /* IN imm8, AL */
21196 case 0xE5: /* IN imm8, eAX */
21238 case 0xE6: /* OUT AL, imm8 */
21245 case 0xE7: /* OUT eAX, imm8 */
21989 case 0xA4: /* SHLDv imm8,Gv,Ev */
22016 case 0xAC: /* SHRDv imm8,Gv,Ev */
22342 case 0x73: /* PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
23293 UInt imm8;
23307 imm8 = getUChar(delta+1);
23308 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
23314 opname, imm8,
23318 imm8 = getUChar(delta+alen);
23319 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
23327 opname, imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
23403 UInt imm8;
23421 imm8 = getUChar(delta+1);
23422 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8,
23429 opname, imm8,
23433 imm8 = getUChar(delta+alen);
23434 Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8,
23440 opname, imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
25760 /* VPSHUFD imm8, xmm2/m128, xmm1 = VEX.128.66.0F.WIG 70 /r ib */
25765 /* VPSHUFD imm8, ymm2/m256, ymm1 = VEX.256.66.0F.WIG 70 /r ib */
25770 /* VPSHUFLW imm8, xmm2/m128, xmm1 = VEX.128.F2.0F.WIG 70 /r ib */
25776 /* VPSHUFLW imm8, ymm2/m256, ymm1 = VEX.256.F2.0F.WIG 70 /r ib */
25781 /* VPSHUFHW imm8, xmm2/m128, xmm1 = VEX.128.F3.0F.WIG 70 /r ib */
25787 /* VPSHUFHW imm8, ymm2/m256, ymm1 = VEX.256.F3.0F.WIG 70 /r ib */
25795 /* VPSRLW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /2 ib */
25796 /* VPSRAW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /4 ib */
25797 /* VPSLLW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /6 ib */
25821 /* VPSRLW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /2 ib */
25822 /* VPSRAW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /4 ib */
25823 /* VPSLLW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /6 ib */
25850 /* VPSRLD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /2 ib */
25851 /* VPSRAD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /4 ib */
25852 /* VPSLLD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /6 ib */
25876 /* VPSRLD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /2 ib */
25877 /* VPSRAD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /4 ib */
25878 /* VPSLLD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /6 ib */
25905 /* VPSRLDQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /3 ib */
25906 /* VPSLLDQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /7 ib */
25907 /* VPSRLQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /2 ib */
25908 /* VPSLLQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /6 ib */
25946 /* VPSRLDQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /3 ib */
25947 /* VPSLLDQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /7 ib */
25948 /* VPSRLQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /2 ib */
25949 /* VPSLLQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /6 ib */
26409 Int imm8;
26413 imm8 = (Int)(getUChar(delta+1) & 7);
26417 DIP( "vpinsrw $%d,%s,%s\n", imm8,
26421 imm8 = (Int)(getUChar(delta+alen) & 7);
26425 imm8, dis_buf, nameXMMReg(rG) );
26430 IRTemp res_vec = math_PINSRW_128( src_vec, new16, imm8 );
26438 /* VPEXTRW imm8, xmm1, reg32 = VEX.128.66.0F.W0 C5 /r ib */
26450 /* VSHUFPS imm8, xmm3/m128, xmm2, xmm1, xmm2 */
26453 Int imm8 = 0;
26463 imm8 = (Int)getUChar(delta+1);
26466 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
26470 imm8 = (Int)getUChar(delta+alen);
26473 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
26475 IRTemp res = math_SHUFPS_128( eV, vV, imm8 );
26480 /* VSHUFPS imm8, ymm3/m256, ymm2, ymm1, ymm2 */
26483 Int imm8 = 0;
26493 imm8 = (Int)getUChar(delta+1);
26496 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
26500 imm8 = (Int)getUChar(delta+alen);
26503 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
26505 IRTemp res = math_SHUFPS_256( eV, vV, imm8 );
26510 /* VSHUFPD imm8, xmm3/m128, xmm2, xmm1, xmm2 */
26513 Int imm8 = 0;
26523 imm8 = (Int)getUChar(delta+1);
26526 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
26530 imm8 = (Int)getUChar(delta+alen);
26533 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
26535 IRTemp res = math_SHUFPD_128( eV, vV, imm8 );
26540 /* VSHUFPD imm8, ymm3/m256, ymm2, ymm1, ymm2 */
26543 Int imm8 = 0;
26553 imm8 = (Int)getUChar(delta+1);
26556 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
26560 imm8 = (Int)getUChar(delta+alen);
26563 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
26565 IRTemp res = math_SHUFPD_256( eV, vV, imm8 );
30096 static IRTemp math_VPERMILPS_128 ( IRTemp sV, UInt imm8 )
30098 vassert(imm8 < 256);
30105 assign(res, mkV128from32s( SEL((imm8 >> 6) & 3),
30106 SEL((imm8 >> 4) & 3),
30107 SEL((imm8 >> 2) & 3),
30108 SEL((imm8 >> 0) & 3) ));
30138 /* VPERMQ imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.W1 00 /r ib */
30139 /* VPERMPD imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.W1 01 /r ib */
30143 UInt imm8 = 0;
30150 imm8 = getUChar(delta);
30152 name, imm8, nameYMMReg(rE), nameYMMReg(rG));
30157 imm8 = getUChar(delta);
30159 name, imm8, dis_buf, nameYMMReg(rG));
30168 mkexpr(s[(imm8 >> 6) & 3]),
30169 mkexpr(s[(imm8 >> 4) & 3]),
30170 mkexpr(s[(imm8 >> 2) & 3]),
30171 mkexpr(s[(imm8 >> 0) & 3])));
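
VPERMQ/VPERMPD treat imm8 as four 2-bit selectors, one per 64-bit result lane, each naming the source lane to copy, which is what the four mkexpr(s[...]) picks above implement. A reference model with a hypothetical helper name:

/* VPERMQ/VPERMPD reference. */
static void vpermq_ref ( unsigned long long res[4],
                         const unsigned long long src[4], unsigned imm8 )
{
   for (int i = 0; i < 4; i++)
      res[i] = src[(imm8 >> (2 * i)) & 3];
}
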
30178 /* VPBLENDD imm8, xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F3A.W0 02 /r ib */
30182 UInt imm8 = 0;
30193 imm8 = getUChar(delta);
30195 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
30200 imm8 = getUChar(delta);
30202 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
30213 putYMMRegLane32(rG, i, mkexpr((imm8 & (1<<i)) ? d[i] : s[i]));
30218 /* VPBLENDD imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F3A.W0 02 /r ib */
30222 UInt imm8 = 0;
30233 imm8 = getUChar(delta);
30235 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30240 imm8 = getUChar(delta);
30242 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30255 putYMMRegLane32(rG, i, mkexpr((imm8 & (1<<i)) ? d[i] : s[i]));
30262 /* VPERMILPS imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.WIG 04 /r ib */
30265 UInt imm8 = 0;
30271 imm8 = getUChar(delta);
30273 imm8, nameYMMReg(rE), nameYMMReg(rG));
30278 imm8 = getUChar(delta);
30280 imm8, dis_buf, nameYMMReg(rG));
30286 IRTemp dVhi = math_VPERMILPS_128( sVhi, imm8 );
30287 IRTemp dVlo = math_VPERMILPS_128( sVlo, imm8 );
30292 /* VPERMILPS imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG 04 /r ib */
30295 UInt imm8 = 0;
30301 imm8 = getUChar(delta);
30303 imm8, nameXMMReg(rE), nameXMMReg(rG));
30308 imm8 = getUChar(delta);
30310 imm8, dis_buf, nameXMMReg(rG));
30314 putYMMRegLoAndZU(rG, mkexpr ( math_VPERMILPS_128 ( sV, imm8 ) ) );
30320 /* VPERMILPD imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG 05 /r ib */
30323 UInt imm8 = 0;
30329 imm8 = getUChar(delta);
30331 imm8, nameXMMReg(rE), nameXMMReg(rG));
30336 imm8 = getUChar(delta);
30338 imm8, dis_buf, nameXMMReg(rG));
30348 mkexpr((imm8 & (1<<1)) ? s1 : s0),
30349 mkexpr((imm8 & (1<<0)) ? s1 : s0)));
30353 /* VPERMILPD imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.WIG 05 /r ib */
30356 UInt imm8 = 0;
30362 imm8 = getUChar(delta);
30364 imm8, nameYMMReg(rE), nameYMMReg(rG));
30369 imm8 = getUChar(delta);
30371 imm8, dis_buf, nameYMMReg(rG));
30380 mkexpr((imm8 & (1<<3)) ? s3 : s2),
30381 mkexpr((imm8 & (1<<2)) ? s3 : s2),
30382 mkexpr((imm8 & (1<<1)) ? s1 : s0),
30383 mkexpr((imm8 & (1<<0)) ? s1 : s0)));
30390 /* VPERM2F128 imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.66.0F3A.W0 06 /r ib */
30394 UInt imm8 = 0;
30406 imm8 = getUChar(delta);
30408 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30414 imm8 = getUChar(delta);
30416 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30425 putYMMRegLane128(rG, 0, mkexpr(SEL((imm8 >> 0) & 3)));
30426 putYMMRegLane128(rG, 1, mkexpr(SEL((imm8 >> 4) & 3)));
30428 if (imm8 & (1<<3)) putYMMRegLane128(rG, 0, mkV128(0));
30429 if (imm8 & (1<<7)) putYMMRegLane128(rG, 1, mkV128(0));
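
For VPERM2F128 (and VPERM2I128 further down), each nibble of imm8 holds a 2-bit selector naming one of the four 128-bit lanes of the concatenated source pair, and bits 3 and 7 force the corresponding result lane to zero, matching the two trailing "if (imm8 & ...)" lines. A reference model, following the architectural definition; the operand roles and the helper name are assumptions:

/* VPERM2F128/VPERM2I128 reference: src1 is the vvvv register,
   src2 the ymm/m256 operand, each given as four 64-bit lanes. */
static void vperm2_ref ( unsigned long long res[4],
                         const unsigned long long src1[4],
                         const unsigned long long src2[4], unsigned imm8 )
{
   const unsigned long long *lanes[4] =
      { &src1[0], &src1[2], &src2[0], &src2[2] };
   for (int half = 0; half < 2; half++) {
      unsigned sel  = (imm8 >> (4 * half)) & 3;
      int      zero = (imm8 >> (4 * half + 3)) & 1;
      res[2*half + 0] = zero ? 0 : lanes[sel][0];
      res[2*half + 1] = zero ? 0 : lanes[sel][1];
   }
}
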
30436 /* VROUNDPS imm8, xmm2/m128, xmm1 */
30484 /* VROUNDPS imm8, ymm2/m256, ymm1 */
30541 /* VROUNDPD imm8, xmm2/m128, xmm1 */
30585 /* VROUNDPD imm8, ymm2/m256, ymm1 */
30635 /* VROUNDSS imm8, xmm3/m32, xmm2, xmm1 */
30637 /* VROUNDSD imm8, xmm3/m64, xmm2, xmm1 */
30692 /* VBLENDPS imm8, ymm3/m256, ymm2, ymm1 */
30696 UInt imm8;
30705 imm8 = getUChar(delta);
30707 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30712 imm8 = getUChar(delta);
30714 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30719 mkexpr( math_BLENDPS_256( sE, sV, imm8) ) );
30723 /* VBLENDPS imm8, xmm3/m128, xmm2, xmm1 */
30727 UInt imm8;
30736 imm8 = getUChar(delta);
30738 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
30743 imm8 = getUChar(delta);
30745 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
30750 mkexpr( math_BLENDPS_128( sE, sV, imm8) ) );
30757 /* VBLENDPD imm8, ymm3/m256, ymm2, ymm1 */
30761 UInt imm8;
30770 imm8 = getUChar(delta);
30772 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30777 imm8 = getUChar(delta);
30779 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30784 mkexpr( math_BLENDPD_256( sE, sV, imm8) ) );
30788 /* VBLENDPD imm8, xmm3/m128, xmm2, xmm1 */
30792 UInt imm8;
30801 imm8 = getUChar(delta);
30803 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
30808 imm8 = getUChar(delta);
30810 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
30815 mkexpr( math_BLENDPD_128( sE, sV, imm8) ) );
30822 /* VPBLENDW imm8, xmm3/m128, xmm2, xmm1 */
30826 UInt imm8;
30835 imm8 = getUChar(delta);
30837 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
30842 imm8 = getUChar(delta);
30844 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
30849 mkexpr( math_PBLENDW_128( sE, sV, imm8) ) );
30853 /* VPBLENDW imm8, ymm3/m256, ymm2, ymm1 */
30857 UInt imm8;
30868 imm8 = getUChar(delta);
30870 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
30875 imm8 = getUChar(delta);
30877 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
30884 mkexpr( math_PBLENDW_128( sEhi, sVhi, imm8) ),
30885 mkexpr( math_PBLENDW_128( sElo, sVlo, imm8) ) ) );
30892 /* VPALIGNR imm8, xmm3/m128, xmm2, xmm1 */
30900 UInt imm8;
30907 imm8 = getUChar(delta+1);
30909 DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameXMMReg(rE),
30914 imm8 = getUChar(delta+alen);
30916 DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
30920 IRTemp res = math_PALIGNR_XMM( sV, dV, imm8 );
30925 /* VPALIGNR imm8, ymm3/m256, ymm2, ymm1 */
30935 UInt imm8;
30942 imm8 = getUChar(delta+1);
30944 DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameYMMReg(rE),
30949 imm8 = getUChar(delta+alen);
30951 DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
30958 mkexpr( math_PALIGNR_XMM( sHi, dHi, imm8 ) ),
30959 mkexpr( math_PALIGNR_XMM( sLo, dLo, imm8 ) ) )
30967 /* VPEXTRB imm8, xmm2, reg/m8 = VEX.128.66.0F3A.W0 14 /r ib */
30976 /* VPEXTRW imm8, reg/m16, xmm2 */
30986 /* VPEXTRD imm8, r32/m32, xmm2 */
31002 /* VEXTRACTPS imm8, xmm1, r32/m32 = VEX.128.66.0F3A.WIG 17 /r ib */
31084 Int imm8;
31089 imm8 = (Int)(getUChar(delta+1) & 15);
31093 imm8, nameIReg32(rE), nameXMMReg(rV), nameXMMReg(rG) );
31096 imm8 = (Int)(getUChar(delta+alen) & 15);
31100 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31105 IRTemp res_vec = math_PINSRB_128( src_vec, src_u8, imm8 );
31113 /* VINSERTPS imm8, xmm3/m32, xmm2, xmm1
31119 UInt imm8;
31129 imm8 = getUChar(delta+1);
31130 d2ins = dsE[(imm8 >> 6) & 3]; /* "imm8_count_s" */
31133 imm8, nameXMMReg(rE), nameXMMReg(rG) );
31137 imm8 = getUChar(delta+alen);
31140 imm8, dis_buf, nameXMMReg(rG) );
31146 putYMMRegLoAndZU( rG, mkexpr(math_INSERTPS( vV, d2ins, imm8 )) );
31288 /* VDPPS imm8, xmm3/m128,xmm2,xmm1 = VEX.NDS.128.66.0F3A.WIG 40 /r ib */
31294 Int imm8;
31297 imm8 = (Int)getUChar(delta+1);
31301 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
31304 imm8 = (Int)getUChar(delta+alen);
31308 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31313 IRTemp res_vec = math_DPPS_128( src_vec, dst_vec, imm8 );
31318 /* VDPPS imm8, ymm3/m128,ymm2,ymm1 = VEX.NDS.256.66.0F3A.WIG 40 /r ib */
31324 Int imm8;
31327 imm8 = (Int)getUChar(delta+1);
31331 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG) );
31334 imm8 = (Int)getUChar(delta+alen);
31338 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG) );
31348 mkexpr( math_DPPS_128(s1, d1, imm8) ),
31349 mkexpr( math_DPPS_128(s0, d0, imm8) ) ) );
31356 /* VDPPD imm8, xmm3/m128,xmm2,xmm1 = VEX.NDS.128.66.0F3A.WIG 41 /r ib */
31362 Int imm8;
31365 imm8 = (Int)getUChar(delta+1);
31369 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
31372 imm8 = (Int)getUChar(delta+alen);
31376 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31381 IRTemp res_vec = math_DPPD_128( src_vec, dst_vec, imm8 );
31389 /* VMPSADBW imm8, xmm3/m128,xmm2,xmm1 */
31393 Int imm8;
31404 imm8 = (Int)getUChar(delta+1);
31407 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31411 1/* imm8 is 1 byte after the amode */ );
31413 imm8 = (Int)getUChar(delta+alen);
31415 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31420 src_vec, imm8) ) );
31424 /* VMPSADBW imm8, ymm3/m256,ymm2,ymm1 */
31428 Int imm8;
31441 imm8 = (Int)getUChar(delta+1);
31444 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31448 1/* imm8 is 1 byte after the amode */ );
31450 imm8 = (Int)getUChar(delta+alen);
31452 DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
31459 mkexpr( math_MPSADBW_128(dHi, sHi, imm8 >> 3) ),
31460 mkexpr( math_MPSADBW_128(dLo, sLo, imm8) ) ) );
31467 /* VPCLMULQDQ imm8, xmm3/m128,xmm2,xmm1 */
31469 /* 66 0F 3A 44 /r ib = PCLMULQDQ xmm1, xmm2/m128, imm8
31475 Int imm8;
31485 imm8 = (Int)getUChar(delta+1);
31488 DIP( "vpclmulqdq $%d, %s,%s,%s\n", imm8,
31492 1/* imm8 is 1 byte after the amode */ );
31494 imm8 = (Int)getUChar(delta+alen);
31497 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
31500 putYMMRegLoAndZU( rG, mkexpr( math_PCLMULQDQ(dV, sV, imm8) ) );
31507 /* VPERM2I128 imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.66.0F3A.W0 46 /r ib */
31511 UInt imm8 = 0;
31523 imm8 = getUChar(delta);
31525 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
31531 imm8 = getUChar(delta);
31533 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
31542 putYMMRegLane128(rG, 0, mkexpr(SEL((imm8 >> 0) & 3)));
31543 putYMMRegLane128(rG, 1, mkexpr(SEL((imm8 >> 4) & 3)));
31545 if (imm8 & (1<<3)) putYMMRegLane128(rG, 0, mkV128(0));
31546 if (imm8 & (1<<7)) putYMMRegLane128(rG, 1, mkV128(0));
31619 /* VEX.128.66.0F3A.WIG 63 /r ib = VPCMPISTRI imm8, xmm2/m128, xmm1
31620 VEX.128.66.0F3A.WIG 62 /r ib = VPCMPISTRM imm8, xmm2/m128, xmm1
31621 VEX.128.66.0F3A.WIG 61 /r ib = VPCMPESTRI imm8, xmm2/m128, xmm1
31622 VEX.128.66.0F3A.WIG 60 /r ib = VPCMPESTRM imm8, xmm2/m128, xmm1
31635 /* VAESKEYGENASSIST imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG DF /r */
31643 /* RORX imm8, r/m32, r32a = VEX.LZ.F2.0F3A.W0 F0 /r /i */
31644 /* RORX imm8, r/m64, r64a = VEX.LZ.F2.0F3A.W1 F0 /r /i */
31650 UChar imm8;
31653 imm8 = getUChar(delta+1);
31655 DIP("rorx %d,%s,%s\n", imm8, nameIRegE(size,pfx,rm),
31660 imm8 = getUChar(delta+alen);
31662 DIP("rorx %d,%s,%s\n", imm8, dis_buf, nameIRegG(size,pfx,rm));
31665 imm8 &= 8*size-1;
31667 /* dst = (src >>u imm8) | (src << (size-imm8)) */
31669 imm8 == 0 ? mkexpr(src)
31672 mkU8(imm8) ),
31674 mkU8(8*size-imm8) ) ) );
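
The RORX translation masks imm8 to the operand width and special-cases a zero count so neither shift reaches the full width. An equivalent 64-bit reference model, mirroring the expression above; the helper name is invented:

/* RORX (64-bit form) reference: rotate right, flags untouched. */
static unsigned long long rorx64_ref ( unsigned long long src, unsigned imm8 )
{
   imm8 &= 63;
   return imm8 == 0 ? src
                    : (src >> imm8) | (src << (64 - imm8));
}
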