//===-- X86ShuffleDecode.cpp - X86 shuffle decode logic -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Define several functions to decode x86 specific shuffle semantics into a
// generic vector mask.
//
//===----------------------------------------------------------------------===//

#include "X86ShuffleDecode.h"
#include "llvm/ADT/ArrayRef.h"

//===----------------------------------------------------------------------===//
//  Vector Mask Decoding
//===----------------------------------------------------------------------===//

namespace llvm {

void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  // Default to copying the destination value.
  ShuffleMask.push_back(0);
  ShuffleMask.push_back(1);
  ShuffleMask.push_back(2);
  ShuffleMask.push_back(3);

  // Decode the immediate.
  unsigned ZMask = Imm & 15;
  unsigned CountD = (Imm >> 4) & 3;
  unsigned CountS = (Imm >> 6) & 3;

  // CountS selects which input element to use.
  unsigned InVal = 4 + CountS;
  // CountD specifies which element of destination to update.
  ShuffleMask[CountD] = InVal;
  // ZMask zaps values, potentially overriding the CountD elt.
  if (ZMask & 1) ShuffleMask[0] = SM_SentinelZero;
  if (ZMask & 2) ShuffleMask[1] = SM_SentinelZero;
  if (ZMask & 4) ShuffleMask[2] = SM_SentinelZero;
  if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
}
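
// For example (illustrative, derived from the decode above): Imm = 0x31 gives
// CountS = 0, CountD = 3, ZMask = 1, so the mask is <Zero,1,2,4> - element 0
// of the source is inserted into destination element 3, and element 0 of the
// result is zeroed.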

void DecodeInsertElementMask(unsigned NumElts, unsigned Idx, unsigned Len,
                             SmallVectorImpl<int> &ShuffleMask) {
  assert((Idx + Len) <= NumElts && "Insertion out of range");

  for (unsigned i = 0; i != NumElts; ++i)
    ShuffleMask.push_back(i);
  for (unsigned i = 0; i != Len; ++i)
    ShuffleMask[Idx + i] = NumElts + i;
}

// <3,1> or <6,7,2,3>
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned i = NElts / 2; i != NElts; ++i)
    ShuffleMask.push_back(NElts + i);

  for (unsigned i = NElts / 2; i != NElts; ++i)
    ShuffleMask.push_back(i);
}

// <0,2> or <0,1,4,5>
void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned i = 0; i != NElts / 2; ++i)
    ShuffleMask.push_back(i);

  for (unsigned i = 0; i != NElts / 2; ++i)
    ShuffleMask.push_back(NElts + i);
}

void DecodeMOVSLDUPMask(unsigned NumElts, SmallVectorImpl<int> &ShuffleMask) {
  for (int i = 0, e = NumElts / 2; i < e; ++i) {
    ShuffleMask.push_back(2 * i);
    ShuffleMask.push_back(2 * i);
  }
}

void DecodeMOVSHDUPMask(unsigned NumElts, SmallVectorImpl<int> &ShuffleMask) {
  for (int i = 0, e = NumElts / 2; i < e; ++i) {
    ShuffleMask.push_back(2 * i + 1);
    ShuffleMask.push_back(2 * i + 1);
  }
}

void DecodeMOVDDUPMask(unsigned NumElts, SmallVectorImpl<int> &ShuffleMask) {
  const unsigned NumLaneElts = 2;

  for (unsigned l = 0; l < NumElts; l += NumLaneElts)
    for (unsigned i = 0; i < NumLaneElts; ++i)
      ShuffleMask.push_back(l);
}

void DecodePSLLDQMask(unsigned NumElts, unsigned Imm,
                      SmallVectorImpl<int> &ShuffleMask) {
  const unsigned NumLaneElts = 16;

  for (unsigned l = 0; l < NumElts; l += NumLaneElts)
    for (unsigned i = 0; i < NumLaneElts; ++i) {
      int M = SM_SentinelZero;
      if (i >= Imm) M = i - Imm + l;
      ShuffleMask.push_back(M);
    }
}
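
// For example (illustrative): a 128-bit PSLLDQ with Imm = 4 decodes to
// <Zero,Zero,Zero,Zero,0,1,...,11> - each 128-bit lane is shifted left by
// four bytes and the vacated low bytes are zero filled.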

void DecodePSRLDQMask(unsigned NumElts, unsigned Imm,
                      SmallVectorImpl<int> &ShuffleMask) {
  const unsigned NumLaneElts = 16;

  for (unsigned l = 0; l < NumElts; l += NumLaneElts)
    for (unsigned i = 0; i < NumLaneElts; ++i) {
      unsigned Base = i + Imm;
      int M = Base + l;
      if (Base >= NumLaneElts) M = SM_SentinelZero;
      ShuffleMask.push_back(M);
    }
}
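
// Likewise (illustrative): a 128-bit PSRLDQ with Imm = 4 decodes to
// <4,5,...,15,Zero,Zero,Zero,Zero>.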

void DecodePALIGNRMask(unsigned NumElts, unsigned Imm,
                       SmallVectorImpl<int> &ShuffleMask) {
  const unsigned NumLaneElts = 16;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      unsigned Base = i + Imm;
      // If i+Imm is past the end of this lane, the byte comes from the other
      // source operand.
      if (Base >= NumLaneElts) Base += NumElts - NumLaneElts;
      ShuffleMask.push_back(Base + l);
    }
  }
}
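
// For example (illustrative): a 128-bit PALIGNR with Imm = 4 decodes to
// <4,5,...,15,16,17,18,19> - the low twelve bytes come from elements 4..15 of
// the first shuffle input and the top four from elements 0..3 of the second.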

void DecodeVALIGNMask(unsigned NumElts, unsigned Imm,
                      SmallVectorImpl<int> &ShuffleMask) {
  // Not all bits of the immediate are used so mask it.
  assert(isPowerOf2_32(NumElts) && "NumElts should be power of 2");
  Imm = Imm & (NumElts - 1);
  for (unsigned i = 0; i != NumElts; ++i)
    ShuffleMask.push_back(i + Imm);
}

/// DecodePSHUFMask - This decodes the shuffle masks for pshufw, pshufd, and
/// vpermilp*. NumElts and ScalarBits describe the vector, allowing the same
/// decode logic to handle different datatypes and vector widths.
void DecodePSHUFMask(unsigned NumElts, unsigned ScalarBits, unsigned Imm,
                     SmallVectorImpl<int> &ShuffleMask) {
  unsigned Size = NumElts * ScalarBits;
  unsigned NumLanes = Size / 128;
  if (NumLanes == 0) NumLanes = 1;  // Handle MMX
  unsigned NumLaneElts = NumElts / NumLanes;

  uint32_t SplatImm = (Imm & 0xff) * 0x01010101;
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      ShuffleMask.push_back(SplatImm % NumLaneElts + l);
      SplatImm /= NumLaneElts;
    }
  }
}
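
// For example (illustrative): PSHUFD with Imm = 0x1B (0b00011011) reverses a
// 4 x i32 vector, decoding to <3,2,1,0>; on wider types the same pattern
// repeats per 128-bit lane.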

void DecodePSHUFHWMask(unsigned NumElts, unsigned Imm,
                       SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned l = 0; l != NumElts; l += 8) {
    unsigned NewImm = Imm;
    for (unsigned i = 0, e = 4; i != e; ++i) {
      ShuffleMask.push_back(l + i);
    }
    for (unsigned i = 4, e = 8; i != e; ++i) {
      ShuffleMask.push_back(l + 4 + (NewImm & 3));
      NewImm >>= 2;
    }
  }
}
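
// For example (illustrative): PSHUFHW with Imm = 0x1B leaves the low four
// words alone and reverses the high four, decoding to <0,1,2,3,7,6,5,4> per
// 128-bit lane.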

void DecodePSHUFLWMask(unsigned NumElts, unsigned Imm,
                       SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned l = 0; l != NumElts; l += 8) {
    unsigned NewImm = Imm;
    for (unsigned i = 0, e = 4; i != e; ++i) {
      ShuffleMask.push_back(l + (NewImm & 3));
      NewImm >>= 2;
    }
    for (unsigned i = 4, e = 8; i != e; ++i) {
      ShuffleMask.push_back(l + i);
    }
  }
}

void DecodePSWAPMask(unsigned NumElts, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumHalfElts = NumElts / 2;

  for (unsigned l = 0; l != NumHalfElts; ++l)
    ShuffleMask.push_back(l + NumHalfElts);
  for (unsigned h = 0; h != NumHalfElts; ++h)
    ShuffleMask.push_back(h);
}

/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. NumElts and
/// ScalarBits describe the vector, allowing the same decode logic to handle
/// different datatypes and vector widths.
void DecodeSHUFPMask(unsigned NumElts, unsigned ScalarBits,
                     unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumLaneElts = 128 / ScalarBits;

  unsigned NewImm = Imm;
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    // Each half of a lane comes from a different source.
    for (unsigned s = 0; s != NumElts * 2; s += NumElts) {
      for (unsigned i = 0; i != NumLaneElts / 2; ++i) {
        ShuffleMask.push_back(NewImm % NumLaneElts + s + l);
        NewImm /= NumLaneElts;
      }
    }
    if (NumLaneElts == 4) NewImm = Imm; // reload imm
  }
}
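
// For example (illustrative): SHUFPS with Imm = 0x1B on 4 x f32 decodes to
// <3,2,5,4> - the low half picks elements 3 and 2 of the first source and the
// high half picks elements 1 and 0 of the second source.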

/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
/// and punpckh*. NumElts and ScalarBits describe the vector, allowing the
/// same decode logic to handle different datatypes and vector widths.
void DecodeUNPCKHMask(unsigned NumElts, unsigned ScalarBits,
                      SmallVectorImpl<int> &ShuffleMask) {
  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = (NumElts * ScalarBits) / 128;
  if (NumLanes == 0) NumLanes = 1;  // Handle MMX
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = l + NumLaneElts / 2, e = l + NumLaneElts; i != e; ++i) {
      ShuffleMask.push_back(i);           // Reads from dest/src1
      ShuffleMask.push_back(i + NumElts); // Reads from src/src2
    }
  }
}
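
// For example (illustrative): UNPCKHPS on 4 x f32 decodes to <2,6,3,7>,
// interleaving the high halves of the two sources.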

/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
/// and punpckl*. NumElts and ScalarBits describe the vector, allowing the
/// same decode logic to handle different datatypes and vector widths.
void DecodeUNPCKLMask(unsigned NumElts, unsigned ScalarBits,
                      SmallVectorImpl<int> &ShuffleMask) {
  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = (NumElts * ScalarBits) / 128;
  if (NumLanes == 0) NumLanes = 1;  // Handle MMX
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = l, e = l + NumLaneElts / 2; i != e; ++i) {
      ShuffleMask.push_back(i);           // Reads from dest/src1
      ShuffleMask.push_back(i + NumElts); // Reads from src/src2
    }
  }
}
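
// For example (illustrative): UNPCKLPS on 4 x f32 decodes to <0,4,1,5>,
// interleaving the low halves of the two sources.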

/// Decodes a broadcast of the first element of a vector.
void DecodeVectorBroadcast(unsigned NumElts,
                           SmallVectorImpl<int> &ShuffleMask) {
  ShuffleMask.append(NumElts, 0);
}

/// Decodes a broadcast of a subvector to a larger vector type.
void DecodeSubVectorBroadcast(unsigned DstNumElts, unsigned SrcNumElts,
                              SmallVectorImpl<int> &ShuffleMask) {
  unsigned Scale = DstNumElts / SrcNumElts;

  for (unsigned i = 0; i != Scale; ++i)
    for (unsigned j = 0; j != SrcNumElts; ++j)
      ShuffleMask.push_back(j);
}

/// Decodes the immediate of a shuffle of packed values at 128-bit
/// granularity (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2) into a generic
/// shuffle mask.
void decodeVSHUF64x2FamilyMask(unsigned NumElts, unsigned ScalarSize,
                               unsigned Imm,
                               SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElementsInLane = 128 / ScalarSize;
  unsigned NumLanes = NumElts / NumElementsInLane;

  for (unsigned l = 0; l != NumElts; l += NumElementsInLane) {
    unsigned Index = (Imm % NumLanes) * NumElementsInLane;
    Imm /= NumLanes; // Discard the bits we just used.
    // We actually need the other source.
    if (l >= (NumElts / 2))
      Index += NumElts;
    for (unsigned i = 0; i != NumElementsInLane; ++i)
      ShuffleMask.push_back(Index + i);
  }
}
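
// For example (illustrative): VSHUFF64X2 on an 8 x f64 vector with the
// identity selector Imm = 0xE4 decodes to <0,1,2,3,12,13,14,15> - the lower
// half of the result takes its 128-bit lanes from the first source and the
// upper half from the second.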

void DecodeVPERM2X128Mask(unsigned NumElts, unsigned Imm,
                          SmallVectorImpl<int> &ShuffleMask) {
  unsigned HalfSize = NumElts / 2;

  for (unsigned l = 0; l != 2; ++l) {
    unsigned HalfMask = Imm >> (l * 4);
    unsigned HalfBegin = (HalfMask & 0x3) * HalfSize;
    for (unsigned i = HalfBegin, e = HalfBegin + HalfSize; i != e; ++i)
      ShuffleMask.push_back(HalfMask & 8 ? SM_SentinelZero : (int)i);
  }
}
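
// For example (illustrative): VPERM2F128 with Imm = 0x21 on 8 x f32 decodes
// to <4,5,6,7,8,9,10,11> - the high half of the first source followed by the
// low half of the second. Setting bit 3 or bit 7 of the immediate zeroes the
// corresponding half of the result instead.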

void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
                      SmallVectorImpl<int> &ShuffleMask) {
  for (int i = 0, e = RawMask.size(); i < e; ++i) {
    uint64_t M = RawMask[i];
    if (M == (uint64_t)SM_SentinelUndef) {
      ShuffleMask.push_back(M);
      continue;
    }
    // For 256/512-bit vectors the base of the shuffle is the 128-bit
    // subvector we're inside.
    int Base = (i / 16) * 16;
    // If the high bit (7) of the byte is set, the element is zeroed.
    if (M & (1 << 7))
      ShuffleMask.push_back(SM_SentinelZero);
    else {
      // Only the least significant 4 bits of the byte are used.
      int Index = Base + (M & 0xf);
      ShuffleMask.push_back(Index);
    }
  }
}
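
// For example (illustrative): a PSHUFB control byte of 0x83 always produces a
// zero element, while a control byte of 0x05 at byte position 20 of a 256-bit
// shuffle selects element 16 + 5 = 21, i.e. byte 5 of the same 128-bit lane.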

void DecodeBLENDMask(unsigned NumElts, unsigned Imm,
                     SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned i = 0; i < NumElts; ++i) {
    // If there are more than 8 elements in the vector, the 8-bit immediate
    // wraps around, so bit (i % 8) controls element i.
    unsigned Bit = i % 8;
    ShuffleMask.push_back(((Imm >> Bit) & 1) ? NumElts + i : i);
  }
}
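
// A minimal usage sketch (illustrative only, not part of the original file):
//   SmallVector<int, 8> Mask;
//   DecodeBLENDMask(4, 0x5, Mask); // Mask == <4,1,6,3>
// Set bits in the immediate select elements from the second source.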

void DecodeVPPERMMask(ArrayRef<uint64_t> RawMask,
                      SmallVectorImpl<int> &ShuffleMask) {
  assert(RawMask.size() == 16 && "Illegal VPPERM shuffle mask size");

  // VPPERM Operation
  // Bits[4:0] - Byte Index (0 - 31)
  // Bits[7:5] - Permute Operation
  //
  // Permute Operation:
  // 0 - Source byte (no logical operation).
  // 1 - Invert source byte.
  // 2 - Bit reverse of source byte.
  // 3 - Bit reverse of inverted source byte.
  // 4 - 00h (zero - fill).
  // 5 - FFh (ones - fill).
  // 6 - Most significant bit of source byte replicated in all bit positions.
  // 7 - Invert most significant bit of source byte and replicate in all bit positions.
  for (int i = 0, e = RawMask.size(); i < e; ++i) {
    uint64_t M = RawMask[i];
    if (M == (uint64_t)SM_SentinelUndef) {
      ShuffleMask.push_back(M);
      continue;
    }

    uint64_t PermuteOp = (M >> 5) & 0x7;
    if (PermuteOp == 4) {
      ShuffleMask.push_back(SM_SentinelZero);
      continue;
    }
    if (PermuteOp != 0) {
      ShuffleMask.clear();
      return;
    }

    uint64_t Index = M & 0x1F;
    ShuffleMask.push_back((int)Index);
  }
}

/// DecodeVPERMMask - This decodes the shuffle masks for VPERMQ/VPERMPD.
void DecodeVPERMMask(unsigned NumElts, unsigned Imm,
                     SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned l = 0; l != NumElts; l += 4)
    for (unsigned i = 0; i != 4; ++i)
      ShuffleMask.push_back(l + ((Imm >> (2 * i)) & 3));
}
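
// For example (illustrative): VPERMQ with Imm = 0x1B reverses a 4 x i64
// vector, decoding to <3,2,1,0>.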

void DecodeZeroExtendMask(unsigned SrcScalarBits, unsigned DstScalarBits,
                          unsigned NumDstElts, SmallVectorImpl<int> &Mask) {
  unsigned Scale = DstScalarBits / SrcScalarBits;
  assert(SrcScalarBits < DstScalarBits &&
         "Expected zero extension mask to increase scalar size");

  for (unsigned i = 0; i != NumDstElts; i++) {
    Mask.push_back(i);
    for (unsigned j = 1; j != Scale; j++)
      Mask.push_back(SM_SentinelZero);
  }
}
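
// For example (illustrative): a byte-to-dword zero extension (PMOVZXBD style,
// SrcScalarBits = 8, DstScalarBits = 32, NumDstElts = 4) decodes to
// <0,Zero,Zero,Zero,1,Zero,Zero,Zero,2,Zero,Zero,Zero,3,Zero,Zero,Zero>.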

void DecodeZeroMoveLowMask(unsigned NumElts,
                           SmallVectorImpl<int> &ShuffleMask) {
  ShuffleMask.push_back(0);
  for (unsigned i = 1; i < NumElts; i++)
    ShuffleMask.push_back(SM_SentinelZero);
}

void DecodeScalarMoveMask(unsigned NumElts, bool IsLoad,
                          SmallVectorImpl<int> &Mask) {
  // The first element comes from the first element of the second source.
  // Remaining elements: a load zero-extends, a register move copies them from
  // the first source.
  Mask.push_back(NumElts);
  for (unsigned i = 1; i < NumElts; i++)
    Mask.push_back(IsLoad ? static_cast<int>(SM_SentinelZero) : i);
}
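
// For example (illustrative): for a 4-element MOVSS-style move this decodes
// to <4,Zero,Zero,Zero> when loading from memory and <4,1,2,3> for a
// register-register move.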

void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
                      SmallVectorImpl<int> &ShuffleMask) {
  unsigned HalfElts = NumElts / 2;

  // Only the bottom 6 bits are valid for each immediate.
  Len &= 0x3F;
  Idx &= 0x3F;

  // We can only decode this bit extraction instruction as a shuffle if both the
  // length and index work with whole elements.
  if (0 != (Len % EltSize) || 0 != (Idx % EltSize))
    return;

  // A length of zero is equivalent to a bit length of 64.
  if (Len == 0)
    Len = 64;

  // If the length + index exceeds the bottom 64 bits the result is undefined.
  if ((Len + Idx) > 64) {
    ShuffleMask.append(NumElts, SM_SentinelUndef);
    return;
  }

  // Convert length and index from bits to elements.
  Len /= EltSize;
  Idx /= EltSize;

  // EXTRQ: Extract Len elements starting from Idx. Zero pad the remaining
  // elements of the lower 64-bits. The upper 64-bits are undefined.
  for (int i = 0; i != Len; ++i)
    ShuffleMask.push_back(i + Idx);
  for (int i = Len; i != (int)HalfElts; ++i)
    ShuffleMask.push_back(SM_SentinelZero);
  for (int i = HalfElts; i != (int)NumElts; ++i)
    ShuffleMask.push_back(SM_SentinelUndef);
}
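
// For example (illustrative): EXTRQ on a 16 x i8 vector with Len = 16 bits
// and Idx = 8 bits decodes to <1,2,Zero,Zero,Zero,Zero,Zero,Zero> for the low
// 64 bits, with the upper eight elements undefined.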

void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
                        SmallVectorImpl<int> &ShuffleMask) {
  unsigned HalfElts = NumElts / 2;

  // Only the bottom 6 bits are valid for each immediate.
  Len &= 0x3F;
  Idx &= 0x3F;

  // We can only decode this bit insertion instruction as a shuffle if both the
  // length and index work with whole elements.
  if (0 != (Len % EltSize) || 0 != (Idx % EltSize))
    return;

  // A length of zero is equivalent to a bit length of 64.
  if (Len == 0)
    Len = 64;

  // If the length + index exceeds the bottom 64 bits the result is undefined.
  if ((Len + Idx) > 64) {
    ShuffleMask.append(NumElts, SM_SentinelUndef);
    return;
  }

  // Convert length and index from bits to elements.
  Len /= EltSize;
  Idx /= EltSize;

  // INSERTQ: Extract lowest Len elements from lower half of second source and
  // insert over first source starting at Idx element. The upper 64-bits are
  // undefined.
  for (int i = 0; i != Idx; ++i)
    ShuffleMask.push_back(i);
  for (int i = 0; i != Len; ++i)
    ShuffleMask.push_back(i + NumElts);
  for (int i = Idx + Len; i != (int)HalfElts; ++i)
    ShuffleMask.push_back(i);
  for (int i = HalfElts; i != (int)NumElts; ++i)
    ShuffleMask.push_back(SM_SentinelUndef);
}
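
// For example (illustrative): INSERTQ on a 16 x i8 vector with Len = 16 bits
// and Idx = 8 bits decodes to <0,16,17,3,4,5,6,7> for the low 64 bits, with
// the upper eight elements undefined.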

void DecodeVPERMILPMask(unsigned NumElts, unsigned ScalarBits,
                        ArrayRef<uint64_t> RawMask,
                        SmallVectorImpl<int> &ShuffleMask) {
  unsigned VecSize = NumElts * ScalarBits;
  unsigned NumLanes = VecSize / 128;
  unsigned NumEltsPerLane = NumElts / NumLanes;
  assert((VecSize == 128 || VecSize == 256 || VecSize == 512) &&
         "Unexpected vector size");
  assert((ScalarBits == 32 || ScalarBits == 64) && "Unexpected element size");

  for (unsigned i = 0, e = RawMask.size(); i < e; ++i) {
    uint64_t M = RawMask[i];
    M = (ScalarBits == 64 ? ((M >> 1) & 0x1) : (M & 0x3));
    unsigned LaneOffset = i & ~(NumEltsPerLane - 1);
    ShuffleMask.push_back((int)(LaneOffset + M));
  }
}
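
// For example (illustrative): a variable VPERMILPS on 8 x f32 with the raw
// control vector <3,2,1,0,3,2,1,0> decodes to <3,2,1,0,7,6,5,4> - each
// 128-bit lane is permuted independently.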

void DecodeVPERMIL2PMask(unsigned NumElts, unsigned ScalarBits, unsigned M2Z,
                         ArrayRef<uint64_t> RawMask,
                         SmallVectorImpl<int> &ShuffleMask) {
  unsigned VecSize = NumElts * ScalarBits;
  unsigned NumLanes = VecSize / 128;
  unsigned NumEltsPerLane = NumElts / NumLanes;
  assert((VecSize == 128 || VecSize == 256) && "Unexpected vector size");
  assert((ScalarBits == 32 || ScalarBits == 64) && "Unexpected element size");
  assert((NumElts == RawMask.size()) && "Unexpected mask size");

  for (unsigned i = 0, e = RawMask.size(); i < e; ++i) {
    // VPERMIL2 Operation.
    // Bits[3] - Match Bit.
    // Bits[2:1] - (Per Lane) PD Shuffle Mask.
    // Bits[2:0] - (Per Lane) PS Shuffle Mask.
    uint64_t Selector = RawMask[i];
    unsigned MatchBit = (Selector >> 3) & 0x1;

    // M2Z[0:1]     MatchBit
    //   0Xb           X        Source selected by Selector index.
    //   10b           0        Source selected by Selector index.
    //   10b           1        Zero.
    //   11b           0        Zero.
    //   11b           1        Source selected by Selector index.
    if ((M2Z & 0x2) != 0 && MatchBit != (M2Z & 0x1)) {
      ShuffleMask.push_back(SM_SentinelZero);
      continue;
    }

    int Index = i & ~(NumEltsPerLane - 1);
    if (ScalarBits == 64)
      Index += (Selector >> 1) & 0x1;
    else
      Index += Selector & 0x3;

    int Src = (Selector >> 2) & 0x1;
    Index += Src * NumElts;
    ShuffleMask.push_back(Index);
  }
}

void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
                      SmallVectorImpl<int> &ShuffleMask) {
  uint64_t EltMaskSize = RawMask.size() - 1;
  for (auto M : RawMask) {
    M &= EltMaskSize;
    ShuffleMask.push_back((int)M);
  }
}

void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
                       SmallVectorImpl<int> &ShuffleMask) {
  uint64_t EltMaskSize = (RawMask.size() * 2) - 1;
  for (auto M : RawMask) {
    M &= EltMaskSize;
    ShuffleMask.push_back((int)M);
  }
}

} // llvm namespace