//===-- X86ShuffleDecode.cpp - X86 shuffle decode logic -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Define several functions to decode x86 specific shuffle semantics into a
// generic vector mask.
//
//===----------------------------------------------------------------------===//

#include "X86ShuffleDecode.h"
#include "llvm/IR/Constants.h"
#include "llvm/CodeGen/MachineValueType.h"

//===----------------------------------------------------------------------===//
//  Vector Mask Decoding
//===----------------------------------------------------------------------===//

namespace llvm {

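/// Decode an INSERTPS immediate into a 4-wide shuffle mask. Indices 4-7 refer
/// to the second source and SM_SentinelZero marks zeroed elements.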
void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  // Default to copying the destination value.
  ShuffleMask.push_back(0);
  ShuffleMask.push_back(1);
  ShuffleMask.push_back(2);
  ShuffleMask.push_back(3);

  // Decode the immediate.
  unsigned ZMask = Imm & 15;
  unsigned CountD = (Imm >> 4) & 3;
  unsigned CountS = (Imm >> 6) & 3;

  // CountS selects which input element to use.
  unsigned InVal = 4 + CountS;
  // CountD specifies which element of destination to update.
  ShuffleMask[CountD] = InVal;
  // ZMask zaps values, potentially overriding the CountD elt.
  if (ZMask & 1) ShuffleMask[0] = SM_SentinelZero;
  if (ZMask & 2) ShuffleMask[1] = SM_SentinelZero;
  if (ZMask & 4) ShuffleMask[2] = SM_SentinelZero;
  if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
}

// <3,1> or <6,7,2,3>
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned i = NElts / 2; i != NElts; ++i)
    ShuffleMask.push_back(NElts + i);

  for (unsigned i = NElts / 2; i != NElts; ++i)
    ShuffleMask.push_back(i);
}

// <0,2> or <0,1,4,5>
void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned i = 0; i != NElts / 2; ++i)
    ShuffleMask.push_back(i);

  for (unsigned i = 0; i != NElts / 2; ++i)
    ShuffleMask.push_back(NElts + i);
}

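/// Decode MOVSLDUP: duplicate each even-indexed element of the source
/// (0,0,2,2,...).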
void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();
  for (int i = 0, e = NumElts / 2; i < e; ++i) {
    ShuffleMask.push_back(2 * i);
    ShuffleMask.push_back(2 * i);
  }
}

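/// Decode MOVSHDUP: duplicate each odd-indexed element of the source
/// (1,1,3,3,...).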
void DecodeMOVSHDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();
  for (int i = 0, e = NumElts / 2; i < e; ++i) {
    ShuffleMask.push_back(2 * i + 1);
    ShuffleMask.push_back(2 * i + 1);
  }
}

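/// Decode MOVDDUP: broadcast the low 64-bit element within each 128-bit lane.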
void DecodeMOVDDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned VectorSizeInBits = VT.getSizeInBits();
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VectorSizeInBits / 128;
  unsigned NumLaneElts = NumElts / NumLanes;
  unsigned NumLaneSubElts = 64 / ScalarSizeInBits;

  for (unsigned l = 0; l < NumElts; l += NumLaneElts)
    for (unsigned i = 0; i < NumLaneElts; i += NumLaneSubElts)
      for (unsigned s = 0; s != NumLaneSubElts; s++)
        ShuffleMask.push_back(l + s);
}

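/// Decode PSLLDQ: shift the bytes of each 128-bit lane left by Imm bytes,
/// shifting in zeros.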
void DecodePSLLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  unsigned VectorSizeInBits = VT.getSizeInBits();
  unsigned NumElts = VectorSizeInBits / 8;
  unsigned NumLanes = VectorSizeInBits / 128;
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l < NumElts; l += NumLaneElts)
    for (unsigned i = 0; i < NumLaneElts; ++i) {
      int M = SM_SentinelZero;
      if (i >= Imm) M = i - Imm + l;
      ShuffleMask.push_back(M);
    }
}

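/// Decode PSRLDQ: shift the bytes of each 128-bit lane right by Imm bytes,
/// shifting in zeros.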
void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  unsigned VectorSizeInBits = VT.getSizeInBits();
  unsigned NumElts = VectorSizeInBits / 8;
  unsigned NumLanes = VectorSizeInBits / 128;
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l < NumElts; l += NumLaneElts)
    for (unsigned i = 0; i < NumLaneElts; ++i) {
      unsigned Base = i + Imm;
      int M = Base + l;
      if (Base >= NumLaneElts) M = SM_SentinelZero;
      ShuffleMask.push_back(M);
    }
}

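/// Decode PALIGNR: within each 128-bit lane, element i of the result is read
/// at byte offset i plus the immediate; offsets that run past the end of the
/// lane are redirected to the second source operand.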
void DecodePALIGNRMask(MVT VT, unsigned Imm,
                       SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned Offset = Imm * (VT.getVectorElementType().getSizeInBits() / 8);

  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      unsigned Base = i + Offset;
      // If i+Offset is out of this lane then we actually need the other source.
      if (Base >= NumLaneElts) Base += NumElts - NumLaneElts;
      ShuffleMask.push_back(Base + l);
    }
  }
}

/// DecodePSHUFMask - This decodes the shuffle masks for pshufw, pshufd, and
/// vpermilp*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();

  unsigned NumLanes = VT.getSizeInBits() / 128;
  if (NumLanes == 0) NumLanes = 1; // Handle MMX
  unsigned NumLaneElts = NumElts / NumLanes;

  unsigned NewImm = Imm;
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      ShuffleMask.push_back(NewImm % NumLaneElts + l);
      NewImm /= NumLaneElts;
    }
    if (NumLaneElts == 4) NewImm = Imm; // reload imm
  }
}

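/// Decode PSHUFHW: the low four words of each 128-bit lane are copied and the
/// high four words are permuted by the immediate.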
void DecodePSHUFHWMask(MVT VT, unsigned Imm,
                       SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();

  for (unsigned l = 0; l != NumElts; l += 8) {
    unsigned NewImm = Imm;
    for (unsigned i = 0, e = 4; i != e; ++i) {
      ShuffleMask.push_back(l + i);
    }
    for (unsigned i = 4, e = 8; i != e; ++i) {
      ShuffleMask.push_back(l + 4 + (NewImm & 3));
      NewImm >>= 2;
    }
  }
}

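/// Decode PSHUFLW: the low four words of each 128-bit lane are permuted by
/// the immediate and the high four words are copied.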
void DecodePSHUFLWMask(MVT VT, unsigned Imm,
                       SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();

  for (unsigned l = 0; l != NumElts; l += 8) {
    unsigned NewImm = Imm;
    for (unsigned i = 0, e = 4; i != e; ++i) {
      ShuffleMask.push_back(l + (NewImm & 3));
      NewImm >>= 2;
    }
    for (unsigned i = 4, e = 8; i != e; ++i) {
      ShuffleMask.push_back(l + i);
    }
  }
}

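/// Decode PSWAPD (3DNow!): swap the two halves of the vector.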
void DecodePSWAPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumHalfElts = NumElts / 2;

  for (unsigned l = 0; l != NumHalfElts; ++l)
    ShuffleMask.push_back(l + NumHalfElts);
  for (unsigned h = 0; h != NumHalfElts; ++h)
    ShuffleMask.push_back(h);
}

/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
/// the type of the vector allowing it to handle different datatypes and vector
/// widths.
void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();

  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumLaneElts = NumElts / NumLanes;

  unsigned NewImm = Imm;
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    // Each half of a lane comes from a different source.
    for (unsigned s = 0; s != NumElts * 2; s += NumElts) {
      for (unsigned i = 0; i != NumLaneElts / 2; ++i) {
        ShuffleMask.push_back(NewImm % NumLaneElts + s + l);
        NewImm /= NumLaneElts;
      }
    }
    if (NumLaneElts == 4) NewImm = Imm; // reload imm
  }
}

/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
/// and punpckh*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits() / 128;
  if (NumLanes == 0) NumLanes = 1; // Handle MMX
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = l + NumLaneElts / 2, e = l + NumLaneElts; i != e; ++i) {
      ShuffleMask.push_back(i);           // Reads from dest/src1
      ShuffleMask.push_back(i + NumElts); // Reads from src/src2
    }
  }
}

/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
/// and punpckl*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits() / 128;
  if (NumLanes == 0) NumLanes = 1; // Handle MMX
  unsigned NumLaneElts = NumElts / NumLanes;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = l, e = l + NumLaneElts / 2; i != e; ++i) {
      ShuffleMask.push_back(i);           // Reads from dest/src1
      ShuffleMask.push_back(i + NumElts); // Reads from src/src2
    }
  }
}

/// \brief Decode a shuffle of packed values at 128-bit granularity
/// (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2) from its immediate mask into a
/// shuffle mask.
void decodeVSHUF64x2FamilyMask(MVT VT, unsigned Imm,
                               SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumElementsInLane = 128 / VT.getScalarSizeInBits();
  unsigned ControlBitsMask = NumLanes - 1;
  unsigned NumControlBits = NumLanes / 2;

  for (unsigned l = 0; l != NumLanes; ++l) {
    unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
    // We actually need the other source.
    if (l >= NumLanes / 2)
      LaneMask += NumLanes;
    for (unsigned i = 0; i != NumElementsInLane; ++i)
      ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
  }
}

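/// Decode VPERM2F128/VPERM2I128: each 128-bit half of the result is selected
/// from one of the four source halves by a 2-bit field of the immediate, or
/// zeroed if bit 3 of that field is set.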
void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
                          SmallVectorImpl<int> &ShuffleMask) {
  unsigned HalfSize = VT.getVectorNumElements() / 2;

  for (unsigned l = 0; l != 2; ++l) {
    unsigned HalfMask = Imm >> (l * 4);
    unsigned HalfBegin = (HalfMask & 0x3) * HalfSize;
    for (unsigned i = HalfBegin, e = HalfBegin + HalfSize; i != e; ++i)
      ShuffleMask.push_back(HalfMask & 8 ? SM_SentinelZero : (int)i);
  }
}

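/// Decode a PSHUFB mask stored in the constant pool into a byte shuffle mask.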
void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
  Type *MaskTy = C->getType();
  // It is not an error for the PSHUFB mask to not be a vector of i8 because
  // the constant pool uniques constants by their bit representation.
  // e.g. the following take up the same space in the constant pool:
  //   i128 -170141183420855150465331762880109871104
  //
  //   <2 x i64> <i64 -9223372034707292160, i64 -9223372034707292160>
  //
  //   <4 x i32> <i32 -2147483648, i32 -2147483648,
  //              i32 -2147483648, i32 -2147483648>

  unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();

  if (MaskTySize != 128 && MaskTySize != 256) // FIXME: Add support for AVX-512.
    return;

  // This is a straightforward byte vector.
  if (MaskTy->isVectorTy() && MaskTy->getVectorElementType()->isIntegerTy(8)) {
    int NumElements = MaskTy->getVectorNumElements();
    ShuffleMask.reserve(NumElements);

    for (int i = 0; i < NumElements; ++i) {
      // For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
      // lane of the vector we're inside.
      int Base = i < 16 ? 0 : 16;
      Constant *COp = C->getAggregateElement(i);
      if (!COp) {
        ShuffleMask.clear();
        return;
      } else if (isa<UndefValue>(COp)) {
        ShuffleMask.push_back(SM_SentinelUndef);
        continue;
      }
      uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
      // If the high bit (7) of the byte is set, the element is zeroed.
      if (Element & (1 << 7))
        ShuffleMask.push_back(SM_SentinelZero);
      else {
        // Only the least significant 4 bits of the byte are used.
        int Index = Base + (Element & 0xf);
        ShuffleMask.push_back(Index);
      }
    }
  }
  // TODO: Handle funny-looking vectors too.
}

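/// Decode a raw PSHUFB mask (one control byte per element) into a byte
/// shuffle mask.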
void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
                      SmallVectorImpl<int> &ShuffleMask) {
  for (int i = 0, e = RawMask.size(); i < e; ++i) {
    uint64_t M = RawMask[i];
    if (M == (uint64_t)SM_SentinelUndef) {
      ShuffleMask.push_back(M);
      continue;
    }
    // For AVX vectors with 32 bytes the base of the shuffle is the half of
    // the vector we're inside.
    int Base = i < 16 ? 0 : 16;
    // If the high bit (7) of the byte is set, the element is zeroed.
    if (M & (1 << 7))
      ShuffleMask.push_back(SM_SentinelZero);
    else {
      // Only the least significant 4 bits of the byte are used.
      int Index = Base + (M & 0xf);
      ShuffleMask.push_back(Index);
    }
  }
}

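/// Decode an immediate blend (BLENDI): each element is taken from the second
/// source if its immediate bit is set, otherwise from the first source.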
void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  int ElementBits = VT.getScalarSizeInBits();
  int NumElements = VT.getVectorNumElements();
  for (int i = 0; i < NumElements; ++i) {
    // If there are more than 8 elements in the vector, then any immediate blend
    // mask applies to each 128-bit lane. There can never be more than
    // 8 elements in a 128-bit lane with an immediate blend.
    int Bit = NumElements > 8 ? i % (128 / ElementBits) : i;
    assert(Bit < 8 &&
           "Immediate blends only operate over 8 elements at a time!");
    ShuffleMask.push_back(((Imm >> Bit) & 1) ? NumElements + i : i);
  }
}

/// DecodeVPERMMask - This decodes the shuffle masks for VPERMQ/VPERMPD.
/// No VT provided since it only works on 256-bit, 4 element vectors.
void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
  for (unsigned i = 0; i != 4; ++i) {
    ShuffleMask.push_back((Imm >> (2 * i)) & 3);
  }
}

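/// Decode a VPERMILP variable mask stored in the constant pool.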
void DecodeVPERMILPMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
  Type *MaskTy = C->getType();
  assert(MaskTy->isVectorTy() && "Expected a vector constant mask!");
  assert(MaskTy->getVectorElementType()->isIntegerTy() &&
         "Expected integer constant mask elements!");
  int ElementBits = MaskTy->getScalarSizeInBits();
  int NumElements = MaskTy->getVectorNumElements();
  assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
         "Unexpected number of vector elements.");
  ShuffleMask.reserve(NumElements);
  if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
    assert((unsigned)NumElements == CDS->getNumElements() &&
           "Constant mask has a different number of elements!");

    for (int i = 0; i < NumElements; ++i) {
      int Base = (i * ElementBits / 128) * (128 / ElementBits);
      uint64_t Element = CDS->getElementAsInteger(i);
      // Only the least significant 2 bits of the integer are used.
      int Index = Base + (Element & 0x3);
      ShuffleMask.push_back(Index);
    }
  } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
    assert((unsigned)NumElements == C->getNumOperands() &&
           "Constant mask has a different number of elements!");

    for (int i = 0; i < NumElements; ++i) {
      int Base = (i * ElementBits / 128) * (128 / ElementBits);
      Constant *COp = CV->getOperand(i);
      if (isa<UndefValue>(COp)) {
        ShuffleMask.push_back(SM_SentinelUndef);
        continue;
      }
      uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
      // Only the least significant 2 bits of the integer are used.
      int Index = Base + (Element & 0x3);
      ShuffleMask.push_back(Index);
    }
  }
}

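/// Decode a zero-extension pattern: each source element is followed by enough
/// zero elements to fill the wider destination element.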
void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT, SmallVectorImpl<int> &Mask) {
  unsigned NumDstElts = DstVT.getVectorNumElements();
  unsigned SrcScalarBits = SrcVT.getScalarSizeInBits();
  unsigned DstScalarBits = DstVT.getScalarSizeInBits();
  unsigned Scale = DstScalarBits / SrcScalarBits;
  assert(SrcScalarBits < DstScalarBits &&
         "Expected zero extension mask to increase scalar size");
  assert(SrcVT.getVectorNumElements() >= NumDstElts &&
         "Too many zero extension lanes");

  for (unsigned i = 0; i != NumDstElts; i++) {
    Mask.push_back(i);
    for (unsigned j = 1; j != Scale; j++)
      Mask.push_back(SM_SentinelZero);
  }
}

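/// Decode a move with zero extension of the low element: element 0 is kept
/// and all higher elements are zeroed.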
void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
  unsigned NumElts = VT.getVectorNumElements();
  ShuffleMask.push_back(0);
  for (unsigned i = 1; i < NumElts; i++)
    ShuffleMask.push_back(SM_SentinelZero);
}

void DecodeScalarMoveMask(MVT VT, bool IsLoad, SmallVectorImpl<int> &Mask) {
  // First element comes from the first element of second source.
  // Remaining elements: Load zero extends / Move copies from first source.
  unsigned NumElts = VT.getVectorNumElements();
  Mask.push_back(NumElts);
  for (unsigned i = 1; i < NumElts; i++)
    Mask.push_back(IsLoad ? static_cast<int>(SM_SentinelZero) : i);
}

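/// Decode the (length, index) immediates of an EXTRQ instruction into a
/// 16-byte shuffle mask.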
void DecodeEXTRQIMask(int Len, int Idx,
                      SmallVectorImpl<int> &ShuffleMask) {
  // Only the bottom 6 bits are valid for each immediate.
  Len &= 0x3F;
  Idx &= 0x3F;

  // We can only decode this bit extraction instruction as a shuffle if both the
  // length and index work with whole bytes.
  if (0 != (Len % 8) || 0 != (Idx % 8))
    return;

  // A length of zero is equivalent to a bit length of 64.
  if (Len == 0)
    Len = 64;

  // If the length + index exceeds the bottom 64 bits the result is undefined.
  if ((Len + Idx) > 64) {
    ShuffleMask.append(16, SM_SentinelUndef);
    return;
  }

  // Convert length and index to work with bytes.
  Len /= 8;
  Idx /= 8;

  // EXTRQ: Extract Len bytes starting from Idx. Zero pad the remaining bytes
  // of the lower 64-bits. The upper 64-bits are undefined.
  for (int i = 0; i != Len; ++i)
    ShuffleMask.push_back(i + Idx);
  for (int i = Len; i != 8; ++i)
    ShuffleMask.push_back(SM_SentinelZero);
  for (int i = 8; i != 16; ++i)
    ShuffleMask.push_back(SM_SentinelUndef);
}

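/// Decode the (length, index) immediates of an INSERTQ instruction into a
/// 16-byte shuffle mask.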
void DecodeINSERTQIMask(int Len, int Idx,
                        SmallVectorImpl<int> &ShuffleMask) {
  // Only the bottom 6 bits are valid for each immediate.
  Len &= 0x3F;
  Idx &= 0x3F;

  // We can only decode this bit insertion instruction as a shuffle if both the
  // length and index work with whole bytes.
  if (0 != (Len % 8) || 0 != (Idx % 8))
    return;

  // A length of zero is equivalent to a bit length of 64.
  if (Len == 0)
    Len = 64;

  // If the length + index exceeds the bottom 64 bits the result is undefined.
  if ((Len + Idx) > 64) {
    ShuffleMask.append(16, SM_SentinelUndef);
    return;
  }

  // Convert length and index to work with bytes.
  Len /= 8;
  Idx /= 8;

  // INSERTQ: Extract lowest Len bytes from lower half of second source and
  // insert over first source starting at Idx byte. The upper 64-bits are
  // undefined.
  for (int i = 0; i != Idx; ++i)
    ShuffleMask.push_back(i);
  for (int i = 0; i != Len; ++i)
    ShuffleMask.push_back(i + 16);
  for (int i = Idx + Len; i != 8; ++i)
    ShuffleMask.push_back(i);
  for (int i = 8; i != 16; ++i)
    ShuffleMask.push_back(SM_SentinelUndef);
}

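/// Decode a raw VPERMV (variable permute) mask: each mask element is the
/// source index for the corresponding result element.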
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
                      SmallVectorImpl<int> &ShuffleMask) {
  for (int i = 0, e = RawMask.size(); i < e; ++i) {
    uint64_t M = RawMask[i];
    ShuffleMask.push_back((int)M);
  }
}

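/// Decode a raw VPERMV3 (two-source variable permute) mask: indices may refer
/// to either source operand.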
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
                       SmallVectorImpl<int> &ShuffleMask) {
  for (int i = 0, e = RawMask.size(); i < e; ++i) {
    uint64_t M = RawMask[i];
    ShuffleMask.push_back((int)M);
  }
}

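/// Decode a VPERMV mask stored in the constant pool. A scalar constant is
/// treated as a broadcast of that index.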
void DecodeVPERMVMask(const Constant *C, MVT VT,
                      SmallVectorImpl<int> &ShuffleMask) {
  Type *MaskTy = C->getType();
  if (MaskTy->isVectorTy()) {
    unsigned NumElements = MaskTy->getVectorNumElements();
    if (NumElements == VT.getVectorNumElements()) {
      for (unsigned i = 0; i < NumElements; ++i) {
        Constant *COp = C->getAggregateElement(i);
        if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp))) {
          ShuffleMask.clear();
          return;
        }
        if (isa<UndefValue>(COp))
          ShuffleMask.push_back(SM_SentinelUndef);
        else {
          uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
          Element &= (1 << NumElements) - 1;
          ShuffleMask.push_back(Element);
        }
      }
    }
    return;
  }
  // Scalar value; just broadcast it.
  if (!isa<ConstantInt>(C))
    return;
  uint64_t Element = cast<ConstantInt>(C)->getZExtValue();
  int NumElements = VT.getVectorNumElements();
  Element &= (1 << NumElements) - 1;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask.push_back(Element);
}

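/// Decode a VPERMV3 mask stored in the constant pool.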
void DecodeVPERMV3Mask(const Constant *C, MVT VT,
                       SmallVectorImpl<int> &ShuffleMask) {
  Type *MaskTy = C->getType();
  unsigned NumElements = MaskTy->getVectorNumElements();
  if (NumElements == VT.getVectorNumElements()) {
    for (unsigned i = 0; i < NumElements; ++i) {
      Constant *COp = C->getAggregateElement(i);
      if (!COp) {
        ShuffleMask.clear();
        return;
      }
      if (isa<UndefValue>(COp))
        ShuffleMask.push_back(SM_SentinelUndef);
      else {
        uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
        Element &= (1 << NumElements * 2) - 1;
        ShuffleMask.push_back(Element);
      }
    }
  }
}
} // llvm namespace