//===-- X86InstrAVX512.td - AVX512 Instruction Set ---------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 AVX512 instruction set, defining the
// instructions, and properties of the instructions which are needed for code
// generation, machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

// Group template arguments that can be derived from the vector type (EltNum x
// EltVT). These are things like the register class for the writemask, etc.
// The idea is to pass one of these as the template argument rather than the
// individual arguments.
// The template is also used for scalar types, in this case numelts is 1.
class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
                      string suffix = ""> {
  // The vector register class holding the value (VR512 / VR256X / VR128X).
  RegisterClass RC = rc;
  // Element value type, e.g. i32 for v16i32.
  ValueType EltVT = eltvt;
  // Number of elements; 1 for the scalar pseudo-vector forms below.
  int NumElts = numelts;

  // Corresponding mask register class.
  RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);

  // Corresponding write-mask register class.
  RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");

  // The GPR register class that can hold the write mask. Use GR8 for fewer
  // than 8 elements. Use shift-right and equal to work around the lack of
  // !lt in tablegen.
  RegisterClass MRC =
    !cast<RegisterClass>("GR" #
                         !if (!eq (!srl(NumElts, 3), 0), 8, NumElts));

  // Suffix used in the instruction mnemonic.
  string Suffix = suffix;

  // VTName is a string name for vector VT. For vector types it will be
  // v # NumElts # EltVT, so for vector of 8 elements of i32 it will be v8i32
  // It is a little bit complex for scalar types, where NumElts = 1.
  // In this case we build v4f32 or v2f64
  string VTName = "v" # !if (!eq (NumElts, 1),
                        !if (!eq (EltVT.Size, 32), 4,
                        !if (!eq (EltVT.Size, 64), 2, NumElts)), NumElts) # EltVT;

  // The vector VT.
  ValueType VT = !cast<ValueType>(VTName);

  string EltTypeName = !cast<string>(EltVT);
  // Size of the element type in bits, e.g. 32 for v16i32.
  string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
  int EltSize = EltVT.Size;

  // "i" for integer types and "f" for floating-point types
  string TypeVariantName = !subst(EltSizeName, "", EltTypeName);

  // Size of RC in bits, e.g. 512 for VR512.
  int Size = VT.Size;

  // The corresponding memory operand, e.g. i512mem for VR512.
  X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
  // Memory operand for a single element, e.g. f32mem.
  X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");

  // Load patterns
  // Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
  // due to load promotion during legalization
  PatFrag LdFrag = !cast<PatFrag>("load" #
                                  !if (!eq (TypeVariantName, "i"),
                                       !if (!eq (Size, 128), "v2i64",
                                       !if (!eq (Size, 256), "v4i64",
                                            VTName)), VTName));

  // Aligned-load equivalent of LdFrag; 512-bit integer vectors additionally
  // pick v8i64/v16i32 by element size.
  PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" #
                                         !if (!eq (TypeVariantName, "i"),
                                              !if (!eq (Size, 128), "v2i64",
                                              !if (!eq (Size, 256), "v4i64",
                                              !if (!eq (Size, 512),
                                                   !if (!eq (EltSize, 64), "v8i64", "v16i32"),
                                                   VTName))), VTName));

  // Scalar load of one element, e.g. loadf32.
  PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);

  // The corresponding float type, e.g. v16f32 for v16i32
  // Note: For EltSize < 32, FloatVT is illegal and TableGen
  //       fails to compile, so we choose FloatVT = VT
  ValueType FloatVT = !cast<ValueType>(
                        !if (!eq (!srl(EltSize,5),0),
                             VTName,
                             !if (!eq(TypeVariantName, "i"),
                                  "v" # NumElts # "f" # EltSize,
                                  VTName)));

  // The string to specify embedded broadcast in assembly.
  string BroadcastStr = "{1to" # NumElts # "}";

  // 8-bit compressed displacement tuple/subvector format. This is only
  // defined for NumElts <= 8.
  CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
                               !cast<CD8VForm>("CD8VT" # NumElts), ?);

  // Subregister index to extract this width from a wider register;
  // undefined (?) for 512-bit.
  SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
                          !if (!eq (Size, 256), sub_ymm, ?));

  // Execution domain keyed off the element type name.
  Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
                     !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
                     SSEPackedInt));

  // Scalar FP register class matching the element type.
  RegisterClass FRC = !if (!eq (EltTypeName, "f32"), FR32X, FR64X);

  // A vector type of the same width with element type i32.  This is used to
  // create the canonical constant zero node ImmAllZerosV.
  ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
  dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));

  // Suffix appended to instruction base names to select the vector-length
  // variant: Z128/Z256 for the VLX forms, Z for the 512-bit form.
  string ZSuffix = !if (!eq (Size, 128), "Z128",
                   !if (!eq (Size, 256), "Z256", "Z"));
}

def v64i8_info  : X86VectorVTInfo<64,  i8, VR512, "b">;
def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
def v8i64_info  : X86VectorVTInfo<8,  i64, VR512, "q">;
def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
def v8f64_info  : X86VectorVTInfo<8,  f64, VR512, "pd">;

// "x" in v32i8x_info means RC = VR256X
def v32i8x_info  : X86VectorVTInfo<32,  i8, VR256X, "b">;
def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
def v8i32x_info  : X86VectorVTInfo<8,  i32, VR256X, "d">;
def v4i64x_info  : X86VectorVTInfo<4,  i64, VR256X, "q">;
def v8f32x_info  : X86VectorVTInfo<8,  f32, VR256X, "ps">;
def v4f64x_info  : X86VectorVTInfo<4,  f64, VR256X, "pd">;

def v16i8x_info  : X86VectorVTInfo<16,  i8, VR128X, "b">;
def v8i16x_info  : X86VectorVTInfo<8,  i16, VR128X, "w">;
def v4i32x_info  : X86VectorVTInfo<4,  i32, VR128X, "d">;
def v2i64x_info  : X86VectorVTInfo<2,  i64, VR128X, "q">;
def v4f32x_info  : X86VectorVTInfo<4,  f32, VR128X, "ps">;
def v2f64x_info  : X86VectorVTInfo<2,  f64, VR128X, "pd">;

// We map scalar types to the smallest (128-bit) vector type
// with the appropriate element type. This allows to use the same masking logic.
def f32x_info    : X86VectorVTInfo<1,  f32, VR128X, "ss">;
def f64x_info    : X86VectorVTInfo<1,  f64, VR128X, "sd">;

// Bundles the 512/256/128-bit X86VectorVTInfo records for one element type so
// a single template argument can drive all three vector lengths.
class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
                           X86VectorVTInfo i128> {
  X86VectorVTInfo info512 = i512;
  X86VectorVTInfo info256 = i256;
  X86VectorVTInfo info128 = i128;
}

def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
                                             v16i8x_info>;
def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
                                             v8i16x_info>;
def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
                                             v4i32x_info>;
def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
                                             v2i64x_info>;
def avx512vl_f32_info : AVX512VLVectorVTInfo<v16f32_info, v8f32x_info,
                                             v4f32x_info>;
def avx512vl_f64_info : AVX512VLVectorVTInfo<v8f64_info, v4f64x_info,
                                             v2f64x_info>;

// This multiclass generates the masking variants from the non-masking
// variant. It only provides the assembly pieces for the masking variants.
// It assumes custom ISel patterns for masking which can be provided as
// template arguments.
multiclass AVX512_maskable_custom<bits<8> O, Format F,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern,
                                  list<dag> MaskingPattern,
                                  list<dag> ZeroMaskingPattern,
                                  string Round = "",
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> {
  // Unmasked variant: NAME.
  let isCommutable = IsCommutable in
    def NAME: AVX512<O, F, Outs, Ins,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst "#Round#"|"#
                                     "$dst "#Round#", "#IntelSrcAsm#"}",
                       Pattern, itin>;

  // Merge-masking variant: NAMEk, with "{${mask}}" in the asm string.
  // Prefer over VMOV*rrk Pat<>
  let AddedComplexity = 20 in
    def NAME#k: AVX512<O, F, Outs, MaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}"#Round#"|"#
                                     "$dst {${mask}}"#Round#", "#IntelSrcAsm#"}",
                       MaskingPattern, itin>,
              EVEX_K {
      // In case of the 3src subclass this is overridden with a let.
      string Constraints = MaskingConstraint;
  }
  // Zero-masking variant: NAMEkz, with "{${mask}} {z}" in the asm string.
  let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
    def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}"#Round#"|"#
                                     "$dst {${mask}} {z}"#Round#", "#IntelSrcAsm#"}",
                       ZeroMaskingPattern,
                       itin>,
              EVEX_KZ;
}


// Common base class of AVX512_maskable and AVX512_maskable_3src.
// Common base class of AVX512_maskable and AVX512_maskable_3src.  Builds the
// three pattern lists (unmasked RHS, merge-masking MaskingRHS, and
// zero-masking via Select of RHS against ImmAllZerosV) and forwards
// everything to AVX512_maskable_custom.
multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, dag MaskingRHS,
                                  SDNode Select = vselect, string Round = "",
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> :
  AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
                         AttSrcAsm, IntelSrcAsm,
                         [(set _.RC:$dst, RHS)],
                         [(set _.RC:$dst, MaskingRHS)],
                         [(set _.RC:$dst,
                               (Select _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
                         // Forward the caller's itinerary.  Previously this
                         // passed NoItinerary unconditionally, so the itin
                         // template parameter was accepted but silently
                         // dropped for all generated instructions.
                         Round, MaskingConstraint, itin, IsCommutable>;

// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the vector instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           dag RHS, string Round = "",
                           InstrItinClass itin = NoItinerary,
                           bit IsCommutable = 0> :
   AVX512_maskable_common<O, F, _, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (vselect _.KRCWM:$mask, RHS, _.RC:$src0), vselect,
                          Round, "$src0 = $dst", itin, IsCommutable>;

// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the scalar instruction.
// Scalar variant of AVX512_maskable: identical structure but selects with
// X86select instead of vselect.
multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs, dag Ins, string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, string Round = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> :
   AVX512_maskable_common<O, F, _, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (X86select _.KRCWM:$mask, RHS, _.RC:$src0), X86select,
                          Round, "$src0 = $dst", itin, IsCommutable>;

// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
// vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
// $src1.
multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
                                dag Outs, dag NonTiedIns, string OpcodeStr,
                                string AttSrcAsm, string IntelSrcAsm,
                                dag RHS> :
   AVX512_maskable_common<O, F, _, Outs,
                          !con((ins _.RC:$src1), NonTiedIns),
                          !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                          !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (vselect _.KRCWM:$mask, RHS, _.RC:$src1)>;


// Assembly-only variant: emits the masked/zero-masked forms with empty
// masking pattern lists, so codegen patterns for the masked forms must be
// supplied separately by the user (see e.g. vextract_for_size).
multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs, dag Ins,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern> :
   AVX512_maskable_custom<O, F, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [], "",
                          "$src0 = $dst">;

// Bitcasts between 512-bit vector types.
// Return the original type since no instruction is needed for the conversion.
let Predicates = [HasAVX512] in {
  def : Pat<(v8f64  (bitconvert (v8i64 VR512:$src))),  (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v64i8 VR512:$src))),  (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v64i8 VR512:$src))),  (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v8f64 VR512:$src))),  (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))),  (v32i16 VR512:$src)>;
  // NOTE: the v32i16<-v16f32 pattern was previously duplicated on two
  // consecutive lines; the redundant copy has been removed.
  def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v8i64 VR512:$src))),  (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v8f64 VR512:$src))),  (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;

  // Bitcasts between 128-bit vector types, likewise free.
  def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion
  def : Pat<(v4f64  (bitconvert (v8f32 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))),  (v16i16 VR256X:$src)>;
}

//
// AVX-512: VPXOR instruction writes zero to its upper part, so it's safe to
// build zeros.
//

// Single rematerializable pseudo producing a 512-bit all-zeros value; the
// other 512-bit zero types below are pattern-mapped onto the same pseudo.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX512] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
               [(set VR512:$dst, (v16f32 immAllZerosV))]>;
}

let Predicates = [HasAVX512] in {
def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
}

//===----------------------------------------------------------------------===//
// AVX-512 - VECTOR INSERT
//

// Register and memory forms of VINSERT{F,I}<size>x<n>, without the
// alternative-type codegen pattern (used directly for the DQI-only forms).
// The memory form intentionally carries no pattern.
multiclass vinsert_for_size_no_alt<int Opcode,
                                   X86VectorVTInfo From, X86VectorVTInfo To,
                                   PatFrag vinsert_insert,
                                   SDNodeXForm INSERT_get_vinsert_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    def rr : AVX512AIi8<Opcode, MRMSrcReg, (outs VR512:$dst),
               (ins VR512:$src1, From.RC:$src2, u8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                                                "\t{$src3, $src2, $src1, $dst|"
                                                   "$dst, $src1, $src2, $src3}",
               [(set To.RC:$dst, (vinsert_insert:$src3 (To.VT VR512:$src1),
                                                       (From.VT From.RC:$src2),
                                                       (iPTR imm)))]>,
             EVEX_4V, EVEX_V512;

    let mayLoad = 1 in
    def rm : AVX512AIi8<Opcode, MRMSrcMem, (outs VR512:$dst),
               (ins VR512:$src1, From.MemOp:$src2, u8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                                                "\t{$src3, $src2, $src1, $dst|"
                                                   "$dst, $src1, $src2, $src3}",
               []>,
             EVEX_4V, EVEX_V512, EVEX_CD8<From.EltSize, From.CD8TupleForm>;
  }
}

// As above, plus a codegen pattern mapping the alternative (same-width,
// other-element-size) types onto the same instruction.
multiclass vinsert_for_size<int Opcode,
                            X86VectorVTInfo From, X86VectorVTInfo To,
                            X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                            PatFrag vinsert_insert,
                            SDNodeXForm INSERT_get_vinsert_imm> :
  vinsert_for_size_no_alt<Opcode, From, To,
                          vinsert_insert, INSERT_get_vinsert_imm> {
  // Codegen pattern with the alternative types, e.g. v2i64 -> v8i64 for
  // vinserti32x4.  Only add this if 64x2 and friends are not supported
  // natively via AVX512DQ.
  let Predicates = [NoDQI] in
    def : Pat<(vinsert_insert:$ins
                 (AltTo.VT VR512:$src1), (AltFrom.VT From.RC:$src2), (iPTR imm)),
              (AltTo.VT (!cast<Instruction>(NAME # From.EltSize # "x4rr")
                            VR512:$src1, From.RC:$src2,
                            (INSERT_get_vinsert_imm VR512:$ins)))>;
}

// Instantiates all four insert widths (32x4, 64x2, 64x4, 32x8) for one
// F/I mnemonic family; the 64x2 and 32x8 forms require AVX512DQ.
multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
                            ValueType EltVT64, int Opcode256> {
  defm NAME # "32x4" : vinsert_for_size<Opcode128,
                                 X86VectorVTInfo< 4, EltVT32, VR128X>,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 vinsert128_insert,
                                 INSERT_get_vinsert128_imm>;
  let Predicates = [HasDQI] in
    defm NAME # "64x2" : vinsert_for_size_no_alt<Opcode128,
                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 vinsert128_insert,
                                 INSERT_get_vinsert128_imm>, VEX_W;
  defm NAME # "64x4" : vinsert_for_size<Opcode256,
                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 X86VectorVTInfo< 8, EltVT32, VR256>,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 vinsert256_insert,
                                 INSERT_get_vinsert256_imm>, VEX_W;
  let Predicates = [HasDQI] in
    defm NAME # "32x8" : vinsert_for_size_no_alt<Opcode256,
                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 vinsert256_insert,
                                 INSERT_get_vinsert256_imm>;
}

defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;

// vinsertps - insert f32 to XMM
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
      (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
      EVEX_4V;
// Memory form folds a scalar f32 load into the insert.
def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
      (ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1,
                           (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                           imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;

//===----------------------------------------------------------------------===//
// AVX-512 VECTOR EXTRACT
//---

// Register (maskable, asm-only masking) and store forms of
// VEXTRACT{F,I}<size>x4, plus alternative-type, subregister-copy and
// intrinsic-lowering patterns.
multiclass vextract_for_size<int Opcode,
                             X86VectorVTInfo From, X86VectorVTInfo To,
                             X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                             PatFrag vextract_extract,
                             SDNodeXForm EXTRACT_get_vextract_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    // Register form; masked variants come from AVX512_maskable_in_asm, so
    // their selection patterns are provided explicitly below.
    defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
                (ins VR512:$src1, u8imm:$idx),
                "vextract" # To.EltTypeName # "x4",
                "$idx, $src1", "$src1, $idx",
                [(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
                                                         (iPTR imm)))]>,
              AVX512AIi8Base, EVEX, EVEX_V512;
    let mayStore = 1 in
    def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
                (ins To.MemOp:$dst, VR512:$src1, u8imm:$src2),
                "vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
                                                 "$dst, $src1, $src2}",
                []>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
  }

  // Codegen pattern with the alternative types, e.g. v8i64 -> v2i64 for
  // vextracti32x4
  def : Pat<(vextract_extract:$ext (AltFrom.VT VR512:$src1), (iPTR imm)),
            (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x4rr")
                          VR512:$src1,
                          (EXTRACT_get_vextract_imm To.RC:$ext)))>;

  // A 128/256-bit subvector extract from the first 512-bit vector position is
  // a subregister copy that needs no instruction.
  def : Pat<(To.VT (extract_subvector (From.VT VR512:$src), (iPTR 0))),
            (To.VT
              (EXTRACT_SUBREG (From.VT VR512:$src), To.SubRegIdx))>;

  // And for the alternative types.
  def : Pat<(AltTo.VT (extract_subvector (AltFrom.VT VR512:$src), (iPTR 0))),
            (AltTo.VT
              (EXTRACT_SUBREG (AltFrom.VT VR512:$src), AltTo.SubRegIdx))>;

  // Intrinsic call with masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.RC:$src0, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrk") To.RC:$src0,
              (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
              VR512:$src1, imm:$idx)>;

  // Intrinsic call with zero-masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrkz")
              (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
              VR512:$src1, imm:$idx)>;

  // Intrinsic call without masking (mask = all-ones).
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
            (!cast<Instruction>(NAME # To.EltSize # "x4rr")
              VR512:$src1, imm:$idx)>;
}

// Instantiates the 32x4 and 64x4 extract widths for one F/I mnemonic family.
multiclass vextract_for_type<ValueType EltVT32, int Opcode32,
                             ValueType EltVT64, int Opcode64> {
  defm NAME # "32x4" : vextract_for_size<Opcode32,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 X86VectorVTInfo< 4, EltVT32, VR128X>,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                 vextract128_extract,
                                 EXTRACT_get_vextract128_imm>;
  defm NAME # "64x4" : vextract_for_size<Opcode64,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 X86VectorVTInfo< 8, EltVT32, VR256>,
                                 vextract256_extract,
                                 EXTRACT_get_vextract256_imm>, VEX_W;
}

defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;

// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;

// Likewise for a 256-bit subvector inserted at position 0: a single
// subregister insert into an undefined 512-bit value.
def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;

// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
      (ins VR128X:$src1, u8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
      EVEX;

// Memory form: stores the selected 32-bit element directly.
def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
      (ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
              addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;

//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---

// Register and memory forms of VBROADCASTSS/SD for one destination width.
// The mnemonic is derived from the suffix ("ps" -> "ss", "pd" -> "sd").
multiclass avx512_fp_broadcast<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
                              ValueType svt, X86VectorVTInfo _> {
  defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                   (ins SrcRC:$src), "vbroadcast"## !subst("p", "s", _.Suffix),
                   "$src", "$src", (_.VT (OpNode (svt SrcRC:$src)))>,
                   T8PD, EVEX;

  let mayLoad = 1 in {
    // Memory form broadcasts a single scalar element from memory.
    defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                     (ins _.ScalarMemOp:$src),
                     "vbroadcast"##!subst("p", "s", _.Suffix), "$src", "$src",
                     (_.VT (OpNode (_.ScalarLdFrag addr:$src)))>,
                     T8PD, EVEX;
  }
}

// Instantiates the 512-bit form unconditionally and the 256-bit form under
// HasVLX; the 128-bit form is instantiated separately below.
multiclass avx512_fp_broadcast_vl<bits<8> opc, SDNode OpNode,
                                  AVX512VLVectorVTInfo _> {
  defm Z  : avx512_fp_broadcast<opc, OpNode, VR128X, _.info128.VT, _.info512>,
                             EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256  : avx512_fp_broadcast<opc, OpNode, VR128X, _.info128.VT,
                                     _.info256>, EVEX_V256;
  }
}

let ExeDomain = SSEPackedSingle in {
  defm VBROADCASTSS  : avx512_fp_broadcast_vl<0x18, X86VBroadcast,
                                        avx512vl_f32_info>, EVEX_CD8<32, CD8VT1>;
  let Predicates = [HasVLX] in {
    defm VBROADCASTSSZ128  : avx512_fp_broadcast<0x18, X86VBroadcast, VR128X,
                                        v4f32, v4f32x_info>, EVEX_V128,
                                        EVEX_CD8<32, CD8VT1>;
  }
}

let ExeDomain = SSEPackedDouble in {
  defm VBROADCASTSD  : avx512_fp_broadcast_vl<0x19, X86VBroadcast,
                                 avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VT1>;
}

// avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
// Later, we can canonicalize broadcast instructions before the ISel phase and
// eliminate additional patterns on ISel.
// SrcRC_v and SrcRC_s are RegisterClasses for vector and scalar
// representations of source
multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
                                X86VectorVTInfo _, RegisterClass SrcRC_v,
                                RegisterClass SrcRC_s> {
  // Unmasked: copy the scalar into the vector class, then broadcast.
  def : Pat<(_.VT (OpNode  (_.EltVT SrcRC_s:$src))),
            (!cast<Instruction>(InstName##"r")
              (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;

  // Masked forms: prefer these over plain broadcast + select.
  let AddedComplexity = 30 in {
    // Merge-masking: unselected lanes come from $src0.
    def : Pat<(_.VT (vselect _.KRCWM:$mask,
                             (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
              (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
               (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;

    // Zero-masking: unselected lanes are zeroed.
    def : Pat<(_.VT(vselect _.KRCWM:$mask,
                            (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
              (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
               (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
  }
}

defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
                            VR128X, FR32X>;
defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
                            VR128X, FR64X>;

let Predicates = [HasVLX] in {
  defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
                              v8f32x_info, VR128X, FR32X>;
  defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
                              v4f32x_info, VR128X, FR32X>;
  defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
                              v4f64x_info, VR128X, FR64X>;
}

// Fold a scalar load directly into the memory form of the broadcast.
def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSZm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDZm addr:$src)>;

// Lower the 512-bit broadcast intrinsics to the memory forms.
def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
          (VBROADCASTSSZm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
          (VBROADCASTSDZm addr:$src)>;

// Integer broadcast from a general-purpose register. Asm-only definitions:
// selection is done via the explicit patterns below.
multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
                                    RegisterClass SrcRC> {
  defm r : AVX512_maskable_in_asm<opc, MRMSrcReg, _, (outs _.RC:$dst),
                           (ins SrcRC:$src),  "vpbroadcast"##_.Suffix,
                           "$src", "$src", []>, T8PD, EVEX;
}

multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
                                       RegisterClass SrcRC, Predicate prd> {
  let Predicates = [prd] in
    defm Z : avx512_int_broadcast_reg<opc, _.info512, SrcRC>, EVEX_V512;
  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_int_broadcast_reg<opc, _.info256, SrcRC>, EVEX_V256;
    defm Z128 : avx512_int_broadcast_reg<opc, _.info128, SrcRC>, EVEX_V128;
  }
}

// Note: D and Q share opcode 0x7C; EVEX.W distinguishes them.
defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR32,
                                                 HasBWI>;
defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR32,
                                                 HasBWI>;
defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info, GR32,
                                                 HasAVX512>;
defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
                                                 HasAVX512>, VEX_W;

// Zero-extend a mask to a vector: broadcast 1 with zero-masking so masked-off
// lanes become 0 and selected lanes become 1.
def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
           (VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;

def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
           (VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;

// Broadcast from GPR, with and without a mask.
def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
          (VPBROADCASTDrZr GR32:$src)>;
def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
          (VPBROADCASTDrZrkz VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
          (VPBROADCASTQrZr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
          (VPBROADCASTQrZrkz VK8WM:$mask, GR64:$src)>;

def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
          (VPBROADCASTDrZr GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
          (VPBROADCASTQrZr GR64:$src)>;

// Masked GPR-broadcast intrinsics with an all-zeros passthru map to the
// zero-masked instruction forms.
def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
                   (v16i32 immAllZerosV), (i16 GR16:$mask))),
          (VPBROADCASTDrZrkz (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
          (VPBROADCASTQrZrkz (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;

// Integer broadcast from an xmm register or from memory, with plain and
// zero-masked (krr/krm) variants.
multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
                          RegisterClass KRC> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set DstRC:$dst,
                    (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
  def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
                                                         VR128X:$src),
                   !strconcat(OpcodeStr,
                    "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                   [(set DstRC:$dst,
                     (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
                   EVEX, EVEX_KZ;
  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                      (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
    def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
                                                           x86memop:$src),
                     !strconcat(OpcodeStr,
                      "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                     [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
                                              (ld_frag addr:$src))))]>,
                     EVEX, EVEX_KZ;
  }
}

defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
                      loadi32, VR512, v16i32, v4i32, VK16WM>,
                      EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
                      loadi64, VR512, v8i64, v2i64, VK8WM>,  EVEX_V512, VEX_W,
                      EVEX_CD8<64, CD8VT1>;

// Sub-vector broadcast (128/256-bit memory tile replicated across zmm).
// No selection patterns here; these are currently asm/encoding only.
multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          RegisterClass KRC> {
  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
    def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
                                                           x86memop:$src),
                     !strconcat(OpcodeStr,
                      "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                     []>, EVEX, EVEX_KZ;
  }
}

defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
                       i128mem, loadv2i64, VK16WM>,
                       EVEX_V512, EVEX_CD8<32, CD8VT4>;
defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
                       i256mem, loadv4i64, VK16WM>, VEX_W,
                       EVEX_V512, EVEX_CD8<64, CD8VT4>;

def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
          (VPBROADCASTDZrr VR128X:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
          (VPBROADCASTQZrr VR128X:$src)>;

// Broadcast of the low element of a 512-bit source: extract the low xmm and
// broadcast that.
def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
          (VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
          (VBROADCASTSDZr (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;

def : Pat<(v16i32 (X86VBroadcast (v16i32 VR512:$src))),
          (VPBROADCASTDZrr (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
def : Pat<(v8i64 (X86VBroadcast (v8i64 VR512:$src))),
          (VPBROADCASTQZrr (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;

def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
          (VBROADCASTSSZr VR128X:$src)>;
def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
          (VBROADCASTSDZr VR128X:$src)>;

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
// Fallback: broadcast from a scalar FP register when the feeding load could
// not be folded (it has other users).
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
          (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
          (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;


let Predicates = [HasAVX512] in {
// 256-bit masked broadcast: widen the mask, use the 512-bit zero-masked
// memory form, and extract the low ymm.
def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
          (EXTRACT_SUBREG
            (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                      addr:$src)), sub_ymm)>;
}
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---

// VPBROADCASTM*: set each destination element to all-ones/all-zeros from the
// corresponding mask bit. Asm/encoding only (no selection patterns).
multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
                                 RegisterClass KRC> {
let Predicates = [HasCDI] in
def Zrr : AVX512XS8I<opc, MRMSrcReg, (outs VR512:$dst), (ins KRC:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  []>, EVEX, EVEX_V512;

let Predicates = [HasCDI, HasVLX] in {
def Z128rr : AVX512XS8I<opc, MRMSrcReg, (outs VR128:$dst), (ins KRC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   []>, EVEX, EVEX_V128;
def Z256rr : AVX512XS8I<opc, MRMSrcReg, (outs VR256:$dst), (ins KRC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   []>, EVEX, EVEX_V256;
}
}

let Predicates = [HasCDI] in {
defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
                                             VK16>;
defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
                                             VK8>, VEX_W;
}

//===----------------------------------------------------------------------===//
// AVX-512 - VPERM
//
// -- immediate form --
// Permute elements within 128-bit lanes / across the vector by an 8-bit
// immediate control.
multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           X86VectorVTInfo _> {
  let ExeDomain = _.ExeDomain in {
  def ri : AVX512AIi8<opc, MRMSrcReg, (outs _.RC:$dst),
                     (ins _.RC:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set _.RC:$dst,
                       (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>,
                     EVEX;
  def mi : AVX512AIi8<opc, MRMSrcMem, (outs _.RC:$dst),
                     (ins _.MemOp:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set _.RC:$dst,
                       (_.VT (OpNode (_.LdFrag addr:$src1),
                              (i8 imm:$src2))))]>,
           EVEX, EVEX_CD8<_.EltSize, CD8VF>;
}
}

// VPERMILPS/PD: immediate form (inherited) plus variable-control form where
// the per-element control comes from an integer vector.
multiclass avx512_permil<bits<8> OpcImm, bits<8> OpcVar, X86VectorVTInfo _,
                         X86VectorVTInfo Ctrl> :
     avx512_perm_imm<OpcImm, "vpermil" # _.Suffix, X86VPermilpi, _> {
  let ExeDomain = _.ExeDomain in {
  def rr : AVX5128I<OpcVar, MRMSrcReg, (outs _.RC:$dst),
                   (ins _.RC:$src1, _.RC:$src2),
                   !strconcat("vpermil" # _.Suffix,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set _.RC:$dst,
                         (_.VT (X86VPermilpv _.RC:$src1,
                                (Ctrl.VT Ctrl.RC:$src2))))]>,
             EVEX_4V;
  def rm : AVX5128I<OpcVar, MRMSrcMem, (outs _.RC:$dst),
                   (ins _.RC:$src1, Ctrl.MemOp:$src2),
                   !strconcat("vpermil" # _.Suffix,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set _.RC:$dst,
                         (_.VT (X86VPermilpv _.RC:$src1,
                                (Ctrl.VT (Ctrl.LdFrag addr:$src2)))))]>,
             EVEX_4V;
  }
}

defm VPERMQZ :    avx512_perm_imm<0x00, "vpermq", X86VPermi, v8i64_info>,
                  EVEX_V512, VEX_W;
defm VPERMPDZ :   avx512_perm_imm<0x01, "vpermpd", X86VPermi, v8f64_info>,
                  EVEX_V512, VEX_W;

defm VPERMILPSZ : avx512_permil<0x04, 0x0C, v16f32_info, v16i32_info>,
                  EVEX_V512;
defm VPERMILPDZ : avx512_permil<0x05, 0x0D, v8f64_info, v8i64_info>,
                  EVEX_V512, VEX_W;

// Integer-typed VPERMILP* immediate patterns reuse the FP instructions.
def : Pat<(v16i32 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
          (VPERMILPSZri VR512:$src1, imm:$imm)>;
def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
          (VPERMILPDZri VR512:$src1, imm:$imm)>;

// -- VPERM - register form --
// Full cross-lane permute where the control is another vector register.
multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                     PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {

  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set RC:$dst,
                     (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;

  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, x86memop:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set RC:$dst,
                     (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
                   EVEX_4V;
}

// D/Q and PS/PD pairs share opcodes; EVEX.W selects the element width.
defm VPERMDZ   : avx512_perm<0x36, "vpermd",  VR512,  loadv16i32, i512mem,
                           v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMQZ   : avx512_perm<0x36, "vpermq",  VR512,  loadv8i64,  i512mem,
                           v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedSingle in
defm VPERMPSZ  : avx512_perm<0x16, "vpermps", VR512, loadv16f32, f512mem,
                           v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMPDZ  : avx512_perm<0x16, "vpermpd", VR512, loadv8f64, f512mem,
                           v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

// -- VPERM2I - 3 source operands form --
// Two-table permute: $src1 doubles as the index/result operand, hence the
// tied-register constraint.
multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            PatFrag mem_frag, X86MemOperand x86memop,
                            SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
let Constraints = "$src1 = $dst" in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst,
                     (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
                    EVEX_4V;

  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst {${mask}}|"
                       "$dst {${mask}}, $src2, $src3}"),
                   [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                           (OpNode RC:$src1, RC:$src2,
                                              RC:$src3),
                                           RC:$src1)))]>,
                    EVEX_4V, EVEX_K;

  let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst {${mask}} {z} |",
                       "$dst {${mask}} {z}, $src2, $src3}"),
                   [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                           (OpNode RC:$src1, RC:$src2,
                                              RC:$src3),
                                           (OpVT (bitconvert
                                              (v16i32 immAllZerosV))))))]>,
                    EVEX_4V, EVEX_KZ;

  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst,
                     (OpVT (OpNode RC:$src1, RC:$src2,
                      (mem_frag addr:$src3))))]>, EVEX_4V;

  def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst {${mask}}|"
                       "$dst {${mask}}, $src2, $src3}"),
                   [(set RC:$dst,
                       (OpVT (vselect KRC:$mask,
                                      (OpNode RC:$src1, RC:$src2,
                                         (mem_frag addr:$src3)),
                                      RC:$src1)))]>,
                    EVEX_4V, EVEX_K;

  let AddedComplexity = 10 in // Prefer over the rrkz variant
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst {${mask}} {z}|"
                       "$dst {${mask}} {z}, $src2, $src3}"),
                   [(set RC:$dst,
                       (OpVT (vselect KRC:$mask,
                                      (OpNode RC:$src1, RC:$src2,
                                         (mem_frag addr:$src3)),
                                      (OpVT (bitconvert
                                         (v16i32 immAllZerosV))))))]>,
                    EVEX_4V, EVEX_KZ;
  }
}
defm VPERMI2D  : avx512_perm_3src<0x76, "vpermi2d",  VR512, loadv16i32,
                                  i512mem, X86VPermiv3, v16i32, VK16WM>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2Q  : avx512_perm_3src<0x76, "vpermi2q",  VR512, loadv8i64,
                                  i512mem, X86VPermiv3, v8i64, VK8WM>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps",  VR512, loadv16f32,
                                  i512mem, X86VPermiv3, v16f32, VK16WM>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd",  VR512, loadv8f64,
                                  i512mem, X86VPermiv3, v8f64, VK8WM>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

// VPERMT2*: same 3-source shape, plus patterns lowering the mask_vpermt
// intrinsics (-1 mask -> unmasked rr, GPR mask -> rrk).
multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
                          PatFrag mem_frag, X86MemOperand x86memop,
                          SDNode OpNode, ValueType OpVT, RegisterClass KRC,
                          ValueType MaskVT, RegisterClass MRC> :
        avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
                         OpVT, KRC> {
  def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
                   VR512:$idx, VR512:$src1, VR512:$src2, -1)),
            (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;

  def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
                   VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
            (!cast<Instruction>(NAME#rrk) VR512:$src1,
              (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
}

defm VPERMT2D  : avx512_perm_table_3src<0x7E, "d",  VR512, loadv16i32, i512mem,
                               X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2Q  : avx512_perm_table_3src<0x7E, "q",  VR512, loadv8i64, i512mem,
                               X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps",  VR512, loadv16f32, i512mem,
                               X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd",  VR512, loadv8f64, i512mem,
                               X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

//===----------------------------------------------------------------------===//
// AVX-512 - BLEND using mask
//
// Only the merge-masked register/memory forms (rrk/rmk) have selection
// patterns; the remaining variants are asm/encoding only.
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
  let ExeDomain = _.ExeDomain in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
             (ins _.RC:$src1, _.RC:$src2),
             !strconcat(OpcodeStr,
             "\t{$src2, $src1, ${dst} |${dst}, $src1, $src2}"),
             []>, EVEX_4V;
  def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
             (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
             !strconcat(OpcodeStr,
             "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
             [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
                 (_.VT _.RC:$src2)))]>, EVEX_4V, EVEX_K;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
             (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
             !strconcat(OpcodeStr,
             "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
             []>, EVEX_4V, EVEX_KZ;
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
             (ins _.RC:$src1, _.MemOp:$src2),
             !strconcat(OpcodeStr,
             "\t{$src2, $src1, ${dst} |${dst}, $src1, $src2}"),
             []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
  def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
             (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
             !strconcat(OpcodeStr,
             "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
             [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
              (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
              EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
             (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
             !strconcat(OpcodeStr,
             "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
             []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
  }
  }
}
// Embedded-broadcast memory forms of the blend.
multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {

  def rmbk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
      (ins _.KRCWM:$mask, _.RC:$src1, _.ScalarMemOp:$src2),
       !strconcat(OpcodeStr,
            "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
            "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
      [(set _.RC:$dst,(X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
                       (X86VBroadcast (_.ScalarLdFrag addr:$src2))))]>,
      EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;

  def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
      (ins _.RC:$src1, _.ScalarMemOp:$src2),
       !strconcat(OpcodeStr,
            "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
            "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
      []>, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;

}

// 32/64-bit element blends: all vector lengths, broadcast forms included.
multiclass blendmask_dq <bits<8> opc, string OpcodeStr,
                         AVX512VLVectorVTInfo VTInfo> {
  defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>,
           avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : avx512_blendmask<opc, OpcodeStr, VTInfo.info256>,
                avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
    defm Z128 : avx512_blendmask<opc, OpcodeStr, VTInfo.info128>,
                avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
  }
}

// 8/16-bit element blends: require BWI; no embedded-broadcast forms.
multiclass blendmask_bw <bits<8> opc, string OpcodeStr,
                         AVX512VLVectorVTInfo VTInfo> {
  let Predicates = [HasBWI] in
    defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;

  let Predicates = [HasBWI, HasVLX] in {
    defm Z256 : avx512_blendmask <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
    defm Z128 : avx512_blendmask <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
  }
}


defm VBLENDMPS : blendmask_dq <0x65, "vblendmps", avx512vl_f32_info>;
defm VBLENDMPD : blendmask_dq <0x65, "vblendmpd", avx512vl_f64_info>, VEX_W;
defm VPBLENDMD : blendmask_dq <0x64, "vpblendmd", avx512vl_i32_info>;
defm VPBLENDMQ : blendmask_dq <0x64, "vpblendmq", avx512vl_i64_info>, VEX_W;
defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;


// Without VLX, lower a 256-bit vselect by widening to 512 bits, blending,
// and extracting the low ymm.
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
                          (v8f32 VR256X:$src2))),
          (EXTRACT_SUBREG
              (v16f32 (VBLENDMPSZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
               (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
               (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;

def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
                          (v8i32 VR256X:$src2))),
          (EXTRACT_SUBREG
              (v16i32 (VPBLENDMDZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
               (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
               (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
}
//===----------------------------------------------------------------------===//
// Compare Instructions
//===----------------------------------------------------------------------===//

// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                             SDNode OpNode, ValueType VT,
                             PatFrag ld_frag, string Suffix> {
  def rr : AVX512Ii8<0xC2, MRMSrcReg,
                (outs VK1:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
                !strconcat("vcmp${cc}", Suffix,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
                IIC_SSE_ALU_F32S_RR>, EVEX_4V;
  def rm : AVX512Ii8<0xC2, MRMSrcMem,
                (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
                !strconcat("vcmp${cc}", Suffix,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VK1:$dst, (OpNode (VT RC:$src1),
                (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  // Accept an explicit immediate condition code instead of the pseudo-op
  // mnemonic (assembler only).
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
                  (outs VK1:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
                  !strconcat("vcmp", Suffix,
                             "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
                  [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
    let mayLoad = 1 in
      def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
                    (outs VK1:$dst),
                    (ins RC:$src1, x86memop:$src2, u8imm:$cc),
                    !strconcat("vcmp", Suffix,
                               "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
                    [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  }
}

let Predicates = [HasAVX512] in {
defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, X86cmpms, f32, loadf32, "ss">,
               XS;
defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, X86cmpms, f64, loadf64, "sd">,
               XD, VEX_W;
}

// Packed integer equality/greater-than compare producing a mask register,
// with merge-masked (rrk/rmk) variants that AND the incoming mask.
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              X86VectorVTInfo _> {
  def rr : AVX512BI<opc, MRMSrcReg,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;
  let mayLoad = 1 in
  def rm : AVX512BI<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert (_.LdFrag addr:$src2)))))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  def rrk : AVX512BI<opc, MRMSrcReg,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
              IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
  let mayLoad = 1 in
  def rmk : AVX512BI<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert
                                              (_.LdFrag addr:$src2))))))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
}

// As above, plus embedded-broadcast memory forms (rmb/rmbk).
multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  X86VectorVTInfo _> :
           avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
  let mayLoad = 1 in {
  def rmb : AVX512BI<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
              !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
                                    "|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
              [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                        (X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
  def rmbk : AVX512BI<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                       _.ScalarMemOp:$src2),
               !strconcat(OpcodeStr,
                          "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
               [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                      (OpNode (_.VT _.RC:$src1),
                                              (X86VBroadcast
                                                (_.ScalarLdFrag addr:$src2)))))],
               IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
}

multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}

multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
                                     SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
                                     Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}

defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
                      avx512vl_i8_info, HasBWI>,
                EVEX_CD8<8, CD8VF>;

defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
                      avx512vl_i16_info, HasBWI>,
                EVEX_CD8<16, CD8VF>;

defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
                      avx512vl_i32_info, HasAVX512>,
                EVEX_CD8<32, CD8VF>;

defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
                      avx512vl_i64_info, HasAVX512>,
                T8PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
                      avx512vl_i8_info, HasBWI>,
                EVEX_CD8<8, CD8VF>;

defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
                      avx512vl_i16_info, HasBWI>,
                EVEX_CD8<16, CD8VF>;

defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
                      avx512vl_i32_info, HasAVX512>,
                EVEX_CD8<32, CD8VF>;

defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
                      avx512vl_i64_info, HasAVX512>,
                T8PD, VEX_W, EVEX_CD8<64, CD8VF>;

// Without VLX, widen 256-bit compares to 512 bits.
def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
            (COPY_TO_REGCLASS (VPCMPGTDZrr
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;

def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
            (COPY_TO_REGCLASS (VPCMPEQDZrr
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;

// Packed integer compare with an explicit condition code (VPCMP/VPCMPU).
multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
                          X86VectorVTInfo _> {
  def rri : AVX512AIi8<opc, MRMSrcReg,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512ICC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                                       imm:$cc))],
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;
  let mayLoad = 1 in
  def rmi : AVX512AIi8<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                               (_.VT (bitconvert (_.LdFrag addr:$src2))),
                               imm:$cc))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  def rrik : AVX512AIi8<opc, MRMSrcReg,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
                                      AVX512ICC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                                             imm:$cc)))],
              IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
  let mayLoad = 1 in
  def rmik : AVX512AIi8<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
                                      AVX512ICC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                       imm:$cc)))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512AIi8<opc, MRMSrcReg,
               (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
               !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
                          "$dst, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
    let mayLoad = 1 in
    def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
               !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
                          "$dst, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
    def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
                                       u8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, $src2, $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
    let mayLoad = 1 in
    def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
                                       u8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, $src2, $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
  }
}

// As above, plus embedded-broadcast memory forms.
multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
                              X86VectorVTInfo _> :
           avx512_icmp_cc<opc, Suffix, OpNode, _> {
  def rmib : AVX512AIi8<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
                                     AVX512ICC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
                        "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                               (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
                               imm:$cc))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
  def rmibk : AVX512AIi8<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                      _.ScalarMemOp:$src2, AVX512ICC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                       (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
                                       imm:$cc)))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 1 in {
    def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
                                       u8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
                          "$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
    def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                       _.ScalarMemOp:$src2, u8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
}

multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
                             AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
    defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
  }
}

multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
                                 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}

// Signed/unsigned pairs share opcodes 0x3F/0x3E and 0x1F/0x1E; element width
// is selected by EVEX.W where applicable.
defm VPCMPB : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
                                HasBWI>, EVEX_CD8<8, CD8VF>;
defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
                                 HasBWI>, EVEX_CD8<8, CD8VF>;

defm VPCMPW : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
                                HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
                                 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;

defm VPCMPD : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
                                    HasAVX512>, EVEX_CD8<32, CD8VF>;
defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
                                     HasAVX512>, EVEX_CD8<32, CD8VF>;

defm VPCMPQ : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
                                    HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
                                     HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;

// avx512_cmp_packed - compare packed instructions
multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
                             X86MemOperand x86memop, ValueType vt,
                             string suffix, Domain d> {
  def rri : AVX512PIi8<0xC2, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
  def rrib: AVX512PIi8<0xC2, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        "\t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
             [], d>, EVEX_B;
  // FIX: the Intel-syntax operand list previously ended with ", $cc", but the
  // condition code is already folded into the "vcmp${cc}" mnemonic here (only
  // the _alt forms, which use the plain "vcmp" mnemonic, spell $cc as an
  // operand). The stray operand duplicated the condition code in the printed
  // Intel syntax and was inconsistent with the rri form above.
  def rmi : AVX512PIi8<0xC2, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst,
              (X86cmpm (vt RC:$src1), (load addr:$src2), imm:$cc))], d>;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
               (outs KRC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
               !strconcat("vcmp", suffix,
                          "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
               [], d>;
    def rrib_alt: AVX512PIi8<0xC2, MRMSrcReg,
               (outs KRC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
               !strconcat("vcmp", suffix,
                          "\t{{sae}, $cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc, {sae}}"),
               [], d>, EVEX_B;
    let mayLoad = 1 in
    def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
               (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
               !strconcat("vcmp", suffix,
                          "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
               [], d>;
  }
}

defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
               "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
               EVEX_CD8<32, CD8VF>;
defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
               "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
               EVEX_CD8<64, CD8VF>;

// Without VLX, widen 256-bit compares to 512 bits and narrow the mask.
def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
          (COPY_TO_REGCLASS (VCMPPSZrri
            (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            imm:$cc), VK8)>;
def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
          (COPY_TO_REGCLASS (VPCMPDZrri
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            imm:$cc), VK8)>;
def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
          (COPY_TO_REGCLASS (VPCMPUDZrri
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2,
sub_ymm)), 1608 imm:$cc), VK8)>; 1609 1610def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1), 1611 (v16f32 VR512:$src2), i8immZExt5:$cc, (i16 -1), 1612 FROUND_NO_EXC)), 1613 (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2, 1614 (I8Imm imm:$cc)), GR16)>; 1615 1616def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1), 1617 (v8f64 VR512:$src2), i8immZExt5:$cc, (i8 -1), 1618 FROUND_NO_EXC)), 1619 (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2, 1620 (I8Imm imm:$cc)), GR8)>; 1621 1622def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1), 1623 (v16f32 VR512:$src2), i8immZExt5:$cc, (i16 -1), 1624 FROUND_CURRENT)), 1625 (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2, 1626 (I8Imm imm:$cc)), GR16)>; 1627 1628def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1), 1629 (v8f64 VR512:$src2), i8immZExt5:$cc, (i8 -1), 1630 FROUND_CURRENT)), 1631 (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2, 1632 (I8Imm imm:$cc)), GR8)>; 1633 1634// Mask register copy, including 1635// - copy between mask registers 1636// - load/store mask registers 1637// - copy from GPR to mask register and vice versa 1638// 1639multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk, 1640 string OpcodeStr, RegisterClass KRC, 1641 ValueType vvt, X86MemOperand x86memop> { 1642 let hasSideEffects = 0 in { 1643 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src), 1644 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; 1645 let mayLoad = 1 in 1646 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src), 1647 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), 1648 [(set KRC:$dst, (vvt (load addr:$src)))]>; 1649 let mayStore = 1 in 1650 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src), 1651 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), 1652 [(store KRC:$src, addr:$dst)]>; 1653 } 1654} 1655 1656multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk, 1657 
string OpcodeStr, 1658 RegisterClass KRC, RegisterClass GRC> { 1659 let hasSideEffects = 0 in { 1660 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src), 1661 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; 1662 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src), 1663 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; 1664 } 1665} 1666 1667let Predicates = [HasDQI] in 1668 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem>, 1669 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>, 1670 VEX, PD; 1671 1672let Predicates = [HasAVX512] in 1673 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>, 1674 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>, 1675 VEX, PS; 1676 1677let Predicates = [HasBWI] in { 1678 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem>, 1679 VEX, PD, VEX_W; 1680 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>, 1681 VEX, XD; 1682} 1683 1684let Predicates = [HasBWI] in { 1685 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>, 1686 VEX, PS, VEX_W; 1687 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>, 1688 VEX, XD, VEX_W; 1689} 1690 1691// GR from/to mask register 1692let Predicates = [HasDQI] in { 1693 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))), 1694 (KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>; 1695 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))), 1696 (EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>; 1697} 1698let Predicates = [HasAVX512] in { 1699 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))), 1700 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>; 1701 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))), 1702 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>; 1703} 1704let Predicates = [HasBWI] in { 1705 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>; 1706 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))), (KMOVDrk VK32:$src)>; 1707} 1708let 
Predicates = [HasBWI] in { 1709 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))), (KMOVQkr GR64:$src)>; 1710 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))), (KMOVQrk VK64:$src)>; 1711} 1712 1713// Load/store kreg 1714let Predicates = [HasDQI] in { 1715 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst), 1716 (KMOVBmk addr:$dst, VK8:$src)>; 1717 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))), 1718 (KMOVBkm addr:$src)>; 1719} 1720let Predicates = [HasAVX512, NoDQI] in { 1721 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst), 1722 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>; 1723 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))), 1724 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>; 1725} 1726let Predicates = [HasAVX512] in { 1727 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst), 1728 (KMOVWmk addr:$dst, VK16:$src)>; 1729 def : Pat<(i1 (load addr:$src)), 1730 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>; 1731 def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))), 1732 (KMOVWkm addr:$src)>; 1733} 1734let Predicates = [HasBWI] in { 1735 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst), 1736 (KMOVDmk addr:$dst, VK32:$src)>; 1737 def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))), 1738 (KMOVDkm addr:$src)>; 1739} 1740let Predicates = [HasBWI] in { 1741 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst), 1742 (KMOVQmk addr:$dst, VK64:$src)>; 1743 def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))), 1744 (KMOVQkm addr:$src)>; 1745} 1746 1747let Predicates = [HasAVX512] in { 1748 def : Pat<(i1 (trunc (i64 GR64:$src))), 1749 (COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit), 1750 (i32 1))), VK1)>; 1751 1752 def : Pat<(i1 (trunc (i32 GR32:$src))), 1753 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>; 1754 1755 def : Pat<(i1 (trunc (i8 GR8:$src))), 1756 (COPY_TO_REGCLASS 1757 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))), 1758 
VK1)>; 1759 def : Pat<(i1 (trunc (i16 GR16:$src))), 1760 (COPY_TO_REGCLASS 1761 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))), 1762 VK1)>; 1763 1764 def : Pat<(i32 (zext VK1:$src)), 1765 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>; 1766 def : Pat<(i8 (zext VK1:$src)), 1767 (EXTRACT_SUBREG 1768 (AND32ri (KMOVWrk 1769 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>; 1770 def : Pat<(i64 (zext VK1:$src)), 1771 (AND64ri8 (SUBREG_TO_REG (i64 0), 1772 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>; 1773 def : Pat<(i16 (zext VK1:$src)), 1774 (EXTRACT_SUBREG 1775 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), 1776 sub_16bit)>; 1777 def : Pat<(v16i1 (scalar_to_vector VK1:$src)), 1778 (COPY_TO_REGCLASS VK1:$src, VK16)>; 1779 def : Pat<(v8i1 (scalar_to_vector VK1:$src)), 1780 (COPY_TO_REGCLASS VK1:$src, VK8)>; 1781} 1782let Predicates = [HasBWI] in { 1783 def : Pat<(v32i1 (scalar_to_vector VK1:$src)), 1784 (COPY_TO_REGCLASS VK1:$src, VK32)>; 1785 def : Pat<(v64i1 (scalar_to_vector VK1:$src)), 1786 (COPY_TO_REGCLASS VK1:$src, VK64)>; 1787} 1788 1789 1790// With AVX-512 only, 8-bit mask is promoted to 16-bit mask. 
// AVX-512 mask-register logic section (reconstructed: stray line numbers that
// had been fused into the text are removed and the original line structure is
// restored; all TableGen tokens are otherwise unchanged).

let Predicates = [HasAVX512] in {
  // GR from/to 8-bit mask without native support: KMOVB does not exist on
  // plain AVX-512, so an 8-bit mask round-trips through the 16-bit KMOVW.
  def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
            (COPY_TO_REGCLASS
             (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
             VK8)>;
  def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
            (EXTRACT_SUBREG
             (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
             sub_8bit)>;

  // Extracting bit 0 of a mask is a plain register-class change: the i1
  // lives in the low bit of the mask register already.
  def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
            (COPY_TO_REGCLASS VK16:$src, VK1)>;
  def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
            (COPY_TO_REGCLASS VK8:$src, VK1)>;
}
let Predicates = [HasBWI] in {
  def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))),
            (COPY_TO_REGCLASS VK32:$src, VK1)>;
  def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))),
            (COPY_TO_REGCLASS VK64:$src, VK1)>;
}

// Mask unary operation
// - KNOT
multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
                            RegisterClass KRC, SDPatternOperator OpNode,
                            Predicate prd> {
  let Predicates = [prd] in
    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [(set KRC:$dst, (OpNode KRC:$src))]>;
}

// Instantiates the b/w/d/q width variants: the byte form needs DQI, the
// word form only AVX512F, and the dword/qword forms need BWI.
multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
                                SDPatternOperator OpNode> {
  defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
                            HasDQI>, VEX, PD;
  defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
                            HasAVX512>, VEX, PS;
  defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
                            HasBWI>, VEX, PD, VEX_W;
  defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
                            HasBWI>, VEX, PS, VEX_W;
}

defm KNOT : avx512_mask_unop_all<0x44, "knot", not>;

// Maps the 16-bit mask intrinsic onto the W-form instruction, copying the
// GR16 operand into VK16 and the VK16 result back out.
multiclass avx512_mask_unop_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
                (i16 GR16:$src)),
              (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
                (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
}
defm : avx512_mask_unop_int<"knot", "KNOT">;

// xor with all-ones is a bitwise NOT of the mask.
let Predicates = [HasDQI] in
def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), (KNOTBrr VK8:$src1)>;
let Predicates = [HasAVX512] in
def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
let Predicates = [HasBWI] in
def : Pat<(xor VK32:$src1, (v32i1 immAllOnesV)), (KNOTDrr VK32:$src1)>;
let Predicates = [HasBWI] in
def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;

// KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
let Predicates = [HasAVX512, NoDQI] in {
def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
          (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;

def : Pat<(not VK8:$src),
          (COPY_TO_REGCLASS
           (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
}

// Mask binary operation
// - KAND, KANDN, KOR, KXNOR, KXOR
multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
                             RegisterClass KRC, SDPatternOperator OpNode,
                             Predicate prd> {
  let Predicates = [prd] in
    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
               !strconcat(OpcodeStr,
                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
}

// b/w/d/q width variants of the two-operand mask logic instructions; same
// predicate split as the unary case, plus VEX.NDS/VEX.L encoding bits.
multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
                                 SDPatternOperator OpNode> {
  defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
                             HasDQI>, VEX_4V, VEX_L, PD;
  defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
                             HasAVX512>, VEX_4V, VEX_L, PS;
  defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
                             HasBWI>, VEX_4V, VEX_L, VEX_W, PD;
  defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
                             HasBWI>, VEX_4V, VEX_L, VEX_W, PS;
}

// Pattern fragments for the composite operations KANDN and KXNOR implement.
def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;

let isCommutable = 1 in {
  defm KAND  : avx512_mask_binop_all<0x41, "kand",  and>;
  defm KOR   : avx512_mask_binop_all<0x45, "kor",   or>;
  defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", xnor>;
  defm KXOR  : avx512_mask_binop_all<0x47, "kxor",  xor>;
}
// kandn is not commutative: the first operand is the one complemented.
let isCommutable = 0 in
  defm KANDN : avx512_mask_binop_all<0x42, "kandn", andn>;

// Single-bit (i1) mask logic is performed on the 16-bit mask instructions,
// copying VK1 operands through VK16 and back.
def : Pat<(xor VK1:$src1, VK1:$src2),
          (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
                                     (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;

def : Pat<(or VK1:$src1, VK1:$src2),
          (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
                                    (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;

def : Pat<(and VK1:$src1, VK1:$src2),
          (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
                                     (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;

// Maps the 16-bit two-operand mask intrinsics onto the W-form instructions.
multiclass avx512_mask_binop_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
                (i16 GR16:$src1), (i16 GR16:$src2)),
              (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
                (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
                (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
}

defm : avx512_mask_binop_int<"kand",  "KAND">;
defm : avx512_mask_binop_int<"kandn", "KANDN">;
defm : avx512_mask_binop_int<"kor",   "KOR">;
defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
defm : avx512_mask_binop_int<"kxor",  "KXOR">;

// With AVX-512, 8-bit mask is promoted to 16-bit mask.
1931multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> { 1932 let Predicates = [HasAVX512] in 1933 def : Pat<(OpNode VK8:$src1, VK8:$src2), 1934 (COPY_TO_REGCLASS 1935 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16), 1936 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>; 1937} 1938 1939defm : avx512_binop_pat<and, KANDWrr>; 1940defm : avx512_binop_pat<andn, KANDNWrr>; 1941defm : avx512_binop_pat<or, KORWrr>; 1942defm : avx512_binop_pat<xnor, KXNORWrr>; 1943defm : avx512_binop_pat<xor, KXORWrr>; 1944 1945// Mask unpacking 1946multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr, 1947 RegisterClass KRC> { 1948 let Predicates = [HasAVX512] in 1949 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2), 1950 !strconcat(OpcodeStr, 1951 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; 1952} 1953 1954multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> { 1955 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>, 1956 VEX_4V, VEX_L, PD; 1957} 1958 1959defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">; 1960def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))), 1961 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16), 1962 (COPY_TO_REGCLASS VK8:$src1, VK16))>; 1963 1964 1965multiclass avx512_mask_unpck_int<string IntName, string InstName> { 1966 let Predicates = [HasAVX512] in 1967 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw") 1968 (i16 GR16:$src1), (i16 GR16:$src2)), 1969 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr") 1970 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)), 1971 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>; 1972} 1973defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">; 1974 1975// Mask bit testing 1976multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC, 1977 SDNode OpNode> { 1978 let Predicates = [HasAVX512], Defs = [EFLAGS] in 1979 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2), 1980 !strconcat(OpcodeStr, "\t{$src2, 
$src1|$src1, $src2}"), 1981 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>; 1982} 1983 1984multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> { 1985 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>, 1986 VEX, PS; 1987 let Predicates = [HasDQI] in 1988 defm B : avx512_mask_testop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode>, 1989 VEX, PD; 1990 let Predicates = [HasBWI] in { 1991 defm Q : avx512_mask_testop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode>, 1992 VEX, PS, VEX_W; 1993 defm D : avx512_mask_testop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode>, 1994 VEX, PD, VEX_W; 1995 } 1996} 1997 1998defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>; 1999 2000// Mask shift 2001multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC, 2002 SDNode OpNode> { 2003 let Predicates = [HasAVX512] in 2004 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, u8imm:$imm), 2005 !strconcat(OpcodeStr, 2006 "\t{$imm, $src, $dst|$dst, $src, $imm}"), 2007 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>; 2008} 2009 2010multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr, 2011 SDNode OpNode> { 2012 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>, 2013 VEX, TAPD, VEX_W; 2014 let Predicates = [HasDQI] in 2015 defm B : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "b"), VK8, OpNode>, 2016 VEX, TAPD; 2017 let Predicates = [HasBWI] in { 2018 defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>, 2019 VEX, TAPD, VEX_W; 2020 let Predicates = [HasDQI] in 2021 defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>, 2022 VEX, TAPD; 2023 } 2024} 2025 2026defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>; 2027defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>; 2028 2029// Mask setting all 0s or 1s 2030multiclass avx512_mask_setop<RegisterClass KRC, ValueType 
VT, PatFrag Val> { 2031 let Predicates = [HasAVX512] in 2032 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in 2033 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "", 2034 [(set KRC:$dst, (VT Val))]>; 2035} 2036 2037multiclass avx512_mask_setop_w<PatFrag Val> { 2038 defm B : avx512_mask_setop<VK8, v8i1, Val>; 2039 defm W : avx512_mask_setop<VK16, v16i1, Val>; 2040} 2041 2042defm KSET0 : avx512_mask_setop_w<immAllZerosV>; 2043defm KSET1 : avx512_mask_setop_w<immAllOnesV>; 2044 2045// With AVX-512 only, 8-bit mask is promoted to 16-bit mask. 2046let Predicates = [HasAVX512] in { 2047 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>; 2048 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>; 2049 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>; 2050 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>; 2051 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>; 2052} 2053def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))), 2054 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>; 2055 2056def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))), 2057 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>; 2058 2059def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))), 2060 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>; 2061 2062let Predicates = [HasVLX] in { 2063 def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))), 2064 (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>; 2065 def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))), 2066 (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>; 2067 def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))), 2068 (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>; 2069 def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))), 2070 (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>; 2071 def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))), 2072 (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>; 2073} 2074 2075def : Pat<(v8i1 (X86vshli 
VK8:$src, (i8 imm:$imm))), 2076 (v8i1 (COPY_TO_REGCLASS 2077 (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), 2078 (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>; 2079 2080def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))), 2081 (v8i1 (COPY_TO_REGCLASS 2082 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), 2083 (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>; 2084 2085def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))), 2086 (v4i1 (COPY_TO_REGCLASS 2087 (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16), 2088 (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>; 2089 2090def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))), 2091 (v4i1 (COPY_TO_REGCLASS 2092 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16), 2093 (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>; 2094 2095//===----------------------------------------------------------------------===// 2096// AVX-512 - Aligned and unaligned load and store 2097// 2098 2099 2100multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, 2101 PatFrag ld_frag, PatFrag mload, 2102 bit IsReMaterializable = 1> { 2103 let hasSideEffects = 0 in { 2104 def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src), 2105 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [], 2106 _.ExeDomain>, EVEX; 2107 def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), 2108 (ins _.KRCWM:$mask, _.RC:$src), 2109 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|", 2110 "${dst} {${mask}} {z}, $src}"), [], _.ExeDomain>, 2111 EVEX, EVEX_KZ; 2112 2113 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable, 2114 SchedRW = [WriteLoad] in 2115 def rm : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst), (ins _.MemOp:$src), 2116 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), 2117 [(set _.RC:$dst, (_.VT (bitconvert (ld_frag addr:$src))))], 2118 _.ExeDomain>, EVEX; 2119 2120 let Constraints = "$src0 = $dst" in { 2121 def rrk : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), 2122 (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1), 2123 
!strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|", 2124 "${dst} {${mask}}, $src1}"), 2125 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask, 2126 (_.VT _.RC:$src1), 2127 (_.VT _.RC:$src0))))], _.ExeDomain>, 2128 EVEX, EVEX_K; 2129 let mayLoad = 1, SchedRW = [WriteLoad] in 2130 def rmk : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst), 2131 (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src1), 2132 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|", 2133 "${dst} {${mask}}, $src1}"), 2134 [(set _.RC:$dst, (_.VT 2135 (vselect _.KRCWM:$mask, 2136 (_.VT (bitconvert (ld_frag addr:$src1))), 2137 (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K; 2138 } 2139 let mayLoad = 1, SchedRW = [WriteLoad] in 2140 def rmkz : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst), 2141 (ins _.KRCWM:$mask, _.MemOp:$src), 2142 OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"# 2143 "${dst} {${mask}} {z}, $src}", 2144 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask, 2145 (_.VT (bitconvert (ld_frag addr:$src))), _.ImmAllZerosV)))], 2146 _.ExeDomain>, EVEX, EVEX_KZ; 2147 } 2148 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, undef)), 2149 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>; 2150 2151 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, _.ImmAllZerosV)), 2152 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>; 2153 2154 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src0))), 2155 (!cast<Instruction>(NAME#_.ZSuffix##rmk) _.RC:$src0, 2156 _.KRCWM:$mask, addr:$ptr)>; 2157} 2158 2159multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr, 2160 AVX512VLVectorVTInfo _, 2161 Predicate prd, 2162 bit IsReMaterializable = 1> { 2163 let Predicates = [prd] in 2164 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.AlignedLdFrag, 2165 masked_load_aligned512, IsReMaterializable>, EVEX_V512; 2166 2167 let Predicates = [prd, HasVLX] in { 2168 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.AlignedLdFrag, 2169 masked_load_aligned256, 
IsReMaterializable>, EVEX_V256; 2170 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.AlignedLdFrag, 2171 masked_load_aligned128, IsReMaterializable>, EVEX_V128; 2172 } 2173} 2174 2175multiclass avx512_load_vl<bits<8> opc, string OpcodeStr, 2176 AVX512VLVectorVTInfo _, 2177 Predicate prd, 2178 bit IsReMaterializable = 1> { 2179 let Predicates = [prd] in 2180 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.LdFrag, 2181 masked_load_unaligned, IsReMaterializable>, EVEX_V512; 2182 2183 let Predicates = [prd, HasVLX] in { 2184 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.LdFrag, 2185 masked_load_unaligned, IsReMaterializable>, EVEX_V256; 2186 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.LdFrag, 2187 masked_load_unaligned, IsReMaterializable>, EVEX_V128; 2188 } 2189} 2190 2191multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, 2192 PatFrag st_frag, PatFrag mstore> { 2193 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in { 2194 def rr_alt : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src), 2195 OpcodeStr # "\t{$src, $dst|$dst, $src}", [], 2196 _.ExeDomain>, EVEX; 2197 let Constraints = "$src1 = $dst" in 2198 def rrk_alt : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), 2199 (ins _.RC:$src1, _.KRCWM:$mask, _.RC:$src2), 2200 OpcodeStr # 2201 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}", 2202 [], _.ExeDomain>, EVEX, EVEX_K; 2203 def rrkz_alt : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), 2204 (ins _.KRCWM:$mask, _.RC:$src), 2205 OpcodeStr # 2206 "\t{$src, ${dst} {${mask}} {z}|" # 2207 "${dst} {${mask}} {z}, $src}", 2208 [], _.ExeDomain>, EVEX, EVEX_KZ; 2209 } 2210 let mayStore = 1 in { 2211 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src), 2212 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), 2213 [(st_frag (_.VT _.RC:$src), addr:$dst)], _.ExeDomain>, EVEX; 2214 def mrk : AVX512PI<opc, MRMDestMem, (outs), 2215 (ins _.MemOp:$dst, 
_.KRCWM:$mask, _.RC:$src), 2216 OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}", 2217 [], _.ExeDomain>, EVEX, EVEX_K; 2218 } 2219 2220 def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)), 2221 (!cast<Instruction>(NAME#_.ZSuffix##mrk) addr:$ptr, 2222 _.KRCWM:$mask, _.RC:$src)>; 2223} 2224 2225 2226multiclass avx512_store_vl< bits<8> opc, string OpcodeStr, 2227 AVX512VLVectorVTInfo _, Predicate prd> { 2228 let Predicates = [prd] in 2229 defm Z : avx512_store<opc, OpcodeStr, _.info512, store, 2230 masked_store_unaligned>, EVEX_V512; 2231 2232 let Predicates = [prd, HasVLX] in { 2233 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, store, 2234 masked_store_unaligned>, EVEX_V256; 2235 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, store, 2236 masked_store_unaligned>, EVEX_V128; 2237 } 2238} 2239 2240multiclass avx512_alignedstore_vl<bits<8> opc, string OpcodeStr, 2241 AVX512VLVectorVTInfo _, Predicate prd> { 2242 let Predicates = [prd] in 2243 defm Z : avx512_store<opc, OpcodeStr, _.info512, alignedstore512, 2244 masked_store_aligned512>, EVEX_V512; 2245 2246 let Predicates = [prd, HasVLX] in { 2247 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, alignedstore256, 2248 masked_store_aligned256>, EVEX_V256; 2249 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, alignedstore, 2250 masked_store_aligned128>, EVEX_V128; 2251 } 2252} 2253 2254defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info, 2255 HasAVX512>, 2256 avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info, 2257 HasAVX512>, PS, EVEX_CD8<32, CD8VF>; 2258 2259defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info, 2260 HasAVX512>, 2261 avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info, 2262 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>; 2263 2264defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512>, 2265 avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512>, 2266 PS, EVEX_CD8<32, CD8VF>; 2267 2268defm 
VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512, 0>, 2269 avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512>, 2270 PD, VEX_W, EVEX_CD8<64, CD8VF>; 2271 2272def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr, 2273 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)), 2274 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>; 2275 2276def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr, 2277 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)), 2278 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>; 2279 2280def: Pat<(v8f64 (int_x86_avx512_mask_load_pd_512 addr:$ptr, 2281 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)), 2282 (VMOVAPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>; 2283 2284def: Pat<(v16f32 (int_x86_avx512_mask_load_ps_512 addr:$ptr, 2285 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)), 2286 (VMOVAPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>; 2287 2288def: Pat<(v8f64 (int_x86_avx512_mask_load_pd_512 addr:$ptr, 2289 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))), 2290 (VMOVAPDZrm addr:$ptr)>; 2291 2292def: Pat<(v16f32 (int_x86_avx512_mask_load_ps_512 addr:$ptr, 2293 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))), 2294 (VMOVAPSZrm addr:$ptr)>; 2295 2296def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src), 2297 GR16:$mask), 2298 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), 2299 VR512:$src)>; 2300def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src), 2301 GR8:$mask), 2302 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), 2303 VR512:$src)>; 2304 2305def: Pat<(int_x86_avx512_mask_store_ps_512 addr:$ptr, (v16f32 VR512:$src), 2306 GR16:$mask), 2307 (VMOVAPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), 2308 VR512:$src)>; 2309def: Pat<(int_x86_avx512_mask_store_pd_512 addr:$ptr, (v8f64 VR512:$src), 2310 GR8:$mask), 2311 (VMOVAPDZmrk addr:$ptr, (v8i1 
(COPY_TO_REGCLASS GR8:$mask, VK8WM)), 2312 VR512:$src)>; 2313 2314let Predicates = [HasAVX512, NoVLX] in { 2315def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src)), 2316 (VMOVUPSZmrk addr:$ptr, 2317 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), 2318 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>; 2319 2320def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, undef)), 2321 (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmkz 2322 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>; 2323 2324def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src0))), 2325 (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmk 2326 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src0, sub_ymm), 2327 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>; 2328} 2329 2330defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info, 2331 HasAVX512>, 2332 avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info, 2333 HasAVX512>, PD, EVEX_CD8<32, CD8VF>; 2334 2335defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info, 2336 HasAVX512>, 2337 avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info, 2338 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>; 2339 2340defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI>, 2341 avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info, 2342 HasBWI>, XD, EVEX_CD8<8, CD8VF>; 2343 2344defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>, 2345 avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info, 2346 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>; 2347 2348defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512>, 2349 avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info, 2350 HasAVX512>, XS, EVEX_CD8<32, CD8VF>; 2351 2352defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512>, 2353 avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info, 2354 HasAVX512>, XS, VEX_W, EVEX_CD8<64, 
CD8VF>;

// Integer masked load/store intrinsics with a zero passthru and GPR mask,
// lowered to the zero-masking / masked-store forms (mirrors the FP patterns).
def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
                  (v16i32 immAllZerosV), GR16:$mask)),
         (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;

def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
                 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
         (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;

def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
          GR16:$mask),
         (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
          VR512:$src)>;
def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
          GR8:$mask),
         (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
          VR512:$src)>;

// vselect of a register against all-zeros is a zero-masking register move.
let AddedComplexity = 20 in {
def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
                          (bc_v8i64 (v16i32 immAllZerosV)))),
          (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;

// Operands swapped: invert the mask with KNOTW (via VK16, since KNOTW
// operates on a 16-bit mask register).
// NOTE(review): the output pattern uses VK8:$mask while the source binds
// VK8WM:$mask — confirm TableGen resolves the operand as intended.
def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
                          (v8i64 VR512:$src))),
          (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
                                            VK8), VR512:$src)>;

def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
                           (v16i32 immAllZerosV))),
          (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;

def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
                           (v16i32 VR512:$src))),
          (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
}
// NoVLX patterns: emulate 256-bit masked integer load/store by widening to
// 512 bits (same approach as the FP patterns above).
let Predicates = [HasAVX512, NoVLX] in {
def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i32 VR256:$src)),
         (VMOVDQU32Zmrk addr:$ptr,
          (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;

def: Pat<(v8i32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
         (v8i32 (EXTRACT_SUBREG (v16i32 (VMOVDQU32Zrmkz
          (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)),
sub_ymm))>; 2401} 2402 2403// Move Int Doubleword to Packed Double Int 2404// 2405def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src), 2406 "vmovd\t{$src, $dst|$dst, $src}", 2407 [(set VR128X:$dst, 2408 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>, 2409 EVEX, VEX_LIG; 2410def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src), 2411 "vmovd\t{$src, $dst|$dst, $src}", 2412 [(set VR128X:$dst, 2413 (v4i32 (scalar_to_vector (loadi32 addr:$src))))], 2414 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; 2415def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src), 2416 "vmovq\t{$src, $dst|$dst, $src}", 2417 [(set VR128X:$dst, 2418 (v2i64 (scalar_to_vector GR64:$src)))], 2419 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG; 2420let isCodeGenOnly = 1 in { 2421def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), 2422 "vmovq\t{$src, $dst|$dst, $src}", 2423 [(set FR64:$dst, (bitconvert GR64:$src))], 2424 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>; 2425def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src), 2426 "vmovq\t{$src, $dst|$dst, $src}", 2427 [(set GR64:$dst, (bitconvert FR64:$src))], 2428 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>; 2429} 2430def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), 2431 "vmovq\t{$src, $dst|$dst, $src}", 2432 [(store (i64 (bitconvert FR64:$src)), addr:$dst)], 2433 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>, 2434 EVEX_CD8<64, CD8VT1>; 2435 2436// Move Int Doubleword to Single Scalar 2437// 2438let isCodeGenOnly = 1 in { 2439def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src), 2440 "vmovd\t{$src, $dst|$dst, $src}", 2441 [(set FR32X:$dst, (bitconvert GR32:$src))], 2442 IIC_SSE_MOVDQ>, EVEX, VEX_LIG; 2443 2444def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src), 2445 "vmovd\t{$src, $dst|$dst, $src}", 
2446 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))], 2447 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; 2448} 2449 2450// Move doubleword from xmm register to r/m32 2451// 2452def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src), 2453 "vmovd\t{$src, $dst|$dst, $src}", 2454 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src), 2455 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, 2456 EVEX, VEX_LIG; 2457def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs), 2458 (ins i32mem:$dst, VR128X:$src), 2459 "vmovd\t{$src, $dst|$dst, $src}", 2460 [(store (i32 (vector_extract (v4i32 VR128X:$src), 2461 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>, 2462 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; 2463 2464// Move quadword from xmm1 register to r/m64 2465// 2466def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src), 2467 "vmovq\t{$src, $dst|$dst, $src}", 2468 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src), 2469 (iPTR 0)))], 2470 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W, 2471 Requires<[HasAVX512, In64BitMode]>; 2472 2473def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs), 2474 (ins i64mem:$dst, VR128X:$src), 2475 "vmovq\t{$src, $dst|$dst, $src}", 2476 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)), 2477 addr:$dst)], IIC_SSE_MOVDQ>, 2478 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>, 2479 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>; 2480 2481// Move Scalar Single to Double Int 2482// 2483let isCodeGenOnly = 1 in { 2484def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), 2485 (ins FR32X:$src), 2486 "vmovd\t{$src, $dst|$dst, $src}", 2487 [(set GR32:$dst, (bitconvert FR32X:$src))], 2488 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG; 2489def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs), 2490 (ins i32mem:$dst, FR32X:$src), 2491 "vmovd\t{$src, $dst|$dst, $src}", 2492 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)], 2493 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; 2494} 2495 2496// Move Quadword Int to Packed Quadword 
Int 2497// 2498def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), 2499 (ins i64mem:$src), 2500 "vmovq\t{$src, $dst|$dst, $src}", 2501 [(set VR128X:$dst, 2502 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, 2503 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>; 2504 2505//===----------------------------------------------------------------------===// 2506// AVX-512 MOVSS, MOVSD 2507//===----------------------------------------------------------------------===// 2508 2509multiclass avx512_move_scalar <string asm, RegisterClass RC, 2510 SDNode OpNode, ValueType vt, 2511 X86MemOperand x86memop, PatFrag mem_pat> { 2512 let hasSideEffects = 0 in { 2513 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2), 2514 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), 2515 [(set VR128X:$dst, (vt (OpNode VR128X:$src1, 2516 (scalar_to_vector RC:$src2))))], 2517 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG; 2518 let Constraints = "$src1 = $dst" in 2519 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst), 2520 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3), 2521 !strconcat(asm, 2522 "\t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"), 2523 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K; 2524 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src), 2525 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), 2526 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>, 2527 EVEX, VEX_LIG; 2528 let mayStore = 1 in { 2529 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src), 2530 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), 2531 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>, 2532 EVEX, VEX_LIG; 2533 def mrk: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, VK1WM:$mask, RC:$src), 2534 !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"), 2535 [], IIC_SSE_MOV_S_MR>, 2536 EVEX, VEX_LIG, EVEX_K; 2537 } // mayStore 2538 } //hasSideEffects = 0 2539} 2540 2541let ExeDomain = SSEPackedSingle in 2542defm VMOVSSZ : 
avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem, 2543 loadf32>, XS, EVEX_CD8<32, CD8VT1>; 2544 2545let ExeDomain = SSEPackedDouble in 2546defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem, 2547 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>; 2548 2549def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))), 2550 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X), 2551 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>; 2552 2553def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))), 2554 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X), 2555 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>; 2556 2557def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask), 2558 (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)), 2559 (COPY_TO_REGCLASS VR128X:$src, FR32X))>; 2560 2561// For the disassembler 2562let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in { 2563 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst), 2564 (ins VR128X:$src1, FR32X:$src2), 2565 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [], 2566 IIC_SSE_MOV_S_RR>, 2567 XS, EVEX_4V, VEX_LIG; 2568 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst), 2569 (ins VR128X:$src1, FR64X:$src2), 2570 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [], 2571 IIC_SSE_MOV_S_RR>, 2572 XD, EVEX_4V, VEX_LIG, VEX_W; 2573} 2574 2575let Predicates = [HasAVX512] in { 2576 let AddedComplexity = 15 in { 2577 // Move scalar to XMM zero-extended, zeroing a VR128X then do a 2578 // MOVS{S,D} to the lower bits. 
2579 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))), 2580 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>; 2581 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))), 2582 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>; 2583 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))), 2584 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>; 2585 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))), 2586 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>; 2587 2588 // Move low f32 and clear high bits. 2589 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))), 2590 (SUBREG_TO_REG (i32 0), 2591 (VMOVSSZrr (v4f32 (V_SET0)), 2592 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>; 2593 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))), 2594 (SUBREG_TO_REG (i32 0), 2595 (VMOVSSZrr (v4i32 (V_SET0)), 2596 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>; 2597 } 2598 2599 let AddedComplexity = 20 in { 2600 // MOVSSrm zeros the high parts of the register; represent this 2601 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0 2602 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))), 2603 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>; 2604 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))), 2605 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>; 2606 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))), 2607 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>; 2608 2609 // MOVSDrm zeros the high parts of the register; represent this 2610 // with SUBREG_TO_REG. 
The AVX versions also write: DST[255:128] <- 0 2611 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))), 2612 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>; 2613 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))), 2614 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>; 2615 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))), 2616 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>; 2617 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))), 2618 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>; 2619 def : Pat<(v2f64 (X86vzload addr:$src)), 2620 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>; 2621 2622 // Represent the same patterns above but in the form they appear for 2623 // 256-bit types 2624 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef, 2625 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))), 2626 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>; 2627 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef, 2628 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))), 2629 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>; 2630 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef, 2631 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))), 2632 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>; 2633 } 2634 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef, 2635 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))), 2636 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)), 2637 FR32X:$src)), sub_xmm)>; 2638 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef, 2639 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))), 2640 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)), 2641 FR64X:$src)), sub_xmm)>; 2642 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef, 2643 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))), 2644 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>; 2645 2646 // Move low f64 and clear high bits. 
2647 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))), 2648 (SUBREG_TO_REG (i32 0), 2649 (VMOVSDZrr (v2f64 (V_SET0)), 2650 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>; 2651 2652 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))), 2653 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)), 2654 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>; 2655 2656 // Extract and store. 2657 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))), 2658 addr:$dst), 2659 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>; 2660 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))), 2661 addr:$dst), 2662 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>; 2663 2664 // Shuffle with VMOVSS 2665 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)), 2666 (VMOVSSZrr (v4i32 VR128X:$src1), 2667 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>; 2668 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)), 2669 (VMOVSSZrr (v4f32 VR128X:$src1), 2670 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>; 2671 2672 // 256-bit variants 2673 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)), 2674 (SUBREG_TO_REG (i32 0), 2675 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm), 2676 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)), 2677 sub_xmm)>; 2678 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)), 2679 (SUBREG_TO_REG (i32 0), 2680 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm), 2681 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)), 2682 sub_xmm)>; 2683 2684 // Shuffle with VMOVSD 2685 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)), 2686 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2687 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)), 2688 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2689 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)), 2690 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2691 
def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)), 2692 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2693 2694 // 256-bit variants 2695 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)), 2696 (SUBREG_TO_REG (i32 0), 2697 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm), 2698 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)), 2699 sub_xmm)>; 2700 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)), 2701 (SUBREG_TO_REG (i32 0), 2702 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm), 2703 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)), 2704 sub_xmm)>; 2705 2706 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)), 2707 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2708 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)), 2709 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2710 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)), 2711 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2712 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)), 2713 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>; 2714} 2715 2716let AddedComplexity = 15 in 2717def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst), 2718 (ins VR128X:$src), 2719 "vmovq\t{$src, $dst|$dst, $src}", 2720 [(set VR128X:$dst, (v2i64 (X86vzmovl 2721 (v2i64 VR128X:$src))))], 2722 IIC_SSE_MOVQ_RR>, EVEX, VEX_W; 2723 2724let AddedComplexity = 20 in 2725def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst), 2726 (ins i128mem:$src), 2727 "vmovq\t{$src, $dst|$dst, $src}", 2728 [(set VR128X:$dst, (v2i64 (X86vzmovl 2729 (loadv2i64 addr:$src))))], 2730 IIC_SSE_MOVDQ>, EVEX, VEX_W, 2731 EVEX_CD8<8, CD8VT8>; 2732 2733let Predicates = [HasAVX512] in { 2734 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part. 
2735 let AddedComplexity = 20 in { 2736 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))), 2737 (VMOVDI2PDIZrm addr:$src)>; 2738 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))), 2739 (VMOV64toPQIZrr GR64:$src)>; 2740 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))), 2741 (VMOVDI2PDIZrr GR32:$src)>; 2742 2743 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))), 2744 (VMOVDI2PDIZrm addr:$src)>; 2745 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))), 2746 (VMOVDI2PDIZrm addr:$src)>; 2747 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))), 2748 (VMOVZPQILo2PQIZrm addr:$src)>; 2749 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))), 2750 (VMOVZPQILo2PQIZrr VR128X:$src)>; 2751 def : Pat<(v2i64 (X86vzload addr:$src)), 2752 (VMOVZPQILo2PQIZrm addr:$src)>; 2753 } 2754 2755 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext. 2756 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef, 2757 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))), 2758 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>; 2759 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef, 2760 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))), 2761 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>; 2762} 2763 2764def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))), 2765 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>; 2766 2767def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))), 2768 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>; 2769 2770def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))), 2771 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>; 2772 2773def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))), 2774 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>; 2775 2776//===----------------------------------------------------------------------===// 2777// 
// AVX-512 - Non-temporals
//===----------------------------------------------------------------------===//
let SchedRW = [WriteLoad] in {
  def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
                       (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
                       SSEPackedInt>, EVEX, T8PD, EVEX_V512,
                       EVEX_CD8<64, CD8VF>;

  let Predicates = [HasAVX512, HasVLX] in {
    def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
                            (ins i256mem:$src),
                            "vmovntdqa\t{$src, $dst|$dst, $src}", [],
                            SSEPackedInt>, EVEX, T8PD, EVEX_V256,
                            EVEX_CD8<64, CD8VF>;

    def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
                            (ins i128mem:$src),
                            "vmovntdqa\t{$src, $dst|$dst, $src}", [],
                            SSEPackedInt>, EVEX, T8PD, EVEX_V128,
                            EVEX_CD8<64, CD8VF>;
  }
}

// Non-temporal store; high AddedComplexity so the NT form wins over
// ordinary store patterns when the fragment matches.
multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
                        ValueType OpVT, RegisterClass RC, X86MemOperand memop,
                        Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
  let SchedRW = [WriteStore], mayStore = 1,
      AddedComplexity = 400 in
  def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
}

// All-vector-length wrapper; the value type and memory operand names are
// assembled from the element-type/size/count string fragments.
multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
                           string elty, string elsz, string vsz512,
                           string vsz256, string vsz128, Domain d,
                           Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
  let Predicates = [prd] in
  defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
                        !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
                        !cast<X86MemOperand>(elty##"512mem"), d, itin>,
                        EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
                             !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
                             !cast<X86MemOperand>(elty##"256mem"), d, itin>,
                             EVEX_V256;

    defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
                             !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
                             !cast<X86MemOperand>(elty##"128mem"), d, itin>,
                             EVEX_V128;
  }
}

defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
                                "i", "64", "8", "4", "2", SSEPackedInt,
                                HasAVX512>, PD, EVEX_CD8<64, CD8VF>;

defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
                                "f", "64", "8", "4", "2", SSEPackedDouble,
                                HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
                                "f", "32", "16", "8", "4", SSEPackedSingle,
                                HasAVX512>, PS, EVEX_CD8<32, CD8VF>;

//===----------------------------------------------------------------------===//
// AVX-512 - Integer arithmetic
//
// Basic reg-reg and reg-mem forms of a maskable integer binop.
multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           X86VectorVTInfo _, OpndItins itins,
                           bit IsCommutable = 0> {
  defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
                    "$src2, $src1", "$src1, $src2",
                    (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
                    "", itins.rr, IsCommutable>,
                    AVX512BIBase, EVEX_4V;

  let mayLoad = 1 in
    defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
                    "$src2, $src1", "$src1, $src2",
                    (_.VT (OpNode _.RC:$src1,
                                  (bitconvert (_.LdFrag addr:$src2)))),
                    "", itins.rm>,
                    AVX512BIBase, EVEX_4V;
}

// Adds the broadcast-from-memory (rmb) form on top of avx512_binop_rm.
multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            X86VectorVTInfo _, OpndItins itins,
                            bit IsCommutable = 0> :
           avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
  let mayLoad = 1 in
    defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1,
_.ScalarMemOp:$src2), OpcodeStr##_.Suffix, 2876 "${src2}"##_.BroadcastStr##", $src1", 2877 "$src1, ${src2}"##_.BroadcastStr, 2878 (_.VT (OpNode _.RC:$src1, 2879 (X86VBroadcast 2880 (_.ScalarLdFrag addr:$src2)))), 2881 "", itins.rm>, 2882 AVX512BIBase, EVEX_4V, EVEX_B; 2883} 2884 2885multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode, 2886 AVX512VLVectorVTInfo VTInfo, OpndItins itins, 2887 Predicate prd, bit IsCommutable = 0> { 2888 let Predicates = [prd] in 2889 defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins, 2890 IsCommutable>, EVEX_V512; 2891 2892 let Predicates = [prd, HasVLX] in { 2893 defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins, 2894 IsCommutable>, EVEX_V256; 2895 defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins, 2896 IsCommutable>, EVEX_V128; 2897 } 2898} 2899 2900multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode, 2901 AVX512VLVectorVTInfo VTInfo, OpndItins itins, 2902 Predicate prd, bit IsCommutable = 0> { 2903 let Predicates = [prd] in 2904 defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins, 2905 IsCommutable>, EVEX_V512; 2906 2907 let Predicates = [prd, HasVLX] in { 2908 defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins, 2909 IsCommutable>, EVEX_V256; 2910 defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins, 2911 IsCommutable>, EVEX_V128; 2912 } 2913} 2914 2915multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode, 2916 OpndItins itins, Predicate prd, 2917 bit IsCommutable = 0> { 2918 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info, 2919 itins, prd, IsCommutable>, 2920 VEX_W, EVEX_CD8<64, CD8VF>; 2921} 2922 2923multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode, 2924 OpndItins itins, Predicate prd, 2925 bit IsCommutable = 0> { 2926 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, 
avx512vl_i32_info, 2927 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>; 2928} 2929 2930multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode, 2931 OpndItins itins, Predicate prd, 2932 bit IsCommutable = 0> { 2933 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info, 2934 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>; 2935} 2936 2937multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode, 2938 OpndItins itins, Predicate prd, 2939 bit IsCommutable = 0> { 2940 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info, 2941 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>; 2942} 2943 2944multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr, 2945 SDNode OpNode, OpndItins itins, Predicate prd, 2946 bit IsCommutable = 0> { 2947 defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr, OpNode, itins, prd, 2948 IsCommutable>; 2949 2950 defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr, OpNode, itins, prd, 2951 IsCommutable>; 2952} 2953 2954multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr, 2955 SDNode OpNode, OpndItins itins, Predicate prd, 2956 bit IsCommutable = 0> { 2957 defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr, OpNode, itins, prd, 2958 IsCommutable>; 2959 2960 defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr, OpNode, itins, prd, 2961 IsCommutable>; 2962} 2963 2964multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w, 2965 bits<8> opc_d, bits<8> opc_q, 2966 string OpcodeStr, SDNode OpNode, 2967 OpndItins itins, bit IsCommutable = 0> { 2968 defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode, 2969 itins, HasAVX512, IsCommutable>, 2970 avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode, 2971 itins, HasBWI, IsCommutable>; 2972} 2973 2974multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins, 2975 SDNode OpNode,X86VectorVTInfo _Src, 2976 X86VectorVTInfo _Dst, bit IsCommutable = 0> { 2977 defm rr : 
          AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
                            (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
                            "$src2, $src1","$src1, $src2",
                            (_Dst.VT (OpNode
                                         (_Src.VT _Src.RC:$src1),
                                         (_Src.VT _Src.RC:$src2))),
                            "",itins.rr, IsCommutable>,
                            AVX512BIBase, EVEX_4V;
  let mayLoad = 1 in {
    // Memory form: $src2 is a full-width vector load; bitconvert adapts the
    // promoted integer load type (see LdFrag) to the source vector type.
    defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
                            (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
                            "$src2, $src1", "$src1, $src2",
                            (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
                                          (bitconvert (_Src.LdFrag addr:$src2)))),
                            "", itins.rm>,
                            AVX512BIBase, EVEX_4V;

    // Broadcast form (EVEX.B): $src2 is a scalar load broadcast to a full
    // vector of the *destination* element type.
    // NOTE(review): the bc_v16i32 bitcast is hard-coded here even though the
    // multiclass is otherwise parameterized on _Src/_Dst — confirm all
    // instantiations use a v16i32-compatible broadcast source.
    defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
                        (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
                        OpcodeStr,
                        "${src2}"##_Dst.BroadcastStr##", $src1",
                        "$src1, ${src2}"##_Dst.BroadcastStr,
                        (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bc_v16i32
                                     (_Dst.VT (X86VBroadcast
                                              (_Dst.ScalarLdFrag addr:$src2)))))),
                        "", itins.rm>,
                        AVX512BIBase, EVEX_4V, EVEX_B;
  }
}

// Integer add/sub for all element widths (byte/word/dword/qword opcodes).
defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
                                    SSE_INTALU_ITINS_P, 1>;
defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
                                    SSE_INTALU_ITINS_P, 0>;
// Low-half multiplies; PMULLQ additionally requires DQI, PMULLW requires BWI.
defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmull", mul,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>;
defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
                                    SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;

// Widening 32x32->64 multiplies (v16i32 sources, v8i64 destination).
// NOTE(review): VPMULDQZ uses SSE_INTALU_ITINS_P while VPMULUDQZ uses
// SSE_INTMUL_ITINS_P — the mismatch looks unintentional; confirm whether
// both should be the multiply itinerary.
defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
                   X86pmuldq, v16i32_info, v8i64_info, 1>,
                   T8PD, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;

defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
                   X86pmuludq, v16i32_info, v8i64_info, 1>,
                   EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;

// Signed/unsigned min/max. Byte and word forms require BWI; dword/qword
// forms only require base AVX-512.
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxs", X86smax,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxs", X86smax,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>;
defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", X86smax,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;

defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxu", X86umax,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>;
defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxu", X86umax,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", X86umax,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;

defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpmins", X86smin,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpmins", X86smin,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>;
defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", X86smin,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;

defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminu", X86umin,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>;
defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminu", X86umin,
                                    SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", X86umin,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;

// Map the unmasked 512-bit min/max intrinsics (all-zero passthru, all-ones
// mask) onto the plain register-register instructions.
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINUQZrr VR512:$src1, VR512:$src2)>;
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//

// FP unpack high/low: plain (unmasked) register and memory forms only.
multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
                            PatFrag mem_frag, RegisterClass RC,
                            X86MemOperand x86memop, string asm,
                            Domain d> {
  def rr : AVX512PI<opc, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, RC:$src2),
                    asm, [(set RC:$dst,
                               (vt (OpNode RC:$src1, RC:$src2)))],
                    d>, EVEX_4V;
  // Memory form; bitconvert adapts mem_frag's load type to vt (the f32
  // instantiations below load via loadv8f64).
  def rm : AVX512PI<opc, MRMSrcMem,
                    (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                    asm, [(set RC:$dst,
                               (vt (OpNode RC:$src1,
                                    (bitconvert (mem_frag addr:$src2)))))],
                    d>, EVEX_4V;
}

defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, loadv8f64,
      VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, loadv8f64,
      VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VUNPCKLPSZ:
                 avx512_unpack_fp<0x14, X86Unpckl, v16f32, loadv8f64,
      VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, loadv8f64,
      VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

// Integer unpack high/low: plain (unmasked) register and memory forms.
multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
                             ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                             X86MemOperand x86memop> {
  def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
                    IIC_SSE_UNPCK>, EVEX_4V;
  def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
                                         (bitconvert (memop_frag addr:$src2)))))],
                    IIC_SSE_UNPCK>, EVEX_4V;
}
defm VPUNPCKLDQZ  : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
                                      VR512, loadv16i32, i512mem>, EVEX_V512,
                                      EVEX_CD8<32, CD8VF>;
defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
                                      VR512, loadv8i64, i512mem>, EVEX_V512,
                                      VEX_W, EVEX_CD8<64, CD8VF>;
defm VPUNPCKHDQZ  : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
                                      VR512, loadv16i32, i512mem>, EVEX_V512,
                                      EVEX_CD8<32, CD8VF>;
defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
                                      VR512, loadv8i64, i512mem>, EVEX_V512,
                                      VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - PSHUFD
//

// Immediate-controlled shuffle: register-immediate and memory-immediate
// forms, both unmasked.
multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            SDNode OpNode, PatFrag mem_frag,
                            X86MemOperand x86memop, ValueType OpVT> {
  def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
                     EVEX;
  def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
                     (ins x86memop:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, EVEX;
}

defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, loadv16i32,
                      i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;

//===----------------------------------------------------------------------===//
// AVX-512 Logical Instructions
//===----------------------------------------------------------------------===//

// Bitwise and/or/xor/andn over dword and qword element types.
defm VPAND : avx512_binop_rm_vl_dq<0xDB, 0xDB, "vpand", and,
                                   SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPOR : avx512_binop_rm_vl_dq<0xEB, 0xEB, "vpor", or,
                                  SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPXOR : avx512_binop_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
                                   SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPANDN : avx512_binop_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
                                    SSE_INTALU_ITINS_P, HasAVX512, 0>;

//===----------------------------------------------------------------------===//
// AVX-512 FP arithmetic
//===----------------------------------------------------------------------===//
// Scalar FP binop: maskable intrinsic forms (VecNode with FROUND_CURRENT)
// plus codegen-only FRC forms matching the plain scalar OpNode.
multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
                            SDNode OpNode, SDNode VecNode, OpndItins itins,
                            bit IsCommutable> {

  defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                           (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
                           "$src2, $src1", "$src1, $src2",
                           (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                           (i32 FROUND_CURRENT)),
                           "", itins.rr, IsCommutable>;

  defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
                         (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
                         "$src2, $src1", "$src1, $src2",
                         (VecNode (_.VT _.RC:$src1),
                          (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
                          (i32 FROUND_CURRENT)),
                         "", itins.rm, IsCommutable>;
  // Codegen-only scalar-register forms matching the generic scalar OpNode.
  let isCodeGenOnly = 1, isCommutable = IsCommutable,
      Predicates = [HasAVX512] in {
  def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
              (ins _.FRC:$src1, _.FRC:$src2),
              OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
              itins.rr>;
  // NOTE(review): this memory form uses itins.rr; itins.rm looks intended —
  // confirm against the scheduling model before changing.
  def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
              (ins _.FRC:$src1, _.ScalarMemOp:$src2),
              OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              [(set _.FRC:$dst, (OpNode _.FRC:$src1,
                  (_.ScalarLdFrag addr:$src2)))], itins.rr>;
  }
}

// Scalar FP binop with an explicit static-rounding operand (EVEX.RC).
multiclass avx512_fp_scalar_round<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
                                  SDNode VecNode, OpndItins itins, bit IsCommutable> {

  defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                          (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
                          "$rc, $src2, $src1", "$src1, $src2, $rc",
                          (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                          (i32 imm:$rc)), "", itins.rr, IsCommutable>,
                          EVEX_B, EVEX_RC;
}
// Scalar FP binop with suppress-all-exceptions ({sae}) semantics.
multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
                                SDNode VecNode, OpndItins itins, bit IsCommutable> {

  defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                          (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
                          "$src2, $src1", "$src1, $src2",
                          (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                          (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B;
}

// SS/SD pair with rounding-control variants.
multiclass avx512_binop_s_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                SDNode VecNode,
                                SizeItins itins, bit IsCommutable> {
  defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info,
                              OpNode, VecNode,
                              itins.s, IsCommutable>,
             avx512_fp_scalar_round<opc, OpcodeStr#"ss", f32x_info, VecNode,
                              itins.s, IsCommutable>,
                              XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
  defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
                              itins.d, IsCommutable>,
             avx512_fp_scalar_round<opc, OpcodeStr#"sd", f64x_info, VecNode,
                              itins.d, IsCommutable>,
                              XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
}

// SS/SD pair with {sae} variants (used by min/max, which have no rounding).
multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              SDNode VecNode,
                              SizeItins itins, bit IsCommutable> {
  defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info, OpNode, VecNode,
                              itins.s, IsCommutable>,
             avx512_fp_scalar_sae<opc, OpcodeStr#"ss", f32x_info, VecNode,
                              itins.s, IsCommutable>,
                              XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
  defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
                              itins.d, IsCommutable>,
             avx512_fp_scalar_sae<opc, OpcodeStr#"sd", f64x_info, VecNode,
                              itins.d, IsCommutable>,
                              XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
}
defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86faddRnd, SSE_ALU_ITINS_S, 1>;
defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmulRnd, SSE_ALU_ITINS_S, 1>;
defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubRnd, SSE_ALU_ITINS_S, 0>;
defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivRnd, SSE_ALU_ITINS_S, 0>;
defm VMIN : avx512_binop_s_sae  <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 1>;
defm VMAX : avx512_binop_s_sae  <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 1>;

// Packed FP binop: maskable reg/reg, reg/mem, and reg/broadcast forms.
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            X86VectorVTInfo _, bit IsCommutable> {
  defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                  (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
                  "$src2, $src1", "$src1, $src2",
                  (_.VT (OpNode _.RC:$src1,
                                _.RC:$src2))>, EVEX_4V;
  let mayLoad = 1 in {
    defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
                    "$src2, $src1", "$src1, $src2",
                    (OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
    // Broadcast form (EVEX.B): scalar load splat as second operand.
    defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                     (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
                     "${src2}"##_.BroadcastStr##", $src1",
                     "$src1, ${src2}"##_.BroadcastStr,
                     (OpNode _.RC:$src1, (_.VT (X86VBroadcast
                                                (_.ScalarLdFrag addr:$src2))))>,
                     EVEX_4V, EVEX_B;
  }//let mayLoad = 1
}

// Packed FP binop with explicit static-rounding operand (512-bit only).
multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
                                  X86VectorVTInfo _, bit IsCommutable> {
  defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                  (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr##_.Suffix,
                  "$rc, $src2, $src1", "$src1, $src2, $rc",
                  (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 imm:$rc)))>,
                  EVEX_4V, EVEX_B, EVEX_RC;
}

// PS/PD instantiation over 512-bit plus (under VL) 128/256-bit widths.
multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                             bit IsCommutable = 0> {
  defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
                              IsCommutable>, EVEX_V512, PS,
                              EVEX_CD8<32, CD8VF>;
  defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
                              IsCommutable>, EVEX_V512, PD, VEX_W,
                              EVEX_CD8<64, CD8VF>;

  // Define only if AVX512VL feature is present.
  let Predicates = [HasVLX] in {
  defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
                                 IsCommutable>, EVEX_V128, PS,
                                 EVEX_CD8<32, CD8VF>;
  defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f32x_info,
                                 IsCommutable>, EVEX_V256, PS,
                                 EVEX_CD8<32, CD8VF>;
  defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v2f64x_info,
                                 IsCommutable>, EVEX_V128, PD, VEX_W,
                                 EVEX_CD8<64, CD8VF>;
  defm PDZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f64x_info,
                                 IsCommutable>, EVEX_V256, PD, VEX_W,
                                 EVEX_CD8<64, CD8VF>;
  }
}

// Rounding-control variants exist only at 512-bit width.
multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
  defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info, 0>,
                              EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
  defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info, 0>,
                              EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
}

defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>,
            avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>,
            avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>,
            avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>,
            avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
// min/max have no rounding variants here (they take {sae} in scalar form).
defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, 1>;
defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, 1>;

// Map unmasked 512-bit min/max intrinsics (zero passthru, all-ones mask,
// current rounding) onto the plain register-register instructions.
def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
                   (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
                   (i16 -1), FROUND_CURRENT)),
          (VMAXPSZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
                  (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
                  (i8 -1), FROUND_CURRENT)),
          (VMAXPDZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
                   (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
                   (i16 -1), FROUND_CURRENT)),
          (VMINPSZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
                  (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
                  (i8 -1), FROUND_CURRENT)),
          (VMINPDZrr VR512:$src1, VR512:$src2)>;
//===----------------------------------------------------------------------===//
// AVX-512  VPTESTM instructions
//===----------------------------------------------------------------------===//

// Vector test: produces a mask register result (KRC) rather than a vector.
multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
              RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
              SDNode OpNode, ValueType vt> {
  def rr : AVX512PI<opc, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
             SSEPackedInt>, EVEX_4V;
  def rm : AVX512PI<opc, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (OpNode (vt RC:$src1),
              (bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
}

defm VPTESTMDZ  : avx512_vptest<0x27, "vptestmd", VK16, VR512,  f512mem,
                              loadv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
                              EVEX_CD8<32, CD8VF>;
defm VPTESTMQZ  : avx512_vptest<0x27, "vptestmq", VK8, VR512,  f512mem,
                              loadv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
                              EVEX_CD8<64, CD8VF>;

// The "test-not" variants are gated on the CDI feature.
let Predicates = [HasCDI] in {
defm VPTESTNMDZ  : avx512_vptest<0x27, "vptestnmd", VK16, VR512,  f512mem,
                              loadv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
                              EVEX_CD8<32, CD8VF>;
defm VPTESTNMQZ  : avx512_vptest<0x27, "vptestnmq", VK8, VR512,  f512mem,
                              loadv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
                              EVEX_CD8<64, CD8VF>;
}

// Unmasked ptestm intrinsics: copy the mask-register result to a GPR class.
def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
                 (v16i32 VR512:$src2), (i16 -1))),
           (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;

def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (i8 -1))),
           (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;

//===----------------------------------------------------------------------===//
// AVX-512  Shift instructions
//===----------------------------------------------------------------------===//
// Shift-by-immediate: register-immediate and memory-immediate forms.
multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
                            string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
  defm ri : AVX512_maskable<opc, ImmFormR, _, (outs _.RC:$dst),
                   (ins _.RC:$src1, u8imm:$src2), OpcodeStr,
                      "$src2, $src1", "$src1, $src2",
                   (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
                   " ",  SSE_INTSHIFT_ITINS_P.rr>, AVX512BIi8Base, EVEX_4V;
  let mayLoad = 1 in
  defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
                   (ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
                       "$src2, $src1", "$src1, $src2",
                   (_.VT (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
                          (i8 imm:$src2))),
                   " ",  SSE_INTSHIFT_ITINS_P.rm>, AVX512BIi8Base, EVEX_4V;
}

// Shift-by-immediate with broadcast memory source (EVEX.B).
multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
                             string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
  let mayLoad = 1 in
  defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
                   (ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
      "$src2, ${src1}"##_.BroadcastStr, "${src1}"##_.BroadcastStr##", $src2",
     (_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src1)), (i8 imm:$src2))),
     " ",  SSE_INTSHIFT_ITINS_P.rm>, AVX512BIi8Base, EVEX_4V, EVEX_B;
}

// Shift by a count held in an XMM register (uniform shift).
multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
   // src2 is always 128-bit
   defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                   (ins _.RC:$src1, VR128X:$src2), OpcodeStr,
                      "$src2, $src1", "$src1, $src2",
                   (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
                   " ",  SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
   defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                   (ins _.RC:$src1, i128mem:$src2), OpcodeStr,
                       "$src2, $src1", "$src1, $src2",
                   (_.VT (OpNode _.RC:$src1, (bc_frag (loadv2i64 addr:$src2)))),
                   " ",  SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase,
                   EVEX_4V;
}

// Instantiate uniform shifts over 512-bit plus (under VL) 256/128-bit.
// Note the per-width CD8 tuple differs: CD8VQ / CD8VH / CD8VF.
multiclass avx512_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              ValueType SrcVT, PatFrag bc_frag,
                              AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
                            VTInfo.info512>, EVEX_V512,
                            EVEX_CD8<VTInfo.info512.EltSize, CD8VQ> ;
  let Predicates = [prd, HasVLX] in {
  defm Z256 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
                               VTInfo.info256>, EVEX_V256,
                               EVEX_CD8<VTInfo.info256.EltSize, CD8VH>;
  defm Z128 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
                               VTInfo.info128>, EVEX_V128,
                               EVEX_CD8<VTInfo.info128.EltSize, CD8VF>;
  }
}

// D/Q/W uniform shifts; the word form requires BWI.
multiclass avx512_shift_types<bits<8> opcd, bits<8> opcq, bits<8> opcw,
                              string OpcodeStr, SDNode OpNode> {
  defm D : avx512_shift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
                              avx512vl_i32_info, HasAVX512>;
  defm Q : avx512_shift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64,
                              avx512vl_i64_info, HasAVX512>, VEX_W;
  defm W : avx512_shift_sizes<opcw, OpcodeStr#"w", OpNode, v8i16, bc_v8i16,
                              avx512vl_i16_info, HasBWI>;
}

// Immediate shifts (plus broadcast form) over 512/256/128-bit widths.
multiclass avx512_shift_rmi_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
                                  string OpcodeStr, SDNode OpNode,
                                  AVX512VLVectorVTInfo VTInfo> {
  let Predicates = [HasAVX512] in
  defm Z:    avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
                              VTInfo.info512>,
             avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
                               VTInfo.info512>, EVEX_V512;
  let Predicates = [HasAVX512, HasVLX] in {
  defm Z256: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
                              VTInfo.info256>,
             avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
                               VTInfo.info256>, EVEX_V256;
  defm Z128: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
                              VTInfo.info128>,
             avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
                               VTInfo.info128>, EVEX_V128;
  }
}

// Word immediate shifts: BWI-gated, no broadcast form (words can't broadcast).
multiclass avx512_shift_rmi_w<bits<8> opcw,
                              Format ImmFormR, Format ImmFormM,
                              string OpcodeStr, SDNode OpNode> {
  let Predicates = [HasBWI] in
  defm WZ:    avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
                               v32i16_info>, EVEX_V512;
  let Predicates = [HasVLX, HasBWI] in {
  defm WZ256: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
                               v16i16x_info>, EVEX_V256;
  defm WZ128: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
                               v8i16x_info>, EVEX_V128;
  }
}

multiclass avx512_shift_rmi_dq<bits<8> opcd, bits<8> opcq,
                               Format ImmFormR, Format ImmFormM,
                               string OpcodeStr, SDNode OpNode> {
  defm D: avx512_shift_rmi_sizes<opcd, ImmFormR, ImmFormM, OpcodeStr#"d", OpNode,
                                 avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
  defm Q: avx512_shift_rmi_sizes<opcq, ImmFormR, ImmFormM, OpcodeStr#"q", OpNode,
                                 avx512vl_i64_info>, EVEX_CD8<64, CD8VF>, VEX_W;
}

// Immediate shifts: right-logical, left, right-arithmetic, rotates.
// (d/q forms share an opcode and are distinguished by the ModRM reg field.)
defm VPSRL : avx512_shift_rmi_dq<0x72, 0x73, MRM2r, MRM2m, "vpsrl", X86vsrli>,
             avx512_shift_rmi_w<0x71, MRM2r, MRM2m, "vpsrlw", X86vsrli>;

defm VPSLL : avx512_shift_rmi_dq<0x72, 0x73, MRM6r, MRM6m, "vpsll", X86vshli>,
             avx512_shift_rmi_w<0x71, MRM6r, MRM6m, "vpsllw", X86vshli>;

defm VPSRA : avx512_shift_rmi_dq<0x72, 0x73, MRM4r, MRM4m, "vpsra", X86vsrai>,
             avx512_shift_rmi_w<0x71, MRM4r, MRM4m, "vpsraw", X86vsrai>;

defm VPROR : avx512_shift_rmi_dq<0x72, 0x72, MRM0r, MRM0m, "vpror", rotr>;
defm VPROL : avx512_shift_rmi_dq<0x72, 0x72, MRM1r, MRM1m, "vprol", rotl>;

// Uniform (XMM-count) shifts.
defm VPSLL : avx512_shift_types<0xF2, 0xF3, 0xF1, "vpsll", X86vshl>;
defm VPSRA : avx512_shift_types<0xE2, 0xE2, 0xE1, "vpsra", X86vsra>;
defm VPSRL : avx512_shift_types<0xD2, 0xD3, 0xD1, "vpsrl", X86vsrl>;

//===-------------------------------------------------------------------===//
// Variable Bit Shifts
//===-------------------------------------------------------------------===//
// Per-element variable shift: count vector supplied in $src2.
multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            X86VectorVTInfo _> {
  defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                   (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
                      "$src2, $src1", "$src1, $src2",
                   (_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
                   " ",  SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
  let mayLoad = 1 in
  defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                   (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
                       "$src2, $src1", "$src1, $src2",
                   (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2))),
                   " ",  SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_4V,
                   EVEX_CD8<_.EltSize, CD8VF>;
}

// Broadcast-memory form of the variable shift (EVEX.B).
multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
                               X86VectorVTInfo _> {
  let mayLoad = 1 in
  defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
                    "${src2}"##_.BroadcastStr##", $src1",
                    "$src1, ${src2}"##_.BroadcastStr,
                    (_.VT (OpNode _.RC:$src1, (_.VT (X86VBroadcast
                                                (_.ScalarLdFrag addr:$src2))))),
                    " ",  SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_B,
                    EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_var_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  AVX512VLVectorVTInfo _> {
  let Predicates  = [HasAVX512] in
  defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
           avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;

  let Predicates = [HasAVX512, HasVLX] in {
  defm Z256 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
              avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
  defm Z128 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
              avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
  }
}

multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode> {
  defm D : avx512_var_shift_sizes<opc, OpcodeStr#"d", OpNode,
                                  avx512vl_i32_info>;
  defm Q : avx512_var_shift_sizes<opc, OpcodeStr#"q", OpNode,
                                  avx512vl_i64_info>, VEX_W;
}

// Word-element variable shifts: BWI-gated, no broadcast form.
multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
                              SDNode OpNode> {
  let Predicates = [HasBWI] in
  defm WZ:    avx512_var_shift<opc, OpcodeStr, OpNode, v32i16_info>,
              EVEX_V512, VEX_W;
  let Predicates = [HasVLX, HasBWI] in {

  defm WZ256: avx512_var_shift<opc, OpcodeStr, OpNode, v16i16x_info>,
              EVEX_V256, VEX_W;
  defm WZ128: avx512_var_shift<opc, OpcodeStr, OpNode, v8i16x_info>,
              EVEX_V128, VEX_W;
  }
}

defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
              avx512_var_shift_w<0x12, "vpsllvw", shl>;
defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
              avx512_var_shift_w<0x11, "vpsravw", sra>;
defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
              avx512_var_shift_w<0x10, "vpsrlvw", srl>;
// Variable rotates have no word form.
defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr>;
defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl>;

//===----------------------------------------------------------------------===//
// AVX-512 - MOVDDUP
//===----------------------------------------------------------------------===//

// Duplicate even-indexed double elements; register and memory sources.
multiclass avx512_movddup<string OpcodeStr,
                          RegisterClass RC, ValueType VT,
                          X86MemOperand x86memop, PatFrag memop_frag> {
def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst,
                     (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
}

defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, loadv8f64>,
                 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Fold a scalar f64 load feeding a movddup into the memory form.
def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPZrm addr:$src)>;

//===---------------------------------------------------------------------===//
// Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                      ValueType vt, RegisterClass RC, PatFrag mem_frag,
                      X86MemOperand x86memop> {
  def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
  let mayLoad = 1 in
  def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
}

defm VMOVSHDUPZ  : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                       v16f32, VR512, loadv16f32, f512mem>, EVEX_V512,
                       EVEX_CD8<32, CD8VF>;
defm VMOVSLDUPZ  : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                       v16f32, VR512, loadv16f32, f512mem>, EVEX_V512,
                       EVEX_CD8<32, CD8VF>;

// Reuse the FP instructions for the integer-typed shuffle nodes.
def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
def : Pat<(v16i32 (X86Movshdup (loadv16i32 addr:$src))),
          (VMOVSHDUPZrm addr:$src)>;
def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
def : Pat<(v16i32 (X86Movsldup (loadv16i32 addr:$src))),
          (VMOVSLDUPZrm addr:$src)>;

//===----------------------------------------------------------------------===//
// Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//
def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
          (ins VR128X:$src1, VR128X:$src2),
          "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
           IIC_SSE_MOV_LH>, EVEX_4V;
def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
          (ins VR128X:$src1, VR128X:$src2),
          "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
          IIC_SSE_MOV_LH>, EVEX_4V;

let Predicates = [HasAVX512] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
            (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
            (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
            (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
}

//===----------------------------------------------------------------------===//
// FMA - Fused Multiply Operations
//

let Constraints = "$src1 = $dst" in {
// Omitting the parameter OpNode (= null_frag) disables ISel pattern matching.
3694multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, 3695 SDPatternOperator OpNode = null_frag> { 3696 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), 3697 (ins _.RC:$src2, _.RC:$src3), 3698 OpcodeStr, "$src3, $src2", "$src2, $src3", 3699 (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>, 3700 AVX512FMA3Base; 3701 3702 let mayLoad = 1 in 3703 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), 3704 (ins _.RC:$src2, _.MemOp:$src3), 3705 OpcodeStr, "$src3, $src2", "$src2, $src3", 3706 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>, 3707 AVX512FMA3Base; 3708 3709 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), 3710 (ins _.RC:$src2, _.ScalarMemOp:$src3), 3711 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"), 3712 !strconcat("$src2, ${src3}", _.BroadcastStr ), 3713 (OpNode _.RC:$src1, 3714 _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>, 3715 AVX512FMA3Base, EVEX_B; 3716 } 3717} // Constraints = "$src1 = $dst" 3718 3719let Constraints = "$src1 = $dst" in { 3720// Omitting the parameter OpNode (= null_frag) disables ISel pattern matching. 
// Register/register FMA3 form with an explicit static-rounding operand $rc
// (EVEX.b + EVEX_RC encode the rounding mode on register-register ops).
multiclass avx512_fma3_round_rrb<bits<8> opc, string OpcodeStr,
                                 X86VectorVTInfo _,
                                 SDPatternOperator OpNode> {
  defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
          (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
          OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
          (_.VT ( OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3, (i32 imm:$rc)))>,
          AVX512FMA3Base, EVEX_B, EVEX_RC;
 }
} // Constraints = "$src1 = $dst"

// Instantiates only the 213 rounding form (rounding exists on 512-bit only).
multiclass avx512_fma3_round_forms<bits<8> opc213, string OpcodeStr,
                                   X86VectorVTInfo VTI, SDPatternOperator OpNode> {
  defm v213r : avx512_fma3_round_rrb<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
                                     VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
}

// 213 form carries the ISel patterns; 231 is pattern-less (OpNode omitted).
multiclass avx512_fma3p_forms<bits<8> opc213, bits<8> opc231,
                              string OpcodeStr, X86VectorVTInfo VTI,
                              SDPatternOperator OpNode> {
  defm v213r : avx512_fma3p_rm<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
                               VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
  defm v231r : avx512_fma3p_rm<opc231, !strconcat(OpcodeStr, "231", VTI.Suffix),
                               VTI>, EVEX_CD8<VTI.EltSize, CD8VF>;
}

// Instantiates PS/PD at 512/256/128 bits; rounding forms 512-bit only.
multiclass avx512_fma3p<bits<8> opc213, bits<8> opc231,
                        string OpcodeStr,
                        SDPatternOperator OpNode,
                        SDPatternOperator OpNodeRnd> {
let ExeDomain = SSEPackedSingle in {
    defm NAME##PSZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
                                        v16f32_info, OpNode>,
                     avx512_fma3_round_forms<opc213, OpcodeStr,
                                             v16f32_info, OpNodeRnd>, EVEX_V512;
    defm NAME##PSZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
                                           v8f32x_info, OpNode>, EVEX_V256;
    defm NAME##PSZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
                                           v4f32x_info, OpNode>, EVEX_V128;
  }
let ExeDomain = SSEPackedDouble in {
    defm NAME##PDZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
                                        v8f64_info, OpNode>,
                     avx512_fma3_round_forms<opc213, OpcodeStr, v8f64_info,
                                             OpNodeRnd>, EVEX_V512, VEX_W;
    defm NAME##PDZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
                                           v4f64x_info, OpNode>,
                        EVEX_V256, VEX_W;
    defm NAME##PDZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
                                           v2f64x_info, OpNode>,
                        EVEX_V128, VEX_W;
  }
}

defm VFMADD : avx512_fma3p<0xA8, 0xB8, "vfmadd", X86Fmadd, X86FmaddRnd>;
defm VFMSUB : avx512_fma3p<0xAA, 0xBA, "vfmsub", X86Fmsub, X86FmsubRnd>;
defm VFMADDSUB : avx512_fma3p<0xA6, 0xB6, "vfmaddsub", X86Fmaddsub, X86FmaddsubRnd>;
defm VFMSUBADD : avx512_fma3p<0xA7, 0xB7, "vfmsubadd", X86Fmsubadd, X86FmsubaddRnd>;
defm VFNMADD : avx512_fma3p<0xAC, 0xBC, "vfnmadd", X86Fnmadd, X86FnmaddRnd>;
defm VFNMSUB : avx512_fma3p<0xAE, 0xBE, "vfnmsub", X86Fnmsub, X86FnmsubRnd>;

let Constraints = "$src1 = $dst" in {
// Memory-only 132 forms: the memory operand is $src2 (the multiplicand),
// with a plain-load ("m") and an embedded-broadcast ("mb") variant.
multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr, SDNode OpNode,
                             X86VectorVTInfo _> {
  let mayLoad = 1 in
  def m: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
          (ins _.RC:$src1, _.RC:$src3, _.MemOp:$src2),
          !strconcat(OpcodeStr, "\t{$src2, $src3, $dst|$dst, $src3, $src2}"),
          [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2),
                                         _.RC:$src3)))]>;
  def mb: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
           (ins _.RC:$src1, _.RC:$src3, _.ScalarMemOp:$src2),
           !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr,
                      ", $src3, $dst|$dst, $src3, ${src2}", _.BroadcastStr, "}"),
           [(set _.RC:$dst,
               (OpNode _.RC:$src1, (_.VT (X86VBroadcast
                                          (_.ScalarLdFrag addr:$src2))),
                       _.RC:$src3))]>, EVEX_B;
}
} // Constraints = "$src1 = $dst"

// Instantiates the 132 memory forms for PS/PD at 512/256/128 bits.
multiclass avx512_fma3p_m132_f<bits<8> opc, string OpcodeStr, SDNode OpNode> {

let ExeDomain = SSEPackedSingle in {
    defm NAME##PSZ : avx512_fma3p_m132<opc, OpcodeStr##ps,
                                       OpNode,v16f32_info>, EVEX_V512,
                                       EVEX_CD8<32, CD8VF>;
    defm NAME##PSZ256 : avx512_fma3p_m132<opc, OpcodeStr##ps,
                                          OpNode, v8f32x_info>, EVEX_V256,
                                          EVEX_CD8<32, CD8VF>;
    defm
NAME##PSZ128 : avx512_fma3p_m132<opc, OpcodeStr##ps,
                                 OpNode, v4f32x_info>, EVEX_V128,
                                 EVEX_CD8<32, CD8VF>;
  }
let ExeDomain = SSEPackedDouble in {
    // The CD8 element size must be 64 for the PD forms: it controls the
    // disp8*N compressed-displacement scaling of the embedded-broadcast
    // (EVEX.b) memory operand, which loads a single f64 (N = 8 bytes).
    // Using <32, ...> here would mis-scale disp8 on the broadcast forms.
    defm NAME##PDZ : avx512_fma3p_m132<opc, OpcodeStr##pd,
                                       OpNode, v8f64_info>, EVEX_V512,
                                       VEX_W, EVEX_CD8<64, CD8VF>;
    defm NAME##PDZ256 : avx512_fma3p_m132<opc, OpcodeStr##pd,
                                          OpNode, v4f64x_info>, EVEX_V256,
                                          VEX_W, EVEX_CD8<64, CD8VF>;
    defm NAME##PDZ128 : avx512_fma3p_m132<opc, OpcodeStr##pd,
                                          OpNode, v2f64x_info>, EVEX_V128,
                                          VEX_W, EVEX_CD8<64, CD8VF>;
  }
}

defm VFMADD132 : avx512_fma3p_m132_f<0x98, "vfmadd132", X86Fmadd>;
defm VFMSUB132 : avx512_fma3p_m132_f<0x9A, "vfmsub132", X86Fmsub>;
defm VFMADDSUB132 : avx512_fma3p_m132_f<0x96, "vfmaddsub132", X86Fmaddsub>;
defm VFMSUBADD132 : avx512_fma3p_m132_f<0x97, "vfmsubadd132", X86Fmsubadd>;
defm VFNMADD132 : avx512_fma3p_m132_f<0x9C, "vfnmadd132", X86Fnmadd>;
defm VFNMSUB132 : avx512_fma3p_m132_f<0x9E, "vfnmsub132", X86Fnmsub>;

// Scalar FMA
let Constraints = "$src1 = $dst" in {
// Scalar FMA3 (213 form): register/register and register/memory.  The
// register form is commutable in the two multiplicand operands.
multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType OpVT,
                           X86MemOperand x86memop, Operand memop,
                           PatFrag mem_frag> {
  let isCommutable = 1 in
  def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, RC:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set RC:$dst,
                       (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
  let mayLoad = 1 in
  def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, f128mem:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set RC:$dst,
                       (OpVT (OpNode RC:$src2, RC:$src1,
                                     (mem_frag addr:$src3))))]>;
}
} // Constraints = "$src1 = $dst"

defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
                  f32, f32mem,
ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
                  f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
                  f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
                  f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
                  f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
                  f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
                  f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
                  f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;

//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from sign integer to float/double
//===----------------------------------------------------------------------===//

// GPR -> scalar fp conversion (no ISel patterns here; selection is done via
// the Pat<> records below).  $src1 supplies the upper destination elements.
multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         X86MemOperand x86memop, string asm> {
let hasSideEffects = 0 in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              EVEX_4V;
  let mayLoad = 1 in
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              EVEX_4V;
} // hasSideEffects = 0
}

let Predicates = [HasAVX512] in {
defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
                      XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
                      XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
                      XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
                      XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;

// sint_to_fp selection: the tied first operand is irrelevant, so feed it an
// IMPLICIT_DEF.
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
          (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
          (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
          (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
          (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;

def : Pat<(f32 (sint_to_fp GR32:$src)),
          (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f32 (sint_to_fp GR64:$src)),
          (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
def : Pat<(f64 (sint_to_fp GR32:$src)),
          (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f64 (sint_to_fp GR64:$src)),
          (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;

// Unsigned variants (AVX-512 only encodings).
defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
                       XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
                       XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
                       XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
                       XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;

def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
          (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
          (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
          (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
          (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;

def : Pat<(f32 (uint_to_fp GR32:$src)),
          (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f32 (uint_to_fp GR64:$src)),
          (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
def : Pat<(f64 (uint_to_fp GR32:$src)),
          (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f64 (uint_to_fp GR64:$src)),
          (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from float/double to integer
//===----------------------------------------------------------------------===//
// Intrinsic (XMM-source) forms; the memory variant carries no pattern.
multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                            Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
                            string asm> {
let hasSideEffects = 0 in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
              Requires<[HasAVX512]>;
  let mayLoad = 1 in
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
              Requires<[HasAVX512]>;
} // hasSideEffects = 0
}
let Predicates = [HasAVX512] in {
// Convert float/double to signed/unsigned int 32/64
defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
                                  ssmem, sse_load_f32, "cvtss2si">,
                                  XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
                                    ssmem, sse_load_f32, "cvtss2si">,
                                    XS, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
                                   ssmem, sse_load_f32, "cvtss2usi">,
                                   XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
                                     int_x86_avx512_cvtss2usi64, ssmem,
                                     sse_load_f32, "cvtss2usi">, XS, VEX_W,
                                     EVEX_CD8<32, CD8VT1>;
defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
                                  sdmem, sse_load_f64, "cvtsd2si">,
                                  XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
                                    sdmem, sse_load_f64, "cvtsd2si">,
                                    XD, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
                                   sdmem, sse_load_f64, "cvtsd2usi">,
                                   XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
                                     int_x86_avx512_cvtsd2usi64, sdmem,
                                     sse_load_f64, "cvtsd2usi">, XD, VEX_W,
                                     EVEX_CD8<64, CD8VT1>;

let isCodeGenOnly = 1 in {
  defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V;
  defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
  defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V;
  defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;

  defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V;
  defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
  defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V;
  defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
} // isCodeGenOnly = 1

// Convert float/double to signed/unsigned int 32/64 with truncation
let isCodeGenOnly = 1 in {
  defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
                                          ssmem, sse_load_f32, "cvttss2si">,
                                          XS, EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
                                        int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                        "cvttss2si">, XS, VEX_W,
                                        EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
                                          sdmem, sse_load_f64, "cvttsd2si">, XD,
                                          EVEX_CD8<64, CD8VT1>;
  defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
                                        int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                        "cvttsd2si">, XD, VEX_W,
                                        EVEX_CD8<64, CD8VT1>;
  defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
                                        int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
                                        "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
                                        int_x86_avx512_cvttss2usi64, ssmem,
                                        sse_load_f32, "cvttss2usi">, XS, VEX_W,
                                        EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
                                        int_x86_avx512_cvttsd2usi,
                                        sdmem, sse_load_f64, "cvttsd2usi">, XD,
                                        EVEX_CD8<64, CD8VT1>;
  defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
                                        int_x86_avx512_cvttsd2usi64, sdmem,
                                        sse_load_f64, "cvttsd2usi">, XD, VEX_W,
                                        EVEX_CD8<64, CD8VT1>;
} // isCodeGenOnly = 1

// Non-intrinsic (FR32X/FR64X-source) truncating conversions, with patterns.
multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                        SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                        string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins
SrcRC:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
}

// Truncating scalar fp -> signed/unsigned int conversions (cvtt*), selected
// directly from fp_to_sint / fp_to_uint.
defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
                                loadf32, "cvttss2si">, XS,
                                EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
                                 loadf32, "cvttss2usi">, XS,
                                 EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
                                  loadf32, "cvttss2si">, XS, VEX_W,
                                  EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
                                   loadf32, "cvttss2usi">, XS, VEX_W,
                                   EVEX_CD8<32, CD8VT1>;
defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
                                loadf64, "cvttsd2si">, XD,
                                EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
                                 loadf64, "cvttsd2usi">, XD,
                                 EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
                                  loadf64, "cvttsd2si">, XD, VEX_W,
                                  EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
                                   loadf64, "cvttsd2usi">, XD, VEX_W,
                                   EVEX_CD8<64, CD8VT1>;
} // HasAVX512
//===----------------------------------------------------------------------===//
// AVX-512 Convert from float to double and back
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
                             (ins FR32X:$src1, FR32X:$src2),
                             "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                             []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
let mayLoad = 1 in
def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs
FR64X:$dst),
                             (ins FR32X:$src1, f32mem:$src2),
                             "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                             []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
                             EVEX_CD8<32, CD8VT1>;

// Convert scalar double to scalar single
def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
                             (ins FR64X:$src1, FR64X:$src2),
                             "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                             []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
let mayLoad = 1 in
def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
                             (ins FR64X:$src1, f64mem:$src2),
                             "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                             []>, EVEX_4V, VEX_LIG, VEX_W,
                             Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
}

def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
          Requires<[HasAVX512]>;
def : Pat<(fextend (loadf32 addr:$src)),
          (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;

// extloadf32: folded load when optimizing for size ...
def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX512, OptForSize]>;

// ... and a separate move + register convert when optimizing for speed.
def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
          Requires<[HasAVX512, OptForSpeed]>;

def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
          Requires<[HasAVX512]>;

// Packed fp<->fp/int conversion with an extra static-rounding ("rrb",
// EVEX.b + $rc) register form.
multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
                          RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
                          X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
                          Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
                    !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                          (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
  def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
                    !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
                    [], d>, EVEX, EVEX_B, EVEX_RC;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                    !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                          (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}

// Same as above, without the static-rounding variant.
multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
                          RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
                          X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
                          Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
                    !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                          (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                    !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                          (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}

defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
                                loadv8f64, f512mem, v8f32, v8f64,
                                SSEPackedSingle>, EVEX_V512, VEX_W, PD,
                                EVEX_CD8<64, CD8VF>;

defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
                                loadv4f64, f256mem, v8f64, v8f32,
                                SSEPackedDouble>, EVEX_V512, PS,
                                EVEX_CD8<32, CD8VH>;
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
            (VCVTPS2PDZrm addr:$src)>;

def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
                   (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
          (VCVTPD2PSZrr VR512:$src)>;

def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
                   (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
          (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;

//===----------------------------------------------------------------------===//
// AVX-512 Vector convert from sign integer to float/double
//===----------------------------------------------------------------------===//

defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
                                loadv8i64, i512mem, v16f32, v16i32,
                                SSEPackedSingle>, EVEX_V512, PS,
                                EVEX_CD8<32, CD8VF>;

defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
                                loadv4i64, i256mem, v8f64, v8i32,
                                SSEPackedDouble>, EVEX_V512, XS,
                                EVEX_CD8<32, CD8VH>;

defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
                                 loadv16f32, f512mem, v16i32, v16f32,
                                 SSEPackedSingle>, EVEX_V512, XS,
                                 EVEX_CD8<32, CD8VF>;

defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
                                 loadv8f64, f512mem, v8i32, v8f64,
                                 SSEPackedDouble>, EVEX_V512, PD, VEX_W,
                                 EVEX_CD8<64, CD8VF>;

defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
                                 loadv16f32, f512mem, v16i32, v16f32,
                                 SSEPackedSingle>, EVEX_V512, PS,
                                 EVEX_CD8<32, CD8VF>;

// cvttps2udq (src, 0, mask-all-ones, sae-current)
def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
                   (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
          (VCVTTPS2UDQZrr VR512:$src)>;

defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
                                 loadv8f64, f512mem, v8i32, v8f64,
                                 SSEPackedDouble>, EVEX_V512, PS, VEX_W,
                                 EVEX_CD8<64, CD8VF>;

// cvttpd2udq (src, 0, mask-all-ones, sae-current)
def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
                  (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
          (VCVTTPD2UDQZrr VR512:$src)>;

defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
                                 loadv4i64, f256mem, v8f64, v8i32,
                                 SSEPackedDouble>, EVEX_V512, XS,
                                 EVEX_CD8<32, CD8VH>;

defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
                                 loadv16i32, f512mem, v16f32, v16i32,
                                 SSEPackedSingle>, EVEX_V512, XD,
                                 EVEX_CD8<32, CD8VF>;

// Narrower (128/256-bit) fp_to_uint / uint_to_fp are implemented by widening
// into the 512-bit instruction and extracting the low subregister.
def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
          (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
           (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;

def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
          (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
           (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;

def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
          (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
           (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;

def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
          (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
           (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;

def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
          (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
           (v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_ymm)>;

def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
                   (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
          (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
                  (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
          (VCVTDQ2PDZrr VR256X:$src)>;
def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
                   (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
          (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
                  (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
          (VCVTUDQ2PDZrr VR256X:$src)>;

// Pattern-less packed fp -> int conversion forms (rr, rrb with static
// rounding, rm); selected only through the intrinsic Pat<> records below.
multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
                              RegisterClass DstRC, PatFrag mem_frag,
X86MemOperand x86memop, Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
                    !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                    [], d>, EVEX;
  def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
                    !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
                    [], d>, EVEX, EVEX_B, EVEX_RC;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                    !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                    [], d>, EVEX;
} // hasSideEffects = 0
}

defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
                                     loadv16f32, f512mem, SSEPackedSingle>, PD,
                                     EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
                                     loadv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
                                     EVEX_V512, EVEX_CD8<64, CD8VF>;

def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
                    (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
           (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;

def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
                   (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
           (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;

defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
                                      loadv16f32, f512mem, SSEPackedSingle>,
                                      PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
                                      loadv8f64, f512mem, SSEPackedDouble>, VEX_W,
                                      PS, EVEX_V512, EVEX_CD8<64, CD8VF>;

def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
                    (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
           (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;

def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
                   (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
           (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;

let Predicates = [HasAVX512] in {
  def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
            (VCVTPD2PSZrm addr:$src)>;
  def : Pat<(v8f64 (extloadv8f32 addr:$src)),
            (VCVTPS2PDZrm addr:$src)>;
}

//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
                           X86MemOperand x86memop> {
  def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
                    "vcvtph2ps\t{$src, $dst|$dst, $src}",
                    []>, EVEX;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
                    "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
}

multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
                           X86MemOperand x86memop> {
  def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
                      (ins srcRC:$src1, i32u8imm:$src2),
                      "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      []>, EVEX;
  let hasSideEffects = 0, mayStore = 1 in
  def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
                      (ins x86memop:$dst, srcRC:$src1, i32u8imm:$src2),
                      "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
}

defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
                                  EVEX_CD8<32, CD8VH>;
defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
                                  EVEX_CD8<32, CD8VH>;

def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
                   imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
          (VCVTPS2PHZrr VR512:$src, imm:$rc)>;

def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
                   (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
          (VCVTPH2PSZrr VR256X:$src)>;

let Defs = [EFLAGS], Predicates = [HasAVX512] in {
  defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
                                 "ucomiss">, PS, EVEX, VEX_LIG,
                                 EVEX_CD8<32, CD8VT1>;
  defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
                                 "ucomisd">, PD, EVEX,
                                 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
  let Pattern = []<dag> in {
    defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
                                  "comiss">, PS, EVEX, VEX_LIG,
                                  EVEX_CD8<32, CD8VT1>;
    defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
                                  "comisd">, PD, EVEX,
                                  VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
  }
  let isCodeGenOnly = 1 in {
    defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
                                       load, "ucomiss">, PS, EVEX, VEX_LIG,
                                       EVEX_CD8<32, CD8VT1>;
    defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
                                       load, "ucomisd">, PD, EVEX,
                                       VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;

    defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
                                      load, "comiss">, PS, EVEX, VEX_LIG,
                                      EVEX_CD8<32, CD8VT1>;
    defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
                                      load, "comisd">, PD, EVEX,
                                      VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
  }
}

/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
// Scalar 14-bit-precision approximations; pattern-less, intrinsic-selected.
multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         X86MemOperand x86memop> {
  let hasSideEffects = 0 in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
  }
}
}

defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
                EVEX_CD8<32, CD8VT1>;
defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
                VEX_W, EVEX_CD8<64, CD8VT1>;
defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
                EVEX_CD8<32, CD8VT1>;
defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
                VEX_W, EVEX_CD8<64, CD8VT1>;

def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
                   (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
                              (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;

def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
                   (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
                              (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;

def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
                   (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
                              (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;

def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
                   (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
                              (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;

/// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
// Packed 14-bit-precision approximations: register, memory and
// embedded-broadcast (EVEX.b) forms.
multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         X86VectorVTInfo _> {
  defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                          (ins _.RC:$src), OpcodeStr, "$src", "$src",
                          (_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
  let mayLoad = 1 in {
    defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                            (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
                            (OpNode (_.FloatVT
                                     (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
    defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                             (ins _.ScalarMemOp:$src), OpcodeStr,
                             "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
                             (OpNode (_.FloatVT
                                      (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
                             EVEX, T8PD, EVEX_B;
  }
}

multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  defm PSZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"), OpNode, v16f32_info>,
             EVEX_V512, EVEX_CD8<32, CD8VF>;
  defm PDZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"), OpNode, v8f64_info>,
             EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

  // Define only if AVX512VL feature is present.
  let Predicates = [HasVLX] in {
    defm PSZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
                                OpNode, v4f32x_info>,
                  EVEX_V128, EVEX_CD8<32, CD8VF>;
    defm PSZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
                                OpNode, v8f32x_info>,
                  EVEX_V256, EVEX_CD8<32, CD8VF>;
    defm PDZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
                                OpNode, v2f64x_info>,
                  EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
    defm PDZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
                                OpNode, v4f64x_info>,
                  EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
  }
}

defm VRSQRT14 : avx512_fp14_p_vl_all<0x4E, "vrsqrt14", X86frsqrt>;
defm VRCP14 : avx512_fp14_p_vl_all<0x4C, "vrcp14", X86frcp>;

def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
                    (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
           (VRSQRT14PSZr VR512:$src)>;
def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
                   (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
           (VRSQRT14PDZr VR512:$src)>;

def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
                    (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
           (VRCP14PSZr VR512:$src)>;
def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
                   (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
           (VRCP14PDZr
VR512:$src)>; 4510 4511/// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd 4512multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, 4513 SDNode OpNode> { 4514 4515 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), 4516 (ins _.RC:$src1, _.RC:$src2), OpcodeStr, 4517 "$src2, $src1", "$src1, $src2", 4518 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), 4519 (i32 FROUND_CURRENT))>; 4520 4521 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), 4522 (ins _.RC:$src1, _.RC:$src2), OpcodeStr, 4523 "$src2, $src1", "$src1, $src2", 4524 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), 4525 (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B; 4526 4527 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), 4528 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr, 4529 "$src2, $src1", "$src1, $src2", 4530 (OpNode (_.VT _.RC:$src1), 4531 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))), 4532 (i32 FROUND_CURRENT))>; 4533} 4534 4535multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> { 4536 defm SS : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode>, 4537 EVEX_CD8<32, CD8VT1>; 4538 defm SD : avx512_fp28_s<opc, OpcodeStr#"sd", f64x_info, OpNode>, 4539 EVEX_CD8<64, CD8VT1>, VEX_W; 4540} 4541 4542let hasSideEffects = 0, Predicates = [HasERI] in { 4543 defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V; 4544 defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V; 4545} 4546/// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd 4547 4548multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, 4549 SDNode OpNode> { 4550 4551 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), 4552 (ins _.RC:$src), OpcodeStr, "$src", "$src", 4553 (OpNode (_.VT _.RC:$src), (i32 FROUND_CURRENT))>; 4554 4555 defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), 4556 (ins _.RC:$src), OpcodeStr, 4557 "$src", "$src", 4558 (OpNode (_.VT _.RC:$src), (i32 
FROUND_NO_EXC)),
                        "{sae}">, EVEX_B;

  // Full-vector memory source, current rounding environment.
  defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                         (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
                         (OpNode (_.FloatVT
                             (bitconvert (_.LdFrag addr:$src))),
                          (i32 FROUND_CURRENT))>;

  // Broadcast memory source (EVEX.b): one scalar element splat to all lanes.
  // FIX: use the element-sized memory operand and print the {1toN} broadcast
  // decoration, consistent with avx512_fp14_p and avx512_sqrt_packed; the old
  // form used the full-vector operand with a plain "$src" string, so the
  // broadcast syntax could not be assembled or printed.
  defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                         (ins _.ScalarMemOp:$src), OpcodeStr,
                         "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
                         (OpNode (_.FloatVT
                                  (X86VBroadcast (_.ScalarLdFrag addr:$src))),
                          (i32 FROUND_CURRENT))>, EVEX_B;
}

// Instantiate the 512-bit PS/PD variants of a packed ER instruction.
multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode> {
   defm PS : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
                      EVEX_CD8<32, CD8VF>;
   // FIX: the PD forms operate on 64-bit elements (v8f64_info), so the
   // compressed displacement (disp8*N) must scale by 64-bit tuples, matching
   // every other PD instantiation in this file; it previously used <32, ...>.
   defm PD : avx512_fp28_p<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
                      VEX_W, EVEX_CD8<64, CD8VF>;
}

let Predicates = [HasERI], hasSideEffects = 0 in {

 defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX, EVEX_V512, T8PD;
 defm VRCP28   : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX, EVEX_V512, T8PD;
 defm VEXP2    : avx512_eri<0xC8, "vexp2", X86exp2>, EVEX, EVEX_V512, T8PD;
}

// Packed square root: register, full-vector memory, and broadcast-memory
// (EVEX.b) forms.
multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
                              SDNode OpNode, X86VectorVTInfo _>{
  defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                          (ins _.RC:$src), OpcodeStr, "$src", "$src",
                          (_.FloatVT (OpNode _.RC:$src))>, EVEX;
  let mayLoad = 1 in {
    defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                            (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
                            (OpNode (_.FloatVT
                              (bitconvert (_.LdFrag addr:$src))))>, EVEX;

    defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                            (ins _.ScalarMemOp:$src), OpcodeStr,
                            "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
                            (OpNode (_.FloatVT
                              (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
                            EVEX, EVEX_B;
  }
}

multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
                Intrinsic F32Int, Intrinsic F64Int,
                OpndItins
itins_s, OpndItins itins_d> {
  // NOTE(review): itins_d is never used below; the SD defs pass itins_s.rr
  // or no itinerary at all — confirm the intended itineraries.
  // Scalar single-precision sqrt on FP registers (no pattern; selected via
  // the Pat defs that follow this multiclass).
  def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
               (ins FR32X:$src1, FR32X:$src2),
               !strconcat(OpcodeStr,
                          "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [], itins_s.rr>, XS, EVEX_4V;
  // Intrinsic form on full XMM registers.
  let isCodeGenOnly = 1 in
  def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
                      (ins VR128X:$src1, VR128X:$src2),
                      !strconcat(OpcodeStr,
                                 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128X:$dst,
                        (F32Int VR128X:$src1, VR128X:$src2))],
                      itins_s.rr>, XS, EVEX_4V;
  let mayLoad = 1 in {
  def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
               (ins FR32X:$src1, f32mem:$src2),
               !strconcat(OpcodeStr,
                          "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
  let isCodeGenOnly = 1 in
  def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
                      (ins VR128X:$src1, ssmem:$src2),
                      !strconcat(OpcodeStr,
                                 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128X:$dst,
                        (F32Int VR128X:$src1, sse_load_f32:$src2))],
                      itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
  }
  // Scalar double-precision forms.
  def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
               (ins FR64X:$src1, FR64X:$src2),
               !strconcat(OpcodeStr,
                          "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                      XD, EVEX_4V, VEX_W;
  let isCodeGenOnly = 1 in
  def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
                      (ins VR128X:$src1, VR128X:$src2),
                      !strconcat(OpcodeStr,
                                 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128X:$dst,
                        (F64Int VR128X:$src1, VR128X:$src2))],
                      itins_s.rr>, XD, EVEX_4V, VEX_W;
  let mayLoad = 1 in {
  def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
               (ins FR64X:$src1, f64mem:$src2),
               !strconcat(OpcodeStr,
                          "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
               XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
  let isCodeGenOnly = 1 in
  def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
                      (ins VR128X:$src1, sdmem:$src2),
                      !strconcat(OpcodeStr,
                                 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128X:$dst,
                        (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
                      XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
  }
}

// Instantiate 512-bit packed sqrt plus the 128/256-bit forms under AVX512VL.
multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode> {
  defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
                                v16f32_info>,
                                EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
  defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
                                v8f64_info>,
                                EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
  // Define only if AVX512VL feature is present.
  let Predicates = [HasVLX] in {
    defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
                                     OpNode, v4f32x_info>,
                                     EVEX_V128, PS, EVEX_CD8<32, CD8VF>;
    defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
                                     OpNode, v8f32x_info>,
                                     EVEX_V256, PS, EVEX_CD8<32, CD8VF>;
    defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
                                     OpNode, v2f64x_info>,
                                     EVEX_V128, VEX_W, PD, EVEX_CD8<64, CD8VF>;
    defm PDZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
                                     OpNode, v4f64x_info>,
                                     EVEX_V256, VEX_W, PD, EVEX_CD8<64, CD8VF>;
  }
}

defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", fsqrt>;

defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
                int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
                SSE_SQRTSS, SSE_SQRTSD>;

let Predicates = [HasAVX512] in {
  // Unmasked packed sqrt intrinsics (zero merge vector, all-ones mask,
  // current rounding) select the plain register forms.
  def : Pat<(v16f32 (int_x86_avx512_sqrt_ps_512 (v16f32 VR512:$src1),
                    (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_CURRENT)),
            (VSQRTPSZr VR512:$src1)>;
  def : Pat<(v8f64 (int_x86_avx512_sqrt_pd_512 (v8f64 VR512:$src1),
                    (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_CURRENT)),
            (VSQRTPDZr VR512:$src1)>;

  def : Pat<(f32 (fsqrt FR32X:$src)),
            (VSQRTSSZr (f32 (IMPLICIT_DEF)),
FR32X:$src)>;
  // Fold the scalar load only when optimizing for size.
  def : Pat<(f32 (fsqrt (load addr:$src))),
            (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;
  def : Pat<(f64 (fsqrt FR64X:$src)),
            (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
  def : Pat<(f64 (fsqrt (load addr:$src))),
            (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;

  // Scalar reciprocal-sqrt / reciprocal estimates via the 14-bit-precision
  // AVX-512 instructions.
  def : Pat<(f32 (X86frsqrt FR32X:$src)),
            (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
  def : Pat<(f32 (X86frsqrt (load addr:$src))),
            (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;

  def : Pat<(f32 (X86frcp FR32X:$src)),
            (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
  def : Pat<(f32 (X86frcp (load addr:$src))),
            (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;

  // SSE sqrt intrinsics on XMM values, implemented with the EVEX-encoded
  // instructions.
  // NOTE(review): these copy into FR32/FR64 while the rcp14/rsqrt14 patterns
  // earlier use FR32X/FR64X — confirm whether restricting to the non-EVEX
  // register classes here is intentional.
  def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
            (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
                              (COPY_TO_REGCLASS VR128X:$src, FR32)),
                              VR128X)>;
  def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
            (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
            (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
                              (COPY_TO_REGCLASS VR128X:$src, FR64)),
                              VR128X)>;
  def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
            (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
}


// Packed round-to-integral-with-scale; reg and mem forms, no ISel patterns
// (selected via the explicit Pat defs below).
multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
                           X86MemOperand x86memop, RegisterClass RC,
                           PatFrag mem_frag, Domain d> {
let ExeDomain = d in {
  // Intrinsic operation, reg.
  // Vector intrinsic operation, reg
  def r : AVX512AIi8<opc, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, EVEX;

  // Vector intrinsic operation, mem
  def m : AVX512AIi8<opc, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, EVEX;
} // ExeDomain
}

defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
                                loadv16f32, SSEPackedSingle>, EVEX_V512,
                                EVEX_CD8<32, CD8VF>;

// Unmasked intrinsic (pass-through equals the source, all-ones mask) maps
// onto the plain register form.
def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
                   imm:$src2, (v16f32 VR512:$src1), (i16 -1),
                   FROUND_CURRENT)),
                   (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;


defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
                                loadv8f64, SSEPackedDouble>, EVEX_V512,
                                VEX_W, EVEX_CD8<64, CD8VF>;

def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
                  imm:$src2, (v8f64 VR512:$src1), (i8 -1),
                  FROUND_CURRENT)),
                   (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;

// Scalar rndscale: reg, {sae} and mem forms plus patterns lowering the
// generic FP rounding nodes to the proper immediates.
multiclass
avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {

  let ExeDomain = _.ExeDomain in {
  defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                           (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
                           "$src3, $src2, $src1", "$src1, $src2, $src3",
                           (_.VT (X86RndScale (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                            (i32 imm:$src3), (i32 FROUND_CURRENT)))>;

  // {sae} form (EVEX.b on a register operand).
  defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                         (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
                         "$src3, $src2, $src1", "$src1, $src2, $src3",
                         (_.VT (X86RndScale (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                         (i32 imm:$src3), (i32 FROUND_NO_EXC))), "{sae}">, EVEX_B;

  let mayLoad = 1 in
  defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
                         (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3), OpcodeStr,
                         "$src3, $src2, $src1", "$src1, $src2, $src3",
                         (_.VT (X86RndScale (_.VT _.RC:$src1),
                          (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
                          (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
  }
  let Predicates = [HasAVX512] in {
  // Rounding-node immediates used below: 0x1=floor, 0x2=ceil, 0x3=trunc,
  // 0x4=rint (current mode), 0xc=nearbyint.
  def : Pat<(ffloor _.FRC:$src), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
             (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x1))), _.FRC)>;
  def : Pat<(fceil _.FRC:$src), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
             (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x2))), _.FRC)>;
  def : Pat<(ftrunc _.FRC:$src), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
             (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x3))), _.FRC)>;
  def : Pat<(frint _.FRC:$src), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
             (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x4))), _.FRC)>;
  def : Pat<(fnearbyint _.FRC:$src), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
             (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0xc))), _.FRC)>;

  // Same lowering with the scalar operand folded from memory.
  def : Pat<(ffloor (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
             addr:$src, (i32 0x1))), _.FRC)>;
  def : Pat<(fceil (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
             addr:$src, (i32 0x2))), _.FRC)>;
  def : Pat<(ftrunc (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
             addr:$src, (i32 0x3))), _.FRC)>;
  def : Pat<(frint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
             addr:$src, (i32 0x4))), _.FRC)>;
  def : Pat<(fnearbyint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
             (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
             addr:$src, (i32 0xc))), _.FRC)>;
  }
}

defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", f32x_info>,
                                AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VT1>;

defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", f64x_info>, VEX_W,
                                AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VT1>;

let Predicates = [HasAVX512] in {
// Packed rounding nodes lower to the 512-bit rndscale with the same
// immediates as the scalar patterns above.
def : Pat<(v16f32 (ffloor VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
def : Pat<(v16f32 (fnearbyint VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
def : Pat<(v16f32 (fceil VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
def : Pat<(v16f32 (frint VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
def : Pat<(v16f32 (ftrunc VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;

def : Pat<(v8f64 (ffloor VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
def : Pat<(v8f64 (fnearbyint VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
def : Pat<(v8f64 (fceil VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
def : Pat<(v8f64 (frint VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
def : Pat<(v8f64 (ftrunc VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
}
//-------------------------------------------------
// Integer truncate and extend operations
//-------------------------------------------------

// Truncating (and saturating-truncating) vpmov* moves; reg/mem forms with
// merge- and zero-masked variants.  No ISel patterns here; see the Pat defs
// after the instantiations.
multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
                          RegisterClass dstRC, RegisterClass srcRC,
                          RegisterClass KRC, X86MemOperand x86memop> {
  // Unmasked register form.
  def rr  : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
               (ins srcRC:$src),
               !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
               []>, EVEX;

  // Merge-masked register form.
  def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
               (ins KRC:$mask,
srcRC:$src),
               !strconcat(OpcodeStr,
                 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
               []>, EVEX, EVEX_K;

  // Zero-masked register form.
  def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
               (ins KRC:$mask, srcRC:$src),
               !strconcat(OpcodeStr,
                 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
               []>, EVEX, EVEX_KZ;

  // Truncate directly to memory.
  def mr  : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               []>, EVEX;

  // Merge-masked truncate to memory.
  def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
               (ins x86memop:$dst, KRC:$mask, srcRC:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
               []>, EVEX, EVEX_K;

}
defm VPMOVQB    : avx512_trunc_sat<0x32, "vpmovqb",   VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVSQB   : avx512_trunc_sat<0x22, "vpmovsqb",  VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVUSQB  : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVQW    : avx512_trunc_sat<0x34, "vpmovqw",   VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
defm VPMOVSQW   : avx512_trunc_sat<0x24, "vpmovsqw",  VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
defm VPMOVUSQW  : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
defm VPMOVQD    : avx512_trunc_sat<0x35, "vpmovqd",   VR256X, VR512, VK8WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
defm VPMOVSQD   : avx512_trunc_sat<0x25, "vpmovsqd",  VR256X, VR512, VK8WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
defm VPMOVUSQD  : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
defm VPMOVDW    : avx512_trunc_sat<0x33, "vpmovdw",   VR256X, VR512, VK16WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
defm VPMOVSDW   : avx512_trunc_sat<0x23, "vpmovsdw",  VR256X, VR512, VK16WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
defm VPMOVUSDW  : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
defm VPMOVDB    : avx512_trunc_sat<0x31, "vpmovdb",   VR128X, VR512, VK16WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
defm VPMOVSDB   : avx512_trunc_sat<0x21, "vpmovsdb",  VR128X, VR512, VK16WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
defm VPMOVUSDB  : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;

// Plain truncation selects the unmasked rr forms.
def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;

// Masked truncation selects the zero-masked rrkz forms.
def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
          (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
          (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
          (VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
          (VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;


// Sign/zero extension; only the unmasked reg and mem forms carry ISel
// patterns.
// NOTE(review): the masked asm strings below contain a stray space before
// '|' ("$dst {${mask}} |") which shows up in printed assembly — confirm.
multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
             RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
             PatFrag mem_frag, X86MemOperand x86memop,
             ValueType OpVT, ValueType InVT> {

  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
              (ins SrcRC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;

  def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
              (ins KRC:$mask, SrcRC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
              []>, EVEX, EVEX_K;

  def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
              (ins KRC:$mask, SrcRC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
              []>, EVEX, EVEX_KZ;

  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins x86memop:$src),
              !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
              EVEX;

    def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr,"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
              []>,
              EVEX, EVEX_K;

    def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr,"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
              []>,
              EVEX, EVEX_KZ;
  }
}

defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
                               loadv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
                               EVEX_CD8<8, CD8VQ>;
defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
                               loadv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
                               EVEX_CD8<8, CD8VO>;
defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
                               loadv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
                               EVEX_CD8<16, CD8VH>;
defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
                               loadv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
                               EVEX_CD8<16, CD8VQ>;
defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
                               loadv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
                               EVEX_CD8<32, CD8VH>;

defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
                               loadv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
                               EVEX_CD8<8, CD8VQ>;
defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
                               loadv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
                               EVEX_CD8<8, CD8VO>;
defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
                               loadv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
                               EVEX_CD8<16, CD8VH>;
defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
                               loadv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
                               EVEX_CD8<16, CD8VQ>;
defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
                               loadv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
                               EVEX_CD8<32, CD8VH>;

//===----------------------------------------------------------------------===//
// GATHER - SCATTER Operations

// Gather: the destination is merge-updated under the mask and the mask
// register is clobbered (written back through $mask_wb) — hence the tied and
// earlyclobber constraints.
multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                       RegisterClass RC, X86MemOperand memop> {
let mayLoad = 1,
  Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
  def rm  : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
            (ins RC:$src1, KRC:$mask, memop:$src2),
            !strconcat(OpcodeStr,
            "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
}

let ExeDomain = SSEPackedDouble in {
defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}

let ExeDomain = SSEPackedSingle in {
defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;
}

defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64,
CD8VT1>;
defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;

// Scatter: stores under the mask; the mask register is clobbered and written
// back through $mask_wb.
multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                       RegisterClass RC, X86MemOperand memop> {
let mayStore = 1, Constraints = "$mask = $mask_wb" in
  def mr  : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
            (ins memop:$dst, KRC:$mask, RC:$src2),
            !strconcat(OpcodeStr,
            "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
}

let ExeDomain = SSEPackedDouble in {
defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}

let ExeDomain = SSEPackedSingle in {
defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;
}

defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;

// prefetch
// Gather/scatter prefetch: no outputs, no ISel pattern; modeled with
// hasSideEffects = 1.
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
                       RegisterClass KRC, X86MemOperand memop> {
  let Predicates = [HasPFI], hasSideEffects = 1 in
  def m  : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
            !strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"),
            []>, EVEX, EVEX_K;
}

defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

// NOTE(review): the *DPD variants below use vy32mem / EVEX_CD8<32, CD8VT1>
// although the prefetched elements are 64-bit (the 32 here matches the index
// size, not the element size) — confirm the disp8*N scale against the SDM.
defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
//===----------------------------------------------------------------------===//
// VSHUFPS - VSHUFPD Operations

multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
                      ValueType vt, string OpcodeStr, PatFrag mem_frag,
                      Domain d> {
  def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, x86memop:$src2, u8imm:$src3),
                   !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
  def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, u8imm:$src3),
                   !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffle]>;
}

defm VSHUFPSZ  : avx512_shufp<VR512, f512mem, v16f32, "vshufps", loadv16f32,
                  SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VSHUFPDZ  : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", loadv8f64,
                  SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// Re-use the FP shuffles for the same-shaped integer types.
def : Pat<(v16i32 (X86Shufp VR512:$src1,
VR512:$src2, (i8 imm:$imm))), 5186 (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>; 5187def : Pat<(v16i32 (X86Shufp VR512:$src1, 5188 (loadv16i32 addr:$src2), (i8 imm:$imm))), 5189 (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>; 5190 5191def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))), 5192 (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>; 5193def : Pat<(v8i64 (X86Shufp VR512:$src1, 5194 (loadv8i64 addr:$src2), (i8 imm:$imm))), 5195 (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>; 5196 5197multiclass avx512_valign<X86VectorVTInfo _> { 5198 defm rri : AVX512_maskable<0x03, MRMSrcReg, _, (outs _.RC:$dst), 5199 (ins _.RC:$src1, _.RC:$src2, u8imm:$src3), 5200 "valign"##_.Suffix, 5201 "$src3, $src2, $src1", "$src1, $src2, $src3", 5202 (_.VT (X86VAlign _.RC:$src2, _.RC:$src1, 5203 (i8 imm:$src3)))>, 5204 AVX512AIi8Base, EVEX_4V; 5205 5206 // Also match valign of packed floats. 5207 def : Pat<(_.FloatVT (X86VAlign _.RC:$src1, _.RC:$src2, (i8 imm:$imm))), 5208 (!cast<Instruction>(NAME##rri) _.RC:$src2, _.RC:$src1, imm:$imm)>; 5209 5210 let mayLoad = 1 in 5211 def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs _.RC:$dst), 5212 (ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3), 5213 !strconcat("valign"##_.Suffix, 5214 "\t{$src3, $src2, $src1, $dst|" 5215 "$dst, $src1, $src2, $src3}"), 5216 []>, EVEX_4V; 5217} 5218defm VALIGND : avx512_valign<v16i32_info>, EVEX_V512, EVEX_CD8<32, CD8VF>; 5219defm VALIGNQ : avx512_valign<v8i64_info>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>; 5220 5221// Helper fragments to match sext vXi1 to vXiY. 
// Sign-mask splats: arithmetic shift right of every element by
// (element size - 1) yields all-ones for negative lanes, all-zeros
// otherwise. Used below to match the xor/add formulation of abs().
def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64  : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;

// VPABSD/VPABSQ - packed absolute value.
// Suffixes: k = merge-masked, kz = zero-masked, m = full-vector memory,
// mb = embedded-broadcast scalar memory. All forms carry empty patterns;
// selection is done via the Pats following the instantiations.
// Note: the OpVT parameter is currently unreferenced in the bodies.
multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
                        RegisterClass KRC, RegisterClass RC,
                        X86MemOperand x86memop, X86MemOperand x86scalar_mop,
                        string BrdcstStr> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
           !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
           []>, EVEX;
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
            !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
            []>, EVEX, EVEX_K;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
             []>, EVEX, EVEX_KZ;
  let mayLoad = 1 in {
    // FIX: the memory forms previously hard-coded (outs VR512:$dst) instead
    // of using the RC template parameter like the register forms above.
    // That only worked because every current instantiation passes RC=VR512;
    // use RC consistently so the multiclass is correct for any RC.
    def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins x86memop:$src),
             !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
             []>, EVEX;
    def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr,
                         "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
              []>, EVEX, EVEX_K;
    def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, x86memop:$src),
               !strconcat(OpcodeStr,
                          "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
               []>, EVEX, EVEX_KZ;
    def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins x86scalar_mop:$src),
              !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                         ", $dst|$dst, ${src}", BrdcstStr, "}"),
              []>, EVEX, EVEX_B;
    def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, x86scalar_mop:$src),
               !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                          ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
               []>, EVEX, EVEX_B, EVEX_K;
    def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, x86scalar_mop:$src),
                !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                           ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
                           BrdcstStr, "}"),
                []>, EVEX, EVEX_B, EVEX_KZ;
  }
}

defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
                            i512mem, i32mem, "{1to16}">, EVEX_V512,
                            EVEX_CD8<32, CD8VF>;
defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
                            i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
                            EVEX_CD8<64, CD8VF>;

// Match abs(x) expressed as (x + signmask) ^ signmask, where signmask is
// the v*i1sext* sign-splat fragment defined above.
def : Pat<(xor
          (bc_v16i32 (v16i1sextv16i32)),
          (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(xor
          (bc_v8i64 (v8i1sextv8i64)),
          (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
          (VPABSQZrr VR512:$src)>;

// The "unmasked" intrinsic calls (zero passthru, all-ones mask) select the
// plain register form.
def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
                   (v16i32 immAllZerosV), (i16 -1))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
          (VPABSQZrr VR512:$src)>;

// VPCONFLICT / VPLZCNT (CDI). Shared encoding template; all patterns are
// empty and selection is done via the Pats after the instantiations.
multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
                           RegisterClass RC, RegisterClass KRC,
                           X86MemOperand x86memop,
                           X86MemOperand x86scalar_mop, string BrdcstStr> {
 let hasSideEffects = 0 in {
  // NOTE(review): the AT&T template below has a stray space before '|'
  // ("${dst} |"), which prints a trailing space in AT&T syntax — confirm
  // against the assembler tests before normalizing it.
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
           (ins RC:$src),
           !strconcat(OpcodeStr, "\t{$src, ${dst} |${dst}, $src}"),
           []>, EVEX;
  let mayLoad = 1 in
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
           (ins x86memop:$src),
           !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
           []>, EVEX;
  let mayLoad = 1 in
  def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
            (ins x86scalar_mop:$src),
            !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                       ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
            []>, EVEX, EVEX_B;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
             (ins KRC:$mask, RC:$src),
!strconcat(OpcodeStr,
                        "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
             []>, EVEX, EVEX_KZ;
  let mayLoad = 1 in
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins KRC:$mask, x86memop:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
             []>, EVEX, EVEX_KZ;
  let mayLoad = 1 in
  def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins KRC:$mask, x86scalar_mop:$src),
              !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                         ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
                         BrdcstStr, "}"),
              []>, EVEX, EVEX_KZ, EVEX_B;

  // Merge-masked forms: the passthru value is tied to the destination.
  let Constraints = "$src1 = $dst" in {
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
            (ins RC:$src1, KRC:$mask, RC:$src2),
            !strconcat(OpcodeStr,
                       "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
  let mayLoad = 1 in
  def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
            (ins RC:$src1, KRC:$mask, x86memop:$src2),
            !strconcat(OpcodeStr,
                       "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
  let mayLoad = 1 in
  def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
             !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
                        ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
             []>, EVEX, EVEX_K, EVEX_B;
  }
 }
}

let Predicates = [HasCDI] in {
defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
                                   i512mem, i32mem, "{1to16}">,
                   EVEX_V512, EVEX_CD8<32, CD8VF>;


defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
                                   i512mem, i64mem, "{1to8}">,
                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

}

// The intrinsics take (src, passthru, mask); note the (src2, src1) operand
// swap relative to the rrk instruction operands, and the GPR mask being
// copied into the mask register class.
def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
                                              GR16:$mask),
          (VPCONFLICTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
                                              GR8:$mask),
          (VPCONFLICTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;

// VPLZCNT reuses the avx512_conflict encoding template.
let Predicates = [HasCDI] in {
defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
                                i512mem, i32mem, "{1to16}">,
                EVEX_V512, EVEX_CD8<32, CD8VF>;


defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
                                i512mem, i64mem, "{1to8}">,
                EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

}

def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
                                           GR16:$mask),
          (VPLZCNTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
                                           GR8:$mask),
          (VPLZCNTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;

// Generic ctlz maps directly to VPLZCNT (register and load forms).
def : Pat<(v16i32 (ctlz (loadv16i32 addr:$src))),
          (VPLZCNTDrm addr:$src)>;
def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
          (VPLZCNTDrr VR512:$src)>;
def : Pat<(v8i64 (ctlz (loadv8i64 addr:$src))),
          (VPLZCNTQrm addr:$src)>;
def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
          (VPLZCNTQrr VR512:$src)>;

// Storing an i1 constant: both true representations (-1 and 1) are stored
// as byte value 1; false as byte value 0.
def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;

// Without DQI there is no byte-sized mask move, so spill VK1/VK8 through a
// GPR: widen to VK16, KMOVW to a GR32, and store the low byte.
def : Pat<(store VK1:$src, addr:$dst),
          (MOV8mr addr:$dst,
           (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
            sub_8bit))>, Requires<[HasAVX512, NoDQI]>;

def : Pat<(store VK8:$src, addr:$dst),
          (MOV8mr addr:$dst,
           (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
            sub_8bit))>, Requires<[HasAVX512, NoDQI]>;

// A truncating store whose memory type is exactly i1.
def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
  return
cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;

def : Pat<(truncstorei1 GR8:$src, addr:$dst),
          (MOV8mr addr:$dst, GR8:$src)>;

// VPMOVM2* - convert a mask register to a vector by sign-extending each
// mask bit into a full element.
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
// FIX: this was MRMDestReg, but VPMOVM2* encode the vector destination in
// ModRM.reg and the mask source in ModRM.r/m (Intel SDM operand encoding),
// which is the MRMSrcReg form. MRMDestReg swapped the two fields.
def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
                  !strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
                  [(set Vec.RC:$dst, (Vec.VT (X86vsext Vec.KRC:$src)))]>, EVEX;
}

// Instantiate the conversion at 512 bits unconditionally (under prd) and at
// 256/128 bits when VLX is also available.
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
                                 string OpcodeStr, Predicate prd> {
let Predicates = [prd] in
  defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
  }
}

// B/W forms need BWI, D/Q forms need DQI; W and Q take VEX_W.
multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
  defm NAME##B : cvt_mask_by_elt_width<0x28, avx512vl_i8_info, OpcodeStr,
                                       HasBWI>;
  defm NAME##W : cvt_mask_by_elt_width<0x28, avx512vl_i16_info, OpcodeStr,
                                       HasBWI>, VEX_W;
  defm NAME##D : cvt_mask_by_elt_width<0x38, avx512vl_i32_info, OpcodeStr,
                                       HasDQI>;
  defm NAME##Q : cvt_mask_by_elt_width<0x38, avx512vl_i64_info, OpcodeStr,
                                       HasDQI>, VEX_W;
}

defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;

//===----------------------------------------------------------------------===//
// AVX-512 - COMPRESS and EXPAND
//
// Note: compress writes its destination in ModRM.r/m (register or memory),
// so the register forms use MRMDestReg — unlike VPMOVM2 above.
multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
                                 string OpcodeStr> {
  def rrkz : AVX5128I<opc, MRMDestReg, (outs _.RC:$dst),
              (ins _.KRCWM:$mask, _.RC:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
              [(set _.RC:$dst, (_.VT (X86compress _.KRCWM:$mask, _.RC:$src,
                                      _.ImmAllZerosV)))]>, EVEX_KZ;

  // Merge-masked form: passthru tied to the destination.
  let Constraints = "$src0 = $dst" in
  def rrk :
AVX5128I<opc, MRMDestReg, (outs _.RC:$dst),
              (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
              [(set _.RC:$dst, (_.VT (X86compress _.KRCWM:$mask, _.RC:$src,
                                      _.RC:$src0)))]>, EVEX_K;

  // Compress-to-memory: merge-masked store of the compressed elements.
  let mayStore = 1 in {
  def mrk : AVX5128I<opc, MRMDestMem, (outs),
              (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
              [(store (_.VT (X86compress _.KRCWM:$mask, _.RC:$src, undef)),
                addr:$dst)]>,
              EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
  }
}

// Instantiate compress at 512 bits, plus 256/128 bits under VLX.
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
                                 AVX512VLVectorVTInfo VTInfo> {
  defm Z : compress_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : compress_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : compress_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
  }
}

defm VPCOMPRESSD : compress_by_elt_width <0x8B, "vpcompressd", avx512vl_i32_info>,
                                         EVEX;
defm VPCOMPRESSQ : compress_by_elt_width <0x8B, "vpcompressq", avx512vl_i64_info>,
                                         EVEX, VEX_W;
defm VCOMPRESSPS : compress_by_elt_width <0x8A, "vcompressps", avx512vl_f32_info>,
                                         EVEX;
defm VCOMPRESSPD : compress_by_elt_width <0x8A, "vcompresspd", avx512vl_f64_info>,
                                         EVEX, VEX_W;

// expand
// Expand reads its source from ModRM.r/m (register or memory), so the
// register forms use MRMSrcReg, and memory forms (rmk/rmkz) carry load
// patterns through _.LdFrag.
multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
                                 string OpcodeStr> {
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
              (ins _.KRCWM:$mask, _.RC:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
              [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask, (_.VT _.RC:$src),
                                      _.ImmAllZerosV)))]>, EVEX_KZ;

  // Merge-masked register form: passthru tied to the destination.
  let Constraints = "$src0 = $dst" in
  def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
              (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
              [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
                                      (_.VT _.RC:$src), _.RC:$src0)))]>, EVEX_K;

  // Merge-masked expand-from-memory.
  let mayLoad = 1, Constraints = "$src0 = $dst" in
  def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
              (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
              [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
                                      (_.VT (bitconvert
                                              (_.LdFrag addr:$src))),
                                      _.RC:$src0)))]>,
              EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;

  // Zero-masked expand-from-memory.
  let mayLoad = 1 in
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
              (ins _.KRCWM:$mask, _.MemOp:$src),
              OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
              [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
                                     (_.VT (bitconvert (_.LdFrag addr:$src))),
                                     _.ImmAllZerosV)))]>,
              EVEX_KZ, EVEX_CD8<_.EltSize, CD8VT1>;

}

// Instantiate expand at 512 bits, plus 256/128 bits under VLX.
multiclass expand_by_elt_width<bits<8> opc, string OpcodeStr,
                                 AVX512VLVectorVTInfo VTInfo> {
  defm Z : expand_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : expand_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : expand_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
  }
}

defm VPEXPANDD : expand_by_elt_width <0x89, "vpexpandd", avx512vl_i32_info>,
                                      EVEX;
defm VPEXPANDQ : expand_by_elt_width <0x89, "vpexpandq", avx512vl_i64_info>,
                                      EVEX, VEX_W;
defm VEXPANDPS : expand_by_elt_width <0x88, "vexpandps", avx512vl_f32_info>,
                                      EVEX;
defm VEXPANDPD : expand_by_elt_width <0x88, "vexpandpd", avx512vl_f64_info>,
                                      EVEX, VEX_W;