; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X32-AVX2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX2

define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: and_masks:
; X32:       ## %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    vmovups (%edx), %ymm0
; X32-NEXT:    vmovups (%ecx), %ymm1
; X32-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vmovups (%eax), %ymm2
; X32-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-NEXT:    vandps LCPI0_0, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: and_masks:
; X64:       ## %bb.0:
; X64-NEXT:    vmovups (%rdi), %ymm0
; X64-NEXT:    vmovups (%rsi), %ymm1
; X64-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vmovups (%rdx), %ymm2
; X64-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rax)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: and_masks:
; X32-AVX2:       ## %bb.0:
; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-AVX2-NEXT:    vmovups (%edx), %ymm0
; X32-AVX2-NEXT:    vmovups (%ecx), %ymm1
; X32-AVX2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vmovups (%eax), %ymm2
; X32-AVX2-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT:    vpsrld $31, %ymm0, %ymm0
; X32-AVX2-NEXT:    vmovdqa %ymm0, (%eax)
; X32-AVX2-NEXT:    vzeroupper
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: and_masks:
; X64-AVX2:       ## %bb.0:
; X64-AVX2-NEXT:    vmovups (%rdi), %ymm0
; X64-AVX2-NEXT:    vmovups (%rsi), %ymm1
; X64-AVX2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vmovups (%rdx), %ymm2
; X64-AVX2-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vpsrld $31, %ymm0, %ymm0
; X64-AVX2-NEXT:    vmovdqa %ymm0, (%rax)
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
  %v0 = load <8 x float>, <8 x float>* %a, align 16
  %v1 = load <8 x float>, <8 x float>* %b, align 16
  %m0 = fcmp olt <8 x float> %v1, %v0
  %v2 = load <8 x float>, <8 x float>* %c, align 16
  %m1 = fcmp olt <8 x float> %v2, %v0
  %mand = and <8 x i1> %m1, %m0
  %r = zext <8 x i1> %mand to <8 x i32>
  store <8 x i32> %r, <8 x i32>* undef, align 32
  ret void
}

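; The xor with an all-ones <8 x i1> should fold into the inverted compare
; predicate (vcmpnltps) instead of emitting a separate xor.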
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: neg_masks:
; X32:       ## %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovups (%ecx), %ymm0
; X32-NEXT:    vcmpnltps (%eax), %ymm0, %ymm0
; X32-NEXT:    vandps LCPI1_0, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: neg_masks:
; X64:       ## %bb.0:
; X64-NEXT:    vmovups (%rsi), %ymm0
; X64-NEXT:    vcmpnltps (%rdi), %ymm0, %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rax)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: neg_masks:
; X32-AVX2:       ## %bb.0:
; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT:    vmovups (%ecx), %ymm0
; X32-AVX2-NEXT:    vcmpnltps (%eax), %ymm0, %ymm0
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; X32-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT:    vmovaps %ymm0, (%eax)
; X32-AVX2-NEXT:    vzeroupper
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: neg_masks:
; X64-AVX2:       ## %bb.0:
; X64-AVX2-NEXT:    vmovups (%rsi), %ymm0
; X64-AVX2-NEXT:    vcmpnltps (%rdi), %ymm0, %ymm0
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; X64-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vmovaps %ymm0, (%rax)
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
  %v0 = load <8 x float>, <8 x float>* %a, align 16
  %v1 = load <8 x float>, <8 x float>* %b, align 16
  %m0 = fcmp olt <8 x float> %v1, %v0
  %mand = xor <8 x i1> %m0, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  %r = zext <8 x i1> %mand to <8 x i32>
  store <8 x i32> %r, <8 x i32>* undef, align 32
  ret void
}

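; AND of a compare result with a constant <8 x i1> mask: the constant mask is
; materialized from the constant pool and applied with a single vandps/vpand.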
define <8 x i32> @and_mask_constant(<8 x i32> %v0, <8 x i32> %v1) {
; X32-LABEL: and_mask_constant:
; X32:       ## %bb.0:
; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
; X32-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT:    vandps LCPI2_0, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: and_mask_constant:
; X64:       ## %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
; X64-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: and_mask_constant:
; X32-AVX2:       ## %bb.0:
; X32-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT:    vpand LCPI2_0, %ymm0, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: and_mask_constant:
; X64-AVX2:       ## %bb.0:
; X64-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT:    retq
  %m = icmp eq <8 x i32> %v0, zeroinitializer
  %mand = and <8 x i1> %m, <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>
  %r = zext <8 x i1> %mand to <8 x i32>
  ret <8 x i32> %r
}

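; The functions below combine chains of fcmp results with logical ops; the
; masks should stay in YMM registers, with compare constants loaded from the
; constant pool on AVX and broadcast with vbroadcastss on AVX2.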
define <8 x i32> @two_ands(<8 x float> %x) local_unnamed_addr #0 {
; X32-LABEL: two_ands:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI3_1, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: two_ands:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: two_ands:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: two_ands:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %and4 = and <8 x i1> %cmp, %cmp1
  %and = sext <8 x i1> %and4 to <8 x i32>
  ret <8 x i32> %and
}

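; Three compares reduced with 'and'.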
define <8 x i32> @three_ands(<8 x float> %x) {
; X32-LABEL: three_ands:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI4_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: three_ands:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: three_ands:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: three_ands:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %and8 = and <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %and59 = and <8 x i1> %and8, %cmp3
  %and5 = sext <8 x i1> %and59 to <8 x i32>
  ret <8 x i32> %and5
}

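; Four compares reduced with 'and'.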
define <8 x i32> @four_ands(<8 x float> %x) {
; X32-LABEL: four_ands:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI5_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI5_2, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: four_ands:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: four_ands:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: four_ands:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %and12 = and <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %and513 = and <8 x i1> %and12, %cmp3
  %cmp6 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %and814 = and <8 x i1> %and513, %cmp6
  %and8 = sext <8 x i1> %and814 to <8 x i32>
  ret <8 x i32> %and8
}

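; Five compares reduced with 'and'.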
define <8 x i32> @five_ands(<8 x float> %x) {
; X32-LABEL: five_ands:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI6_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI6_2, %ymm0, %ymm3
; X32-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI6_3, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: five_ands:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm3
; X64-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: five_ands:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: five_ands:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %and16 = and <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %and517 = and <8 x i1> %and16, %cmp3
  %cmp6 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %and818 = and <8 x i1> %and517, %cmp6
  %cmp9 = fcmp une <8 x float> %x, <float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000>
  %and1119 = and <8 x i1> %and818, %cmp9
  %and11 = sext <8 x i1> %and1119 to <8 x i32>
  ret <8 x i32> %and11
}

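; Two compares combined with 'or' instead of 'and', lowered to vorps.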
define <8 x i32> @two_or(<8 x float> %x) {
; X32-LABEL: two_or:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI7_1, %ymm0, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: two_or:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: two_or:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: two_or:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %or4 = or <8 x i1> %cmp, %cmp1
  %or = sext <8 x i1> %or4 to <8 x i32>
  ret <8 x i32> %or
}

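; Three compares reduced with 'or'.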
define <8 x i32> @three_or(<8 x float> %x) {
; X32-LABEL: three_or:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI8_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: three_or:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: three_or:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: three_or:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %or8 = or <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %or59 = or <8 x i1> %or8, %cmp3
  %or5 = sext <8 x i1> %or59 to <8 x i32>
  ret <8 x i32> %or5
}
; Function Attrs: norecurse nounwind readnone ssp uwtable
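; Four compares reduced with 'or'.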
define <8 x i32> @four_or(<8 x float> %x) {
; X32-LABEL: four_or:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI9_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI9_2, %ymm0, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: four_or:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: four_or:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: four_or:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %or12 = or <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %or513 = or <8 x i1> %or12, %cmp3
  %cmp6 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %or814 = or <8 x i1> %or513, %cmp6
  %or8 = sext <8 x i1> %or814 to <8 x i32>
  ret <8 x i32> %or8
}
; Function Attrs: norecurse nounwind readnone ssp uwtable
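; Five compares reduced with 'or'.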
define <8 x i32> @five_or(<8 x float> %x) {
; X32-LABEL: five_or:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI10_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI10_2, %ymm0, %ymm3
; X32-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI10_3, %ymm0, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: five_or:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm3
; X64-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: five_or:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: five_or:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vorps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %or16 = or <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %or517 = or <8 x i1> %or16, %cmp3
  %cmp6 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %or818 = or <8 x i1> %or517, %cmp6
  %cmp9 = fcmp une <8 x float> %x, <float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000>
  %or1119 = or <8 x i1> %or818, %cmp9
  %or11 = sext <8 x i1> %or1119 to <8 x i32>
  ret <8 x i32> %or11
}

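; Mixed reduction: (cmp1 & cmp3) | cmp.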
define <8 x i32> @three_or_and(<8 x float> %x) {
; X32-LABEL: three_or_and:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI11_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: three_or_and:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: three_or_and:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: three_or_and:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %and7 = and <8 x i1> %cmp1, %cmp3
  %or8 = or <8 x i1> %and7, %cmp
  %or = sext <8 x i1> %or8 to <8 x i32>
  ret <8 x i32> %or
}

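; Mixed reduction: (cmp & cmp1) | (cmp3 & cmp5).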
define <8 x i32> @four_or_and(<8 x float> %x) {
; X32-LABEL: four_or_and:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI12_1, %ymm0, %ymm2
; X32-NEXT:    vandps %ymm2, %ymm1, %ymm1
; X32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X32-NEXT:    vcmpneqps LCPI12_2, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: four_or_and:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vandps %ymm2, %ymm1, %ymm1
; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: four_or_and:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vandps %ymm2, %ymm1, %ymm1
; X32-AVX2-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: four_or_and:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vandps %ymm2, %ymm1, %ymm1
; X64-AVX2-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X64-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %and11 = and <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %cmp5 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %and712 = and <8 x i1> %cmp3, %cmp5
  %or13 = or <8 x i1> %and11, %and712
  %or = sext <8 x i1> %or13 to <8 x i32>
  ret <8 x i32> %or
}

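; Mixed reduction: ((cmp1 & cmp3) | cmp) | (cmp5 & cmp7).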
define <8 x i32> @five_or_and(<8 x float> %x) {
; X32-LABEL: five_or_and:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI13_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vcmpneqps LCPI13_2, %ymm0, %ymm3
; X32-NEXT:    vcmpneqps LCPI13_3, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm3, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: five_or_and:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm3
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm3, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: five_or_and:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm4 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm4, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm3, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: five_or_and:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm4 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm4, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm3, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %and15 = and <8 x i1> %cmp1, %cmp3
  %or16 = or <8 x i1> %and15, %cmp
  %cmp5 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %cmp7 = fcmp une <8 x float> %x, <float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000>
  %and917 = and <8 x i1> %cmp5, %cmp7
  %or1018 = or <8 x i1> %or16, %and917
  %or10 = sext <8 x i1> %or1018 to <8 x i32>
  ret <8 x i32> %or10
}

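; Mixed reduction: (cmp ^ cmp1) | (cmp3 & cmp5); the i1 xor lowers to vxorps.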
define <8 x i32> @four_or_and_xor(<8 x float> %x) {
; X32-LABEL: four_or_and_xor:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI14_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X32-NEXT:    vcmpneqps LCPI14_2, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: four_or_and_xor:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: four_or_and_xor:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X32-AVX2-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: four_or_and_xor:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X64-AVX2-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X64-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %xor10 = xor <8 x i1> %cmp, %cmp1
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %cmp5 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %and11 = and <8 x i1> %cmp3, %cmp5
  %or12 = or <8 x i1> %xor10, %and11
  %or = sext <8 x i1> %or12 to <8 x i32>
  ret <8 x i32> %or
}
; Function Attrs: norecurse nounwind readnone ssp uwtable
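; Mixed reduction: ((cmp1 ^ cmp3) ^ (cmp5 & cmp7)) | cmp.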
define <8 x i32> @five_or_and_xor(<8 x float> %x) {
; X32-LABEL: five_or_and_xor:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI15_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vcmpneqps LCPI15_2, %ymm0, %ymm4
; X32-NEXT:    vcmpneqps LCPI15_3, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm0, %ymm4, %ymm0
; X32-NEXT:    vxorps %ymm0, %ymm3, %ymm0
; X32-NEXT:    vxorps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: five_or_and_xor:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm4
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vandps %ymm0, %ymm4, %ymm0
; X64-NEXT:    vxorps %ymm0, %ymm3, %ymm0
; X64-NEXT:    vxorps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: five_or_and_xor:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm4 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm4, %ymm0, %ymm4
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm5 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm5, %ymm0, %ymm0
; X32-AVX2-NEXT:    vandps %ymm0, %ymm4, %ymm0
; X32-AVX2-NEXT:    vxorps %ymm0, %ymm3, %ymm0
; X32-AVX2-NEXT:    vxorps %ymm0, %ymm2, %ymm0
; X32-AVX2-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: five_or_and_xor:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm4 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm4, %ymm0, %ymm4
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm5 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm5, %ymm0, %ymm0
; X64-AVX2-NEXT:    vandps %ymm0, %ymm4, %ymm0
; X64-AVX2-NEXT:    vxorps %ymm0, %ymm3, %ymm0
; X64-AVX2-NEXT:    vxorps %ymm0, %ymm2, %ymm0
; X64-AVX2-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %xor14 = xor <8 x i1> %cmp1, %cmp3
  %cmp5 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %cmp7 = fcmp une <8 x float> %x, <float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000>
  %and15 = and <8 x i1> %cmp5, %cmp7
  %xor916 = xor <8 x i1> %xor14, %and15
  %or17 = or <8 x i1> %xor916, %cmp
  %or = sext <8 x i1> %or17 to <8 x i32>
  ret <8 x i32> %or
}
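; Mixed reduction: ((cmp8 ^ cmp) ^ (cmp1 & cmp3 & cmp5)) | cmp11.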
define <8 x i32> @six_or_and_xor(<8 x float> %x) {
; X32-LABEL: six_or_and_xor:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vcmpltps LCPI16_1, %ymm0, %ymm2
; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-NEXT:    vcmpneqps LCPI16_2, %ymm0, %ymm4
; X32-NEXT:    vandps %ymm4, %ymm3, %ymm3
; X32-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X32-NEXT:    vcmpneqps LCPI16_3, %ymm0, %ymm2
; X32-NEXT:    vxorps %ymm1, %ymm2, %ymm1
; X32-NEXT:    vcmpneqps LCPI16_4, %ymm0, %ymm0
; X32-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: six_or_and_xor:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vcmpltps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm4
; X64-NEXT:    vandps %ymm4, %ymm3, %ymm3
; X64-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm2
; X64-NEXT:    vxorps %ymm1, %ymm2, %ymm1
; X64-NEXT:    vcmpneqps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
;
; X32-AVX2-LABEL: six_or_and_xor:
; X32-AVX2:       ## %bb.0: ## %entry
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X32-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X32-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X32-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm4 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm4, %ymm0, %ymm4
; X32-AVX2-NEXT:    vandps %ymm4, %ymm3, %ymm3
; X32-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X32-AVX2-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X32-AVX2-NEXT:    vxorps %ymm1, %ymm2, %ymm1
; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1]
; X32-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm0
; X32-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X32-AVX2-NEXT:    retl
;
; X64-AVX2-LABEL: six_or_and_xor:
; X64-AVX2:       ## %bb.0: ## %entry
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; X64-AVX2-NEXT:    vcmpleps %ymm0, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT:    vcmpltps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %xmm3, %xmm3, %xmm3
; X64-AVX2-NEXT:    vcmpneqps %ymm3, %ymm0, %ymm3
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm4 = [1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1,1.00000001E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm4, %ymm0, %ymm4
; X64-AVX2-NEXT:    vandps %ymm4, %ymm3, %ymm3
; X64-AVX2-NEXT:    vandps %ymm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vxorps %ymm2, %ymm1, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1,2.00000003E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm2
; X64-AVX2-NEXT:    vxorps %ymm1, %ymm2, %ymm1
; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1,4.00000006E-1]
; X64-AVX2-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm0
; X64-AVX2-NEXT:    vorps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    retq
entry:
  %cmp = fcmp oge <8 x float> %x, <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
  %cmp1 = fcmp olt <8 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  %cmp3 = fcmp une <8 x float> %x, zeroinitializer
  %and18 = and <8 x i1> %cmp1, %cmp3
  %cmp5 = fcmp une <8 x float> %x, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  %and719 = and <8 x i1> %and18, %cmp5
  %cmp8 = fcmp une <8 x float> %x, <float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000, float 0x3FC99999A0000000>
  %xor20 = xor <8 x i1> %cmp8, %cmp
  %xor1021 = xor <8 x i1> %xor20, %and719
  %cmp11 = fcmp une <8 x float> %x, <float 0x3FD99999A0000000, float 0x3FD99999A0000000, float 0x3FD99999A0000000, float 0x3FD99999A0000000, float 0x3FD99999A0000000, float 0x3FD99999A0000000, float 0x3FD99999A0000000, float 0x3FD99999A0000000>
  %or22 = or <8 x i1> %xor1021, %cmp11
  %or = sext <8 x i1> %or22 to <8 x i32>
  ret <8 x i32> %or
}