; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple powerpc64le -verify-machineinstrs \
; RUN:   | FileCheck -check-prefix=VSX %s
; RUN: llc < %s -mtriple powerpc64le -verify-machineinstrs -mattr=-vsx \
; RUN:   | FileCheck -check-prefix=NO-VSX %s

define double @test_mul_sub_f64(double %a, double %b, double %c) {
; VSX-LABEL: test_mul_sub_f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnegdp 0, 2
; VSX-NEXT:    xsmaddadp 1, 0, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_mul_sub_f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fneg 0, 2
; NO-VSX-NEXT:    fmadd 1, 0, 3, 1
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul contract reassoc double %b, %c
  %1 = fsub contract reassoc double %a, %0
  ret double %1
}

define double @test_2mul_sub_f64(double %a, double %b, double %c, double %d) {
; VSX-LABEL: test_2mul_sub_f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsmuldp 0, 3, 4
; VSX-NEXT:    xsmsubadp 0, 1, 2
; VSX-NEXT:    fmr 1, 0
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_2mul_sub_f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fmul 0, 3, 4
; NO-VSX-NEXT:    fmsub 1, 1, 2, 0
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul contract reassoc double %a, %b
  %1 = fmul contract reassoc double %c, %d
  %2 = fsub contract reassoc double %0, %1
  ret double %2
}

define double @test_neg_fma_f64(double %a, double %b, double %c) {
; VSX-LABEL: test_neg_fma_f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnegdp 0, 1
; VSX-NEXT:    xsmaddadp 3, 0, 2
; VSX-NEXT:    fmr 1, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_neg_fma_f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fneg 0, 1
; NO-VSX-NEXT:    fmadd 1, 0, 2, 3
; NO-VSX-NEXT:    blr
entry:
  %0 = fsub contract reassoc double -0.0, %a
  %1 = call contract reassoc double @llvm.fma.f64(double %0, double %b,
                                                  double %c)
  ret double %1
}

define float @test_mul_sub_f32(float %a, float %b, float %c) {
; VSX-LABEL: test_mul_sub_f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnegdp 0, 2
; VSX-NEXT:    xsmaddasp 1, 0, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_mul_sub_f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fneg 0, 2
; NO-VSX-NEXT:    fmadds 1, 0, 3, 1
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul contract reassoc float %b, %c
  %1 = fsub contract reassoc float %a, %0
  ret float %1
}

define float @test_2mul_sub_f32(float %a, float %b, float %c, float %d) {
; VSX-LABEL: test_2mul_sub_f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsmulsp 0, 3, 4
; VSX-NEXT:    xsmsubasp 0, 1, 2
; VSX-NEXT:    fmr 1, 0
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_2mul_sub_f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fmuls 0, 3, 4
; NO-VSX-NEXT:    fmsubs 1, 1, 2, 0
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul contract reassoc float %a, %b
  %1 = fmul contract reassoc float %c, %d
  %2 = fsub contract reassoc float %0, %1
  ret float %2
}

define float @test_neg_fma_f32(float %a, float %b, float %c) {
; VSX-LABEL: test_neg_fma_f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnegdp 0, 1
; VSX-NEXT:    xsmaddasp 3, 0, 2
; VSX-NEXT:    fmr 1, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_neg_fma_f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fneg 0, 1
; NO-VSX-NEXT:    fmadds 1, 0, 2, 3
; NO-VSX-NEXT:    blr
entry:
  %0 = fsub contract reassoc float -0.0, %a
  %1 = call contract reassoc float @llvm.fma.f32(float %0, float %b, float %c)
  ret float %1
}

define <2 x double> @test_neg_fma_v2f64(<2 x double> %a, <2 x double> %b,
; VSX-LABEL: test_neg_fma_v2f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xvnegdp 0, 34
; VSX-NEXT:    xvmaddadp 36, 0, 35
; VSX-NEXT:    vmr 2, 4
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_neg_fma_v2f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fneg 0, 2
; NO-VSX-NEXT:    fneg 1, 1
; NO-VSX-NEXT:    fmadd 1, 1, 3, 5
; NO-VSX-NEXT:    fmadd 2, 0, 4, 6
; NO-VSX-NEXT:    blr
                                        <2 x double> %c) {
entry:
  %0 = fsub contract reassoc <2 x double> <double -0.0, double -0.0>, %a
  %1 = call contract reassoc <2 x double> @llvm.fma.v2f64(<2 x double> %0,
                                                          <2 x double> %b,
                                                          <2 x double> %c)
  ret <2 x double> %1
}

define <4 x float> @test_neg_fma_v4f32(<4 x float> %a, <4 x float> %b,
; VSX-LABEL: test_neg_fma_v4f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xvnegsp 0, 34
; VSX-NEXT:    xvmaddasp 36, 0, 35
; VSX-NEXT:    vmr 2, 4
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_neg_fma_v4f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    vspltisb 5, -1
; NO-VSX-NEXT:    vslw 5, 5, 5
; NO-VSX-NEXT:    vsubfp 2, 5, 2
; NO-VSX-NEXT:    vmaddfp 2, 2, 3, 4
; NO-VSX-NEXT:    blr
                                       <4 x float> %c) {
entry:
  %0 = fsub contract reassoc <4 x float> <float -0.0, float -0.0, float -0.0,
                                          float -0.0>, %a
  %1 = call contract reassoc <4 x float> @llvm.fma.v4f32(<4 x float> %0,
                                                         <4 x float> %b,
                                                         <4 x float> %c)
  ret <4 x float> %1
}

define double @test_fast_mul_sub_f64(double %a, double %b, double %c) {
; VSX-LABEL: test_fast_mul_sub_f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnmsubadp 1, 2, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_mul_sub_f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fnmsub 1, 2, 3, 1
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul reassoc nsz double %b, %c
  %1 = fsub reassoc nsz double %a, %0
  ret double %1
}

define double @test_fast_2mul_sub_f64(double %a, double %b, double %c,
; VSX-LABEL: test_fast_2mul_sub_f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsmuldp 0, 3, 4
; VSX-NEXT:    xsmsubadp 0, 1, 2
; VSX-NEXT:    fmr 1, 0
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_2mul_sub_f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fmul 0, 3, 4
; NO-VSX-NEXT:    fmsub 1, 1, 2, 0
; NO-VSX-NEXT:    blr
                                       double %d) {
entry:
  %0 = fmul reassoc double %a, %b
  %1 = fmul reassoc double %c, %d
  %2 = fsub reassoc double %0, %1
  ret double %2
}

define double @test_fast_neg_fma_f64(double %a, double %b, double %c) {
; VSX-LABEL: test_fast_neg_fma_f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnmsubadp 3, 1, 2
; VSX-NEXT:    fmr 1, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_neg_fma_f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fnmsub 1, 1, 2, 3
; NO-VSX-NEXT:    blr
entry:
  %0 = fsub reassoc double -0.0, %a
  %1 = call reassoc nsz double @llvm.fma.f64(double %0, double %b, double %c)
  ret double %1
}

define float @test_fast_mul_sub_f32(float %a, float %b, float %c) {
; VSX-LABEL: test_fast_mul_sub_f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnmsubasp 1, 2, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_mul_sub_f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fnmsubs 1, 2, 3, 1
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul reassoc float %b, %c
  %1 = fsub reassoc nsz float %a, %0
  ret float %1
}

define float @test_fast_2mul_sub_f32(float %a, float %b, float %c, float %d) {
; VSX-LABEL: test_fast_2mul_sub_f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsmulsp 0, 3, 4
; VSX-NEXT:    xsmsubasp 0, 1, 2
; VSX-NEXT:    fmr 1, 0
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_2mul_sub_f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fmuls 0, 3, 4
; NO-VSX-NEXT:    fmsubs 1, 1, 2, 0
; NO-VSX-NEXT:    blr
entry:
  %0 = fmul reassoc float %a, %b
  %1 = fmul reassoc float %c, %d
  %2 = fsub reassoc nsz float %0, %1
  ret float %2
}

define float @test_fast_neg_fma_f32(float %a, float %b, float %c) {
; VSX-LABEL: test_fast_neg_fma_f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xsnmsubasp 3, 1, 2
; VSX-NEXT:    fmr 1, 3
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_neg_fma_f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fnmsubs 1, 1, 2, 3
; NO-VSX-NEXT:    blr
entry:
  %0 = fsub reassoc float -0.0, %a
  %1 = call reassoc nsz float @llvm.fma.f32(float %0, float %b, float %c)
  ret float %1
}

define <2 x double> @test_fast_neg_fma_v2f64(<2 x double> %a, <2 x double> %b,
; VSX-LABEL: test_fast_neg_fma_v2f64:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xvnmsubadp 36, 34, 35
; VSX-NEXT:    vmr 2, 4
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_neg_fma_v2f64:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    fnmsub 1, 1, 3, 5
; NO-VSX-NEXT:    fnmsub 2, 2, 4, 6
; NO-VSX-NEXT:    blr
                                             <2 x double> %c) {
entry:
  %0 = fsub reassoc <2 x double> <double -0.0, double -0.0>, %a
  %1 = call reassoc nsz <2 x double> @llvm.fma.v2f64(<2 x double> %0, <2 x double> %b,
                                                     <2 x double> %c)
  ret <2 x double> %1
}

define <4 x float> @test_fast_neg_fma_v4f32(<4 x float> %a, <4 x float> %b,
; VSX-LABEL: test_fast_neg_fma_v4f32:
; VSX:       # %bb.0: # %entry
; VSX-NEXT:    xvnmsubasp 36, 34, 35
; VSX-NEXT:    vmr 2, 4
; VSX-NEXT:    blr
;
; NO-VSX-LABEL: test_fast_neg_fma_v4f32:
; NO-VSX:       # %bb.0: # %entry
; NO-VSX-NEXT:    vnmsubfp 2, 2, 3, 4
; NO-VSX-NEXT:    blr
                                            <4 x float> %c) {
entry:
  %0 = fsub reassoc <4 x float> <float -0.0, float -0.0, float -0.0,
                                 float -0.0>, %a
  %1 = call reassoc nsz <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %b,
                                                    <4 x float> %c)
  ret <4 x float> %1
}

declare float @llvm.fma.f32(float %a, float %b, float %c)
declare double @llvm.fma.f64(double %a, double %b, double %c)
declare <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b,
                                    <4 x float> %c)
declare <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b,
                                     <2 x double> %c)