; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -instcombine < %s | FileCheck %s

declare double @llvm.sqrt.f64(double) nounwind readnone speculatable
declare void @use(double)

; sqrt(a) * sqrt(b) no math flags
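; Negative test - with no FMF on the fmul, no fold to sqrt(a*b) is expected.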

define double @sqrt_a_sqrt_b(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b(
; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.sqrt.f64(double [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.sqrt.f64(double [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %mul = fmul double %1, %2
  ret double %mul
}

; sqrt(a) * sqrt(b) fast-math, multiple uses
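; Negative test - the extra use of sqrt(b) is expected to block the fold even with full FMF.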

define double @sqrt_a_sqrt_b_multiple_uses(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b_multiple_uses(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.sqrt.f64(double [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.sqrt.f64(double [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    call void @use(double [[TMP2]])
; CHECK-NEXT:    ret double [[MUL]]
;
  %1 = call fast double @llvm.sqrt.f64(double %a)
  %2 = call fast double @llvm.sqrt.f64(double %b)
  %mul = fmul fast double %1, %2
  call void @use(double %2)
  ret double %mul
}

; sqrt(a) * sqrt(b) => sqrt(a*b) with reassoc+nnan

define double @sqrt_a_sqrt_b_reassoc_nnan(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b_reassoc_nnan(
; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan double [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc nnan double @llvm.sqrt.f64(double [[TMP1]])
; CHECK-NEXT:    ret double [[TMP2]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %mul = fmul reassoc nnan double %1, %2
  ret double %mul
}

; nnan disallows the possibility that both operands are negative,
; so we won't return a number when the answer should be NaN.
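; For example, with %a = %b = -1.0: sqrt(-1.0) * sqrt(-1.0) is NaN * NaN = NaN,
; but sqrt(-1.0 * -1.0) = sqrt(1.0) = 1.0, so folding without 'nnan' would be unsound.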

define double @sqrt_a_sqrt_b_reassoc(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b_reassoc(
; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.sqrt.f64(double [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.sqrt.f64(double [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %mul = fmul reassoc double %1, %2
  ret double %mul
}

; sqrt(a) * sqrt(b) * sqrt(c) * sqrt(d) => sqrt(a*b*c*d) with fast-math
; 'reassoc nnan' on the fmuls is all that is required, but check propagation of other FMF.
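; Per the checks below, each new fmul is expected to keep the flags of the fmul it
; replaces, and the final sqrt call takes the flags of the outermost fmul.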

define double @sqrt_a_sqrt_b_sqrt_c_sqrt_d_reassoc(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: @sqrt_a_sqrt_b_sqrt_c_sqrt_d_reassoc(
; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan arcp double [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = fmul reassoc nnan double [[TMP1]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf double [[TMP2]], [[D:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = call reassoc nnan ninf double @llvm.sqrt.f64(double [[TMP3]])
; CHECK-NEXT:    ret double [[TMP4]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %3 = call double @llvm.sqrt.f64(double %c)
  %4 = call double @llvm.sqrt.f64(double %d)
  %mul = fmul reassoc nnan arcp double %1, %2
  %mul1 = fmul reassoc nnan double %mul, %3
  %mul2 = fmul reassoc nnan ninf double %mul1, %4
  ret double %mul2
}