; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-codegenprepare %s | FileCheck %s

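; The @llvm.assume bounds %arg1 below 42 and the !range metadata bounds the
; workitem id below 1024, so both udiv operands are known to fit in 24 bits.
; AMDGPUCodeGenPrepare can therefore expand the udiv into the fast 24-bit
; float division sequence checked below.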
define amdgpu_kernel void @divrem24_assume(i32 addrspace(1)* %arg, i32 %arg1) {
; CHECK-LABEL: @divrem24_assume(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ARG1:%.*]], 42
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    [[TMP0:%.*]] = uitofp i32 [[TMP]] to float
; CHECK-NEXT:    [[TMP1:%.*]] = uitofp i32 [[ARG1]] to float
; CHECK-NEXT:    [[TMP2:%.*]] = fdiv fast float 1.000000e+00, [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast float [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.trunc.f32(float [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = fsub fast float -0.000000e+00, [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP5]], float [[TMP1]], float [[TMP0]])
; CHECK-NEXT:    [[TMP7:%.*]] = fptoui float [[TMP4]] to i32
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    [[TMP10:%.*]] = fcmp fast oge float [[TMP8]], [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP10]], i32 1, i32 0
; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP7]], [[TMP11]]
; CHECK-NEXT:    [[TMP13:%.*]] = and i32 [[TMP12]], 1023
; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[ARG:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    store i32 0, i32 addrspace(1)* [[TMP5]], align 4
; CHECK-NEXT:    ret void
;
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
  %tmp2 = icmp ult i32 %arg1, 42
  tail call void @llvm.assume(i1 %tmp2)
  %tmp3 = udiv i32 %tmp, %arg1
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
  store i32 0, i32 addrspace(1)* %tmp5, align 4
  ret void
}

declare void @llvm.assume(i1)
declare i32 @llvm.amdgcn.workitem.id.x()

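; !0 restricts the workitem id to [0, 1024), which is why the expanded
; quotient above can be masked with 1023.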
!0 = !{i32 0, i32 1024}