; RUN: opt -basicaa -load-combine -instcombine -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare void @llvm.assume(i1) nounwind

; 'load' before the 'call' gets optimized:
define i64 @test1(i32* nocapture readonly %a, i1 %b) {
; CHECK-LABEL: @test1

; CHECK-DAG: load i64, i64* %1, align 4
; CHECK-DAG: tail call void @llvm.assume(i1 %b)
; CHECK: ret i64

  %load1 = load i32, i32* %a, align 4
  %conv = zext i32 %load1 to i64
  %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
  %load2 = load i32, i32* %arrayidx1, align 4
  tail call void @llvm.assume(i1 %b)
  %conv2 = zext i32 %load2 to i64
  %shl = shl nuw i64 %conv2, 32
  %add = or i64 %shl, %conv
  ret i64 %add
}
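
; For reference, a rough sketch of what load-combine (plus instcombine's
; cleanup of the now-dead zext/shl/or chain) is expected to produce for
; @test1 on this little-endian target; the exact value names are
; illustrative, not guaranteed by the pass:
;
;   %1 = bitcast i32* %a to i64*
;   %2 = load i64, i64* %1, align 4
;   tail call void @llvm.assume(i1 %b)
;   ret i64 %2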

; 'call' before the 'load' doesn't get optimized:
define i64 @test2(i32* nocapture readonly %a, i1 %b) {
; CHECK-LABEL: @test2

; CHECK-DAG: load i32, i32* %a, align 4
; CHECK-DAG: load i32, i32* %arrayidx1, align 4
; CHECK-DAG: tail call void @llvm.assume(i1 %b)
; CHECK: ret i64

  %load1 = load i32, i32* %a, align 4
  %conv = zext i32 %load1 to i64
  %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
  tail call void @llvm.assume(i1 %b)
  %load2 = load i32, i32* %arrayidx1, align 4
  %conv2 = zext i32 %load2 to i64
  %shl = shl nuw i64 %conv2, 32
  %add = or i64 %shl, %conv
  ret i64 %add
}
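
; A sketch of the expected result for @test2, assuming the intervening call
; does block the combine as stated above: no i64 load is formed and the body
; keeps its scalar shape, roughly
;
;   %load1 = load i32, i32* %a, align 4
;   tail call void @llvm.assume(i1 %b)
;   %load2 = load i32, i32* %arrayidx1, align 4
;   ...
;   ret i64 %add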