;; X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with Y. Because of this, a copy needs to be
;; emitted before the shift to save the register value before it is
;; clobbered. However, this copy is not needed if the register
;; allocator turns the shift into an LEA. This also occurs for ADD.

; Check that the shift gets turned into an LEA.
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-apple-darwin | FileCheck %s

@G = external global i32

define i32 @test1(i32 %X) nounwind {
; CHECK-LABEL: test1:
; CHECK: movl %edi, %eax
; CHECK: leal 1(%rax)
  %Z = add i32 %X, 1
  store volatile i32 %Z, i32* @G
  ret i32 %X
}

; rdar://8977508
; The second add should not be transformed to leal nor should it be
; commuted (which would require inserting a copy).
define i32 @test2(i32 inreg %a, i32 inreg %b, i32 %c, i32 %d) nounwind {
entry:
; CHECK-LABEL: test2:
; CHECK: leal
; CHECK-NEXT: addl
; CHECK-NEXT: addl
; CHECK-NEXT: ret
  %add = add i32 %b, %a
  %add3 = add i32 %add, %c
  %add5 = add i32 %add3, %d
  ret i32 %add5
}

; rdar://9002648
define i64 @test3(i64 %x) nounwind readnone ssp {
entry:
; CHECK-LABEL: test3:
; CHECK: leaq (%rdi,%rdi), %rax
; CHECK-NOT: addq
; CHECK-NEXT: ret
  %0 = shl i64 %x, 1
  ret i64 %0
}

@global = external global i32, align 4
@global2 = external global i64, align 8

; Test that liveness is properly updated and we do not encounter the
; assert/crash from http://llvm.org/PR28301
; CHECK-LABEL: ham
define void @ham() {
bb:
  br label %bb1

bb1:
  %tmp = phi i64 [ %tmp40, %bb9 ], [ 0, %bb ]
  %tmp2 = phi i32 [ %tmp39, %bb9 ], [ 0, %bb ]
  %tmp3 = icmp sgt i32 undef, 10
  br i1 %tmp3, label %bb2, label %bb3

bb2:
  %tmp6 = load i32, i32* @global, align 4
  %tmp8 = add nsw i32 %tmp6, %tmp2
  %tmp9 = sext i32 %tmp8 to i64
  br label %bb6

bb3:
; CHECK: LBB3_3:
; CHECK: addq $4, %r
; CHECK: subl %e
  %tmp14 = phi i64 [ %tmp15, %bb5 ], [ 0, %bb1 ]
  %tmp15 = add nuw i64 %tmp14, 4
  %tmp16 = trunc i64 %tmp14 to i32
  %tmp17 = sub i32 %tmp2, %tmp16
  br label %bb4

bb4:
  %tmp20 = phi i64 [ %tmp14, %bb3 ], [ %tmp34, %bb5 ]
  %tmp28 = icmp eq i32 %tmp17, 0
  br i1 %tmp28, label %bb5, label %bb8

bb5:
  %tmp34 = add nuw nsw i64 %tmp20, 1
  %tmp35 = icmp slt i64 %tmp34, %tmp15
  br i1 %tmp35, label %bb4, label %bb3

bb6:
  store volatile i64 %tmp, i64* @global2, align 8
  store volatile i64 %tmp9, i64* @global2, align 8
  store volatile i32 %tmp6, i32* @global, align 4
  %tmp45 = icmp slt i32 undef, undef
  br i1 %tmp45, label %bb6, label %bb9

bb8:
  unreachable

bb9:
  %tmp39 = add nuw nsw i32 %tmp2, 4
  %tmp40 = add nuw i64 %tmp, 4
  br label %bb1
}
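
; For reference, a minimal sketch (not checked by this test, register choices
; are illustrative only) of the two codegen alternatives the header comment
; describes, for a value in %edi that stays live past the increment:
;
;   two-address form -- the add clobbers its source, so a copy is emitted first:
;     movl %edi, %eax
;     addl $1, %eax
;
;   three-address LEA form -- the destination is separate from the source,
;   so no copy is needed:
;     leal 1(%rdi), %eax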