; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; These would crash if we didn't check for a negative shift.

; https://llvm.org/bugs/show_bug.cgi?id=12967

define void @pr12967() {
; CHECK-LABEL: @pr12967(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    br label [[LOOP]]
;
entry:
  br label %loop

loop:
  %c = phi i32 [ %shl, %loop ], [ undef, %entry ]
  %shr = shl i32 %c, 7
  %shl = lshr i32 %shr, -2
  br label %loop
}

; https://llvm.org/bugs/show_bug.cgi?id=26760

define void @pr26760() {
; CHECK-LABEL: @pr26760(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    br label [[LOOP]]
;
entry:
  br label %loop

loop:
  %c = phi i32 [ %shl, %loop ], [ undef, %entry ]
  %shr = lshr i32 %c, 7
  %shl = shl i32 %shr, -2
  br label %loop
}

; Converting the 2 shifts to SHL 6 without the AND is wrong.
; https://llvm.org/bugs/show_bug.cgi?id=8547

define i32 @pr8547(i32* %g) {
; CHECK-LABEL: @pr8547(
; CHECK-NEXT:  codeRepl:
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi i32 [ 0, [[CODEREPL:%.*]] ], [ 5, [[FOR_COND]] ]
; CHECK-NEXT:    store i32 [[STOREMERGE]], i32* [[G:%.*]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i32 [[STOREMERGE]], 6
; CHECK-NEXT:    [[CONV2:%.*]] = and i32 [[TMP0]], 64
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[CONV2]], 0
; CHECK-NEXT:    br i1 [[TOBOOL]], label [[FOR_COND]], label [[CODEREPL2:%.*]]
; CHECK:       codeRepl2:
; CHECK-NEXT:    ret i32 [[CONV2]]
;
codeRepl:
  br label %for.cond

for.cond:
  %storemerge = phi i32 [ 0, %codeRepl ], [ 5, %for.cond ]
  store i32 %storemerge, i32* %g, align 4
  %shl = shl i32 %storemerge, 30
  %conv2 = lshr i32 %shl, 24
  %tobool = icmp eq i32 %conv2, 0
  br i1 %tobool, label %for.cond, label %codeRepl2

codeRepl2:
  ret i32 %conv2
}

; Two same-direction shifts whose amounts add up to at least the bitwidth
; should get folded to zero.

define i32 @shl_shl(i32 %A) {
; CHECK-LABEL: @shl_shl(
; CHECK-NEXT:    ret i32 0
;
  %B = shl i32 %A, 6
  %C = shl i32 %B, 28
  ret i32 %C
}

define <2 x i33> @shl_shl_splat_vec(<2 x i33> %A) {
; CHECK-LABEL: @shl_shl_splat_vec(
; CHECK-NEXT:    ret <2 x i33> zeroinitializer
;
  %B = shl <2 x i33> %A, <i33 5, i33 5>
  %C = shl <2 x i33> %B, <i33 28, i33 28>
  ret <2 x i33> %C
}

; FIXME: The shift amounts sum to the bitwidth in each lane, so this should
; also fold to zero.

define <2 x i33> @shl_shl_vec(<2 x i33> %A) {
; CHECK-LABEL: @shl_shl_vec(
; CHECK-NEXT:    [[B:%.*]] = shl <2 x i33> [[A:%.*]], <i33 6, i33 5>
; CHECK-NEXT:    [[C:%.*]] = shl <2 x i33> [[B]], <i33 27, i33 28>
; CHECK-NEXT:    ret <2 x i33> [[C]]
;
  %B = shl <2 x i33> %A, <i33 6, i33 5>
  %C = shl <2 x i33> %B, <i33 27, i33 28>
  ret <2 x i33> %C
}

define i232 @lshr_lshr(i232 %A) {
; CHECK-LABEL: @lshr_lshr(
; CHECK-NEXT:    ret i232 0
;
  %B = lshr i232 %A, 231
  %C = lshr i232 %B, 1
  ret i232 %C
}

define <2 x i32> @lshr_lshr_splat_vec(<2 x i32> %A) {
; CHECK-LABEL: @lshr_lshr_splat_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = lshr <2 x i32> %A, <i32 28, i32 28>
  %C = lshr <2 x i32> %B, <i32 4, i32 4>
  ret <2 x i32> %C
}

define <2 x i32> @lshr_lshr_vec(<2 x i32> %A) {
; CHECK-LABEL: @lshr_lshr_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = lshr <2 x i32> %A, <i32 29, i32 28>
  %C = lshr <2 x i32> %B, <i32 4, i32 5>
  ret <2 x i32> %C
}
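
; A hand-written sketch, not autogenerated like the checks above: the
; @shl_shl_eq_bitwidth name and its CHECK lines are illustrative additions
; covering the scalar case where the two shift amounts sum to exactly the
; bitwidth, assuming it folds to zero like @lshr_lshr and the splat-vector
; cases above.

define i32 @shl_shl_eq_bitwidth(i32 %A) {
; CHECK-LABEL: @shl_shl_eq_bitwidth(
; CHECK-NEXT:    ret i32 0
;
  %B = shl i32 %A, 16
  %C = shl i32 %B, 16
  ret i32 %C
}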