; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Test removal of AND operations that don't affect the last 6 bits of the
; shift amount operand.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Test that the AND is not removed when the mask does not cover all of the
; lower 6 bits.
define i32 @f1(i32 %a, i32 %sh) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nill %r3, 31
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 31
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test removal of an AND mask with only the bottom 6 bits set.
define i32 @f2(i32 %a, i32 %sh) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test removal of an AND mask that includes but is not limited to the bottom
; 6 bits.
define i32 @f3(i32 %a, i32 %sh) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 255
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test removal of the AND mask from SRA.
define i32 @f4(i32 %a, i32 %sh) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sra %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = ashr i32 %a, %and
  ret i32 %shift
}

; Test removal of the AND mask from SRL.
define i32 @f5(i32 %a, i32 %sh) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srl %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = lshr i32 %a, %and
  ret i32 %shift
}

; Test removal of the AND mask from SLLG.
define i64 @f6(i64 %a, i64 %sh) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sllg %r2, %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i64 %sh, 63
  %shift = shl i64 %a, %and
  ret i64 %shift
}

; Test removal of the AND mask from SRAG.
define i64 @f7(i64 %a, i64 %sh) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srag %r2, %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i64 %sh, 63
  %shift = ashr i64 %a, %and
  ret i64 %shift
}

; Test removal of the AND mask from SRLG.
define i64 @f8(i64 %a, i64 %sh) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srlg %r2, %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i64 %sh, 63
  %shift = lshr i64 %a, %and
  ret i64 %shift
}

; Test that an AND with two register operands is not affected.
define i32 @f9(i32 %a, i32 %b, i32 %sh) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nr %r3, %r4
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, %b
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test that the AND is not entirely removed if the result is reused.
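; (The SLL itself can use %r3 unmasked, since the shift only reads the low
; 6 bits; the NILF survives only to compute the masked value that is added
; back to the shift result.)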
define i32 @f10(i32 %a, i32 %sh) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    nilf %r3, 63
; CHECK-NEXT:    ar %r2, %r3
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = shl i32 %a, %and
  %reuse = add i32 %and, %shift
  ret i32 %reuse
}

; Test that AND is not removed for i128 (which calls __ashlti3)
define i128 @f11(i128 %a, i32 %sh) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r13, %r15, 104(%r15)
; CHECK-NEXT:    .cfi_offset %r13, -56
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    aghi %r15, -192
; CHECK-NEXT:    .cfi_def_cfa_offset 352
; CHECK-NEXT:    lg %r0, 8(%r3)
; CHECK-NEXT:    # kill: def $r4l killed $r4l def $r4d
; CHECK-NEXT:    lgr %r13, %r2
; CHECK-NEXT:    lg %r1, 0(%r3)
; CHECK-NEXT:    stg %r0, 168(%r15)
; CHECK-NEXT:    risbg %r4, %r4, 57, 191, 0
; CHECK-NEXT:    la %r2, 176(%r15)
; CHECK-NEXT:    la %r3, 160(%r15)
; CHECK-NEXT:    stg %r1, 160(%r15)
; CHECK-NEXT:    brasl %r14, __ashlti3@PLT
; CHECK-NEXT:    lg %r0, 184(%r15)
; CHECK-NEXT:    lg %r1, 176(%r15)
; CHECK-NEXT:    stg %r0, 8(%r13)
; CHECK-NEXT:    stg %r1, 0(%r13)
; CHECK-NEXT:    lmg %r13, %r15, 296(%r15)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 127
  %ext = zext i32 %and to i128
  %shift = shl i128 %a, %ext
  ret i128 %shift
}