; Tests that i64 volatile load/store pairs to/from globals are lowered to
; ldrd/strd where the target supports them (ARMv5TE, Thumb2), and to
; ldr/str pairs on ARMv4T, across the ldrd/strd immediate-offset range
; (-4, 255, 256, 1020, 1024) and for stack slots.
; RUN: llc -mtriple=armv5e-arm-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-ARMV5TE,CHECK
; RUN: llc -mtriple=thumbv6t2-arm-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-T2,CHECK
; RUN: llc -mtriple=armv4t-arm-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-ARMV4T,CHECK

@x = common dso_local global i64 0, align 8
@y = common dso_local global i64 0, align 8

define void @test() {
entry:
; CHECK-LABEL: test:
; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #4]
; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #4]
; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]]]
  %0 = load volatile i64, i64* @x, align 8
  store volatile i64 %0, i64* @y, align 8
  ret void
}

; Offset -4: still encodable in ldrd/strd's signed 8-bit (ARM) / imm8*4 (T2) range.
define void @test_offset() {
entry:
; CHECK-LABEL: test_offset:
; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #-4]
; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #-4]
; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #-4]
; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #-4]
; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #-4]
; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]]]
; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #-4]
  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 -4) to i64*), align 8
  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 -4) to i64*), align 8
  ret void
}

; Offset 255: largest ARM-mode ldrd/strd immediate; T2 ldrd needs a
; multiple of 4, so the address is materialized with adds first.
define void @test_offset_1() {
; CHECK-LABEL: test_offset_1:
; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #255]
; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #255]
; CHECK-T2: adds [[ADDR0:r[0-9]+]], #255
; CHECK-T2-NEXT: adds [[ADDR1:r[0-9]+]], #255
; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #255]
; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #259]
; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #259]
; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #255]
entry:
  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 255) to i64*), align 8
  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 255) to i64*), align 8
  ret void
}

; Offset 256: out of ARM-mode ldrd/strd range (needs an add), but in range
; for the T2 imm8*4 encoding.
define void @test_offset_2() {
; CHECK-LABEL: test_offset_2:
; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: add [[ADDR0]], [[ADDR0]], #256
; CHECK-ARMV5TE-NEXT: add [[ADDR1]], [[ADDR1]], #256
; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #256]
; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #256]
; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #256]
; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #260]
; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #260]
; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #256]
entry:
  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 256) to i64*), align 8
  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 256) to i64*), align 8
  ret void
}

; Offset 1020: largest T2 ldrd/strd immediate (255 * 4).
define void @test_offset_3() {
; CHECK-LABEL: test_offset_3:
; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: add [[ADDR0]], [[ADDR0]], #1020
; CHECK-ARMV5TE-NEXT: add [[ADDR1]], [[ADDR1]], #1020
; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #1020]
; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #1020]
; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #1020]
; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #1024]
; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #1024]
; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #1020]
entry:
  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 1020) to i64*), align 8
  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 1020) to i64*), align 8
  ret void
}

; Offset 1024: out of range for both ARM and T2 ldrd/strd encodings,
; so both materialize the address with an add first.
define void @test_offset_4() {
; CHECK-LABEL: test_offset_4:
; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV5TE: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV5TE-NEXT: add [[ADDR0]], [[ADDR0]], #1024
; CHECK-ARMV5TE-NEXT: add [[ADDR1]], [[ADDR1]], #1024
; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-T2: movw [[ADDR1:r[0-9]+]], :lower16:y
; CHECK-T2-NEXT: movw [[ADDR0:r[0-9]+]], :lower16:x
; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
; CHECK-T2-NEXT: add.w [[ADDR0]], [[ADDR0]], #1024
; CHECK-T2-NEXT: add.w [[ADDR1]], [[ADDR1]], #1024
; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #1024]
; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #1028]
; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #1028]
; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #1024]
entry:
  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 1024) to i64*), align 8
  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 1024) to i64*), align 8
  ret void
}

; Stack (sp-relative) access: ldrd/strd against sp where supported;
; split ldr/str pairs on ARMv4T.
define i64 @test_stack() {
; CHECK-LABEL: test_stack:
; CHECK-ARMV5TE: sub sp, sp, #80
; CHECK-ARMV5TE-NEXT: mov [[R0:r[0-9]+]], #0
; CHECK-ARMV5TE-NEXT: mov [[R1:r[0-9]+]], #1
; CHECK-ARMV5TE-NEXT: strd [[R1]], [[R0]], [sp, #8]
; CHECK-ARMV5TE-NEXT: ldrd r0, r1, [sp, #8]
; CHECK-ARMV5TE-NEXT: add sp, sp, #80
; CHECK-ARMV5TE-NEXT: bx lr
; CHECK-T2: sub sp, #80
; CHECK-T2-NEXT: movs [[R0:r[0-9]+]], #0
; CHECK-T2-NEXT: movs [[R1:r[0-9]+]], #1
; CHECK-T2-NEXT: strd [[R1]], [[R0]], [sp, #8]
; CHECK-T2-NEXT: ldrd r0, r1, [sp, #8]
; CHECK-T2-NEXT: add sp, #80
; CHECK-T2-NEXT: bx lr
; CHECK-ARMV4T: sub sp, sp, #80
; CHECK-ARMV4T-NEXT: mov [[R0:r[0-9]+]], #0
; CHECK-ARMV4T-NEXT: str [[R0]], [sp, #12]
; CHECK-ARMV4T-NEXT: mov [[R1:r[0-9]+]], #1
; CHECK-ARMV4T-NEXT: str [[R1]], [sp, #8]
; CHECK-ARMV4T-NEXT: ldr r0, [sp, #8]
; CHECK-ARMV4T-NEXT: ldr r1, [sp, #12]
; CHECK-ARMV4T-NEXT: add sp, sp, #80
; CHECK-ARMV4T-NEXT: bx lr
entry:
  %a = alloca [10 x i64], align 8
  %arrayidx = getelementptr inbounds [10 x i64], [10 x i64]* %a, i32 0, i32 1
  store volatile i64 1, i64* %arrayidx, align 8
  %arrayidx1 = getelementptr inbounds [10 x i64], [10 x i64]* %a, i32 0, i32 1
  %0 = load volatile i64, i64* %arrayidx1, align 8
  ret i64 %0
}