; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=aarch64 -global-isel %s -o - -stop-after=irtranslator | FileCheck %s

; A dynamic alloca of i8 elements: the element count is zero-extended to 64
; bits, scaled by the element size (1 byte), rounded up to a multiple of 16,
; and passed to G_DYN_STACKALLOC.
define i8* @test_simple_alloca(i32 %numelts) {
  ; CHECK-LABEL: name: test_simple_alloca
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $w0
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
  ; CHECK:   [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
  ; CHECK:   [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
  ; CHECK:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
  ; CHECK:   $x0 = COPY [[DYN_STACKALLOC]](p0)
  ; CHECK:   RET_ReallyLR implicit $x0
  %addr = alloca i8, i32 %numelts
  ret i8* %addr
}

; As above, but the requested 32-byte alignment is carried on the
; G_DYN_STACKALLOC instruction.
define i8* @test_aligned_alloca(i32 %numelts) {
  ; CHECK-LABEL: name: test_aligned_alloca
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $w0
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
  ; CHECK:   [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
  ; CHECK:   [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
  ; CHECK:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 32
  ; CHECK:   $x0 = COPY [[DYN_STACKALLOC]](p0)
  ; CHECK:   RET_ReallyLR implicit $x0
  %addr = alloca i8, i32 %numelts, align 32
  ret i8* %addr
}

; An i128 alloca scales the element count by the 16-byte element size before
; the same round-up to a multiple of 16.
define i128* @test_natural_alloca(i32 %numelts) {
  ; CHECK-LABEL: name: test_natural_alloca
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $w0
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
  ; CHECK:   [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
  ; CHECK:   [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
  ; CHECK:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
  ; CHECK:   $x0 = COPY [[DYN_STACKALLOC]](p0)
  ; CHECK:   RET_ReallyLR implicit $x0
  %addr = alloca i128, i32 %numelts
  ret i128* %addr
}