; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s

define amdgpu_ps <4 x float> @load_2darraymsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
  ; GFX6-LABEL: name: load_2darraymsaa
  ; GFX6: bb.1 (%ir-block.0):
  ; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[BUILD_VECTOR1]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX6: $vgpr0 = COPY [[UV]](s32)
  ; GFX6: $vgpr1 = COPY [[UV1]](s32)
  ; GFX6: $vgpr2 = COPY [[UV2]](s32)
  ; GFX6: $vgpr3 = COPY [[UV3]](s32)
  ; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10NSA-LABEL: name: load_2darraymsaa
  ; GFX10NSA: bb.1 (%ir-block.0):
  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10NSA: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %v = call <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

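; TFE (texture fail enable) variant: the intrinsic returns an extra status
; dword, so the legalizer widens the result to <5 x s32> and the status
; value is stored to %out.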
define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
  ; GFX6-LABEL: name: load_2darraymsaa_tfe
  ; GFX6: bb.1 (%ir-block.0):
  ; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX6: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
  ; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32)
  ; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<5 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[BUILD_VECTOR1]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<5 x s32>)
  ; GFX6: G_STORE [[UV4]](s32), [[MV]](p1) :: (store 4 into %ir.out, addrspace 1)
  ; GFX6: $vgpr0 = COPY [[UV]](s32)
  ; GFX6: $vgpr1 = COPY [[UV1]](s32)
  ; GFX6: $vgpr2 = COPY [[UV2]](s32)
  ; GFX6: $vgpr3 = COPY [[UV3]](s32)
  ; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10NSA-LABEL: name: load_2darraymsaa_tfe
  ; GFX10NSA: bb.1 (%ir-block.0):
  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10NSA: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
  ; GFX10NSA: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<5 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<5 x s32>)
  ; GFX10NSA: G_STORE [[UV4]](s32), [[MV]](p1) :: (store 4 into %ir.out, addrspace 1)
  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.2darraymsaa.sl_v4f32i32s.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
  %v.vec = extractvalue { <4 x float>, i32 } %v, 0
  %v.err = extractvalue { <4 x float>, i32 } %v, 1
  store i32 %v.err, i32 addrspace(1)* %out, align 4
  ret <4 x float> %v.vec
}

declare <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare { <4 x float>, i32 } @llvm.amdgcn.image.load.2darraymsaa.sl_v4f32i32s.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind readonly }