; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s
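
; Plain 3D image load. On GFX6 the legalizer packs the three coordinates into
; a single <3 x s32> G_BUILD_VECTOR operand (the remaining address operand
; slots become $noreg), whereas GFX10's NSA (non-sequential address) encoding
; keeps each coordinate as a separate scalar operand.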
define amdgpu_ps float @image_load_3d_f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r) {
  ; GFX6-LABEL: name: image_load_3d_f32
  ; GFX6: bb.1 (%ir-block.0):
  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32)
  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.3d), 1, [[BUILD_VECTOR1]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
  ; GFX6:   $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0
  ; GFX10NSA-LABEL: name: image_load_3d_f32
  ; GFX10NSA: bb.1 (%ir-block.0):
  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.3d), 1, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
  ; GFX10NSA:   $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %tex = call float @llvm.amdgcn.image.load.3d.f32.i32(i32 1, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 0, i32 0)
  ret float %tex
}
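; The same load with the TFE (texture fail enable) bit set: the intrinsic
; additionally returns a status dword, so the result is legalized to
; <2 x s32>, unmerged, and the status word is stored through the undef
; global pointer.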
define amdgpu_ps float @image_load_3d_tfe_f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r) {
  ; GFX6-LABEL: name: image_load_3d_tfe_f32
  ; GFX6: bb.1 (%ir-block.0):
  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX6:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32)
  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.3d), 1, [[BUILD_VECTOR1]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
  ; GFX6:   G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0
  ; GFX10NSA-LABEL: name: image_load_3d_tfe_f32
  ; GFX10NSA: bb.1 (%ir-block.0):
  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.3d), 1, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
  ; GFX10NSA:   G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call { float, i32 } @llvm.amdgcn.image.load.3d.sl_f32i32s.i32(i32 1, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 1, i32 0)
  %tex = extractvalue { float, i32 } %val, 0
  %tfe = extractvalue { float, i32 } %val, 1
  store i32 %tfe, i32 addrspace(1)* undef
  ret float %tex
}

declare float @llvm.amdgcn.image.load.3d.f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare { float, i32 } @llvm.amdgcn.image.load.3d.sl_f32i32s.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind readonly }