; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-ALLOCA %s
; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-PROMOTE %s
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-promote-alloca -sroa -instcombine < %s | FileCheck -check-prefix=OPT %s

target datalayout = "A5"

; OPT-LABEL: @vector_read_alloca_bitcast(
; OPT-NOT: alloca
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT-NEXT: store i32 %0, i32 addrspace(1)* %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_bitcast:
; GCN-ALLOCA-COUNT-4: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; NOTE(review): the four "GCN_PROMOTE" (underscore) lines below and in
; vector_read_alloca_multiuse were dead checks — the RUN line only enables the
; GCN-PROMOTE prefix — so the s_cmp_lg_u32 checks were never exercised.
; Fixed to GCN-PROMOTE; confirm check ordering against current codegen.
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
; GCN-PROMOTE: v_cmp_eq_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 1
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc
; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_read_alloca_bitcast(i32 addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 1, i32 addrspace(5)* %y
  store i32 2, i32 addrspace(5)* %z
  store i32 3, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
  %tmp2 = load i32, i32 addrspace(5)* %tmp1
  store i32 %tmp2, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_write_alloca_bitcast(
; OPT-NOT: alloca
; OPT: %0 = insertelement <4 x i32> zeroinitializer, i32 1, i32 %w_index
; OPT-NEXT: %1 = extractelement <4 x i32> %0, i32 %r_index
; OPT-NEXT: store i32 %1, i32 addrspace(1)* %out, align

; GCN-LABEL: {{^}}vector_write_alloca_bitcast:
; GCN-ALLOCA-COUNT-5: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE-COUNT-7: v_cndmask

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_alloca_bitcast(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 0, i32 addrspace(5)* %y
  store i32 0, i32 addrspace(5)* %z
  store i32 0, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %w_index
  store i32 1, i32 addrspace(5)* %tmp1
  %tmp2 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %r_index
  %tmp3 = load i32, i32 addrspace(5)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_write_read_bitcast_to_float(
; OPT-NOT: alloca
; OPT: bb2:
; OPT: %tmp.sroa.0.0 = phi <6 x float> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x float> %tmp.sroa.0.0, float %tmp73, i32 %tmp10
; OPT: .preheader:
; OPT: %bc = bitcast <6 x float> %0 to <6 x i32>
; OPT: %1 = extractelement <6 x i32> %bc, i32 %tmp20

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_float:
; GCN-ALLOCA: buffer_store_dword

; GCN-PROMOTE-COUNT-6: v_cmp_eq_u16
; GCN-PROMOTE-COUNT-6: v_cndmask

; GCN: s_cbranch

; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_read_bitcast_to_float(float addrspace(1)* %arg) {
bb:
  %tmp = alloca [6 x float], align 4, addrspace(5)
  %tmp1 = bitcast [6 x float] addrspace(5)* %tmp to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 24, i8 addrspace(5)* %tmp1) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp4
  %tmp6 = bitcast float addrspace(1)* %tmp5 to i32 addrspace(1)*
  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
  %tmp8 = trunc i32 %tmp3 to i16
  %tmp9 = urem i16 %tmp8, 6
  %tmp10 = zext i16 %tmp9 to i32
  %tmp11 = getelementptr inbounds [6 x float], [6 x float] addrspace(5)* %tmp, i32 0, i32 %tmp10
  %tmp12 = bitcast float addrspace(5)* %tmp11 to i32 addrspace(5)*
  store i32 %tmp7, i32 addrspace(5)* %tmp12, align 4
  %tmp13 = add nuw nsw i32 %tmp3, 1
  %tmp14 = icmp eq i32 %tmp13, 1000
  br i1 %tmp14, label %.preheader, label %bb2

bb15:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5i8(i64 24, i8 addrspace(5)* %tmp1) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
  %tmp17 = trunc i32 %tmp16 to i16
  %tmp18 = urem i16 %tmp17, 6
  %tmp19 = sub nuw nsw i16 5, %tmp18
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = getelementptr inbounds [6 x float], [6 x float] addrspace(5)* %tmp, i32 0, i32 %tmp20
  %tmp22 = bitcast float addrspace(5)* %tmp21 to i32 addrspace(5)*
  %tmp23 = load i32, i32 addrspace(5)* %tmp22, align 4
  %tmp24 = zext i32 %tmp16 to i64
  %tmp25 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp24
  %tmp26 = bitcast float addrspace(1)* %tmp25 to i32 addrspace(1)*
  store i32 %tmp23, i32 addrspace(1)* %tmp26, align 4
  %tmp27 = add nuw nsw i32 %tmp16, 1
  %tmp28 = icmp eq i32 %tmp27, 1000
  br i1 %tmp28, label %bb15, label %.preheader
}

; OPT-LABEL: @vector_write_read_bitcast_to_double(
; OPT-NOT: alloca
; OPT: bb2:
; OPT: %tmp.sroa.0.0 = phi <6 x double> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x double> %tmp.sroa.0.0, double %tmp73, i32 %tmp10
; OPT: .preheader:
; OPT: %bc = bitcast <6 x double> %0 to <6 x i64>
; OPT: %1 = extractelement <6 x i64> %bc, i32 %tmp20

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_double:

; GCN-ALLOCA-COUNT-2: buffer_store_dword
; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32

; GCN: s_cbranch

; GCN-ALLOCA-COUNT-2: buffer_load_dword
; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_read_bitcast_to_double(double addrspace(1)* %arg) {
bb:
  %tmp = alloca [6 x double], align 8, addrspace(5)
  %tmp1 = bitcast [6 x double] addrspace(5)* %tmp to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds double, double addrspace(1)* %arg, i64 %tmp4
  %tmp6 = bitcast double addrspace(1)* %tmp5 to i64 addrspace(1)*
  %tmp7 = load i64, i64 addrspace(1)* %tmp6, align 8
  %tmp8 = trunc i32 %tmp3 to i16
  %tmp9 = urem i16 %tmp8, 6
  %tmp10 = zext i16 %tmp9 to i32
  %tmp11 = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %tmp10
  %tmp12 = bitcast double addrspace(5)* %tmp11 to i64 addrspace(5)*
  store i64 %tmp7, i64 addrspace(5)* %tmp12, align 8
  %tmp13 = add nuw nsw i32 %tmp3, 1
  %tmp14 = icmp eq i32 %tmp13, 1000
  br i1 %tmp14, label %.preheader, label %bb2

bb15:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
  %tmp17 = trunc i32 %tmp16 to i16
  %tmp18 = urem i16 %tmp17, 6
  %tmp19 = sub nuw nsw i16 5, %tmp18
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %tmp20
  %tmp22 = bitcast double addrspace(5)* %tmp21 to i64 addrspace(5)*
  %tmp23 = load i64, i64 addrspace(5)* %tmp22, align 8
  %tmp24 = zext i32 %tmp16 to i64
  %tmp25 = getelementptr inbounds double, double addrspace(1)* %arg, i64 %tmp24
  %tmp26 = bitcast double addrspace(1)* %tmp25 to i64 addrspace(1)*
  store i64 %tmp23, i64 addrspace(1)* %tmp26, align 8
  %tmp27 = add nuw nsw i32 %tmp16, 1
  %tmp28 = icmp eq i32 %tmp27, 1000
  br i1 %tmp28, label %bb15, label %.preheader
}

; OPT-LABEL: @vector_write_read_bitcast_to_i64(
; OPT-NOT: alloca
; OPT: bb2:
; OPT: %tmp.sroa.0.0 = phi <6 x i64> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x i64> %tmp.sroa.0.0, i64 %tmp6, i32 %tmp9
; OPT: .preheader:
; OPT: %1 = extractelement <6 x i64> %0, i32 %tmp18

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_i64:

; GCN-ALLOCA-COUNT-2: buffer_store_dword
; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32

; GCN: s_cbranch

; GCN-ALLOCA-COUNT-2: buffer_load_dword
; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_read_bitcast_to_i64(i64 addrspace(1)* %arg) {
bb:
  %tmp = alloca [6 x i64], align 8, addrspace(5)
  %tmp1 = bitcast [6 x i64] addrspace(5)* %tmp to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp11, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp4
  %tmp6 = load i64, i64 addrspace(1)* %tmp5, align 8
  %tmp7 = trunc i32 %tmp3 to i16
  %tmp8 = urem i16 %tmp7, 6
  %tmp9 = zext i16 %tmp8 to i32
  %tmp10 = getelementptr inbounds [6 x i64], [6 x i64] addrspace(5)* %tmp, i32 0, i32 %tmp9
  store i64 %tmp6, i64 addrspace(5)* %tmp10, align 8
  %tmp11 = add nuw nsw i32 %tmp3, 1
  %tmp12 = icmp eq i32 %tmp11, 1000
  br i1 %tmp12, label %.preheader, label %bb2

bb13:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp14 = phi i32 [ %tmp23, %.preheader ], [ 0, %bb2 ]
  %tmp15 = trunc i32 %tmp14 to i16
  %tmp16 = urem i16 %tmp15, 6
  %tmp17 = sub nuw nsw i16 5, %tmp16
  %tmp18 = zext i16 %tmp17 to i32
  %tmp19 = getelementptr inbounds [6 x i64], [6 x i64] addrspace(5)* %tmp, i32 0, i32 %tmp18
  %tmp20 = load i64, i64 addrspace(5)* %tmp19, align 8
  %tmp21 = zext i32 %tmp14 to i64
  %tmp22 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp21
  store i64 %tmp20, i64 addrspace(1)* %tmp22, align 8
  %tmp23 = add nuw nsw i32 %tmp14, 1
  %tmp24 = icmp eq i32 %tmp23, 1000
  br i1 %tmp24, label %bb13, label %.preheader
}

; TODO: llvm.assume can be ignored

; OPT-LABEL: @vector_read_alloca_bitcast_assume(
; OPT: %tmp = alloca <4 x i32>, align 16, addrspace(5)
; OPT-NEXT: %x = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %tmp, i64 0, i64 0
; OPT-NEXT: store i32 0, i32 addrspace(5)* %x, align 16
; OPT-NEXT: %0 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp, align 16
; OPT-NEXT: %1 = shufflevector <4 x i32> %0, <4 x i32> <i32 undef, i32 1, i32 2, i32 3>, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
; OPT-NEXT: store <4 x i32> %1, <4 x i32> addrspace(5)* %tmp, align 16
; OPT-NEXT: %2 = extractelement <4 x i32> %1, i32 %index
; OPT-NEXT: store i32 %2, i32 addrspace(1)* %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_bitcast_assume:
; GCN-COUNT-4: buffer_store_dword

define amdgpu_kernel void @vector_read_alloca_bitcast_assume(i32 addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
  %cmp = icmp ne i32 addrspace(5)* %x, null
  call void @llvm.assume(i1 %cmp)
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 1, i32 addrspace(5)* %y
  store i32 2, i32 addrspace(5)* %z
  store i32 3, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
  %tmp2 = load i32, i32 addrspace(5)* %tmp1
  store i32 %tmp2, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_read_alloca_multiuse(
; OPT-NOT: alloca
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT-NEXT: %add2 = add nuw nsw i32 %0, 1
; OPT-NEXT: store i32 %add2, i32 addrspace(1)* %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_multiuse:
; GCN-ALLOCA-COUNT-4: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
; GCN-PROMOTE: v_cmp_eq_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 1
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_read_alloca_multiuse(i32 addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %b = bitcast [4 x i32] addrspace(5)* %tmp to float addrspace(5)*
  %x = bitcast float addrspace(5)* %b to i32 addrspace(5)*
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 1, i32 addrspace(5)* %y
  store i32 2, i32 addrspace(5)* %z
  store i32 3, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
  %tmp2 = load i32, i32 addrspace(5)* %tmp1
  %tmp3 = load i32, i32 addrspace(5)* %x
  %tmp4 = load i32, i32 addrspace(5)* %y
  %add1 = add i32 %tmp2, %tmp3
  %add2 = add i32 %add1, %tmp4
  store i32 %add2, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @bitcast_vector_to_vector(
; OPT-NOT: alloca
; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(1)* %out, align 16

; GCN-LABEL: {{^}}bitcast_vector_to_vector:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

define amdgpu_kernel void @bitcast_vector_to_vector(<4 x i32> addrspace(1)* %out) {
.entry:
  %alloca = alloca <4 x float>, align 16, addrspace(5)
  %cast = bitcast <4 x float> addrspace(5)* %alloca to <4 x i32> addrspace(5)*
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %cast
  %load = load <4 x i32>, <4 x i32> addrspace(5)* %cast, align 16
  store <4 x i32> %load, <4 x i32> addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_bitcast_from_alloca_array(
; OPT-NOT: alloca
; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(1)* %out, align 16

; GCN-LABEL: {{^}}vector_bitcast_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

define amdgpu_kernel void @vector_bitcast_from_alloca_array(<4 x i32> addrspace(1)* %out) {
.entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  %cast = bitcast [4 x float] addrspace(5)* %alloca to <4 x i32> addrspace(5)*
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %cast
  %load = load <4 x i32>, <4 x i32> addrspace(5)* %cast, align 16
  store <4 x i32> %load, <4 x i32> addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_bitcast_to_array_from_alloca_array(
; OPT-NOT: alloca
; OPT: %out.repack = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 0
; OPT-NEXT: store i32 1, i32 addrspace(1)* %out.repack, align 4
; OPT-NEXT: %out.repack1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 1
; OPT-NEXT: store i32 2, i32 addrspace(1)* %out.repack1, align 4
; OPT-NEXT: %out.repack2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 2
; OPT-NEXT: store i32 3, i32 addrspace(1)* %out.repack2, align 4
; OPT-NEXT: %out.repack3 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 3
; OPT-NEXT: store i32 4, i32 addrspace(1)* %out.repack3, align 4

; GCN-LABEL: {{^}}vector_bitcast_to_array_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

define amdgpu_kernel void @vector_bitcast_to_array_from_alloca_array([4 x i32] addrspace(1)* %out) {
.entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  %cast = bitcast [4 x float] addrspace(5)* %alloca to [4 x i32] addrspace(5)*
  store [4 x i32] [i32 1, i32 2, i32 3, i32 4], [4 x i32] addrspace(5)* %cast
  %load = load [4 x i32], [4 x i32] addrspace(5)* %cast, align 16
  store [4 x i32] %load, [4 x i32] addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_bitcast_to_struct_from_alloca_array(
; OPT-NOT: alloca
; OPT: %out.repack = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 0
; OPT-NEXT: store i32 1, i32 addrspace(1)* %out.repack, align 4
; OPT-NEXT: %out.repack1 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 1
; OPT-NEXT: store i32 2, i32 addrspace(1)* %out.repack1, align 4
; OPT-NEXT: %out.repack2 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 2
; OPT-NEXT: store i32 3, i32 addrspace(1)* %out.repack2, align 4
; OPT-NEXT: %out.repack3 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 3
; OPT-NEXT: store i32 4, i32 addrspace(1)* %out.repack3, align 4

; GCN-LABEL: {{^}}vector_bitcast_to_struct_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

%struct.v4 = type { i32, i32, i32, i32 }

define amdgpu_kernel void @vector_bitcast_to_struct_from_alloca_array(%struct.v4 addrspace(1)* %out) {
.entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  %cast = bitcast [4 x float] addrspace(5)* %alloca to %struct.v4 addrspace(5)*
  store %struct.v4 { i32 1, i32 2, i32 3, i32 4 }, %struct.v4 addrspace(5)* %cast
  %load = load %struct.v4, %struct.v4 addrspace(5)* %cast, align 16
  store %struct.v4 %load, %struct.v4 addrspace(1)* %out
  ret void
}

declare void @llvm.lifetime.start.p5i8(i64 immarg, i8 addrspace(5)* nocapture)

declare void @llvm.lifetime.end.p5i8(i64 immarg, i8 addrspace(5)* nocapture)

declare void @llvm.assume(i1)