; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s

; We may have subregister live ranges that are undefined on some paths. The
; verifier should not complain about this.

; On the %B0 path, lane 3 of %v0 is undef, so the <4 x float> value reaches
; %B30.2 with a partially-undefined subregister live range. The machine
; verifier must accept this.
define amdgpu_kernel void @func() #0 {
; CHECK-LABEL: func:
; CHECK:       ; %bb.0: ; %B0
; CHECK-NEXT:    s_mov_b32 s0, 0
; CHECK-NEXT:    s_cbranch_scc1 BB0_2
; CHECK-NEXT:  ; %bb.1: ; %B30.1
; CHECK-NEXT:    s_mov_b32 s0, 0x7fc00000
; CHECK-NEXT:  BB0_2: ; %B30.2
; CHECK-NEXT:    v_mov_b32_e32 v0, s0
; CHECK-NEXT:    s_mov_b32 m0, -1
; CHECK-NEXT:    ds_write_b32 v0, v0
; CHECK-NEXT:    s_endpgm
B0:
  br i1 undef, label %B1, label %B2

B1:
  br label %B2

B2:
  ; Incoming value from %B0 has an undef element in lane 3.
  %v0 = phi <4 x float> [ zeroinitializer, %B1 ], [ <float 0.0, float 0.0, float 0.0, float undef>, %B0 ]
  br i1 undef, label %B30.1, label %B30.2

B30.1:
  %sub = fsub <4 x float> %v0, undef
  br label %B30.2

B30.2:
  %v3 = phi <4 x float> [ %sub, %B30.1 ], [ %v0, %B2 ]
  %ve0 = extractelement <4 x float> %v3, i32 0
  store float %ve0, float addrspace(3)* undef, align 4
  ret void
}

; FIXME: Extra undef subregister copy should be removed before
; overwritten with defined copy
; %tmp2 is a <4 x i32> with only lane 0 defined; it stays live across the
; %bb9 loop and is stored again in %bb11, exercising a partially-undef copy
; that survives a "valley" in the CFG.
define amdgpu_ps float @valley_partially_undef_copy() #0 {
; CHECK-LABEL: valley_partially_undef_copy:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_mov_b32 s3, 0xf000
; CHECK-NEXT:    s_mov_b32 s2, -1
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], 0
; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; CHECK-NEXT:    v_mov_b32_e32 v2, 0x7fc00000
; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], 0
; CHECK-NEXT:    s_waitcnt vmcnt(1)
; CHECK-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
; CHECK-NEXT:  BB1_1: ; %bb9
; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
; CHECK-NEXT:    s_cbranch_vccnz BB1_1
; CHECK-NEXT:  ; %bb.2: ; %bb11
; CHECK-NEXT:    s_mov_b32 s3, 0xf000
; CHECK-NEXT:    s_mov_b32 s2, -1
; CHECK-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; CHECK-NEXT:    ; return to shader part epilog
bb:
  %tmp = load volatile i32, i32 addrspace(1)* undef, align 4
  %tmp1 = load volatile i32, i32 addrspace(1)* undef, align 4
  ; Only lane 0 of %tmp2 is defined; lanes 1-3 remain undef.
  %tmp2 = insertelement <4 x i32> undef, i32 %tmp1, i32 0
  %tmp3 = bitcast i32 %tmp1 to float
  %tmp4 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %tmp3, float %tmp3, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
  %tmp5 = extractelement <4 x float> %tmp4, i32 0
  %tmp6 = fmul float %tmp5, undef
  %tmp7 = fadd float %tmp6, %tmp6
  %tmp8 = insertelement <4 x i32> %tmp2, i32 %tmp, i32 1
  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* undef, align 16
  store float %tmp7, float addrspace(1)* undef, align 4
  br label %bb9

bb9:                                              ; preds = %bb9, %bb
  %tmp10 = icmp eq i32 %tmp, 0
  br i1 %tmp10, label %bb9, label %bb11

bb11:                                             ; preds = %bb9
  ; %tmp2 (mostly undef) is kept live across the loop above.
  store <4 x i32> %tmp2, <4 x i32> addrspace(1)* undef, align 16
  ret float undef
}

; FIXME: Should be able to remove the undef copies
; %partially.undef.0 defines only lane 0; lane 0 is then deliberately
; overwritten in %partially.undef.1 (both inserts use index 0), so the copy
; of the undef lanes is dead — the FIXME above tracks eliminating it.
define amdgpu_kernel void @partially_undef_copy() #0 {
; CHECK-LABEL: partially_undef_copy:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    v_mov_b32_e32 v5, 5
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    v_mov_b32_e32 v6, 6
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    v_mov_b32_e32 v0, v5
; CHECK-NEXT:    v_mov_b32_e32 v1, v6
; CHECK-NEXT:    v_mov_b32_e32 v2, v7
; CHECK-NEXT:    v_mov_b32_e32 v3, v8
; CHECK-NEXT:    s_mov_b32 s3, 0xf000
; CHECK-NEXT:    s_mov_b32 s2, -1
; CHECK-NEXT:    v_mov_b32_e32 v0, v6
; CHECK-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    v_nop
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_endpgm
  ; Pin the two defined values to physical registers v5/v6 via inline asm.
  %tmp0 = call i32 asm sideeffect "v_mov_b32_e32 v5, 5", "={v5}"()
  %tmp1 = call i32 asm sideeffect "v_mov_b32_e32 v6, 6", "={v6}"()

  ; Both inserts target lane 0: %tmp0 is immediately overwritten by %tmp1.
  %partially.undef.0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %partially.undef.1 = insertelement <4 x i32> %partially.undef.0, i32 %tmp1, i32 0

  store volatile <4 x i32> %partially.undef.1, <4 x i32> addrspace(1)* undef, align 16
  ; Force %partially.undef.0 into the v[5:8] quad, keeping the undef lanes live.
  tail call void asm sideeffect "v_nop", "v={v[5:8]}"(<4 x i32> %partially.undef.0)
  ret void
}
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }