# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple aarch64-- -run-pass=legalizer %s -o - | FileCheck %s

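# Check that the G_UNMERGE_VALUES of the G_BUILD_VECTOR is combined away
# and the use of the extracted element is rewritten to the original scalar.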
---
name:            test_unmerge
body:             |
  bb.1:
    liveins: $w0
    ; CHECK-LABEL: name: test_unmerge
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: $w0 = COPY [[COPY]](s32)
    %0:_(s32) = COPY $w0
    %1:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %0(s32), %0(s32), %0(s32)
    %2:_(s32), %3:_(s32), %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<4 x s32>)
    $w0 = COPY %2(s32)
...

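# Check that the G_TRUNC and G_ANYEXT artifacts around the illegal s1 G_ADD
# are replaced with copies and the addition is widened to s32.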
---
name:            test_legal_const_ext
body:             |
  bb.1:
    liveins: $w0
    ; CHECK-LABEL: name: test_legal_const_ext
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[C]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    %0:_(s32) = COPY $w0
    %1:_(s1) = G_TRUNC %0(s32)
    %2:_(s1) = G_CONSTANT i1 0
    %3:_(s1) = G_ADD %1(s1), %2(s1)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)
...

# Check that the artifact combiner can get rid of the big
# vector type (<4 x s64>) by combining the G_UNMERGE_VALUES
# with the G_CONCAT_VECTORS and turning the pair into bitcasts.
---
name:            concat_vectors_unmerge_to_bitcast
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: concat_vectors_unmerge_to_bitcast
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[COPY]](<2 x s64>)
    ; CHECK: [[BITCAST1:%[0-9]+]]:_(s128) = G_BITCAST [[COPY1]](<2 x s64>)
    ; CHECK: $q0 = COPY [[BITCAST]](s128)
    ; CHECK: $q1 = COPY [[BITCAST1]](s128)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
    %3:_(s128), %4:_(s128) = G_UNMERGE_VALUES %2(<4 x s64>)
    $q0 = COPY %3(s128)
    $q1 = COPY %4(s128)
...

# Check that the artifact combiner can get rid of the big
# vector type (<4 x s64>) by combining the G_UNMERGE_VALUES
# with the G_CONCAT_VECTORS and turning that into smaller
# G_UNMERGE_VALUES of the <2 x s64> sources.
---
name:            concat_vectors_unmerge_to_unmerge
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: concat_vectors_unmerge_to_unmerge
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
    ; CHECK: $x0 = COPY [[UV]](s64)
    ; CHECK: $x1 = COPY [[UV1]](s64)
    ; CHECK: $x2 = COPY [[UV2]](s64)
    ; CHECK: $x3 = COPY [[UV3]](s64)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
    %3:_(s64), %4:_(s64), %5:_(s64), %6:_(s64) = G_UNMERGE_VALUES %2(<4 x s64>)
    $x0 = COPY %3(s64)
    $x1 = COPY %4(s64)
    $x2 = COPY %5(s64)
    $x3 = COPY %6(s64)
...
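
# Check that the G_UNMERGE_VALUES of the G_MERGE_VALUES created while
# narrowing the s128 G_ZEXTLOAD is combined away, so the expanded 64-bit
# multiply uses the narrowed load and the zero constant directly.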
---
name:            unmerge_merge_combine
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $x0
    ; CHECK-LABEL: name: unmerge_merge_combine
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16)
    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV]]
    ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[UV]]
    ; CHECK: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV1]]
    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[UV]]
    ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
    ; CHECK: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MUL]](s64), [[ADD1]](s64)
    ; CHECK: $q0 = COPY [[MV]](s128)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s128) = G_ZEXTLOAD %0:_(p0) :: (load 8)
    %2:_(s128) = G_LOAD %0:_(p0) :: (load 16)
    %3:_(s128) = G_MUL %1:_, %2:_
    $q0 = COPY %3
    RET_ReallyLR
...