/external/one-true-awk/ |
D | lex.c |
    92  #define RET(x) { if(dbg)printf("lex %s\n", tokname(x)); return(x); }  macro
   180  RET('}');  in yylex()
   202  RET(NUMBER);  in yylex()
   209  RET(NL);  in yylex()
   225  RET(';');  in yylex()
   234  RET(c);  in yylex()
   239  input(); RET(AND);  in yylex()
   241  RET('&');  in yylex()
   244  input(); RET(BOR);  in yylex()
   246  RET('|');  in yylex()
   [all …]
|
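Context for the matches above: RET is awk's lexer helper, wrapping every token return out of yylex() so that each token can be logged when the dbg flag is set. A minimal self-contained sketch of the same pattern; the tokname() body, lex_semi(), and main() here are stand-ins, not awk's actual code:

    #include <stdio.h>

    static int dbg = 1;                      /* awk's lexer debug flag       */
    static const char *tokname(int t) {      /* stand-in for awk's tokname() */
        static char buf[2];
        buf[0] = (char)t;
        return buf;
    }
    /* the macro from lex.c line 92: log the token if debugging, return it */
    #define RET(x) { if (dbg) printf("lex %s\n", tokname(x)); return (x); }

    static int lex_semi(void) { RET(';'); }  /* used exactly like 'return' */
    int main(void) { return lex_semi() == ';' ? 0 : 1; }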
/external/llvm-project/clang/test/CodeGenOpenCL/ |
D | to_addr_builtin.cl |
   17  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @__to_global(i8 addrspace(4)* %[[ARG]])
   18  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   22  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @__to_global(i8 addrspace(4)* %[[ARG]])
   23  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   27  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @__to_global(i8 addrspace(4)* %[[ARG]])
   28  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   32  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @__to_global(i8 addrspace(4)* %[[ARG]])
   33  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   37  //CHECK: %[[RET:.*]] = call i8 addrspace(3)* @__to_local(i8 addrspace(4)* %[[ARG]])
   38  //CHECK: %{{.*}} = bitcast i8 addrspace(3)* %[[RET]] to i32 addrspace(3)*
   [all …]
|
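These CHECK lines pin down how Clang lowers the OpenCL 2.0 pointer-rescoping builtins: to_global() becomes a call to the runtime function __to_global(), which takes a generic (addrspace(4)) pointer and yields a global (addrspace(1)) one, followed by a bitcast to the element type; to_local() likewise maps to __to_local() returning addrspace(3). The older copy of this test under /external/clang/ below checks the unprefixed names @to_global/@to_local. A hedged OpenCL C sketch of the kind of source that produces such IR; the kernel and variable names are illustrative, and it assumes -cl-std=CL2.0:

    __kernel void rescope(__global int *g, __local int *l) {
        int *p = g;                       /* decays to the generic address space */
        __global int *gp = to_global(p);  /* lowers to a call to __to_global()   */
        int *q = l;
        __local int *lp = to_local(q);    /* lowers to a call to __to_local()    */
        *gp = 1;
        *lp = 2;
    }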
/external/clang/test/CodeGenOpenCL/ |
D | to_addr_builtin.cl |
   17  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
   18  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   22  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
   23  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   27  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
   28  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   32  //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
   33  //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
   37  //CHECK: %[[RET:.*]] = call i8 addrspace(3)* @to_local(i8 addrspace(4)* %[[ARG]])
   38  //CHECK: %{{.*}} = bitcast i8 addrspace(3)* %[[RET]] to i32 addrspace(3)*
   [all …]
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | canonicalize-ashr-shl-to-masking.ll |
   19  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], [[X:%.*]]
   20  ; CHECK-NEXT: ret i8 [[RET]]
   40  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -8
   41  ; CHECK-NEXT: ret i8 [[RET]]
   51  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
   52  ; CHECK-NEXT: ret i8 [[RET]]
   66  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], [[X:%.*]]
   67  ; CHECK-NEXT: ret i8 [[RET]]
   76  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[X:%.*]], -8
   77  ; CHECK-NEXT: ret i8 [[RET]]
   [all …]
|
D | canonicalize-lshr-shl-to-masking.ll |
   19  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], [[X:%.*]]
   20  ; CHECK-NEXT: ret i8 [[RET]]
   40  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], 24
   41  ; CHECK-NEXT: ret i8 [[RET]]
   51  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
   52  ; CHECK-NEXT: ret i8 [[RET]]
   66  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], [[X:%.*]]
   67  ; CHECK-NEXT: ret i8 [[RET]]
   87  ; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], 24
   88  ; CHECK-NEXT: ret i8 [[RET]]
   [all …]
|
D | canonicalize-shl-lshr-to-masking.ll |
   18  ; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], [[X:%.*]]
   19  ; CHECK-NEXT: ret i32 [[RET]]
   39  ; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], 134217696
   40  ; CHECK-NEXT: ret i32 [[RET]]
   50  ; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], 4194303
   51  ; CHECK-NEXT: ret i32 [[RET]]
   61  ; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], 4194303
   62  ; CHECK-NEXT: ret i32 [[RET]]
   93  ; CHECK-NEXT: [[RET:%.*]] = shl nuw i32 [[X:%.*]], 5
   94  ; CHECK-NEXT: ret i32 [[RET]]
   [all …]
|
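All three canonicalize-*-to-masking files above test the same InstCombine rewrite from both directions: a right shift (lshr or ashr) paired with a left shift by the same amount, in either order, only masks bits, so the pair is replaced with a single and; when the shift amount is a constant, the mask is folded too (e.g. a shift pair by 3 on i8 becomes and ..., -8). The identities, sketched in C (function names are mine):

    /* right shift then left shift by c clears the low c bits:
       (x >> c) << c  ==  x & (~0u << c)
       (the ashr variant folds the same way, since the shl discards
       the replicated sign bits again)                              */
    unsigned clear_low(unsigned x, unsigned c)  { return (x >> c) << c; }

    /* left shift then logical right shift by c clears the high c bits:
       (x << c) >> c  ==  x & (~0u >> c)                            */
    unsigned clear_high(unsigned x, unsigned c) { return (x << c) >> c; }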
D | set-lowbits-mask-canonicalize.ll |
   21  ; CHECK-NEXT: [[RET:%.*]] = xor i32 [[NOTMASK]], -1
   22  ; CHECK-NEXT: ret i32 [[RET]]
   32  ; CHECK-NEXT: [[RET:%.*]] = xor i32 [[NOTMASK]], -1
   33  ; CHECK-NEXT: ret i32 [[RET]]
   63  ; CHECK-NEXT: [[RET:%.*]] = xor i32 [[NOTMASK]], -1
   64  ; CHECK-NEXT: ret i32 [[RET]]
   74  ; CHECK-NEXT: [[RET:%.*]] = xor i32 [[NOTMASK]], -1
   75  ; CHECK-NEXT: ret i32 [[RET]]
  105  ; CHECK-NEXT: [[RET:%.*]] = xor i32 [[NOTMASK]], -1
  106  ; CHECK-NEXT: ret i32 [[RET]]
   [all …]
|
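set-lowbits-mask-canonicalize.ll fixes InstCombine's preferred spelling of an n-lowest-bits mask: every variant is normalized to a shl of -1 (bound to [[NOTMASK]] in the full test) followed by xor ..., -1, that is, ~(-1 << n). In C (function names are mine):

    /* common spellings of the n-low-bits mask ...                   */
    unsigned mask_sub(unsigned n) { return (1u << n) - 1u; }
    /* ... are all canonicalized to this one: shl -1, n ; xor -1     */
    unsigned mask_not(unsigned n) { return ~(~0u << n); }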
D | masked-merge-and-of-ors.ll |
   22  ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
   23  ; CHECK-NEXT: ret i32 [[RET]]
   37  ; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[OR]], [[OR1]]
   38  ; CHECK-NEXT: ret <2 x i32> [[RET]]
   52  ; CHECK-NEXT: [[RET:%.*]] = and <3 x i32> [[OR]], [[OR1]]
   53  ; CHECK-NEXT: ret <3 x i32> [[RET]]
   70  ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
   71  ; CHECK-NEXT: ret i32 [[RET]]
   83  ; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[OR]], [[OR1]]
   84  ; CHECK-NEXT: ret <2 x i32> [[RET]]
   [all …]
|
D | masked-merge-add.ll |
   24  ; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
   25  ; CHECK-NEXT: ret i32 [[RET]]
   39  ; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
   40  ; CHECK-NEXT: ret <2 x i32> [[RET]]
   54  ; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]]
   55  ; CHECK-NEXT: ret <3 x i32> [[RET]]
   72  ; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
   73  ; CHECK-NEXT: ret i32 [[RET]]
   85  ; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
   86  ; CHECK-NEXT: ret <2 x i32> [[RET]]
   [all …]
|
D | masked-merge-or.ll |
   24  ; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
   25  ; CHECK-NEXT: ret i32 [[RET]]
   39  ; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
   40  ; CHECK-NEXT: ret <2 x i32> [[RET]]
   54  ; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]]
   55  ; CHECK-NEXT: ret <3 x i32> [[RET]]
   72  ; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
   73  ; CHECK-NEXT: ret i32 [[RET]]
   85  ; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
   86  ; CHECK-NEXT: ret <2 x i32> [[RET]]
   [all …]
|
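The three masked-merge files probe the classic bit-select idiom in its or-of-ands, add-of-ands, and and-of-ors spellings, for i32 as well as <2 x i32> and <3 x i32>; the CHECK lines record which shapes survive (the add form is shown re-expressed with or, which is safe because the two masked operands can never carry into each other). The patterns in C (function names are mine):

    /* select x's bits where m is set, y's bits where it is clear    */
    unsigned merge_or (unsigned x, unsigned y, unsigned m) { return (x & m) | (y & ~m); }
    /* the masked halves are bit-disjoint, so + computes the same    */
    unsigned merge_add(unsigned x, unsigned y, unsigned m) { return (x & m) + (y & ~m); }
    /* the De Morgan dual of merge_or                                */
    unsigned merge_and(unsigned x, unsigned y, unsigned m) { return (x | ~m) & (y | m); }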
D | memrchr.ll |
    9  ; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 [[N:%.*]])
   10  ; CHECK-NEXT: ret i8* [[RET]]
   19  ; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* nonnull [[STR:%.*]], i32 [[C:%.*]], i32 [[N:%.…
   20  ; CHECK-NEXT: ret i8* [[RET]]
   29  ; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 5)
   30  ; CHECK-NEXT: ret i8* [[RET]]
   39  ; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 5)
   40  ; CHECK-NEXT: ret i8* [[RET]]
   49  ; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 0)
   50  ; CHECK-NEXT: ret i8* [[RET]]
|
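memrchr.ll exercises the library-call simplifier on memrchr, the GNU extension that searches a buffer backwards; at this snapshot the calls are kept as-is (even with a constant length of 5 or 0) rather than folded. The C-level semantics for reference; last_slash() is an illustrative caller:

    #define _GNU_SOURCE
    #include <string.h>

    /* void *memrchr(const void *s, int c, size_t n);
       returns a pointer to the last occurrence of (unsigned char)c in
       the first n bytes of s, or NULL when it does not occur          */
    const char *last_slash(const char *path, size_t len) {
        return memrchr(path, '/', len);
    }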
D | ffs-1.ll |
   27  ; GENERIC-NEXT: [[RET:%.*]] = call i32 @ffsl(i32 0)
   28  ; GENERIC-NEXT: ret i32 [[RET]]
   39  ; GENERIC-NEXT: [[RET:%.*]] = call i32 @ffsll(i64 0)
   40  ; GENERIC-NEXT: ret i32 [[RET]]
   77  ; GENERIC-NEXT: [[RET:%.*]] = call i32 @ffsl(i32 65536)
   78  ; GENERIC-NEXT: ret i32 [[RET]]
   89  ; GENERIC-NEXT: [[RET:%.*]] = call i32 @ffsll(i64 1024)
   90  ; GENERIC-NEXT: ret i32 [[RET]]
  101  ; GENERIC-NEXT: [[RET:%.*]] = call i32 @ffsll(i64 65536)
  102  ; GENERIC-NEXT: ret i32 [[RET]]
   [all …]
|
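ffs-1.ll covers simplification of the find-first-set family; the GENERIC prefix above is the configuration where the long/long long variants stay as libcalls, while on targets known to provide them the optimizer can instead expand the calls inline via cttz. What the functions compute, in C (glibc needs _GNU_SOURCE for the l/ll variants; demo() is mine):

    #define _GNU_SOURCE
    #include <string.h>    /* ffsl, ffsll (GNU) */
    #include <strings.h>   /* ffs (POSIX)       */

    int demo(void) {
        /* 1-based index of the least significant set bit; 0 for 0 */
        return ffs(0)           /* 0: no bit set     */
             + ffsl(65536L)     /* 17: bit 16 is set */
             + ffsll(1024LL);   /* 11: bit 10 is set */
    }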
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | global_atomics_i64.ll |
   17  ; CIVI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32…
   18  ; CIVI: buffer_store_dwordx2 [[RET]]
   42  ; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-…
   43  ; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} g…
   44  ; CIVI: buffer_store_dwordx2 [[RET]]
   46  ; GFX9: global_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]…
   66  ; CIVI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
   67  ; CIVI: buffer_store_dwordx2 [[RET]]
   69  ; GFX9: global_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s{{\[[0…
   89  ; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-…
   [all …]
|
D | global_atomics.ll |
   57  ; SIVI: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
   58  ; SIVI: buffer_store_dword [[RET]]
   82  ; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr…
   83  ; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
   84  ; SIVI: buffer_store_dword [[RET]]
   86  ; GFX9: global_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, off offset:16 glc{{$}}
   87  ; GFX9: global_store_dword v{{[0-9]+}}, [[RET]], s
  107  ; SIVI: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
  108  ; SIVI: buffer_store_dword [[RET]]
  110  ; GFX9: global_atomic_add [[RET:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}] glc{{…
   [all …]
|
D | flat_atomics.ll |
   35  ; CIVI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
   36  ; GFX9: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
   37  ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   58  ; CIVI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
   59  ; GFX9: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
   60  ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   79  ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
   80  ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   98  ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
   99  ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   [all …]
|
D | flat_atomics_i64.ll |
   14  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   15  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   35  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   36  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   55  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   56  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   74  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   75  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   94  ; GCN: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   95  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   [all …]
|
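The four AMDGPU files above (and their older /external/llvm/ copies further down) all check the same contract: an atomic RMW whose result is used must be selected with the glc (return-value) bit, the old value landing in [[RET]] and then being stored; the _x2 suffix marks the 64-bit variants, and the buffer_/flat_/global_ prefixes track the addressing mode each subtarget (SI/CI/VI/GFX9) uses. The tests themselves are written directly in LLVM IR; a C11 sketch of an equivalent source pattern (function names are mine):

    #include <stdatomic.h>

    void add_ret(_Atomic long long *p, long long v, long long *out) {
        /* the result is used, so the backend must emit
           ..._atomic_add_x2 ... glc   followed by   ..._store_dwordx2 */
        *out = atomic_fetch_add(p, v);
    }

    void add_noret(_Atomic long long *p, long long v) {
        /* result unused: the glc bit (and the follow-up store) can go */
        atomic_fetch_add(p, v);
    }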
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | branchfolding-debug-invariant.mir |
   13  ; CHECK-NOT: RET
   16  ; CHECK: RET 0
   24  RET 0
   28  RET 0
   45  ; CHECK-NOT: RET
   50  ; CHECK: RET 0
   58  RET 0
   64  RET 0
   80  ; CHECK: RET 0
   84  ; CHECK: RET 0
   [all …]
|
/external/llvm-project/llvm/test/CodeGen/X86/GlobalISel/ |
D | legalize-ext.mir |
  112  ; X32: RET 0, implicit $al
  119  ; X64: RET 0, implicit $al
  124  RET 0, implicit $al
  145  ; X32: RET 0, implicit $ax
  152  ; X64: RET 0, implicit $ax
  157  RET 0, implicit $ax
  179  ; X32: RET 0, implicit $eax
  186  ; X64: RET 0, implicit $eax
  191  RET 0, implicit $eax
  210  ; X32: RET 0, implicit $ax
   [all …]
|
D | select-constant.mir |
   51  ; CHECK: RET 0, implicit $al
   54  RET 0, implicit $al
   69  ; CHECK: RET 0, implicit $ax
   72  RET 0, implicit $ax
   87  ; CHECK: RET 0, implicit $eax
   90  RET 0, implicit $eax
  104  ; CHECK: RET 0, implicit $eax
  107  RET 0, implicit $eax
  122  ; CHECK: RET 0, implicit $rax
  125  RET 0, implicit $rax
   [all …]
|
D | select-memop-scalar-unordered.mir |
  118  ; SSE: RET 0, implicit $al
  123  ; AVX: RET 0, implicit $al
  128  ; AVX512F: RET 0, implicit $al
  133  ; AVX512VL: RET 0, implicit $al
  137  RET 0, implicit $al
  156  ; SSE: RET 0, implicit $ax
  161  ; AVX: RET 0, implicit $ax
  166  ; AVX512F: RET 0, implicit $ax
  171  ; AVX512VL: RET 0, implicit $ax
  175  RET 0, implicit $ax
   [all …]
|
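In X86 MIR, RET 0 is the return pseudo-instruction (the operand is the stack adjustment; it is the same instruction matched in branchfolding-debug-invariant.mir above), and its implicit operand names the physical register carrying the return value, so the register tracks the width of the returned type. Roughly the C shapes behind the GlobalISel patterns (hypothetical source; the tests are hand-written MIR):

    char      r8 (void) { return 1; }   /* RET 0, implicit $al  */
    short     r16(void) { return 1; }   /* RET 0, implicit $ax  */
    int       r32(void) { return 1; }   /* RET 0, implicit $eax */
    long long r64(void) { return 1; }   /* RET 0, implicit $rax */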
/external/llvm/test/CodeGen/AMDGPU/ |
D | global_atomics_i64.ll |
   14  ; GCN: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 …
   15  ; GCN: buffer_store_dwordx2 [[RET]]
   36  ; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-…
   37  ; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} g…
   38  ; GCN: buffer_store_dwordx2 [[RET]]
   57  ; GCN: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
   58  ; GCN: buffer_store_dwordx2 [[RET]]
   77  ; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-…
   78  ; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} g…
   79  ; GCN: buffer_store_dwordx2 [[RET]]
   [all …]
|
D | global_atomics.ll |
   37  ; GCN: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
   38  ; GCN: buffer_store_dword [[RET]]
   59  ; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr…
   60  ; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
   61  ; GCN: buffer_store_dword [[RET]]
   80  ; GCN: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
   81  ; GCN: buffer_store_dword [[RET]]
  100  ; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr…
  101  ; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
  102  ; GCN: buffer_store_dword [[RET]]
   [all …]
|
D | flat_atomics_i64.ll |
   14  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   15  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   35  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   36  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   55  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   56  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   74  ; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   75  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   94  ; GCN: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} …
   95  ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
   [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vec_cmp.ll |
   56  ; CHECK: vcmpequb [[RET:[0-9]+]], 2, 3
   57  ; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
   66  ; CHECK: vcmpgtsb [[RET:[0-9]+]], 2, 3
   67  ; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
   76  ; CHECK: vcmpgtub [[RET:[0-9]+]], 2, 3
   77  ; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
  122  ; CHECK: vcmpgtsb [[RET:[0-9]+]], 3, 2
  123  ; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
  132  ; CHECK: vcmpgtub [[RET:[0-9]+]], 3, 2
  133  ; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
   [all …]
|
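vec_cmp.ll reflects Altivec's compare repertoire: the hardware only has "equal" (vcmpequb) and "greater than" (vcmpgtsb/vcmpgtub) byte compares, so ne, le, and ge are built by emitting the opposite compare into [[RET]] and inverting it with vnor [[RET]], [[RET]] (NOR of a value with itself is bitwise NOT). In C with GCC/Clang vector extensions (names are mine):

    typedef signed char v16i8 __attribute__((vector_size(16)));

    v16i8 cmp_ne(v16i8 a, v16i8 b) {
        /* no native "not equal" compare: codegen emits
           vcmpequb RET, a, b ; vnor result, RET, RET */
        return a != b;
    }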
/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | constantfold-shl-nuw-C-to-C.ll |
   47  ; CHECK-NEXT: [[RET:%.*]] = shl nuw i8 [[X]], [[Y:%.*]]
   48  ; CHECK-NEXT: ret i8 [[RET]]
   60  ; CHECK-NEXT: [[RET:%.*]] = shl nuw i8 [[X]], [[Y:%.*]]
   61  ; CHECK-NEXT: ret i8 [[RET]]
  103  ; CHECK-NEXT: [[RET:%.*]] = shl nuw i8 127, [[X:%.*]]
  104  ; CHECK-NEXT: ret i8 [[RET]]
  113  ; CHECK-NEXT: [[RET:%.*]] = shl i8 -1, [[X:%.*]]
  114  ; CHECK-NEXT: ret i8 [[RET]]
  122  ; CHECK-NEXT: [[RET:%.*]] = shl nsw i8 -1, [[X:%.*]]
  123  ; CHECK-NEXT: ret i8 [[RET]]
   [all …]
|
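constantfold-shl-nuw-C-to-C.ll tests the InstSimplify fold that gives the file its name: in shl nuw C, %x, if C's top bit is set then any nonzero %x would shift a set bit out (an unsigned wrap), which nuw rules out, so %x must be 0 and the whole expression simplifies to C. The retained cases above are exactly the ones where that argument fails: a variable left operand, the constant 127 (which tolerates a shift by 1), and -1 without the nuw flag. A small C check of the wrap condition (function name is mine):

    #include <assert.h>
    #include <stdint.h>

    /* does shifting the i8 value c left by x keep every set bit?
       (this is the condition the nuw flag asserts)               */
    static int shl_nuw_ok(uint8_t c, unsigned x) {
        return x < 8 && (uint8_t)((uint8_t)(c << x) >> x) == c;
    }

    int main(void) {
        assert( shl_nuw_ok(0xFF, 0));  /* -1: only x == 0 avoids wrap...     */
        assert(!shl_nuw_ok(0xFF, 1));  /* ...so shl nuw i8 -1, x folds to -1 */
        assert( shl_nuw_ok(0x7F, 1));  /* 127 also tolerates x == 1...       */
        assert(!shl_nuw_ok(0x7F, 2));  /* ...so shl nuw i8 127, x can't fold */
        return 0;
    }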