Searched refs:SREG (Results 1 – 17 of 17) sorted by relevance
/external/libunwind/src/mips/

D | getcontext-android.S
      36  # define SREG(X) \  (macro)
      46  # define SREG(X) sd $X, (LINUX_UC_MCONTEXT_GREGS + 8 * X) ($4)  (macro)
      56  SREG (0)
      57  SREG (1)
      58  SREG (2)
      59  SREG (3)
      60  SREG (4)
      61  SREG (5)
      62  SREG (6)
      63  SREG (7)
      [all …]

D | getcontext.S
      37  # define SREG(X) \  (macro)
      47  # define SREG(X) sd $X, (LINUX_UC_MCONTEXT_GREGS + 8 * X) ($4)  (macro)
      57  SREG (1)
      58  SREG (0)
      59  SREG (2)
      60  SREG (3)
      61  SREG (4)
      62  SREG (5)
      63  SREG (6)
      64  SREG (7)
      [all …]
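Both getcontext variants above define SREG(X) as a doubleword store of general register $X into the saved-register block of the ucontext whose pointer arrives in $4 (argument register a0), at an 8-byte stride per register; the routine body then just invokes the macro once per register. A minimal C++ sketch of what one SREG(X) expansion does, using illustrative struct names rather than the real LINUX_UC_MCONTEXT_GREGS layout:

#include <cstdint>

// Illustrative stand-ins: the real layout/offset constant is
// LINUX_UC_MCONTEXT_GREGS, resolved when libunwind is built.
struct mcontext_sketch {
    uint64_t gregs[32];  // one 8-byte slot per MIPS general register
};

struct ucontext_sketch {
    mcontext_sketch uc_mcontext;
};

// SREG(X) expands to "sd $X, (LINUX_UC_MCONTEXT_GREGS + 8 * X)($4)":
// with the ucontext pointer in $4, each invocation is a 64-bit store
// of general register $X at byte offset (gregs base + 8 * X).
inline void sreg_store(ucontext_sketch* uc, int x, uint64_t reg_value) {
    uc->uc_mcontext.gregs[x] = reg_value;
}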
/external/llvm/lib/Target/AVR/

D | AVRInstrInfo.td
      297  // sub / add which can clobber SREG.
      298  let Defs = [SP, SREG],
      322  Defs = [SREG] in
      332  (implicit SREG)]>;
      344  (implicit SREG)]>;
      348  let Uses = [SREG] in
      355  (implicit SREG)]>;
      364  let Uses = [SREG] in
      369  (implicit SREG)]>;
      378  (implicit SREG)]>,
      [all …]

D | AVRRegisterInfo.td
      211  def SREG : AVRReg<14, "FLAGS">, DwarfRegNum<[88]>;
      212  def CCR : RegisterClass<"AVR", [i8], 8, (add SREG)>

D | AVRInstrFormats.td
      553  let Defs = [SREG];
      567  let Uses = [SREG];
      575  let Defs = [SREG];
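Across the AVR backend files above, SREG is the 8-bit status (flags) register: AVRRegisterInfo.td defines it as DWARF register 88 and puts it in the one-register CCR class, while the Defs = [SREG] / Uses = [SREG] lists in AVRInstrInfo.td and AVRInstrFormats.td record which instructions clobber or read the flags. As a hedged sketch (not code from these files), TableGen emits those lists into the generated instruction descriptions, where they can be queried through LLVM's MCInstrDesc API:

#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"

// True when the opcode's TableGen entry lists SREG in Defs
// (e.g. "let Defs = [SREG]" in AVRInstrFormats.td). SREGNum would be
// AVR::SREG from the generated AVRGenRegisterInfo.inc.
bool clobbersFlags(const llvm::MCInstrInfo &MCII, unsigned Opcode,
                   unsigned SREGNum) {
  return MCII.get(Opcode).hasImplicitDefOfPhysReg(SREGNum);
}

// True when the entry lists SREG in Uses ("let Uses = [SREG]").
bool readsFlags(const llvm::MCInstrInfo &MCII, unsigned Opcode,
                unsigned SREGNum) {
  return MCII.get(Opcode).hasImplicitUseOfPhysReg(SREGNum);
}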
/external/llvm/test/CodeGen/AMDGPU/

D | trunc-store-i1.ll
      7   ; SI: s_and_b32 [[SREG:s[0-9]+]], [[LOAD]], 1
      8   ; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], [[SREG]]
      26  ; SI: s_and_b32 [[SREG:s[0-9]+]], [[LOAD]], 1
      27  ; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], [[SREG]]

D | setuo.ll
      5   ; CHECK: v_cmp_u_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]

D | seto.ll
      5   ; CHECK: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]

D | debugger-emit-prologue.ll
      5   ; CHECK: debug_private_segment_buffer_sgpr = [[SREG:[0-9]+]]
      20  ; CHECK: DebuggerPrivateSegmentBufferSGPR: s[[SREG]]

D | trunc.ll
      23  ; SI-DAG: s_load_dword [[SREG:s[0-9]+]],
      24  ; SI: s_lshl_b32 [[SHL:s[0-9]+]], [[SREG]], 2

D | global_atomics_i64.ll
      862  ; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x11940
      863  ; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{…

D | global_atomics.ll
      14  ; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x8ca0
      15  ; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
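All of the .ll hits above and below use FileCheck's pattern-variable syntax: [[SREG:s[0-9]+]] matches a scalar-register name against the regex and binds the matched text to SREG, and each later [[SREG]] must match that exact same text, so a test can require register reuse without hard-coding s5 versus s7. A self-contained C++ illustration of this capture-then-reuse idea (an analogy, not FileCheck's actual implementation):

#include <iostream>
#include <regex>
#include <string>

int main() {
    // Two lines of generated assembly, as a FileCheck run might see them.
    std::string line1 = "s_and_b32 s5, s3, 1";
    std::string line2 = "v_mov_b32_e32 v0, s5";

    // [[SREG:s[0-9]+]] -- capture the first scalar register on line 1.
    std::smatch m;
    if (!std::regex_search(line1, m, std::regex("s[0-9]+"))) return 1;
    std::string sreg = m.str();  // "s5"

    // [[SREG]] -- a later use must repeat the exact captured name.
    bool reused = line2.find(sreg) != std::string::npos;
    std::cout << "captured " << sreg << ", reused: " << std::boolalpha
              << reused << '\n';  // captured s5, reused: true
    return 0;
}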
/external/llvm/test/CodeGen/ARM/

D | fp16-args.ll
      35  ; HARD: vcvtb.f16.f32 [[SREG:s[0-9]+]], {{s[0-9]+}}
      36  ; HARD-NEXT: vmov [[REG0:r[0-9]+]], [[SREG]]
/external/llvm/test/CodeGen/AArch64/

D | arm64-sitofp-combine-chains.ll
      14  ; CHECK: ldr [[SREG:s[0-9]+]], [x[[VARBASE]],

D | arm64-xaluo.ll
      196  ; CHECK-NEXT: lsr x[[SREG:[0-9]+]], x[[MREG]], #32
      197  ; CHECK-NEXT: cmp w[[SREG]], w[[MREG]], asr #31
      366  ; CHECK-NEXT: lsr x[[SREG:[0-9]+]], x[[MREG]], #32
      367  ; CHECK-NEXT: cmp w[[SREG]], w[[MREG]], asr #31
      556  ; CHECK-NEXT: lsr x[[SREG:[0-9]+]], x8, #32
      557  ; CHECK-NEXT: cmp w[[SREG]], w[[MREG]], asr #31

D | arm64-inline-asm.ll
      251  ; CHECK: fmov [[SREG:s[0-9]+]], {{w[0-9]+}}
      252  ; CHECK: sqxtn h0, [[SREG]]
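The arm64-xaluo.ll lines encode the standard check for 32-bit signed multiply overflow: the full 64-bit product sits in x[[MREG]], the lsr ... #32 moves its high word into the register captured as SREG, and the cmp against the low word shifted asr #31 differs exactly when the high word is not the sign extension of the low word. A C++ sketch of the same test (names and the arithmetic-right-shift assumption on int32_t are illustrative):

#include <cstdint>
#include <iostream>

// Overflow check mirroring "lsr xS, xM, #32" + "cmp wS, wM, asr #31":
// a 32x32->64 signed multiply overflows i32 exactly when the high 32
// bits differ from the sign extension of the low 32 bits.
bool smulo32(int32_t a, int32_t b, int32_t* out) {
    int64_t prod = (int64_t)a * (int64_t)b;  // full 64-bit product (xM)
    int32_t lo = (int32_t)prod;              // wM: low word, the i32 result
    int32_t hi = (int32_t)(prod >> 32);      // lsr xS, xM, #32 -> wS
    *out = lo;
    return hi != (lo >> 31);                 // cmp wS, wM, asr #31
}

int main() {
    int32_t r;
    std::cout << smulo32(1 << 20, 1 << 20, &r) << '\n';  // 1: 2^40 overflows
    std::cout << smulo32(-3, 7, &r) << '\n';             // 0: -21 fits in i32
    return 0;
}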
/external/vixl/src/aarch64/

D | operands-aarch64.cc
      170  #define SREG(n) s##n,  (macro)
      171  const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)};
      172  #undef SREG
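This last hit is a classic X-macro: SREG(n) token-pastes an s-register name, AARCH64_REGISTER_CODE_LIST(SREG) stamps the macro out once per register code to build the sregisters table, and #undef cleans up. A self-contained sketch of the pattern, with a hypothetical four-entry REGISTER_CODE_LIST standing in for vixl's full 0–31 list:

#include <iostream>

struct VRegisterSketch {
    int code;
    const char* name;
};

// Stand-ins for vixl's s0..s31 register objects (only four shown).
const VRegisterSketch s0{0, "s0"}, s1{1, "s1"}, s2{2, "s2"}, s3{3, "s3"};

// Shortened stand-in for AARCH64_REGISTER_CODE_LIST, which applies the
// macro R to every register code 0..31.
#define REGISTER_CODE_LIST(R) R(0) R(1) R(2) R(3)

// SREG(n) pastes "s" and the code, yielding s0, s1, ... as initializers.
#define SREG(n) s##n,
const VRegisterSketch sregisters[] = {REGISTER_CODE_LIST(SREG)};
#undef SREG

int main() {
    for (const auto& r : sregisters) std::cout << r.name << ' ';
    std::cout << '\n';  // prints: s0 s1 s2 s3
    return 0;
}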