/external/llvm/test/CodeGen/X86/ |
D | avx-intel-ocl.ll |
  69 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  70 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  71 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  72 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  73 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  74 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  75 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  76 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  77 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  78 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  [all …]
|
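The {{...}} blocks in the avx-intel-ocl.ll checks above are FileCheck regular expressions: the class %ymm([6-9]|1[0-5]) matches exactly the registers %ymm6 through %ymm15 whose 32-byte spills the test expects to see. A minimal standalone C++ demonstration of that character class (an illustration of the regex only; FileCheck itself matches the pattern in context with the surrounding literal text):

    #include <cassert>
    #include <regex>

    int main() {
      // The FileCheck class {{%ymm([6-9]|1[0-5])}} from avx-intel-ocl.ll,
      // checked here with std::regex purely as an illustration.
      const std::regex YmmSpillReg("%ymm([6-9]|1[0-5])");
      assert(std::regex_match("%ymm6", YmmSpillReg));   // low end of the range
      assert(std::regex_match("%ymm15", YmmSpillReg));  // high end of the range
      assert(!std::regex_match("%ymm5", YmmSpillReg));  // below the range
      assert(!std::regex_match("%ymm16", YmmSpillReg)); // above the range
      return 0;
    }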
D | sse-intel-ocl.ll |
  73 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  74 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  75 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  76 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  77 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  78 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  79 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  80 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
|
D | avx512-intel-ocl.ll |
  64 ; WIN64: vmovups %zmm21, {{.*(%rbp).*}} # 64-byte Spill
  65 ; WIN64: vmovups %zmm6, {{.*(%rbp).*}} # 64-byte Spill
  71 ; X64: kmovw %k7, {{.*}}(%rsp) ## 8-byte Folded Spill
  72 ; X64: kmovw %k6, {{.*}}(%rsp) ## 8-byte Folded Spill
  73 ; X64: kmovw %k5, {{.*}}(%rsp) ## 8-byte Folded Spill
  74 ; X64: kmovw %k4, {{.*}}(%rsp) ## 8-byte Folded Spill
  75 ; X64: vmovups %zmm31, {{.*}}(%rsp) ## 64-byte Spill
  76 ; X64: vmovups %zmm16, {{.*}}(%rsp) ## 64-byte Spill
|
D | statepoint-stackmap-format.ll |
  126 ; Direct Spill Slot [RSP+0]
  146 ; Direct Spill Slot [RSP+16]
  151 ; Direct Spill Slot [RSP+8]
  156 ; Direct Spill Slot [RSP+16]
  161 ; Direct Spill Slot [RSP+16]
  195 ; Direct Spill Slot [RSP+0]
  215 ; Direct Spill Slot [RSP+16]
  220 ; Direct Spill Slot [RSP+8]
  225 ; Direct Spill Slot [RSP+16]
  230 ; Direct Spill Slot [RSP+16]
|
D | 2013-10-14-FastISel-incorrect-vreg.ll |
  19 ; Spill %arg2.
  21 ; Spill %loaded_ptr.
  58 ; Spill %arg2.
  60 ; Spill %loaded_ptr.
  97 ; Spill %arg2.
  99 ; Spill %loaded_ptr.
|
D | unaligned-spill-folding.ll |
  37 ; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill
  42 ; ALIGNED: movdqa {{.*}} # 16-byte Spill
  47 ; FORCEALIGNED: movdqa {{.*}} # 16-byte Spill
|
D | pmul.ll |
  259 ; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
  260 ; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
  277 ; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
  278 ; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
  288 ; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
  289 ; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
  306 ; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
  307 ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
  328 ; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
  329 ; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
D | sink-cheap-instructions.ll |
  7 ; CHECK: Spill
  8 ; SINK-NOT: Spill
|
D | win32-seh-catchpad-realign.ll |
  50 ; Spill EBP
  52 ; Spill ESP
|
D | win-catchpad-varargs.ll |
  38 ; X64: movl $-1, -20(%rbp) # 4-byte Folded Spill
  60 ; X64: movl %eax, -20(%rbp) # 4-byte Spill
|
D | fold-tied-op.ll |
  9 ; CHECK: shldl {{.*#+}} 4-byte Folded Spill
  11 ; CHECK: shldl {{.*#+}} 4-byte Folded Spill
|
D | statepoint-allocas.ll |
  91 ; Direct Spill Slot [RSP+0]
  120 ; Direct Spill Slot [RSP+0]
|
D | pr23103.ll | 12 ; CHECK-NEXT: vmovsd %xmm0, {{.*}}(%rsp) {{.*#+}} 8-byte Spill
|
D | win64_eh.ll |
  162 ; WIN64: movaps %xmm7, -16(%rbp) # 16-byte Spill
  164 ; WIN64: movaps %xmm6, -32(%rbp) # 16-byte Spill
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIMachineFunctionInfo.h |
  264 void setHasSpilledSGPRs(bool Spill = true) {
  265   HasSpilledSGPRs = Spill;
  272 void setHasSpilledVGPRs(bool Spill = true) {
  273   HasSpilledVGPRs = Spill;
|
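The SIMachineFunctionInfo.h hit above is the bookkeeping side of AMDGPU spilling: the per-function info keeps "has spilled SGPRs/VGPRs" flags that spill insertion switches on. A reduced, self-contained sketch of that pattern, assuming the real setters do nothing beyond storing the flag (the actual class carries far more state):

    // Self-contained sketch of the spill-flag bookkeeping shown above.
    // Only the two "has spilled" flags and their accessors are modeled.
    class FunctionSpillInfo {
      bool HasSpilledSGPRs = false;
      bool HasSpilledVGPRs = false;

    public:
      bool hasSpilledSGPRs() const { return HasSpilledSGPRs; }
      void setHasSpilledSGPRs(bool Spill = true) { HasSpilledSGPRs = Spill; }

      bool hasSpilledVGPRs() const { return HasSpilledVGPRs; }
      void setHasSpilledVGPRs(bool Spill = true) { HasSpilledVGPRs = Spill; }
    };

A spill inserter would call setHasSpilledSGPRs() when it emits the first scalar-register spill, and later passes can then query the flag instead of rescanning the function.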
D | SIMachineFunctionInfo.cpp |
  155 struct SpilledReg Spill; in getSpilledReg() local
  169 Spill.VGPR = LaneVGPRs[LaneVGPRIdx]; in getSpilledReg()
  170 Spill.Lane = Lane; in getSpilledReg()
  171 return Spill; in getSpilledReg()
|
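The SIMachineFunctionInfo.cpp hit shows getSpilledReg() filling in a SpilledReg that pairs a VGPR with a lane: spilled SGPR values are parked in individual lanes of dedicated VGPRs rather than written straight to scratch memory. The exact index-to-lane computation is not visible in the hit, so the sketch below assumes 64 lanes per VGPR (one 32-bit slot per wavefront lane) purely for illustration:

    #include <vector>

    // Sketch of the SGPR-to-VGPR-lane mapping suggested by the
    // getSpilledReg() hit above. Register numbers are plain integers here;
    // in LLVM they are target register IDs.
    struct SpilledReg {
      unsigned VGPR = 0; // VGPR holding the spilled SGPR value
      unsigned Lane = 0; // lane of that VGPR holding the value
    };

    // ASSUMPTION: 64 lanes per VGPR; the real computation may differ.
    constexpr unsigned LanesPerVGPR = 64;

    SpilledReg getSpilledReg(const std::vector<unsigned> &LaneVGPRs,
                             unsigned SpillIndex) {
      SpilledReg Spill;
      // Assumes enough lane VGPRs were reserved for this spill index.
      unsigned LaneVGPRIdx = SpillIndex / LanesPerVGPR;
      Spill.VGPR = LaneVGPRs[LaneVGPRIdx];    // mirrors line 169 above
      Spill.Lane = SpillIndex % LanesPerVGPR; // mirrors line 170 above
      return Spill;
    }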
D | SIRegisterInfo.cpp |
  282 struct SIMachineFunctionInfo::SpilledReg Spill = in eliminateFrameIndex() local
  285 if (Spill.VGPR == AMDGPU::NoRegister) { in eliminateFrameIndex()
  292 Spill.VGPR) in eliminateFrameIndex()
  294 .addImm(Spill.Lane); in eliminateFrameIndex()
  315 struct SIMachineFunctionInfo::SpilledReg Spill = in eliminateFrameIndex() local
  318 if (Spill.VGPR == AMDGPU::NoRegister) { in eliminateFrameIndex()
  326 .addReg(Spill.VGPR) in eliminateFrameIndex()
  327 .addImm(Spill.Lane) in eliminateFrameIndex()
|
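The two clusters in the SIRegisterInfo.cpp hit are the two directions of the same mechanism inside eliminateFrameIndex(): one builds an instruction that deposits the spilled value into Spill.Lane of Spill.VGPR, the other reads it back, with a fallback path when Spill.VGPR is AMDGPU::NoRegister. The opcodes themselves are not visible in the hit, so the following is only a behavioral model of a lane write and read, not the machine instructions the backend emits:

    #include <array>
    #include <cstdint>

    // Behavioral model of the addReg(Spill.VGPR)/addImm(Spill.Lane) pattern
    // above: a VGPR is modeled as 64 lanes of 32 bits, and a scalar value is
    // written into one lane on spill and read back on reload. Real code emits
    // dedicated lane-access machine instructions instead.
    using VGPR = std::array<uint32_t, 64>;

    void spillToLane(VGPR &V, unsigned Lane, uint32_t SGPRValue) {
      V[Lane] = SGPRValue; // write the scalar into the chosen lane
    }

    uint32_t reloadFromLane(const VGPR &V, unsigned Lane) {
      return V[Lane]; // read the scalar back out of the same lane
    }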
/external/llvm/lib/CodeGen/ |
D | RegAllocBasic.cpp |
  191 LiveInterval &Spill = *Intfs[i]; in spillInterferences() local
  194 if (!VRM->hasPhys(Spill.reg)) in spillInterferences()
  199 Matrix->unassign(Spill); in spillInterferences()
  202 LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM); in spillInterferences()
|
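The RegAllocBasic.cpp hit is the heart of the basic allocator's eviction path in spillInterferences(): every live interval that interferes with the current candidate is unassigned from its physical register and then spilled through a LiveRangeEdit. A simplified standalone rendering of that loop, with the LLVM types replaced by minimal stubs so the sketch compiles on its own:

    #include <vector>

    // Minimal stand-ins for the LLVM types named in the hit above.
    struct LiveInterval { unsigned reg = 0; };

    struct VirtRegMap {
      bool hasPhys(unsigned /*VirtReg*/) const { return true; } // stub: pretend assigned
    };

    struct LiveRegMatrix {
      void unassign(LiveInterval & /*LI*/) {} // stub: would clear the assignment
    };

    struct Spiller {
      void spill(LiveInterval & /*LI*/) {} // stub: LLVM spills via LiveRangeEdit
    };

    // Sketch of the eviction loop: each interval interfering with the current
    // allocation candidate loses its physical register and is spilled,
    // freeing that register for the candidate.
    void spillInterferences(std::vector<LiveInterval *> &Intfs, VirtRegMap &VRM,
                            LiveRegMatrix &Matrix, Spiller &S) {
      for (LiveInterval *Intf : Intfs) {
        LiveInterval &Spill = *Intf;
        if (!VRM.hasPhys(Spill.reg))
          continue; // may already be unassigned from an earlier iteration
        Matrix.unassign(Spill);
        S.spill(Spill);
      }
    }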
/external/llvm/test/MC/Mips/ |
D | elf-tls.s |
  34 sw $ra, 20($sp) # 4-byte Folded Spill
  66 sw $ra, 20($sp) # 4-byte Folded Spill
  98 sw $ra, 20($sp) # 4-byte Folded Spill
|
D | elf-N64.s |
  34 sd $ra, 8($sp) # 8-byte Folded Spill
  35 sd $gp, 0($sp) # 8-byte Folded Spill
|
D | r-mips-got-disp.s |
  27 sd $ra, 8($sp) # 8-byte Folded Spill
  28 sd $gp, 0($sp) # 8-byte Folded Spill
|
D | xgot.s | 38 sw $ra, 20($sp) # 4-byte Folded Spill
|
/external/llvm/test/CodeGen/Mips/ |
D | stldst.ll |
  36 ; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill
  38 ; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill
|
/external/llvm/test/CodeGen/Thumb2/ |
D | aligned-spill.ll |
  47 ; Spill 7 d-registers.
  71 ; Spill 7 d-registers, leave a hole.
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-platform-reg.ll | 22 ; CHECK-RESERVE-X18: Spill
|