1 //===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "X86AsmInstrumentation.h"
11 #include "MCTargetDesc/X86MCTargetDesc.h"
12 #include "X86Operand.h"
13 #include "llvm/ADT/Triple.h"
14 #include "llvm/ADT/Twine.h"
15 #include "llvm/MC/MCContext.h"
16 #include "llvm/MC/MCDwarf.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCInstBuilder.h"
20 #include "llvm/MC/MCInstrInfo.h"
21 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
22 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
23 #include "llvm/MC/MCRegisterInfo.h"
24 #include "llvm/MC/MCStreamer.h"
25 #include "llvm/MC/MCSubtargetInfo.h"
26 #include "llvm/MC/MCTargetOptions.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/SMLoc.h"
30 #include <algorithm>
31 #include <cassert>
32 #include <cstdint>
33 #include <limits>
34 #include <memory>
35 #include <vector>
36 
37 // The following comment describes how assembly instrumentation works.
38 // Currently we have only AddressSanitizer instrumentation, but we're
39 // planning to implement MemorySanitizer for inline assembly too. If
40 // you're not familiar with the AddressSanitizer algorithm, please read
41 // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
42 //
43 // When inline assembly is parsed by an instance of X86AsmParser, all
44 // instructions are emitted via the EmitInstruction method. That's the
45 // place where X86AsmInstrumentation analyzes an instruction and
46 // decides whether the instruction should be emitted as is or whether
47 // instrumentation is required. The latter case happens when an
48 // instruction reads from or writes to memory. Currently, the instruction
49 // opcode is explicitly checked, and if an instruction has a memory operand
50 // (for instance, movq (%rsi, %rcx, 8), %rax), it should be
51 // instrumented.  There also exist instructions that modify
52 // memory but don't have an explicit memory operand, for instance,
53 // movs.
54 //
55 // Let's first consider 8-byte memory accesses when an instruction
56 // has an explicit memory operand. In this case we need two registers -
57 // AddressReg to compute the address of the memory cells being accessed
58 // and ShadowReg to compute the corresponding shadow address. So, we need
59 // to spill both registers before the instrumentation code and restore them
60 // after instrumentation. Thus, in general, the instrumentation code will
61 // look like this:
62 // PUSHF  # Store flags, otherwise they will be overwritten
63 // PUSH AddressReg  # spill AddressReg
64 // PUSH ShadowReg   # spill ShadowReg
65 // LEA MemOp, AddressReg  # compute address of the memory operand
66 // MOV AddressReg, ShadowReg
67 // SHR ShadowReg, 3
68 // # ShadowOffset(AddressReg >> 3) contains address of a shadow
69 // # corresponding to MemOp.
70 // CMP ShadowOffset(ShadowReg), 0  # test shadow value
71 // JZ .Done  # when the shadow value is zero, everything is fine
72 // MOV AddressReg, RDI
73 // # Call __asan_report function with AddressReg as an argument
74 // CALL __asan_report
75 // .Done:
76 // POP ShadowReg  # Restore ShadowReg
77 // POP AddressReg  # Restore AddressReg
78 // POPF  # Restore flags
79 //
80 // Memory accesses of other sizes (1, 2, 4 and 16 bytes) are
81 // handled in a similar manner, but small memory accesses (less than 8
82 // bytes) require an additional ScratchReg, which holds the shadow value.
83 //
84 // Suppose, instead, we're instrumenting an instruction like movs: only the
85 // contents of RDI, RDI + AccessSize * RCX - 1, RSI and RSI + AccessSize *
86 // RCX - 1 are checked.  In this case there is no need to spill and restore
87 // AddressReg, ShadowReg or the flags four times; they're saved on the stack
88 // just once, before the instrumentation of these four addresses, and restored
89 // at the end of the instrumentation.
90 //
91 // Several things complicate this simple algorithm:
92 // * The instrumented memory operand can have RSP as a base or an index
93 //   register.  So we need to add a constant offset before computing the
94 //   memory address, since flags, AddressReg, ShadowReg, etc. have
95 //   already been stored on the stack and RSP has been modified.
96 // * Debug info (usually DWARF) should be adjusted, because sometimes
97 //   RSP is used as a frame register. So we need to select some
98 //   register as a frame register and temporarily override the current CFA
99 //   register.
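//
// As a concrete illustration of the check above: with the 64-bit shadow
// offset defined below (kShadowOffset = 0x7fff8000), the shadow byte for an
// address Addr is loaded from (Addr >> 3) + 0x7fff8000, and a zero shadow
// byte means the whole 8-byte granule at Addr is addressable, so the CMP/JZ
// pair falls through to .Done.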
100 
101 using namespace llvm;
102 
103 static cl::opt<bool> ClAsanInstrumentAssembly(
104     "asan-instrument-assembly",
105     cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
106     cl::init(false));
107 
108 static const int64_t MinAllowedDisplacement =
109     std::numeric_limits<int32_t>::min();
110 static const int64_t MaxAllowedDisplacement =
111     std::numeric_limits<int32_t>::max();
112 
113 static int64_t ApplyDisplacementBounds(int64_t Displacement) {
114   return std::max(std::min(MaxAllowedDisplacement, Displacement),
115                   MinAllowedDisplacement);
116 }
117 
118 static void CheckDisplacementBounds(int64_t Displacement) {
119   assert(Displacement >= MinAllowedDisplacement &&
120          Displacement <= MaxAllowedDisplacement);
121 }
122 
123 static bool IsStackReg(unsigned Reg) {
124   return Reg == X86::RSP || Reg == X86::ESP;
125 }
126 
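// Accesses of fewer than 8 bytes need an extra scratch register: when the
// shadow byte is non-zero, the offset of the last accessed byte within its
// 8-byte granule is compared against the shadow value to decide whether the
// access still falls into the addressable prefix of the granule.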
127 static bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
128 
129 namespace {
130 
131 class X86AddressSanitizer : public X86AsmInstrumentation {
132 public:
133   struct RegisterContext {
134   private:
135     enum RegOffset {
136       REG_OFFSET_ADDRESS = 0,
137       REG_OFFSET_SHADOW,
138       REG_OFFSET_SCRATCH
139     };
140 
141   public:
142     RegisterContext(unsigned AddressReg, unsigned ShadowReg,
143                     unsigned ScratchReg) {
144       BusyRegs.push_back(convReg(AddressReg, 64));
145       BusyRegs.push_back(convReg(ShadowReg, 64));
146       BusyRegs.push_back(convReg(ScratchReg, 64));
147     }
148 
149     unsigned AddressReg(unsigned Size) const {
150       return convReg(BusyRegs[REG_OFFSET_ADDRESS], Size);
151     }
152 
153     unsigned ShadowReg(unsigned Size) const {
154       return convReg(BusyRegs[REG_OFFSET_SHADOW], Size);
155     }
156 
157     unsigned ScratchReg(unsigned Size) const {
158       return convReg(BusyRegs[REG_OFFSET_SCRATCH], Size);
159     }
160 
161     void AddBusyReg(unsigned Reg) {
162       if (Reg != X86::NoRegister)
163         BusyRegs.push_back(convReg(Reg, 64));
164     }
165 
166     void AddBusyRegs(const X86Operand &Op) {
167       AddBusyReg(Op.getMemBaseReg());
168       AddBusyReg(Op.getMemIndexReg());
169     }
170 
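    // Pick a general-purpose register that is used neither by the
    // instrumented operand nor by the instrumentation itself; it temporarily
    // holds the frame/CFA register while the stack pointer is being adjusted.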
171     unsigned ChooseFrameReg(unsigned Size) const {
172       static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
173                                               X86::RCX, X86::RDX, X86::RDI,
174                                               X86::RSI };
175       for (unsigned Reg : Candidates) {
176         if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
177           return convReg(Reg, Size);
178       }
179       return X86::NoRegister;
180     }
181 
182   private:
183     unsigned convReg(unsigned Reg, unsigned Size) const {
184       return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, Size);
185     }
186 
187     std::vector<unsigned> BusyRegs;
188   };
189 
190   X86AddressSanitizer(const MCSubtargetInfo *&STI)
191       : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
192 
193   ~X86AddressSanitizer() override = default;
194 
195   // X86AsmInstrumentation implementation:
196   void InstrumentAndEmitInstruction(const MCInst &Inst, OperandVector &Operands,
197                                     MCContext &Ctx, const MCInstrInfo &MII,
198                                     MCStreamer &Out,
199                                     /* unused */ bool) override {
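    // A standalone REP prefix is parsed as a separate MCInst. Its emission is
    // deferred until the next instruction arrives so that the memory checks
    // for a rep-prefixed string instruction land before the prefix rather
    // than between the prefix and the instruction it modifies.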
200     InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
201     if (RepPrefix)
202       EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));
203 
204     InstrumentMOV(Inst, Operands, Ctx, MII, Out);
205 
206     RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
207     if (!RepPrefix)
208       EmitInstruction(Out, Inst);
209   }
210 
211   // Adjusts the stack and saves all registers used in the instrumentation.
212   virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
213                                             MCContext &Ctx,
214                                             MCStreamer &Out) = 0;
215 
216   // Restores all registers used in the instrumentation and adjusts the stack.
217   virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
218                                             MCContext &Ctx,
219                                             MCStreamer &Out) = 0;
220 
221   virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
222                                          bool IsWrite,
223                                          const RegisterContext &RegCtx,
224                                          MCContext &Ctx, MCStreamer &Out) = 0;
225   virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
226                                          bool IsWrite,
227                                          const RegisterContext &RegCtx,
228                                          MCContext &Ctx, MCStreamer &Out) = 0;
229 
230   virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
231                                   MCStreamer &Out) = 0;
232 
233   void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
234                             const RegisterContext &RegCtx, MCContext &Ctx,
235                             MCStreamer &Out);
236   void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
237                           unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);
238 
239   void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
240                       MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
241   void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
242                      MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
243 
244 protected:
245   void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
246 
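  // Emit an LEA that materializes the address of memory operand Op into Reg.
  // A parsed X86 memory operand expands to five MCOperands (base, scale,
  // index, displacement and segment), hence addMemOperands(Inst, 5).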
247   void EmitLEA(X86Operand &Op, unsigned Size, unsigned Reg, MCStreamer &Out) {
248     assert(Size == 32 || Size == 64);
249     MCInst Inst;
250     Inst.setOpcode(Size == 32 ? X86::LEA32r : X86::LEA64r);
251     Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, Size)));
252     Op.addMemOperands(Inst, 5);
253     EmitInstruction(Out, Inst);
254   }
255 
256   void ComputeMemOperandAddress(X86Operand &Op, unsigned Size,
257                                 unsigned Reg, MCContext &Ctx, MCStreamer &Out);
258 
259   // Creates a new memory operand with Displacement added to the original
260   // displacement. Residue receives whatever part of the total displacement
261   // could not be encoded because it exceeds the 32-bit limit.
262   std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
263                                               int64_t Displacement,
264                                               MCContext &Ctx, int64_t *Residue);
265 
266   bool is64BitMode() const {
267     return STI->getFeatureBits()[X86::Mode64Bit];
268   }
269 
270   bool is32BitMode() const {
271     return STI->getFeatureBits()[X86::Mode32Bit];
272   }
273 
274   bool is16BitMode() const {
275     return STI->getFeatureBits()[X86::Mode16Bit];
276   }
277 
278   unsigned getPointerWidth() {
279     if (is16BitMode()) return 16;
280     if (is32BitMode()) return 32;
281     if (is64BitMode()) return 64;
282     llvm_unreachable("invalid mode");
283   }
284 
285   // True when the previous instruction was actually a REP prefix.
286   bool RepPrefix;
287 
288   // Offset of the stack pointer from its original value; negative after spills.
289   int64_t OrigSPOffset;
290 };
291 
292 void X86AddressSanitizer::InstrumentMemOperand(
293     X86Operand &Op, unsigned AccessSize, bool IsWrite,
294     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
295   assert(Op.isMem() && "Op should be a memory operand.");
296   assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
297          "AccessSize should be a power of two, less than or equal to 16.");
298   // FIXME: take into account load/store alignment.
299   if (IsSmallMemAccess(AccessSize))
300     InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
301   else
302     InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
303 }
304 
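// Shared helper for movs instrumentation: checks the first and the last
// element of both the source and the destination range; checking the whole
// ranges is left as a FIXME (see below).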
305 void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
306                                              unsigned CntReg,
307                                              unsigned AccessSize,
308                                              MCContext &Ctx, MCStreamer &Out) {
309   // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
310   // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
311   RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
312                          IsSmallMemAccess(AccessSize)
313                              ? X86::RBX
314                              : X86::NoRegister /* ScratchReg */);
315   RegCtx.AddBusyReg(DstReg);
316   RegCtx.AddBusyReg(SrcReg);
317   RegCtx.AddBusyReg(CntReg);
318 
319   InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
320 
321   // Test (%SrcReg)
322   {
323     const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
324     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
325         getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
326     InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
327                          Out);
328   }
329 
330   // Test -1(%SrcReg, %CntReg, AccessSize)
331   {
332     const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
333     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
334         getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
335         SMLoc()));
336     InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
337                          Out);
338   }
339 
340   // Test (%DstReg)
341   {
342     const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
343     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
344         getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
345     InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
346   }
347 
348   // Test -1(%DstReg, %CntReg, AccessSize)
349   {
350     const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
351     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
352         getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
353         SMLoc()));
354     InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
355   }
356 
357   InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
358 }
359 
360 void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
361                                          OperandVector &Operands,
362                                          MCContext &Ctx, const MCInstrInfo &MII,
363                                          MCStreamer &Out) {
364   // Access size in bytes.
365   unsigned AccessSize = 0;
366 
367   switch (Inst.getOpcode()) {
368   case X86::MOVSB:
369     AccessSize = 1;
370     break;
371   case X86::MOVSW:
372     AccessSize = 2;
373     break;
374   case X86::MOVSL:
375     AccessSize = 4;
376     break;
377   case X86::MOVSQ:
378     AccessSize = 8;
379     break;
380   default:
381     return;
382   }
383 
384   InstrumentMOVSImpl(AccessSize, Ctx, Out);
385 }
386 
387 void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
388                                         OperandVector &Operands, MCContext &Ctx,
389                                         const MCInstrInfo &MII,
390                                         MCStreamer &Out) {
391   // Access size in bytes.
392   unsigned AccessSize = 0;
393 
394   switch (Inst.getOpcode()) {
395   case X86::MOV8mi:
396   case X86::MOV8mr:
397   case X86::MOV8rm:
398     AccessSize = 1;
399     break;
400   case X86::MOV16mi:
401   case X86::MOV16mr:
402   case X86::MOV16rm:
403     AccessSize = 2;
404     break;
405   case X86::MOV32mi:
406   case X86::MOV32mr:
407   case X86::MOV32rm:
408     AccessSize = 4;
409     break;
410   case X86::MOV64mi32:
411   case X86::MOV64mr:
412   case X86::MOV64rm:
413     AccessSize = 8;
414     break;
415   case X86::MOVAPDmr:
416   case X86::MOVAPSmr:
417   case X86::MOVAPDrm:
418   case X86::MOVAPSrm:
419     AccessSize = 16;
420     break;
421   default:
422     return;
423   }
424 
425   const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();
426 
427   for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
428     assert(Operands[Ix]);
429     MCParsedAsmOperand &Op = *Operands[Ix];
430     if (Op.isMem()) {
431       X86Operand &MemOp = static_cast<X86Operand &>(Op);
432       RegisterContext RegCtx(
433           X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
434           IsSmallMemAccess(AccessSize) ? X86::RCX
435                                        : X86::NoRegister /* ScratchReg */);
436       RegCtx.AddBusyRegs(MemOp);
437       InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
438       InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
439       InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
440     }
441   }
442 }
443 
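// Compute the effective address of Op into Reg with an LEA. When the operand
// is RSP-relative, the displacement is first corrected by OrigSPOffset to
// undo the stack adjustment made by the instrumentation prologue; any part of
// the displacement that does not fit into 32 bits is applied with extra LEAs.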
444 void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
445                                                    unsigned Size,
446                                                    unsigned Reg, MCContext &Ctx,
447                                                    MCStreamer &Out) {
448   int64_t Displacement = 0;
449   if (IsStackReg(Op.getMemBaseReg()))
450     Displacement -= OrigSPOffset;
451   if (IsStackReg(Op.getMemIndexReg()))
452     Displacement -= OrigSPOffset * Op.getMemScale();
453 
454   assert(Displacement >= 0);
455 
456   // Emit Op as is.
457   if (Displacement == 0) {
458     EmitLEA(Op, Size, Reg, Out);
459     return;
460   }
461 
462   int64_t Residue;
463   std::unique_ptr<X86Operand> NewOp =
464       AddDisplacement(Op, Displacement, Ctx, &Residue);
465   EmitLEA(*NewOp, Size, Reg, Out);
466 
467   while (Residue != 0) {
468     const MCConstantExpr *Disp =
469         MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
470     std::unique_ptr<X86Operand> DispOp =
471         X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
472                               SMLoc());
473     EmitLEA(*DispOp, Size, Reg, Out);
474     Residue -= Disp->getValue();
475   }
476 }
477 
478 std::unique_ptr<X86Operand>
479 X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
480                                      MCContext &Ctx, int64_t *Residue) {
481   assert(Displacement >= 0);
482 
483   if (Displacement == 0 ||
484       (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
485     *Residue = Displacement;
486     return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
487                                  Op.getMemDisp(), Op.getMemBaseReg(),
488                                  Op.getMemIndexReg(), Op.getMemScale(),
489                                  SMLoc(), SMLoc());
490   }
491 
492   int64_t OrigDisplacement =
493       static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
494   CheckDisplacementBounds(OrigDisplacement);
495   Displacement += OrigDisplacement;
496 
497   int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
498   CheckDisplacementBounds(NewDisplacement);
499 
500   *Residue = Displacement - NewDisplacement;
501   const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
502   return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
503                                Op.getMemBaseReg(), Op.getMemIndexReg(),
504                                Op.getMemScale(), SMLoc(), SMLoc());
505 }
506 
507 class X86AddressSanitizer32 : public X86AddressSanitizer {
508 public:
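  // Shadow memory offset used on 32-bit Linux:
  //   ShadowAddr = (Addr >> 3) + 0x20000000.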
509   static const long kShadowOffset = 0x20000000;
510 
511   X86AddressSanitizer32(const MCSubtargetInfo *&STI)
512       : X86AddressSanitizer(STI) {}
513 
514   ~X86AddressSanitizer32() override = default;
515 
516   unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
517     unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
518     if (FrameReg == X86::NoRegister)
519       return FrameReg;
520     return getX86SubSuperRegister(FrameReg, 32);
521   }
522 
523   void SpillReg(MCStreamer &Out, unsigned Reg) {
524     EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
525     OrigSPOffset -= 4;
526   }
527 
528   void RestoreReg(MCStreamer &Out, unsigned Reg) {
529     EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
530     OrigSPOffset += 4;
531   }
532 
533   void StoreFlags(MCStreamer &Out) {
534     EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
535     OrigSPOffset -= 4;
536   }
537 
538   void RestoreFlags(MCStreamer &Out) {
539     EmitInstruction(Out, MCInstBuilder(X86::POPF32));
540     OrigSPOffset += 4;
541   }
542 
543   void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
544                                     MCContext &Ctx,
545                                     MCStreamer &Out) override {
546     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
547     assert(LocalFrameReg != X86::NoRegister);
548 
549     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
550     unsigned FrameReg = GetFrameReg(Ctx, Out);
551     if (MRI && FrameReg != X86::NoRegister) {
552       SpillReg(Out, LocalFrameReg);
553       if (FrameReg == X86::ESP) {
554         Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
555         Out.EmitCFIRelOffset(
556             MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
557       }
558       EmitInstruction(
559           Out,
560           MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
561       Out.EmitCFIRememberState();
562       Out.EmitCFIDefCfaRegister(
563           MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
564     }
565 
566     SpillReg(Out, RegCtx.AddressReg(32));
567     SpillReg(Out, RegCtx.ShadowReg(32));
568     if (RegCtx.ScratchReg(32) != X86::NoRegister)
569       SpillReg(Out, RegCtx.ScratchReg(32));
570     StoreFlags(Out);
571   }
572 
573   void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
574                                     MCContext &Ctx,
575                                     MCStreamer &Out) override {
576     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
577     assert(LocalFrameReg != X86::NoRegister);
578 
579     RestoreFlags(Out);
580     if (RegCtx.ScratchReg(32) != X86::NoRegister)
581       RestoreReg(Out, RegCtx.ScratchReg(32));
582     RestoreReg(Out, RegCtx.ShadowReg(32));
583     RestoreReg(Out, RegCtx.AddressReg(32));
584 
585     unsigned FrameReg = GetFrameReg(Ctx, Out);
586     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
587       RestoreReg(Out, LocalFrameReg);
588       Out.EmitCFIRestoreState();
589       if (FrameReg == X86::ESP)
590         Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
591     }
592   }
593 
594   void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
595                                  bool IsWrite,
596                                  const RegisterContext &RegCtx,
597                                  MCContext &Ctx,
598                                  MCStreamer &Out) override;
599   void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
600                                  bool IsWrite,
601                                  const RegisterContext &RegCtx,
602                                  MCContext &Ctx,
603                                  MCStreamer &Out) override;
604   void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
605                           MCStreamer &Out) override;
606 
607 private:
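  // Report the failed access: realign the stack, clear the direction flag and
  // the MMX state, push the faulting address, and call the matching
  // __asan_report_(load|store)<AccessSize> runtime function through the PLT.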
608   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
609                           MCStreamer &Out, const RegisterContext &RegCtx) {
610     EmitInstruction(Out, MCInstBuilder(X86::CLD));
611     EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
612 
613     EmitInstruction(Out, MCInstBuilder(X86::AND32ri8)
614                              .addReg(X86::ESP)
615                              .addReg(X86::ESP)
616                              .addImm(-16));
617     EmitInstruction(
618         Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(32)));
619 
620     MCSymbol *FnSym = Ctx.getOrCreateSymbol(Twine("__asan_report_") +
621                                             (IsWrite ? "store" : "load") +
622                                             Twine(AccessSize));
623     const MCSymbolRefExpr *FnExpr =
624         MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
625     EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
626   }
627 };
628 
629 void X86AddressSanitizer32::InstrumentMemOperandSmall(
630     X86Operand &Op, unsigned AccessSize, bool IsWrite,
631     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
632   unsigned AddressRegI32 = RegCtx.AddressReg(32);
633   unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
634   unsigned ShadowRegI8 = RegCtx.ShadowReg(8);
635 
636   assert(RegCtx.ScratchReg(32) != X86::NoRegister);
637   unsigned ScratchRegI32 = RegCtx.ScratchReg(32);
638 
639   ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);
640 
641   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
642                            AddressRegI32));
643   EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
644                            .addReg(ShadowRegI32)
645                            .addReg(ShadowRegI32)
646                            .addImm(3));
647 
648   {
649     MCInst Inst;
650     Inst.setOpcode(X86::MOV8rm);
651     Inst.addOperand(MCOperand::createReg(ShadowRegI8));
652     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
653     std::unique_ptr<X86Operand> Op(
654         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
655                               SMLoc(), SMLoc()));
656     Op->addMemOperands(Inst, 5);
657     EmitInstruction(Out, Inst);
658   }
659 
660   EmitInstruction(
661       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
662   MCSymbol *DoneSym = Ctx.createTempSymbol();
663   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
664   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
665 
666   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
667                            AddressRegI32));
668   EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
669                            .addReg(ScratchRegI32)
670                            .addReg(ScratchRegI32)
671                            .addImm(7));
672 
673   switch (AccessSize) {
674   default: llvm_unreachable("Incorrect access size");
675   case 1:
676     break;
677   case 2: {
678     const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
679     std::unique_ptr<X86Operand> Op(
680         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
681                               SMLoc(), SMLoc()));
682     EmitLEA(*Op, 32, ScratchRegI32, Out);
683     break;
684   }
685   case 4:
686     EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
687                              .addReg(ScratchRegI32)
688                              .addReg(ScratchRegI32)
689                              .addImm(3));
690     break;
691   }
692 
693   EmitInstruction(
694       Out,
695       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
696   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
697                            ShadowRegI32));
698   EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
699 
700   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
701   EmitLabel(Out, DoneSym);
702 }
703 
704 void X86AddressSanitizer32::InstrumentMemOperandLarge(
705     X86Operand &Op, unsigned AccessSize, bool IsWrite,
706     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
707   unsigned AddressRegI32 = RegCtx.AddressReg(32);
708   unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
709 
710   ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);
711 
712   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
713                            AddressRegI32));
714   EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
715                            .addReg(ShadowRegI32)
716                            .addReg(ShadowRegI32)
717                            .addImm(3));
718   {
719     MCInst Inst;
720     switch (AccessSize) {
721     default: llvm_unreachable("Incorrect access size");
722     case 8:
723       Inst.setOpcode(X86::CMP8mi);
724       break;
725     case 16:
726       Inst.setOpcode(X86::CMP16mi);
727       break;
728     }
729     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
730     std::unique_ptr<X86Operand> Op(
731         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
732                               SMLoc(), SMLoc()));
733     Op->addMemOperands(Inst, 5);
734     Inst.addOperand(MCOperand::createImm(0));
735     EmitInstruction(Out, Inst);
736   }
737   MCSymbol *DoneSym = Ctx.createTempSymbol();
738   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
739   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
740 
741   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
742   EmitLabel(Out, DoneSym);
743 }
744 
745 void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
746                                                MCContext &Ctx,
747                                                MCStreamer &Out) {
748   StoreFlags(Out);
749 
750   // No need to test when ECX equals zero.
751   MCSymbol *DoneSym = Ctx.createTempSymbol();
752   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
753   EmitInstruction(
754       Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
755   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
756 
757   // Instrument first and last elements in src and dst range.
758   InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
759                      X86::ECX /* CntReg */, AccessSize, Ctx, Out);
760 
761   EmitLabel(Out, DoneSym);
762   RestoreFlags(Out);
763 }
764 
765 class X86AddressSanitizer64 : public X86AddressSanitizer {
766 public:
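  // Shadow memory offset used on x86-64 Linux:
  //   ShadowAddr = (Addr >> 3) + 0x7fff8000.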
767   static const long kShadowOffset = 0x7fff8000;
768 
769   X86AddressSanitizer64(const MCSubtargetInfo *&STI)
770       : X86AddressSanitizer(STI) {}
771 
772   ~X86AddressSanitizer64() override = default;
773 
774   unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
775     unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
776     if (FrameReg == X86::NoRegister)
777       return FrameReg;
778     return getX86SubSuperRegister(FrameReg, 64);
779   }
780 
781   void SpillReg(MCStreamer &Out, unsigned Reg) {
782     EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
783     OrigSPOffset -= 8;
784   }
785 
786   void RestoreReg(MCStreamer &Out, unsigned Reg) {
787     EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
788     OrigSPOffset += 8;
789   }
790 
791   void StoreFlags(MCStreamer &Out) {
792     EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
793     OrigSPOffset -= 8;
794   }
795 
796   void RestoreFlags(MCStreamer &Out) {
797     EmitInstruction(Out, MCInstBuilder(X86::POPF64));
798     OrigSPOffset += 8;
799   }
800 
801   void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
802                                     MCContext &Ctx,
803                                     MCStreamer &Out) override {
804     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
805     assert(LocalFrameReg != X86::NoRegister);
806 
807     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
808     unsigned FrameReg = GetFrameReg(Ctx, Out);
809     if (MRI && FrameReg != X86::NoRegister) {
810       SpillReg(Out, X86::RBP);
811       if (FrameReg == X86::RSP) {
812         Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
813         Out.EmitCFIRelOffset(
814             MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
815       }
816       EmitInstruction(
817           Out,
818           MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
819       Out.EmitCFIRememberState();
820       Out.EmitCFIDefCfaRegister(
821           MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
822     }
823 
824     EmitAdjustRSP(Ctx, Out, -128);
825     SpillReg(Out, RegCtx.ShadowReg(64));
826     SpillReg(Out, RegCtx.AddressReg(64));
827     if (RegCtx.ScratchReg(64) != X86::NoRegister)
828       SpillReg(Out, RegCtx.ScratchReg(64));
829     StoreFlags(Out);
830   }
831 
832   void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
833                                     MCContext &Ctx,
834                                     MCStreamer &Out) override {
835     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
836     assert(LocalFrameReg != X86::NoRegister);
837 
838     RestoreFlags(Out);
839     if (RegCtx.ScratchReg(64) != X86::NoRegister)
840       RestoreReg(Out, RegCtx.ScratchReg(64));
841     RestoreReg(Out, RegCtx.AddressReg(64));
842     RestoreReg(Out, RegCtx.ShadowReg(64));
843     EmitAdjustRSP(Ctx, Out, 128);
844 
845     unsigned FrameReg = GetFrameReg(Ctx, Out);
846     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
847       RestoreReg(Out, LocalFrameReg);
848       Out.EmitCFIRestoreState();
849       if (FrameReg == X86::RSP)
850         Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
851     }
852   }
853 
854   void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
855                                  bool IsWrite,
856                                  const RegisterContext &RegCtx,
857                                  MCContext &Ctx,
858                                  MCStreamer &Out) override;
859   void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
860                                  bool IsWrite,
861                                  const RegisterContext &RegCtx,
862                                  MCContext &Ctx,
863                                  MCStreamer &Out) override;
864   void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
865                           MCStreamer &Out) override;
866 
867 private:
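  // Adjust RSP by Offset using an LEA so that EFLAGS are preserved, and keep
  // OrigSPOffset in sync. The prologue passes -128, presumably to step over
  // the x86-64 red zone before anything is spilled.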
868   void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
869     const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
870     std::unique_ptr<X86Operand> Op(
871         X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
872                               SMLoc(), SMLoc()));
873     EmitLEA(*Op, 64, X86::RSP, Out);
874     OrigSPOffset += Offset;
875   }
876 
877   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
878                           MCStreamer &Out, const RegisterContext &RegCtx) {
879     EmitInstruction(Out, MCInstBuilder(X86::CLD));
880     EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
881 
882     EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
883                              .addReg(X86::RSP)
884                              .addReg(X86::RSP)
885                              .addImm(-16));
886 
887     if (RegCtx.AddressReg(64) != X86::RDI) {
888       EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
889                                RegCtx.AddressReg(64)));
890     }
891     MCSymbol *FnSym = Ctx.getOrCreateSymbol(Twine("__asan_report_") +
892                                             (IsWrite ? "store" : "load") +
893                                             Twine(AccessSize));
894     const MCSymbolRefExpr *FnExpr =
895         MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
896     EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
897   }
898 };
899 
900 } // end anonymous namespace
901 
902 void X86AddressSanitizer64::InstrumentMemOperandSmall(
903     X86Operand &Op, unsigned AccessSize, bool IsWrite,
904     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
905   unsigned AddressRegI64 = RegCtx.AddressReg(64);
906   unsigned AddressRegI32 = RegCtx.AddressReg(32);
907   unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
908   unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
909   unsigned ShadowRegI8 = RegCtx.ShadowReg(8);
910 
911   assert(RegCtx.ScratchReg(32) != X86::NoRegister);
912   unsigned ScratchRegI32 = RegCtx.ScratchReg(32);
913 
914   ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);
915 
916   EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
917                            AddressRegI64));
918   EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
919                            .addReg(ShadowRegI64)
920                            .addReg(ShadowRegI64)
921                            .addImm(3));
922   {
923     MCInst Inst;
924     Inst.setOpcode(X86::MOV8rm);
925     Inst.addOperand(MCOperand::createReg(ShadowRegI8));
926     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
927     std::unique_ptr<X86Operand> Op(
928         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
929                               SMLoc(), SMLoc()));
930     Op->addMemOperands(Inst, 5);
931     EmitInstruction(Out, Inst);
932   }
933 
934   EmitInstruction(
935       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
936   MCSymbol *DoneSym = Ctx.createTempSymbol();
937   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
938   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
939 
940   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
941                            AddressRegI32));
942   EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
943                            .addReg(ScratchRegI32)
944                            .addReg(ScratchRegI32)
945                            .addImm(7));
946 
947   switch (AccessSize) {
948   default: llvm_unreachable("Incorrect access size");
949   case 1:
950     break;
951   case 2: {
952     const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
953     std::unique_ptr<X86Operand> Op(
954         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
955                               SMLoc(), SMLoc()));
956     EmitLEA(*Op, 32, ScratchRegI32, Out);
957     break;
958   }
959   case 4:
960     EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
961                              .addReg(ScratchRegI32)
962                              .addReg(ScratchRegI32)
963                              .addImm(3));
964     break;
965   }
966 
967   EmitInstruction(
968       Out,
969       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
970   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
971                            ShadowRegI32));
972   EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
973 
974   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
975   EmitLabel(Out, DoneSym);
976 }
977 
978 void X86AddressSanitizer64::InstrumentMemOperandLarge(
979     X86Operand &Op, unsigned AccessSize, bool IsWrite,
980     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
981   unsigned AddressRegI64 = RegCtx.AddressReg(64);
982   unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
983 
984   ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);
985 
986   EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
987                            AddressRegI64));
988   EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
989                            .addReg(ShadowRegI64)
990                            .addReg(ShadowRegI64)
991                            .addImm(3));
992   {
993     MCInst Inst;
994     switch (AccessSize) {
995     default: llvm_unreachable("Incorrect access size");
996     case 8:
997       Inst.setOpcode(X86::CMP8mi);
998       break;
999     case 16:
1000       Inst.setOpcode(X86::CMP16mi);
1001       break;
1002     }
1003     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
1004     std::unique_ptr<X86Operand> Op(
1005         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
1006                               SMLoc(), SMLoc()));
1007     Op->addMemOperands(Inst, 5);
1008     Inst.addOperand(MCOperand::createImm(0));
1009     EmitInstruction(Out, Inst);
1010   }
1011 
1012   MCSymbol *DoneSym = Ctx.createTempSymbol();
1013   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
1014   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
1015 
1016   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
1017   EmitLabel(Out, DoneSym);
1018 }
1019 
1020 void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
1021                                                MCContext &Ctx,
1022                                                MCStreamer &Out) {
1023   StoreFlags(Out);
1024 
1025   // No need to test when RCX equals zero.
1026   MCSymbol *DoneSym = Ctx.createTempSymbol();
1027   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
1028   EmitInstruction(
1029       Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
1030   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
1031 
1032   // Instrument first and last elements in src and dst range.
1033   InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
1034                      X86::RCX /* CntReg */, AccessSize, Ctx, Out);
1035 
1036   EmitLabel(Out, DoneSym);
1037   RestoreFlags(Out);
1038 }
1039 
1040 X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
1041     : STI(STI) {}
1042 
1043 X86AsmInstrumentation::~X86AsmInstrumentation() = default;
1044 
1045 void X86AsmInstrumentation::InstrumentAndEmitInstruction(
1046     const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
1047     const MCInstrInfo &MII, MCStreamer &Out, bool PrintSchedInfoEnabled) {
1048   EmitInstruction(Out, Inst, PrintSchedInfoEnabled);
1049 }
1050 
1051 void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out, const MCInst &Inst,
1052                                             bool PrintSchedInfoEnabled) {
1053   Out.EmitInstruction(Inst, *STI, PrintSchedInfoEnabled);
1054 }
1055 
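// Determine which register currently acts as the CFA register for the active
// DWARF frame, or return X86::NoRegister when there is no usable frame info.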
1056 unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
1057                                                    MCStreamer &Out) {
1058   if (!Out.getNumFrameInfos()) // No active dwarf frame
1059     return X86::NoRegister;
1060   const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
1061   if (Frame.End) // Active dwarf frame is closed
1062     return X86::NoRegister;
1063   const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
1064   if (!MRI) // No register info
1065     return X86::NoRegister;
1066 
1067   if (InitialFrameReg) {
1068     // FrameReg is set explicitly, we're instrumenting a MachineFunction.
1069     return InitialFrameReg;
1070   }
1071 
1072   return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
1073 }
1074 
1075 X86AsmInstrumentation *
1076 llvm::CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
1077                                   const MCContext &Ctx,
1078                                   const MCSubtargetInfo *&STI) {
1079   Triple T(STI->getTargetTriple());
1080   const bool hasCompilerRTSupport = T.isOSLinux();
1081   if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
1082       MCOptions.SanitizeAddress) {
1083     if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
1084       return new X86AddressSanitizer32(STI);
1085     if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
1086       return new X86AddressSanitizer64(STI);
1087   }
1088   return new X86AsmInstrumentation(STI);
1089 }
1090