/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "slicer/instrumentation.h"
#include "slicer/dex_ir_builder.h"

namespace slicer {

namespace {

struct BytecodeConvertingVisitor : public lir::Visitor {
  lir::Bytecode* out = nullptr;
  bool Visit(lir::Bytecode* bytecode) {
    out = bytecode;
    return true;
  }
};

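// Boxes a primitive value into the corresponding java.lang wrapper type by
// emitting a call to <Wrapper>.valueOf() followed by a move-result-object.
// For example (illustrative sketch; registers v3/v5 are made up), boxing an
// int from v3 into v5 roughly produces:
//
//   invoke-static/range {v3}, Ljava/lang/Integer;->valueOf(I)Ljava/lang/Integer;
//   move-result-object v5
//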
void BoxValue(lir::Bytecode* bytecode,
              lir::CodeIr* code_ir,
              ir::Type* type,
              dex::u4 src_reg,
              dex::u4 dst_reg) {
  bool is_wide = false;
  const char* boxed_type_name = nullptr;
  switch (*(type->descriptor)->c_str()) {
    case 'Z':
      boxed_type_name = "Ljava/lang/Boolean;";
      break;
    case 'B':
      boxed_type_name = "Ljava/lang/Byte;";
      break;
    case 'C':
      boxed_type_name = "Ljava/lang/Character;";
      break;
    case 'S':
      boxed_type_name = "Ljava/lang/Short;";
      break;
    case 'I':
      boxed_type_name = "Ljava/lang/Integer;";
      break;
    case 'J':
      is_wide = true;
      boxed_type_name = "Ljava/lang/Long;";
      break;
    case 'F':
      boxed_type_name = "Ljava/lang/Float;";
      break;
    case 'D':
      is_wide = true;
      boxed_type_name = "Ljava/lang/Double;";
      break;
  }
  SLICER_CHECK(boxed_type_name != nullptr);

  ir::Builder builder(code_ir->dex_ir);
  std::vector<ir::Type*> param_types;
  param_types.push_back(type);

  auto boxed_type = builder.GetType(boxed_type_name);
  auto ir_proto = builder.GetProto(boxed_type, builder.GetTypeList(param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString("valueOf"), ir_proto, boxed_type);

  auto boxing_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

  auto args = code_ir->Alloc<lir::VRegRange>(src_reg, 1 + is_wide);
  auto boxing_invoke = code_ir->Alloc<lir::Bytecode>();
  boxing_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
  boxing_invoke->operands.push_back(args);
  boxing_invoke->operands.push_back(boxing_method);
  code_ir->instructions.InsertBefore(bytecode, boxing_invoke);

  auto move_result = code_ir->Alloc<lir::Bytecode>();
  move_result->opcode = dex::OP_MOVE_RESULT_OBJECT;
  move_result->operands.push_back(code_ir->Alloc<lir::VReg>(dst_reg));
  code_ir->instructions.InsertBefore(bytecode, move_result);
}

}  // namespace

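// Injects a static call to the hook method at the start of the instrumented
// method body, forwarding the original arguments ("this" first for non-static
// methods). With Tweak::ThisAsObject the receiver is typed as java.lang.Object,
// and with Tweak::ArrayParams the arguments are instead packed into a single
// Object[] (see InjectArrayParamsHook() below). Roughly (illustrative; the
// placeholder names are made up), for a non-static LFoo;->bar(I)V the injected
// prologue is:
//
//   invoke-static/range {v<this>, v<arg0>}, <hook_class>-><hook_method>(LFoo;I)V
//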
bool EntryHook::Apply(lir::CodeIr* code_ir) {
  lir::Bytecode* bytecode = nullptr;
  // find the first bytecode in the method body so we can insert the hook before it
  for (auto instr : code_ir->instructions) {
    BytecodeConvertingVisitor visitor;
    instr->Accept(&visitor);
    bytecode = visitor.out;
    if (bytecode != nullptr) {
      break;
    }
  }
  if (bytecode == nullptr) {
    return false;
  }
  if (tweak_ == Tweak::ArrayParams) {
    return InjectArrayParamsHook(code_ir, bytecode);
  }

  ir::Builder builder(code_ir->dex_ir);
  const auto ir_method = code_ir->ir_method;

  // construct the hook method declaration
  std::vector<ir::Type*> param_types;
  if ((ir_method->access_flags & dex::kAccStatic) == 0) {
    ir::Type* this_argument_type;
    switch (tweak_) {
      case Tweak::ThisAsObject:
        this_argument_type = builder.GetType("Ljava/lang/Object;");
        break;
      default:
        this_argument_type = ir_method->decl->parent;
        break;
    }
    param_types.push_back(this_argument_type);
  }
  if (ir_method->decl->prototype->param_types != nullptr) {
    const auto& orig_param_types = ir_method->decl->prototype->param_types->types;
    param_types.insert(param_types.end(), orig_param_types.begin(), orig_param_types.end());
  }

  auto ir_proto = builder.GetProto(builder.GetType("V"),
                                   builder.GetTypeList(param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString(hook_method_id_.method_name), ir_proto,
      builder.GetType(hook_method_id_.class_descriptor));

  auto hook_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

  // argument registers
  auto regs = ir_method->code->registers;
  auto args_count = ir_method->code->ins_count;
  auto args = code_ir->Alloc<lir::VRegRange>(regs - args_count, args_count);

  // invoke hook bytecode
  auto hook_invoke = code_ir->Alloc<lir::Bytecode>();
  hook_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
  hook_invoke->operands.push_back(args);
  hook_invoke->operands.push_back(hook_method);

  // insert the hook before the first bytecode in the method body
  code_ir->instructions.InsertBefore(bytecode, hook_invoke);
  return true;
}

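// Tweak::ArrayParams variant of the entry hook: allocates an Object[] large
// enough for "this" (for non-static methods) plus every parameter, boxes
// primitive parameters via BoxValue(), fills the array with aput-object, and
// then invokes the hook as <hook_method>([Ljava/lang/Object;)V with the array
// as its single argument. Requires two or three scratch registers (array,
// size/index, and optionally a boxing result register).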
bool EntryHook::InjectArrayParamsHook(lir::CodeIr* code_ir, lir::Bytecode* bytecode) {
  ir::Builder builder(code_ir->dex_ir);
  const auto ir_method = code_ir->ir_method;
  auto param_types_list = ir_method->decl->prototype->param_types;
  auto param_types = param_types_list != nullptr ? param_types_list->types : std::vector<ir::Type*>();
  bool is_static = (ir_method->access_flags & dex::kAccStatic) != 0;

  bool needsBoxingReg = false;
  for (auto type: param_types) {
    needsBoxingReg |= type->GetCategory() != ir::Type::Category::Reference;
  }

  // allocate scratch registers
  slicer::AllocateScratchRegs alloc_regs(2 + needsBoxingReg);
  alloc_regs.Apply(code_ir);
  auto reg_iterator = alloc_regs.ScratchRegs().begin();
  // register that holds the array size during allocation;
  // later reused to hold the index for each "aput"
  dex::u4 array_size_reg = *(reg_iterator);
  // register that holds the array passed as the
  // entry hook parameter
  dex::u4 array_reg = *(++reg_iterator);
  // if boxing is needed, this register holds the boxing result
  dex::u4 boxing_reg = needsBoxingReg ? *(++reg_iterator) : 0;

  // TODO: handle very "high" registers
  if (boxing_reg > 0xff) {
    printf("WARNING: can't instrument method %s.%s%s\n",
           ir_method->decl->parent->Decl().c_str(),
           ir_method->decl->name->c_str(),
           ir_method->decl->prototype->Signature().c_str());
    return false;
  }

  // array size bytecode
  auto const_size_op = code_ir->Alloc<lir::Bytecode>();
  const_size_op->opcode = dex::OP_CONST;
  const_size_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_size_reg));
  const_size_op->operands.push_back(code_ir->Alloc<lir::Const32>(param_types.size() + !is_static));
  code_ir->instructions.InsertBefore(bytecode, const_size_op);

  // allocate array
  const auto obj_array_type = builder.GetType("[Ljava/lang/Object;");
  auto allocate_array_op = code_ir->Alloc<lir::Bytecode>();
  allocate_array_op->opcode = dex::OP_NEW_ARRAY;
  allocate_array_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_reg));
  allocate_array_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_size_reg));
  allocate_array_op->operands.push_back(
      code_ir->Alloc<lir::Type>(obj_array_type, obj_array_type->orig_index));
  code_ir->instructions.InsertBefore(bytecode, allocate_array_op);

  // fill the array with the parameters passed into the function

  std::vector<ir::Type*> types;
  if (!is_static) {
    types.push_back(ir_method->decl->parent);
  }

  types.insert(types.end(), param_types.begin(), param_types.end());

  // register where the params start
  dex::u4 current_reg = ir_method->code->registers - ir_method->code->ins_count;
  // reuse the no-longer-needed size register to hold the array index
  dex::u4 array_index_reg = array_size_reg;
  int i = 0;
  for (auto type: types) {
    dex::u4 src_reg = 0;
    if (type->GetCategory() != ir::Type::Category::Reference) {
      BoxValue(bytecode, code_ir, type, current_reg, boxing_reg);
      src_reg = boxing_reg;
      current_reg += 1 + (type->GetCategory() == ir::Type::Category::WideScalar);
    } else {
      src_reg = current_reg;
      current_reg++;
    }

    auto index_const_op = code_ir->Alloc<lir::Bytecode>();
    index_const_op->opcode = dex::OP_CONST;
    index_const_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_index_reg));
    index_const_op->operands.push_back(code_ir->Alloc<lir::Const32>(i++));
    code_ir->instructions.InsertBefore(bytecode, index_const_op);

    auto aput_op = code_ir->Alloc<lir::Bytecode>();
    aput_op->opcode = dex::OP_APUT_OBJECT;
    aput_op->operands.push_back(code_ir->Alloc<lir::VReg>(src_reg));
    aput_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_reg));
    aput_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_index_reg));
    code_ir->instructions.InsertBefore(bytecode, aput_op);
  }

  std::vector<ir::Type*> hook_param_types;
  hook_param_types.push_back(obj_array_type);

  auto ir_proto = builder.GetProto(builder.GetType("V"),
                                   builder.GetTypeList(hook_param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString(hook_method_id_.method_name), ir_proto,
      builder.GetType(hook_method_id_.class_descriptor));

  auto hook_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);
  auto args = code_ir->Alloc<lir::VRegRange>(array_reg, 1);
  auto hook_invoke = code_ir->Alloc<lir::Bytecode>();
  hook_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
  hook_invoke->operands.push_back(args);
  hook_invoke->operands.push_back(hook_method);
  code_ir->instructions.InsertBefore(bytecode, hook_invoke);
  return true;
}

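// Instruments every return point of the method: right before each return
// bytecode it invokes the hook with the outgoing value (no arguments for void
// methods), then moves the hook's result back into the original return
// register. With Tweak::ReturnAsObject the value is passed and returned as
// java.lang.Object and check-cast back to the declared return type. Roughly
// (illustrative; placeholder names are made up), with Tweak::ReturnAsObject a
// "return-object v2" becomes:
//
//   invoke-static/range {v2}, <hook_class>-><hook_method>(Ljava/lang/Object;)Ljava/lang/Object;
//   move-result-object v2
//   check-cast v2, <declared return type>
//   return-object v2
//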
bool ExitHook::Apply(lir::CodeIr* code_ir) {
  ir::Builder builder(code_ir->dex_ir);
  const auto ir_method = code_ir->ir_method;
  const auto declared_return_type = ir_method->decl->prototype->return_type;
  bool return_as_object = tweak_ == Tweak::ReturnAsObject;
  // do we have a void-return method?
  bool return_void = (::strcmp(declared_return_type->descriptor->c_str(), "V") == 0);
  // Tweak::ReturnAsObject supports only reference return types
  SLICER_CHECK(!return_as_object ||
      (declared_return_type->GetCategory() == ir::Type::Category::Reference));
  const auto return_type = return_as_object ? builder.GetType("Ljava/lang/Object;")
      : declared_return_type;

  // construct the hook method declaration
  std::vector<ir::Type*> param_types;
  if (!return_void) {
    param_types.push_back(return_type);
  }

  auto ir_proto = builder.GetProto(return_type, builder.GetTypeList(param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString(hook_method_id_.method_name), ir_proto,
      builder.GetType(hook_method_id_.class_descriptor));

  auto hook_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

  // find and instrument all return instructions
  for (auto instr : code_ir->instructions) {
    BytecodeConvertingVisitor visitor;
    instr->Accept(&visitor);
    auto bytecode = visitor.out;
    if (bytecode == nullptr) {
      continue;
    }

    dex::Opcode move_result_opcode = dex::OP_NOP;
    dex::u4 reg = 0;
    int reg_count = 0;

    switch (bytecode->opcode) {
      case dex::OP_RETURN_VOID:
        SLICER_CHECK(return_void);
        break;
      case dex::OP_RETURN:
        SLICER_CHECK(!return_void);
        move_result_opcode = dex::OP_MOVE_RESULT;
        reg = bytecode->CastOperand<lir::VReg>(0)->reg;
        reg_count = 1;
        break;
      case dex::OP_RETURN_OBJECT:
        SLICER_CHECK(!return_void);
        move_result_opcode = dex::OP_MOVE_RESULT_OBJECT;
        reg = bytecode->CastOperand<lir::VReg>(0)->reg;
        reg_count = 1;
        break;
      case dex::OP_RETURN_WIDE:
        SLICER_CHECK(!return_void);
        move_result_opcode = dex::OP_MOVE_RESULT_WIDE;
        reg = bytecode->CastOperand<lir::VRegPair>(0)->base_reg;
        reg_count = 2;
        break;
      default:
        // skip the bytecode...
        continue;
    }

    // invoke hook bytecode
    auto args = code_ir->Alloc<lir::VRegRange>(reg, reg_count);
    auto hook_invoke = code_ir->Alloc<lir::Bytecode>();
    hook_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
    hook_invoke->operands.push_back(args);
    hook_invoke->operands.push_back(hook_method);
    code_ir->instructions.InsertBefore(bytecode, hook_invoke);

    // move result back to the right register
    //
    // NOTE: we're reusing the original return's operand,
    //   which is valid and more efficient than allocating
    //   a new LIR node, but it's also fragile: we need to be
    //   very careful about mutating shared nodes.
    //
    if (move_result_opcode != dex::OP_NOP) {
      auto move_result = code_ir->Alloc<lir::Bytecode>();
      move_result->opcode = move_result_opcode;
      move_result->operands.push_back(bytecode->operands[0]);
      code_ir->instructions.InsertBefore(bytecode, move_result);

      if (tweak_ == Tweak::ReturnAsObject) {
        auto check_cast = code_ir->Alloc<lir::Bytecode>();
        check_cast->opcode = dex::OP_CHECK_CAST;
        check_cast->operands.push_back(code_ir->Alloc<lir::VReg>(reg));
        check_cast->operands.push_back(
            code_ir->Alloc<lir::Type>(declared_return_type, declared_return_type->orig_index));
        code_ir->instructions.InsertBefore(bytecode, check_cast);
      }
    }
  }

  return true;
}

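// Rewrites matching call sites in place: any invoke that GetNewOpcode() maps
// to a new opcode and whose target matches orig_method_id_ is redirected to a
// static detour method whose signature matches the original plus an explicit
// "this" as the first parameter. For example (illustrative; the detour class
// name is made up):
//
//   invoke-virtual {v1, v2}, Lcom/example/Foo;->bar(I)V
//     becomes
//   invoke-static {v1, v2}, <detour_class>->bar(Lcom/example/Foo;I)V
//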
bool DetourHook::Apply(lir::CodeIr* code_ir) {
  ir::Builder builder(code_ir->dex_ir);

  // search for matching invoke bytecodes (the opcodes accepted by GetNewOpcode())
  for (auto instr : code_ir->instructions) {
    BytecodeConvertingVisitor visitor;
    instr->Accept(&visitor);
    auto bytecode = visitor.out;
    if (bytecode == nullptr) {
      continue;
    }

    dex::Opcode new_call_opcode = GetNewOpcode(bytecode->opcode);
    if (new_call_opcode == dex::OP_NOP) {
      continue;
    }

    auto orig_method = bytecode->CastOperand<lir::Method>(1)->ir_method;
    if (!orig_method_id_.Match(orig_method)) {
      // this is not the method you're looking for...
      continue;
    }

    // construct the detour method declaration
    // (matching the original method, plus an explicit "this" argument)
    std::vector<ir::Type*> param_types;
    param_types.push_back(orig_method->parent);
    if (orig_method->prototype->param_types != nullptr) {
      const auto& orig_param_types = orig_method->prototype->param_types->types;
      param_types.insert(param_types.end(), orig_param_types.begin(),
                         orig_param_types.end());
    }

    auto ir_proto = builder.GetProto(orig_method->prototype->return_type,
                                     builder.GetTypeList(param_types));

    auto ir_method_decl = builder.GetMethodDecl(
        builder.GetAsciiString(detour_method_id_.method_name), ir_proto,
        builder.GetType(detour_method_id_.class_descriptor));

    auto detour_method =
        code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

    // We mutate the original invoke bytecode in-place: this is ok
    // because lir::Instructions can't be shared (referenced multiple times)
    // in the code IR. It's also simpler and more efficient than allocating a
    // new IR invoke bytecode.
    bytecode->opcode = new_call_opcode;
    bytecode->operands[1] = detour_method;
  }

  return true;
}

dex::Opcode DetourVirtualInvoke::GetNewOpcode(dex::Opcode opcode) {
  switch (opcode) {
    case dex::OP_INVOKE_VIRTUAL:
      return dex::OP_INVOKE_STATIC;
    case dex::OP_INVOKE_VIRTUAL_RANGE:
      return dex::OP_INVOKE_STATIC_RANGE;
    default:
      // skip instruction ...
      return dex::OP_NOP;
  }
}

dex::Opcode DetourInterfaceInvoke::GetNewOpcode(dex::Opcode opcode) {
  switch (opcode) {
    case dex::OP_INVOKE_INTERFACE:
      return dex::OP_INVOKE_STATIC;
    case dex::OP_INVOKE_INTERFACE_RANGE:
      return dex::OP_INVOKE_STATIC_RANGE;
    default:
      // skip instruction ...
      return dex::OP_NOP;
  }
}

// Register re-numbering visitor
// (renumbers vN to vN+shift)
class RegsRenumberVisitor : public lir::Visitor {
 public:
  explicit RegsRenumberVisitor(int shift) : shift_(shift) {
    SLICER_CHECK(shift > 0);
  }

 private:
  virtual bool Visit(lir::Bytecode* bytecode) override {
    for (auto operand : bytecode->operands) {
      operand->Accept(this);
    }
    return true;
  }

  virtual bool Visit(lir::DbgInfoAnnotation* dbg_annotation) override {
    for (auto operand : dbg_annotation->operands) {
      operand->Accept(this);
    }
    return true;
  }

  virtual bool Visit(lir::VReg* vreg) override {
    vreg->reg += shift_;
    return true;
  }

  virtual bool Visit(lir::VRegPair* vreg_pair) override {
    vreg_pair->base_reg += shift_;
    return true;
  }

  virtual bool Visit(lir::VRegList* vreg_list) override {
    for (auto& reg : vreg_list->registers) {
      reg += shift_;
    }
    return true;
  }

  virtual bool Visit(lir::VRegRange* vreg_range) override {
    vreg_range->base_reg += shift_;
    return true;
  }

 private:
  int shift_ = 0;
};

// Try to allocate registers by renumbering the existing allocation
//
// NOTE: we can't bump the register count over 16 since it may
//  make existing bytecodes "unencodable" (if they have 4 bit reg fields)
//
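// For example (illustrative): a method currently using 12 registers that needs
// two scratch regs has every existing vN renumbered to v(N+2), which frees
// v0 and v1 and bumps the method's register count to 14.
//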
void AllocateScratchRegs::RegsRenumbering(lir::CodeIr* code_ir) {
  SLICER_CHECK(left_to_allocate_ > 0);
  int delta = std::min(left_to_allocate_,
                       16 - static_cast<int>(code_ir->ir_method->code->registers));
  if (delta < 1) {
    // can't allocate any registers through renumbering
    return;
  }
  assert(delta <= 16);

  // renumber existing registers
  RegsRenumberVisitor visitor(delta);
  for (auto instr : code_ir->instructions) {
    instr->Accept(&visitor);
  }

  // we just allocated "delta" registers (v0..vX)
  Allocate(code_ir, 0, delta);
}

// Allocates registers by generating prologue code to relocate params
// into their original registers (parameters are allocated in the last IN registers)
//
// There are three types of register moves depending on the value type:
// 1. vreg -> vreg
// 2. vreg/wide -> vreg/wide
// 3. vreg/obj -> vreg/obj
//
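// For example (illustrative), shifting by 2 in a non-static (J)V method with
// registers=4 and ins_count=3 ("this" plus one wide param): the register count
// is bumped to 6, the incoming args now arrive in v3..v5, and the generated
// prologue relocates them to where the existing code expects them:
//
//   move-object/16 v1, v3   // "this"
//   move-wide/16   v2, v4   // the wide parameter
//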
void AllocateScratchRegs::ShiftParams(lir::CodeIr* code_ir) {
  const auto ir_method = code_ir->ir_method;
  SLICER_CHECK(ir_method->code->ins_count > 0);
  SLICER_CHECK(left_to_allocate_ > 0);

  // build a param list with the explicit "this" argument for non-static methods
  std::vector<ir::Type*> param_types;
  if ((ir_method->access_flags & dex::kAccStatic) == 0) {
    param_types.push_back(ir_method->decl->parent);
  }
  if (ir_method->decl->prototype->param_types != nullptr) {
    const auto& orig_param_types = ir_method->decl->prototype->param_types->types;
    param_types.insert(param_types.end(), orig_param_types.begin(), orig_param_types.end());
  }

  const dex::u4 shift = left_to_allocate_;

  Allocate(code_ir, ir_method->code->registers, left_to_allocate_);
  assert(left_to_allocate_ == 0);

  const dex::u4 regs = ir_method->code->registers;
  const dex::u4 ins_count = ir_method->code->ins_count;
  SLICER_CHECK(regs >= ins_count);

  // generate the args "relocation" instructions
  auto first_instr = code_ir->instructions.begin();
  dex::u4 reg = regs - ins_count;
  for (const auto& type : param_types) {
    auto move = code_ir->Alloc<lir::Bytecode>();
    switch (type->GetCategory()) {
      case ir::Type::Category::Reference:
        move->opcode = dex::OP_MOVE_OBJECT_16;
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg - shift));
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg));
        reg += 1;
        break;
      case ir::Type::Category::Scalar:
        move->opcode = dex::OP_MOVE_16;
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg - shift));
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg));
        reg += 1;
        break;
      case ir::Type::Category::WideScalar:
        move->opcode = dex::OP_MOVE_WIDE_16;
        move->operands.push_back(code_ir->Alloc<lir::VRegPair>(reg - shift));
        move->operands.push_back(code_ir->Alloc<lir::VRegPair>(reg));
        reg += 2;
        break;
      case ir::Type::Category::Void:
        SLICER_FATAL("void parameter type");
    }
    code_ir->instructions.insert(first_instr, move);
  }
}

// Mark [first_reg, first_reg + count) as scratch registers
void AllocateScratchRegs::Allocate(lir::CodeIr* code_ir, dex::u4 first_reg, int count) {
  SLICER_CHECK(count > 0 && count <= left_to_allocate_);
  code_ir->ir_method->code->registers += count;
  left_to_allocate_ -= count;
  for (int i = 0; i < count; ++i) {
    SLICER_CHECK(scratch_regs_.insert(first_reg + i).second);
  }
}

// Allocate scratch registers without doing a full register allocation:
//
// 1. if there are no params, increase the method regs count and we're done
// 2. if the method uses fewer than 16 registers, we can renumber the existing registers
// 3. if we still have registers to allocate, increase the method registers count,
//     and generate prologue code to shift the param regs into their original registers
//
bool AllocateScratchRegs::Apply(lir::CodeIr* code_ir) {
  const auto code = code_ir->ir_method->code;
  // .dex bytecode allows up to 64k vregs
  SLICER_CHECK(code->registers + allocate_count_ <= (1 << 16));

  scratch_regs_.clear();
  left_to_allocate_ = allocate_count_;

  // can we allocate by simply incrementing the method regs count?
  if (code->ins_count == 0) {
    Allocate(code_ir, code->registers, left_to_allocate_);
    return true;
  }

  // allocate as many registers as possible using renumbering
  if (allow_renumbering_) {
    RegsRenumbering(code_ir);
  }

  // if we still have registers to allocate, generate prologue
  // code to shift the params into their original registers
  if (left_to_allocate_ > 0) {
    ShiftParams(code_ir);
  }

  assert(left_to_allocate_ == 0);
  assert(scratch_regs_.size() == size_t(allocate_count_));
  return true;
}

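// Applies all queued transformations to a single method and re-assembles its
// code. A minimal usage sketch (illustrative; the hook class/method names are
// made up, and the exact transformation constructors are declared in
// instrumentation.h):
//
//   slicer::MethodInstrumenter mi(dex_ir);
//   mi.AddTransformation<slicer::EntryHook>(ir::MethodId("LMyHooks;", "onEntry"));
//   mi.AddTransformation<slicer::ExitHook>(ir::MethodId("LMyHooks;", "onExit"));
//   if (!mi.InstrumentMethod(ir::MethodId("Lcom/example/Foo;", "bar", "(I)V"))) {
//     // method not found or could not be instrumented
//   }
//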
bool MethodInstrumenter::InstrumentMethod(ir::EncodedMethod* ir_method) {
  SLICER_CHECK(ir_method != nullptr);
  if (ir_method->code == nullptr) {
    // nothing to instrument (abstract or native method)
    return false;
  }

  // apply all the queued transformations
  lir::CodeIr code_ir(ir_method, dex_ir_);
  for (const auto& transformation : transformations_) {
    if (!transformation->Apply(&code_ir)) {
      // the transformation failed, bail out...
      return false;
    }
  }
  code_ir.Assemble();
  return true;
}

bool MethodInstrumenter::InstrumentMethod(const ir::MethodId& method_id) {
  // locate the method to be instrumented
  ir::Builder builder(dex_ir_);
  auto ir_method = builder.FindMethod(method_id);
  if (ir_method == nullptr) {
    // we couldn't find the specified method
    return false;
  }
  return InstrumentMethod(ir_method);
}

}  // namespace slicer