//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//

#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

#define DEBUG_TYPE "interpreter"

STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");

static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
          cl::desc("make the interpreter print every volatile load and store"));
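// Usage note (an assumption about the surrounding tooling, not something this
// file enforces): when this library is linked into the standard lli driver,
// the hidden flag above can typically be enabled from the command line, e.g.
//   lli -force-interpreter -interpreter-print-volatile program.bc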

//===----------------------------------------------------------------------===//
//                     Various Helper Functions
//===----------------------------------------------------------------------===//

static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}

//===----------------------------------------------------------------------===//
//                    Binary Instruction Implementations
//===----------------------------------------------------------------------===//

#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
   case Type::TY##TyID: \
     Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
     break
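// As an illustration of the macro above, IMPLEMENT_BINARY_OPERATOR(+, Float)
// expands to:
//   case Type::FloatTyID:
//     Dest.FloatVal = Src1.FloatVal + Src2.FloatVal;
//     break
// The same pattern is reused for the other scalar FP operations below.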

static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(+, Float);
    IMPLEMENT_BINARY_OPERATOR(+, Double);
  default:
    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(-, Float);
    IMPLEMENT_BINARY_OPERATOR(-, Double);
  default:
    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(*, Float);
    IMPLEMENT_BINARY_OPERATOR(*, Double);
  default:
    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(/, Float);
    IMPLEMENT_BINARY_OPERATOR(/, Double);
  default:
    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    break;
  default:
    dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID:  \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                        \
  case Type::VectorTyID: {                                           \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());    \
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );            \
    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)             \
      Dest.AggregateVal[_i].IntVal = APInt(1,                        \
      Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has.  We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;

static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

void Interpreter::visitICmpInst(ICmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty    = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ:  R = executeICMP_EQ(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_NE:  R = executeICMP_NE(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  default:
    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  }

  SetValue(&I, R, SF);
}

#define IMPLEMENT_FCMP(OP, TY) \
   case Type::TY##TyID: \
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break

#define IMPLEMENT_VECTOR_FCMP_T(OP, TY)                             \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());     \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() );             \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)              \
    Dest.AggregateVal[_i].IntVal = APInt(1,                         \
    Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  break;

#define IMPLEMENT_VECTOR_FCMP(OP)                                   \
  case Type::VectorTyID:                                            \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {      \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                           \
    } else {                                                        \
        IMPLEMENT_VECTOR_FCMP_T(OP, Double);                        \
    }

static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

#define IMPLEMENT_SCALAR_NANS(TY, X,Y)                                      \
  if (TY->isFloatTy()) {                                                    \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {             \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  } else {                                                                  \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) {         \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  }

#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG)                                   \
  assert(X.AggregateVal.size() == Y.AggregateVal.size());                   \
  Dest.AggregateVal.resize( X.AggregateVal.size() );                        \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) {                       \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val ||         \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val)           \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG);                         \
    else  {                                                                 \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG);                        \
    }                                                                       \
  }

#define MASK_VECTOR_NANS(TY, X,Y, FLAG)                                     \
  if (TY->isVectorTy()) {                                                   \
    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) {              \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG)                                 \
    } else {                                                                \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG)                                \
    }                                                                       \
  }                                                                         \



static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
    default:
      dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
      llvm_unreachable(nullptr);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);

  return Dest;
}

static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

#define IMPLEMENT_UNORDERED(TY, X,Y)                                     \
  if (TY->isFloatTy()) {                                                 \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {          \
      Dest.IntVal = APInt(1,true);                                       \
      return Dest;                                                       \
    }                                                                    \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true);                                         \
    return Dest;                                                         \
  }

#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC)                             \
  if (TY->isVectorTy()) {                                                      \
    GenericValue DestMask = Dest;                                              \
    Dest = FUNC(Src1, Src2, Ty);                                               \
    for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++)                   \
      if (DestMask.AggregateVal[_i].IntVal == true)                            \
        Dest.AggregateVal[_i].IntVal = APInt(1, true);                         \
    return Dest;                                                               \
  }

static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);

}

static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal ==
        Src1.AggregateVal[_i].FloatVal) &&
        (Src2.AggregateVal[_i].FloatVal ==
        Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].DoubleVal ==
        Src1.AggregateVal[_i].DoubleVal) &&
        (Src2.AggregateVal[_i].DoubleVal ==
        Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
                           Src2.FloatVal == Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
                           Src2.DoubleVal == Src2.DoubleVal));
  }
  return Dest;
}

static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal !=
           Src1.AggregateVal[_i].FloatVal) ||
          (Src2.AggregateVal[_i].FloatVal !=
           Src2.AggregateVal[_i].FloatVal)));
      } else {
        for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
          Dest.AggregateVal[_i].IntVal = APInt(1,
          ( (Src1.AggregateVal[_i].DoubleVal !=
             Src1.AggregateVal[_i].DoubleVal) ||
            (Src2.AggregateVal[_i].DoubleVal !=
             Src2.AggregateVal[_i].DoubleVal)));
      }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
                           Src2.FloatVal != Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
                           Src2.DoubleVal != Src2.DoubleVal));
  }
  return Dest;
}

static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
                                    const Type *Ty, const bool val) {
  GenericValue Dest;
    if(Ty->isVectorTy()) {
      assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
      Dest.AggregateVal.resize( Src1.AggregateVal.size() );
      for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,val);
    } else {
      Dest.IntVal = APInt(1, val);
    }

    return Dest;
}

void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty    = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  break;
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }

  SetValue(&I, R, SF);
}

static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
                                   GenericValue Src2, Type *Ty) {
  GenericValue Result;
  switch (predicate) {
  case ICmpInst::ICMP_EQ:    return executeICMP_EQ(Src1, Src2, Ty);
  case ICmpInst::ICMP_NE:    return executeICMP_NE(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGT:   return executeICMP_UGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGT:   return executeICMP_SGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULT:   return executeICMP_ULT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLT:   return executeICMP_SLT(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGE:   return executeICMP_UGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGE:   return executeICMP_SGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULE:   return executeICMP_ULE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLE:   return executeICMP_SLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ORD:   return executeFCMP_ORD(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNO:   return executeFCMP_UNO(Src1, Src2, Ty);
  case FCmpInst::FCMP_OEQ:   return executeFCMP_OEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_UEQ:   return executeFCMP_UEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_ONE:   return executeFCMP_ONE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNE:   return executeFCMP_UNE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLT:   return executeFCMP_OLT(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULT:   return executeFCMP_ULT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGT:   return executeFCMP_OGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGT:   return executeFCMP_UGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLE:   return executeFCMP_OLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULE:   return executeFCMP_ULE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGE:   return executeFCMP_OGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGE:   return executeFCMP_UGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  case FCmpInst::FCMP_TRUE:  return executeFCMP_BOOL(Src1, Src2, Ty, true);
  default:
    dbgs() << "Unhandled Cmp predicate\n";
    llvm_unreachable(nullptr);
  }
}

void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty    = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP)                               \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
      Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP)                                \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
      Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY)                               \
      for (unsigned i = 0; i < R.AggregateVal.size(); ++i)          \
        R.AggregateVal[i].TY =                                      \
        Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) {                                         \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())            \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                               \
  else {                                                              \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())         \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                            \
    else {                                                            \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0);                                            \
    }                                                                 \
  }                                                                   \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}

static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
                                      GenericValue Src3, const Type *Ty) {
    GenericValue Dest;
    if(Ty->isVectorTy()) {
      assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
      assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
      Dest.AggregateVal.resize( Src1.AggregateVal.size() );
      for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
        Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
          Src3.AggregateVal[i] : Src2.AggregateVal[i];
    } else {
      Dest = (Src1.IntVal == 0) ? Src3 : Src2;
    }
    return Dest;
}

void Interpreter::visitSelectInst(SelectInst &I) {
  ExecutionContext &SF = ECStack.back();
  const Type * Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  SetValue(&I, R, SF);
}

//===----------------------------------------------------------------------===//
//                     Terminator Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}

/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) {  // Finished main.  Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) {          // Nonvoid return type?
      ExitValue = Result;   // Capture the exit value of the program
    } else {
      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (Instruction *I = CallingSF.Caller.getInstruction()) {
      // Save result...
      if (!CallingSF.Caller.getType()->isVoidTy())
        SetValue(I, Result, CallingSF);
      if (InvokeInst *II = dyn_cast<InvokeInst> (I))
        SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
      CallingSF.Caller = CallSite();          // We returned from the call...
    }
  }
}

void Interpreter::visitReturnInst(ReturnInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *RetTy = Type::getVoidTy(I.getContext());
  GenericValue Result;

  // Save away the return value... (if we are not 'ret void')
  if (I.getNumOperands()) {
    RetTy  = I.getReturnValue()->getType();
    Result = getOperandValue(I.getReturnValue(), SF);
  }

  popStackAndReturnValueToCaller(RetTy, Result);
}

void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}

void Interpreter::visitBranchInst(BranchInst &I) {
  ExecutionContext &SF = ECStack.back();
  BasicBlock *Dest;

  Dest = I.getSuccessor(0);          // Uncond branches have a fixed dest...
  if (!I.isUnconditional()) {
    Value *Cond = I.getCondition();
    if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
      Dest = I.getSuccessor(1);
  }
  SwitchToNewBasicBlock(Dest, SF);
}

void Interpreter::visitSwitchInst(SwitchInst &I) {
  ExecutionContext &SF = ECStack.back();
  Value* Cond = I.getCondition();
  Type *ElTy = Cond->getType();
  GenericValue CondVal = getOperandValue(Cond, SF);

  // Check to see if any of the cases match...
  BasicBlock *Dest = nullptr;
  for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
    GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
    if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
      Dest = cast<BasicBlock>(i.getCaseSuccessor());
      break;
    }
  }
  if (!Dest) Dest = I.getDefaultDest();   // No cases matched: use default
  SwitchToNewBasicBlock(Dest, SF);
}

void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  ExecutionContext &SF = ECStack.back();
  void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
}


// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// This function handles the actual updating of block and instruction iterators
// as well as execution of all of the PHI nodes in the destination block.
//
// This method does this because all of the PHI nodes must be executed
// atomically, reading their inputs before any of the results are updated.  Not
// doing this can cause problems if the PHI nodes depend on other PHI nodes for
// their inputs.  If the input PHI node is updated before it is read, incorrect
// results can happen.  Thus we use a two phase approach.
//
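// As a concrete illustration (hypothetical IR, not taken from any test),
// consider two PHI nodes that swap values on each back-edge:
//
//   loop:
//     %a = phi i32 [ 0, %entry ], [ %b, %loop ]
//     %b = phi i32 [ 1, %entry ], [ %a, %loop ]
//
// If %a were updated before %b's incoming value was read, %b would see the
// *new* %a and the swap would be lost. Hence phase one below reads all
// incoming values, and phase two writes them back.
//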
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB;      // Remember where we came from...
  SF.CurBB   = Dest;                  // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin();     // Update new instruction ptr...

  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do

  // Loop over all of the PHI nodes in the current block, reading their inputs.
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Now loop over all of the PHI nodes setting their values...
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}

//===----------------------------------------------------------------------===//
//                     Memory Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::visitAllocaInst(AllocaInst &I) {
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getType()->getElementType();  // Type to be allocated

  // Get the number of elements being allocated by the array...
  unsigned NumElements =
    getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();

  unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);

  // Avoid malloc-ing zero bytes, use max()...
  unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);

  // Allocate enough memory to hold the type...
  void *Memory = malloc(MemToAlloc);

  DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
               << NumElements << " (Total: " << MemToAlloc << ") at "
               << uintptr_t(Memory) << '\n');

  GenericValue Result = PTOGV(Memory);
  assert(Result.PointerVal && "Null pointer returned by malloc!");
  SetValue(&I, Result, SF);

  if (I.getOpcode() == Instruction::Alloca)
    ECStack.back().Allocas.add(Memory);
}

// executeGEPOperation - The workhorse for getelementptr: it accumulates the
// byte offset selected by the index list and adds it to the base pointer.
//
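// Illustrative example (byte offsets assume a typical 64-bit data layout and
// are not authoritative; the actual values come from the target's DataLayout):
// for
//   %q = getelementptr {i32, double}, {i32, double}* %p, i64 1, i32 1
// the offset is sizeof({i32, double}) * 1 plus the struct offset of field 1,
// i.e. 16 + 8 = 24 bytes, added to the base pointer %p.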
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  uint64_t Total = 0;

  for (; I != E; ++I) {
    if (StructType *STy = dyn_cast<StructType>(*I)) {
      const StructLayout *SLO = TD.getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
        cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}

void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeGEPOperation(I.getPointerOperand(),
                                   gep_type_begin(I), gep_type_end(I), SF), SF);
}

void Interpreter::visitLoadInst(LoadInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  GenericValue Result;
  LoadValueFromMemory(Result, Ptr, I.getType());
  SetValue(&I, Result, SF);
  if (I.isVolatile() && PrintVolatile)
    dbgs() << "Volatile load " << I;
}

void Interpreter::visitStoreInst(StoreInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Val = getOperandValue(I.getOperand(0), SF);
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
                     I.getOperand(0)->getType());
  if (I.isVolatile() && PrintVolatile)
    dbgs() << "Volatile store: " << I;
}

//===----------------------------------------------------------------------===//
//                 Miscellaneous Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::visitCallSite(CallSite CS) {
  ExecutionContext &SF = ECStack.back();

  // Check to see if this is an intrinsic function call...
  Function *F = CS.getCalledFunction();
  if (F && F->isDeclaration())
    switch (F->getIntrinsicID()) {
    case Intrinsic::not_intrinsic:
      break;
    case Intrinsic::vastart: { // va_start
      GenericValue ArgIndex;
      ArgIndex.UIntPairVal.first = ECStack.size() - 1;
      ArgIndex.UIntPairVal.second = 0;
      SetValue(CS.getInstruction(), ArgIndex, SF);
      return;
    }
    case Intrinsic::vaend:    // va_end is a noop for the interpreter
      return;
    case Intrinsic::vacopy:   // va_copy: dest = src
      SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
      return;
    default:
      // If it is an unknown intrinsic function, use the intrinsic lowering
      // class to transform it into hopefully tasty LLVM code.
      //
      BasicBlock::iterator me(CS.getInstruction());
      BasicBlock *Parent = CS.getInstruction()->getParent();
      bool atBegin(Parent->begin() == me);
      if (!atBegin)
        --me;
      IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));

      // Restore the CurInst pointer to the first instruction newly inserted, if
      // any.
      if (atBegin) {
        SF.CurInst = Parent->begin();
      } else {
        SF.CurInst = me;
        ++SF.CurInst;
      }
      return;
    }


  SF.Caller = CS;
  std::vector<GenericValue> ArgVals;
  const unsigned NumArgs = SF.Caller.arg_size();
  ArgVals.reserve(NumArgs);
  uint16_t pNum = 1;
  for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
         e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
    Value *V = *i;
    ArgVals.push_back(getOperandValue(V, SF));
  }

  // To handle indirect calls, we must get the pointer value from the argument
  // and treat it as a function pointer.
  GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
  callFunction((Function*)GVTOP(SRC), ArgVals);
}

// Auxiliary function for shift operations.
static unsigned getShiftAmount(uint64_t orgShiftAmount,
                               llvm::APInt valueToShift) {
  unsigned valueWidth = valueToShift.getBitWidth();
  if (orgShiftAmount < (uint64_t)valueWidth)
    return orgShiftAmount;
  // According to the LLVM documentation, a shift amount greater than or equal
  // to the value's bit width gives an undefined result, so we instead reduce
  // it by this rule (e.g. shifting a 32-bit value by 35 actually shifts by
  // 35 & 31 == 3):
  return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
}
1135 
1136 
visitShl(BinaryOperator & I)1137 void Interpreter::visitShl(BinaryOperator &I) {
1138   ExecutionContext &SF = ECStack.back();
1139   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1140   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1141   GenericValue Dest;
1142   const Type *Ty = I.getType();
1143 
1144   if (Ty->isVectorTy()) {
1145     uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
1146     assert(src1Size == Src2.AggregateVal.size());
1147     for (unsigned i = 0; i < src1Size; i++) {
1148       GenericValue Result;
1149       uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
1150       llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
1151       Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
1152       Dest.AggregateVal.push_back(Result);
1153     }
1154   } else {
1155     // scalar
1156     uint64_t shiftAmount = Src2.IntVal.getZExtValue();
1157     llvm::APInt valueToShift = Src1.IntVal;
1158     Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
1159   }
1160 
1161   SetValue(&I, Dest, SF);
1162 }
1163 
visitLShr(BinaryOperator & I)1164 void Interpreter::visitLShr(BinaryOperator &I) {
1165   ExecutionContext &SF = ECStack.back();
1166   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1167   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1168   GenericValue Dest;
1169   const Type *Ty = I.getType();
1170 
1171   if (Ty->isVectorTy()) {
1172     uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
1173     assert(src1Size == Src2.AggregateVal.size());
1174     for (unsigned i = 0; i < src1Size; i++) {
1175       GenericValue Result;
1176       uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
1177       llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
1178       Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
1179       Dest.AggregateVal.push_back(Result);
1180     }
1181   } else {
1182     // scalar
1183     uint64_t shiftAmount = Src2.IntVal.getZExtValue();
1184     llvm::APInt valueToShift = Src1.IntVal;
1185     Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
1186   }
1187 
1188   SetValue(&I, Dest, SF);
1189 }
1190 
visitAShr(BinaryOperator & I)1191 void Interpreter::visitAShr(BinaryOperator &I) {
1192   ExecutionContext &SF = ECStack.back();
1193   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1194   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1195   GenericValue Dest;
1196   const Type *Ty = I.getType();
1197 
1198   if (Ty->isVectorTy()) {
1199     size_t src1Size = Src1.AggregateVal.size();
1200     assert(src1Size == Src2.AggregateVal.size());
1201     for (unsigned i = 0; i < src1Size; i++) {
1202       GenericValue Result;
1203       uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
1204       llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
1205       Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
1206       Dest.AggregateVal.push_back(Result);
1207     }
1208   } else {
1209     // scalar
1210     uint64_t shiftAmount = Src2.IntVal.getZExtValue();
1211     llvm::APInt valueToShift = Src1.IntVal;
1212     Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
1213   }
1214 
1215   SetValue(&I, Dest, SF);
1216 }
1217 
executeTruncInst(Value * SrcVal,Type * DstTy,ExecutionContext & SF)1218 GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
1219                                            ExecutionContext &SF) {
1220   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1221   Type *SrcTy = SrcVal->getType();
1222   if (SrcTy->isVectorTy()) {
1223     Type *DstVecTy = DstTy->getScalarType();
1224     unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1225     unsigned NumElts = Src.AggregateVal.size();
1226     // the sizes of src and dst vectors must be equal
1227     Dest.AggregateVal.resize(NumElts);
1228     for (unsigned i = 0; i < NumElts; i++)
1229       Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
1230   } else {
1231     IntegerType *DITy = cast<IntegerType>(DstTy);
1232     unsigned DBitWidth = DITy->getBitWidth();
1233     Dest.IntVal = Src.IntVal.trunc(DBitWidth);
1234   }
1235   return Dest;
1236 }
1237 
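// SExt: widen an integer (or each element of an integer vector) to the
// destination bit width, replicating the sign bit.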
1238 GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
1239                                           ExecutionContext &SF) {
1240   const Type *SrcTy = SrcVal->getType();
1241   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1242   if (SrcTy->isVectorTy()) {
1243     const Type *DstVecTy = DstTy->getScalarType();
1244     unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1245     unsigned size = Src.AggregateVal.size();
1246     // the sizes of src and dst vectors must be equal.
1247     Dest.AggregateVal.resize(size);
1248     for (unsigned i = 0; i < size; i++)
1249       Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
1250   } else {
1251     const IntegerType *DITy = cast<IntegerType>(DstTy);
1252     unsigned DBitWidth = DITy->getBitWidth();
1253     Dest.IntVal = Src.IntVal.sext(DBitWidth);
1254   }
1255   return Dest;
1256 }
1257 
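// ZExt: widen an integer (or each element of an integer vector) to the
// destination bit width, filling the new high bits with zeros.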
1258 GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
1259                                           ExecutionContext &SF) {
1260   const Type *SrcTy = SrcVal->getType();
1261   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1262   if (SrcTy->isVectorTy()) {
1263     const Type *DstVecTy = DstTy->getScalarType();
1264     unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1265 
1266     unsigned size = Src.AggregateVal.size();
1267     // the sizes of src and dst vectors must be equal.
1268     Dest.AggregateVal.resize(size);
1269     for (unsigned i = 0; i < size; i++)
1270       Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
1271   } else {
1272     const IntegerType *DITy = cast<IntegerType>(DstTy);
1273     unsigned DBitWidth = DITy->getBitWidth();
1274     Dest.IntVal = Src.IntVal.zext(DBitWidth);
1275   }
1276   return Dest;
1277 }
1278 
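// FPTrunc: only double -> float is supported here, either as a scalar or
// element-wise over a vector.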
1279 GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
1280                                              ExecutionContext &SF) {
1281   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1282 
1283   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1284     assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
1285            DstTy->getScalarType()->isFloatTy() &&
1286            "Invalid FPTrunc instruction");
1287 
1288     unsigned size = Src.AggregateVal.size();
1289     // the sizes of src and dst vectors must be equal.
1290     Dest.AggregateVal.resize(size);
1291     for (unsigned i = 0; i < size; i++)
1292       Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
1293   } else {
1294     assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
1295            "Invalid FPTrunc instruction");
1296     Dest.FloatVal = (float)Src.DoubleVal;
1297   }
1298 
1299   return Dest;
1300 }
1301 
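// FPExt: only float -> double is supported here, either as a scalar or
// element-wise over a vector.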
1302 GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
1303                                            ExecutionContext &SF) {
1304   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1305 
1306   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1307     assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
1308            DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
1309 
1310     unsigned size = Src.AggregateVal.size();
1311     // the sizes of src and dst vectors must be equal.
1312     Dest.AggregateVal.resize(size);
1313     for (unsigned i = 0; i < size; i++)
1314       Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
1315   } else {
1316     assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
1317            "Invalid FPExt instruction");
1318     Dest.DoubleVal = (double)Src.FloatVal;
1319   }
1320 
1321   return Dest;
1322 }
1323 
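// FPToUI: convert a float or double (scalar or per vector element) to an
// unsigned integer of the destination bit width.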
1324 GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
1325                                             ExecutionContext &SF) {
1326   Type *SrcTy = SrcVal->getType();
1327   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1328 
1329   if (SrcTy->getTypeID() == Type::VectorTyID) {
1330     const Type *DstVecTy = DstTy->getScalarType();
1331     const Type *SrcVecTy = SrcTy->getScalarType();
1332     uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1333     unsigned size = Src.AggregateVal.size();
1334     // the sizes of src and dst vectors must be equal.
1335     Dest.AggregateVal.resize(size);
1336 
1337     if (SrcVecTy->getTypeID() == Type::FloatTyID) {
1338       assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
1339       for (unsigned i = 0; i < size; i++)
1340         Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
1341             Src.AggregateVal[i].FloatVal, DBitWidth);
1342     } else {
1343       for (unsigned i = 0; i < size; i++)
1344         Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
1345             Src.AggregateVal[i].DoubleVal, DBitWidth);
1346     }
1347   } else {
1348     // scalar
1349     uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1350     assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
1351 
1352     if (SrcTy->getTypeID() == Type::FloatTyID)
1353       Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1354     else {
1355       Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1356     }
1357   }
1358 
1359   return Dest;
1360 }
1361 
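// FPToSI: convert a float or double (scalar or per vector element) to a
// signed integer of the destination bit width.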
1362 GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
1363                                             ExecutionContext &SF) {
1364   Type *SrcTy = SrcVal->getType();
1365   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1366 
1367   if (SrcTy->getTypeID() == Type::VectorTyID) {
1368     const Type *DstVecTy = DstTy->getScalarType();
1369     const Type *SrcVecTy = SrcTy->getScalarType();
1370     uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1371     unsigned size = Src.AggregateVal.size();
1372     // the sizes of src and dst vectors must be equal
1373     Dest.AggregateVal.resize(size);
1374 
1375     if (SrcVecTy->getTypeID() == Type::FloatTyID) {
1376       assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
1377       for (unsigned i = 0; i < size; i++)
1378         Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
1379             Src.AggregateVal[i].FloatVal, DBitWidth);
1380     } else {
1381       for (unsigned i = 0; i < size; i++)
1382         Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
1383             Src.AggregateVal[i].DoubleVal, DBitWidth);
1384     }
1385   } else {
1386     // scalar
1387     unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1388     assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
1389 
1390     if (SrcTy->getTypeID() == Type::FloatTyID)
1391       Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1392     else {
1393       Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1394     }
1395   }
1396   return Dest;
1397 }
1398 
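// UIToFP: convert an unsigned integer (scalar or per vector element) to the
// destination float or double type.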
1399 GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
1400                                             ExecutionContext &SF) {
1401   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1402 
1403   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1404     const Type *DstVecTy = DstTy->getScalarType();
1405     unsigned size = Src.AggregateVal.size();
1406     // the sizes of src and dst vectors must be equal
1407     Dest.AggregateVal.resize(size);
1408 
1409     if (DstVecTy->getTypeID() == Type::FloatTyID) {
1410       assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
1411       for (unsigned i = 0; i < size; i++)
1412         Dest.AggregateVal[i].FloatVal =
1413             APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
1414     } else {
1415       for (unsigned i = 0; i < size; i++)
1416         Dest.AggregateVal[i].DoubleVal =
1417             APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
1418     }
1419   } else {
1420     // scalar
1421     assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
1422     if (DstTy->getTypeID() == Type::FloatTyID)
1423       Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
1424     else {
1425       Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
1426     }
1427   }
1428   return Dest;
1429 }
1430 
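// SIToFP: like UIToFP above, but the source integer is interpreted as signed.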
1431 GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
1432                                             ExecutionContext &SF) {
1433   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1434 
1435   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1436     const Type *DstVecTy = DstTy->getScalarType();
1437     unsigned size = Src.AggregateVal.size();
1438     // the sizes of src and dst vectors must be equal
1439     Dest.AggregateVal.resize(size);
1440 
1441     if (DstVecTy->getTypeID() == Type::FloatTyID) {
1442       assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
1443       for (unsigned i = 0; i < size; i++)
1444         Dest.AggregateVal[i].FloatVal =
1445             APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
1446     } else {
1447       for (unsigned i = 0; i < size; i++)
1448         Dest.AggregateVal[i].DoubleVal =
1449             APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
1450     }
1451   } else {
1452     // scalar
1453     assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
1454 
1455     if (DstTy->getTypeID() == Type::FloatTyID)
1456       Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
1457     else {
1458       Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
1459     }
1460   }
1461 
1462   return Dest;
1463 }
1464 
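// PtrToInt: reinterpret the host pointer value as an integer of the
// destination bit width.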
1465 GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
1466                                               ExecutionContext &SF) {
1467   uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1468   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1469   assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
1470 
1471   Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
1472   return Dest;
1473 }
1474 
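// IntToPtr: resize the integer to the target's pointer width, then
// reinterpret it as a host pointer.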
1475 GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
1476                                               ExecutionContext &SF) {
1477   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1478   assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
1479 
1480   uint32_t PtrSize = TD.getPointerSizeInBits();
1481   if (PtrSize != Src.IntVal.getBitWidth())
1482     Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
1483 
1484   Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
1485   return Dest;
1486 }
1487 
1488 GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
1489                                              ExecutionContext &SF) {
1490 
1491   // This instruction supports bitwise conversion of vectors to integers and
1492   // to vectors of other types (as long as they have the same size)
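  // (e.g. bitcast <2 x i64> to <4 x i32>, or bitcast <4 x i32> to i128).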
1493   Type *SrcTy = SrcVal->getType();
1494   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1495 
1496   if ((SrcTy->getTypeID() == Type::VectorTyID) ||
1497       (DstTy->getTypeID() == Type::VectorTyID)) {
1498     // vector src bitcast to vector dst or vector src bitcast to scalar dst or
1499     // scalar src bitcast to vector dst
1500     bool isLittleEndian = TD.isLittleEndian();
1501     GenericValue TempDst, TempSrc, SrcVec;
1502     const Type *SrcElemTy;
1503     const Type *DstElemTy;
1504     unsigned SrcBitSize;
1505     unsigned DstBitSize;
1506     unsigned SrcNum;
1507     unsigned DstNum;
1508 
1509     if (SrcTy->getTypeID() == Type::VectorTyID) {
1510       SrcElemTy = SrcTy->getScalarType();
1511       SrcBitSize = SrcTy->getScalarSizeInBits();
1512       SrcNum = Src.AggregateVal.size();
1513       SrcVec = Src;
1514     } else {
1515       // if src is scalar value, make it vector <1 x type>
1516       SrcElemTy = SrcTy;
1517       SrcBitSize = SrcTy->getPrimitiveSizeInBits();
1518       SrcNum = 1;
1519       SrcVec.AggregateVal.push_back(Src);
1520     }
1521 
1522     if (DstTy->getTypeID() == Type::VectorTyID) {
1523       DstElemTy = DstTy->getScalarType();
1524       DstBitSize = DstTy->getScalarSizeInBits();
1525       DstNum = (SrcNum * SrcBitSize) / DstBitSize;
1526     } else {
1527       DstElemTy = DstTy;
1528       DstBitSize = DstTy->getPrimitiveSizeInBits();
1529       DstNum = 1;
1530     }
1531 
1532     if (SrcNum * SrcBitSize != DstNum * DstBitSize)
1533       llvm_unreachable("Invalid BitCast");
1534 
1535     // If src is floating point, cast to integer first.
1536     TempSrc.AggregateVal.resize(SrcNum);
1537     if (SrcElemTy->isFloatTy()) {
1538       for (unsigned i = 0; i < SrcNum; i++)
1539         TempSrc.AggregateVal[i].IntVal =
1540             APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
1541 
1542     } else if (SrcElemTy->isDoubleTy()) {
1543       for (unsigned i = 0; i < SrcNum; i++)
1544         TempSrc.AggregateVal[i].IntVal =
1545             APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
1546     } else if (SrcElemTy->isIntegerTy()) {
1547       for (unsigned i = 0; i < SrcNum; i++)
1548         TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
1549     } else {
1550       // Pointers are not allowed as the element type of a vector.
1551       llvm_unreachable("Invalid Bitcast");
1552     }
1553 
1554     // Now TempSrc is a vector of integer values.
1555     if (DstNum < SrcNum) {
1556       // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
1557       unsigned Ratio = SrcNum / DstNum;
1558       unsigned SrcElt = 0;
1559       for (unsigned i = 0; i < DstNum; i++) {
1560         GenericValue Elt;
1561         Elt.IntVal = 0;
1562         Elt.IntVal = Elt.IntVal.zext(DstBitSize);
1563         unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
1564         for (unsigned j = 0; j < Ratio; j++) {
1565           APInt Tmp;
1566           Tmp = Tmp.zext(SrcBitSize);
1567           Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
1568           Tmp = Tmp.zext(DstBitSize);
1569           Tmp = Tmp.shl(ShiftAmt);
1570           ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
1571           Elt.IntVal |= Tmp;
1572         }
1573         TempDst.AggregateVal.push_back(Elt);
1574       }
1575     } else {
1576       // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
1577       unsigned Ratio = DstNum / SrcNum;
1578       for (unsigned i = 0; i < SrcNum; i++) {
1579         unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
1580         for (unsigned j = 0; j < Ratio; j++) {
1581           GenericValue Elt;
1582           Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
1583           Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
1584           Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
1585           // DstBitSize could equal SrcBitSize, so only truncate when it is smaller.
1586           if (DstBitSize < SrcBitSize)
1587             Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
1588           ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
1589           TempDst.AggregateVal.push_back(Elt);
1590         }
1591       }
1592     }
1593 
1594     // convert result from integer to specified type
1595     if (DstTy->getTypeID() == Type::VectorTyID) {
1596       if (DstElemTy->isDoubleTy()) {
1597         Dest.AggregateVal.resize(DstNum);
1598         for (unsigned i = 0; i < DstNum; i++)
1599           Dest.AggregateVal[i].DoubleVal =
1600               TempDst.AggregateVal[i].IntVal.bitsToDouble();
1601       } else if (DstElemTy->isFloatTy()) {
1602         Dest.AggregateVal.resize(DstNum);
1603         for (unsigned i = 0; i < DstNum; i++)
1604           Dest.AggregateVal[i].FloatVal =
1605               TempDst.AggregateVal[i].IntVal.bitsToFloat();
1606       } else {
1607         Dest = TempDst;
1608       }
1609     } else {
1610       if (DstElemTy->isDoubleTy())
1611         Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
1612       else if (DstElemTy->isFloatTy()) {
1613         Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
1614       } else {
1615         Dest.IntVal = TempDst.AggregateVal[0].IntVal;
1616       }
1617     }
1618   } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
1619            //     (DstTy->getTypeID() == Type::VectorTyID))
1620 
1621     // scalar src bitcast to scalar dst
1622     if (DstTy->isPointerTy()) {
1623       assert(SrcTy->isPointerTy() && "Invalid BitCast");
1624       Dest.PointerVal = Src.PointerVal;
1625     } else if (DstTy->isIntegerTy()) {
1626       if (SrcTy->isFloatTy())
1627         Dest.IntVal = APInt::floatToBits(Src.FloatVal);
1628       else if (SrcTy->isDoubleTy()) {
1629         Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
1630       } else if (SrcTy->isIntegerTy()) {
1631         Dest.IntVal = Src.IntVal;
1632       } else {
1633         llvm_unreachable("Invalid BitCast");
1634       }
1635     } else if (DstTy->isFloatTy()) {
1636       if (SrcTy->isIntegerTy())
1637         Dest.FloatVal = Src.IntVal.bitsToFloat();
1638       else {
1639         Dest.FloatVal = Src.FloatVal;
1640       }
1641     } else if (DstTy->isDoubleTy()) {
1642       if (SrcTy->isIntegerTy())
1643         Dest.DoubleVal = Src.IntVal.bitsToDouble();
1644       else {
1645         Dest.DoubleVal = Src.DoubleVal;
1646       }
1647     } else {
1648       llvm_unreachable("Invalid Bitcast");
1649     }
1650   }
1651 
1652   return Dest;
1653 }
1654 
1655 void Interpreter::visitTruncInst(TruncInst &I) {
1656   ExecutionContext &SF = ECStack.back();
1657   SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
1658 }
1659 
1660 void Interpreter::visitSExtInst(SExtInst &I) {
1661   ExecutionContext &SF = ECStack.back();
1662   SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
1663 }
1664 
1665 void Interpreter::visitZExtInst(ZExtInst &I) {
1666   ExecutionContext &SF = ECStack.back();
1667   SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
1668 }
1669 
1670 void Interpreter::visitFPTruncInst(FPTruncInst &I) {
1671   ExecutionContext &SF = ECStack.back();
1672   SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
1673 }
1674 
1675 void Interpreter::visitFPExtInst(FPExtInst &I) {
1676   ExecutionContext &SF = ECStack.back();
1677   SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
1678 }
1679 
1680 void Interpreter::visitUIToFPInst(UIToFPInst &I) {
1681   ExecutionContext &SF = ECStack.back();
1682   SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1683 }
1684 
1685 void Interpreter::visitSIToFPInst(SIToFPInst &I) {
1686   ExecutionContext &SF = ECStack.back();
1687   SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1688 }
1689 
1690 void Interpreter::visitFPToUIInst(FPToUIInst &I) {
1691   ExecutionContext &SF = ECStack.back();
1692   SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
1693 }
1694 
1695 void Interpreter::visitFPToSIInst(FPToSIInst &I) {
1696   ExecutionContext &SF = ECStack.back();
1697   SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
1698 }
1699 
1700 void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
1701   ExecutionContext &SF = ECStack.back();
1702   SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
1703 }
1704 
1705 void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
1706   ExecutionContext &SF = ECStack.back();
1707   SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
1708 }
1709 
1710 void Interpreter::visitBitCastInst(BitCastInst &I) {
1711   ExecutionContext &SF = ECStack.back();
1712   SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
1713 }
1714 
1715 #define IMPLEMENT_VAARG(TY) \
1716    case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
1717 
1718 void Interpreter::visitVAArgInst(VAArgInst &I) {
1719   ExecutionContext &SF = ECStack.back();
1720 
1721   // Get the incoming valist parameter.  LLI treats the valist as a
1722   // (ec-stack-depth var-arg-index) pair.
1723   GenericValue VAList = getOperandValue(I.getOperand(0), SF);
1724   GenericValue Dest;
1725   GenericValue Src = ECStack[VAList.UIntPairVal.first]
1726                       .VarArgs[VAList.UIntPairVal.second];
1727   Type *Ty = I.getType();
1728   switch (Ty->getTypeID()) {
1729   case Type::IntegerTyID:
1730     Dest.IntVal = Src.IntVal;
1731     break;
1732   IMPLEMENT_VAARG(Pointer);
1733   IMPLEMENT_VAARG(Float);
1734   IMPLEMENT_VAARG(Double);
1735   default:
1736     dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
1737     llvm_unreachable(nullptr);
1738   }
1739 
1740   // Set the Value of this Instruction.
1741   SetValue(&I, Dest, SF);
1742 
1743   // Move the pointer to the next vararg.
1744   ++VAList.UIntPairVal.second;
1745 }
1746 
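// extractelement: read the vector lane selected by the index in operand 1.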
1747 void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
1748   ExecutionContext &SF = ECStack.back();
1749   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1750   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1751   GenericValue Dest;
1752 
1753   Type *Ty = I.getType();
1754   const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
1755 
1756   if(Src1.AggregateVal.size() > indx) {
1757     switch (Ty->getTypeID()) {
1758     default:
1759       dbgs() << "Unhandled destination type for extractelement instruction: "
1760       << *Ty << "\n";
1761       llvm_unreachable(nullptr);
1762       break;
1763     case Type::IntegerTyID:
1764       Dest.IntVal = Src1.AggregateVal[indx].IntVal;
1765       break;
1766     case Type::FloatTyID:
1767       Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
1768       break;
1769     case Type::DoubleTyID:
1770       Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
1771       break;
1772     }
1773   } else {
1774     dbgs() << "Invalid index in extractelement instruction\n";
1775   }
1776 
1777   SetValue(&I, Dest, SF);
1778 }
1779 
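// insertelement: copy the source vector and overwrite the lane selected by
// operand 2 with the scalar value in operand 1.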
1780 void Interpreter::visitInsertElementInst(InsertElementInst &I) {
1781   ExecutionContext &SF = ECStack.back();
1782   Type *Ty = I.getType();
1783 
1784   if(!(Ty->isVectorTy()) )
1785     llvm_unreachable("Unhandled dest type for insertelement instruction");
1786 
1787   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1788   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1789   GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
1790   GenericValue Dest;
1791 
1792   Type *TyContained = Ty->getContainedType(0);
1793 
1794   const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
1795   Dest.AggregateVal = Src1.AggregateVal;
1796 
1797   if(Src1.AggregateVal.size() <= indx)
1798       llvm_unreachable("Invalid index in insertelement instruction");
1799   switch (TyContained->getTypeID()) {
1800     default:
1801       llvm_unreachable("Unhandled dest type for insertelement instruction");
1802     case Type::IntegerTyID:
1803       Dest.AggregateVal[indx].IntVal = Src2.IntVal;
1804       break;
1805     case Type::FloatTyID:
1806       Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
1807       break;
1808     case Type::DoubleTyID:
1809       Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
1810       break;
1811   }
1812   SetValue(&I, Dest, SF);
1813 }
1814 
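// shufflevector: build the result by selecting lanes from the concatenation of
// the two source vectors, as directed by the constant mask in operand 2.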
1815 void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I) {
1816   ExecutionContext &SF = ECStack.back();
1817 
1818   Type *Ty = I.getType();
1819   if(!(Ty->isVectorTy()))
1820     llvm_unreachable("Unhandled dest type for shufflevector instruction");
1821 
1822   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1823   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1824   GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
1825   GenericValue Dest;
1826 
1827   // There is no need to check that src1 and src2 have the same type: verified
1828   // IR guarantees that both vector operands of a shufflevector instruction
1829   // have identical types.
1830 
1831   Type *TyContained = Ty->getContainedType(0);
1832   unsigned src1Size = (unsigned)Src1.AggregateVal.size();
1833   unsigned src2Size = (unsigned)Src2.AggregateVal.size();
1834   unsigned src3Size = (unsigned)Src3.AggregateVal.size();
1835 
1836   Dest.AggregateVal.resize(src3Size);
1837 
1838   switch (TyContained->getTypeID()) {
1839     default:
1840       llvm_unreachable("Unhandled dest type for shufflevector instruction");
1841       break;
1842     case Type::IntegerTyID:
1843       for( unsigned i=0; i<src3Size; i++) {
1844         unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
1845         if(j < src1Size)
1846           Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
1847         else if(j < src1Size + src2Size)
1848           Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
1849         else
1850           // A mask element may not index past the combined length of the
1851           // first and second operands; the verifier should reject IR such as
1852           // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
1853           //                      <2 x i32> < i32 0, i32 5 >,
1854           // where i32 5 is out of range, but keep this extra check here:
1855           llvm_unreachable("Invalid mask in shufflevector instruction");
1856       }
1857       break;
1858     case Type::FloatTyID:
1859       for( unsigned i=0; i<src3Size; i++) {
1860         unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
1861         if(j < src1Size)
1862           Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
1863         else if(j < src1Size + src2Size)
1864           Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
1865         else
1866           llvm_unreachable("Invalid mask in shufflevector instruction");
1867         }
1868       break;
1869     case Type::DoubleTyID:
1870       for( unsigned i=0; i<src3Size; i++) {
1871         unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
1872         if(j < src1Size)
1873           Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
1874         else if(j < src1Size + src2Size)
1875           Dest.AggregateVal[i].DoubleVal =
1876             Src2.AggregateVal[j-src1Size].DoubleVal;
1877         else
1878           llvm_unreachable("Invalid mask in shufflevector instruction");
1879       }
1880       break;
1881   }
1882   SetValue(&I, Dest, SF);
1883 }
1884 
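// extractvalue: descend into the aggregate along the index list and copy out
// the addressed member.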
1885 void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
1886   ExecutionContext &SF = ECStack.back();
1887   Value *Agg = I.getAggregateOperand();
1888   GenericValue Dest;
1889   GenericValue Src = getOperandValue(Agg, SF);
1890 
1891   ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
1892   unsigned Num = I.getNumIndices();
1893   GenericValue *pSrc = &Src;
1894 
1895   for (unsigned i = 0 ; i < Num; ++i) {
1896     pSrc = &pSrc->AggregateVal[*IdxBegin];
1897     ++IdxBegin;
1898   }
1899 
1900   Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
1901   switch (IndexedType->getTypeID()) {
1902     default:
1903       llvm_unreachable("Unhandled dest type for extractvalue instruction");
1904     break;
1905     case Type::IntegerTyID:
1906       Dest.IntVal = pSrc->IntVal;
1907     break;
1908     case Type::FloatTyID:
1909       Dest.FloatVal = pSrc->FloatVal;
1910     break;
1911     case Type::DoubleTyID:
1912       Dest.DoubleVal = pSrc->DoubleVal;
1913     break;
1914     case Type::ArrayTyID:
1915     case Type::StructTyID:
1916     case Type::VectorTyID:
1917       Dest.AggregateVal = pSrc->AggregateVal;
1918     break;
1919     case Type::PointerTyID:
1920       Dest.PointerVal = pSrc->PointerVal;
1921     break;
1922   }
1923 
1924   SetValue(&I, Dest, SF);
1925 }
1926 
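// insertvalue: copy the whole aggregate, then overwrite the member addressed
// by the index list with the second operand.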
1927 void Interpreter::visitInsertValueInst(InsertValueInst &I) {
1928 
1929   ExecutionContext &SF = ECStack.back();
1930   Value *Agg = I.getAggregateOperand();
1931 
1932   GenericValue Src1 = getOperandValue(Agg, SF);
1933   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1934   GenericValue Dest = Src1; // Dest is a slightly changed Src1
1935 
1936   ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
1937   unsigned Num = I.getNumIndices();
1938 
1939   GenericValue *pDest = &Dest;
1940   for (unsigned i = 0 ; i < Num; ++i) {
1941     pDest = &pDest->AggregateVal[*IdxBegin];
1942     ++IdxBegin;
1943   }
1944   // pDest points to the target value in the Dest now
1945 
1946   Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
1947 
1948   switch (IndexedType->getTypeID()) {
1949     default:
1950       llvm_unreachable("Unhandled dest type for insertvalue instruction");
1951     break;
1952     case Type::IntegerTyID:
1953       pDest->IntVal = Src2.IntVal;
1954     break;
1955     case Type::FloatTyID:
1956       pDest->FloatVal = Src2.FloatVal;
1957     break;
1958     case Type::DoubleTyID:
1959       pDest->DoubleVal = Src2.DoubleVal;
1960     break;
1961     case Type::ArrayTyID:
1962     case Type::StructTyID:
1963     case Type::VectorTyID:
1964       pDest->AggregateVal = Src2.AggregateVal;
1965     break;
1966     case Type::PointerTyID:
1967       pDest->PointerVal = Src2.PointerVal;
1968     break;
1969   }
1970 
1971   SetValue(&I, Dest, SF);
1972 }
1973 
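// Evaluate a constant expression on demand by dispatching to the same
// execute* helpers that handle the corresponding instructions.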
1974 GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
1975                                                 ExecutionContext &SF) {
1976   switch (CE->getOpcode()) {
1977   case Instruction::Trunc:
1978       return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
1979   case Instruction::ZExt:
1980       return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
1981   case Instruction::SExt:
1982       return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
1983   case Instruction::FPTrunc:
1984       return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
1985   case Instruction::FPExt:
1986       return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
1987   case Instruction::UIToFP:
1988       return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
1989   case Instruction::SIToFP:
1990       return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
1991   case Instruction::FPToUI:
1992       return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
1993   case Instruction::FPToSI:
1994       return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
1995   case Instruction::PtrToInt:
1996       return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
1997   case Instruction::IntToPtr:
1998       return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
1999   case Instruction::BitCast:
2000       return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
2001   case Instruction::GetElementPtr:
2002     return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
2003                                gep_type_end(CE), SF);
2004   case Instruction::FCmp:
2005   case Instruction::ICmp:
2006     return executeCmpInst(CE->getPredicate(),
2007                           getOperandValue(CE->getOperand(0), SF),
2008                           getOperandValue(CE->getOperand(1), SF),
2009                           CE->getOperand(0)->getType());
2010   case Instruction::Select:
2011     return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
2012                              getOperandValue(CE->getOperand(1), SF),
2013                              getOperandValue(CE->getOperand(2), SF),
2014                              CE->getOperand(0)->getType());
2015   default :
2016     break;
2017   }
2018 
2019   // The cases below here require a GenericValue parameter for the result
2020   // so we initialize one, compute it and then return it.
2021   GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
2022   GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
2023   GenericValue Dest;
2024   Type * Ty = CE->getOperand(0)->getType();
2025   switch (CE->getOpcode()) {
2026   case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
2027   case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
2028   case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
2029   case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
2030   case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
2031   case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
2032   case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
2033   case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
2034   case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
2035   case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
2036   case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
2037   case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
2038   case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
2039   case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
2040   case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
2041   case Instruction::Shl:
2042     Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
2043     break;
2044   case Instruction::LShr:
2045     Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
2046     break;
2047   case Instruction::AShr:
2048     Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
2049     break;
2050   default:
2051     dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
2052     llvm_unreachable("Unhandled ConstantExpr");
2053   }
2054   return Dest;
2055 }
2056 
2057 GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
2058   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
2059     return getConstantExprValue(CE, SF);
2060   } else if (Constant *CPV = dyn_cast<Constant>(V)) {
2061     return getConstantValue(CPV);
2062   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2063     return PTOGV(getPointerToGlobal(GV));
2064   } else {
2065     return SF.Values[V];
2066   }
2067 }
2068 
2069 //===----------------------------------------------------------------------===//
2070 //                        Dispatch and Execution Code
2071 //===----------------------------------------------------------------------===//
2072 
2073 //===----------------------------------------------------------------------===//
2074 // callFunction - Execute the specified function...
2075 //
2076 void Interpreter::callFunction(Function *F,
2077                                const std::vector<GenericValue> &ArgVals) {
2078   assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
2079           ECStack.back().Caller.arg_size() == ArgVals.size()) &&
2080          "Incorrect number of arguments passed into function call!");
2081   // Make a new stack frame... and fill it in.
2082   ECStack.push_back(ExecutionContext());
2083   ExecutionContext &StackFrame = ECStack.back();
2084   StackFrame.CurFunction = F;
2085 
2086   // Special handling for external functions.
2087   if (F->isDeclaration()) {
2088     GenericValue Result = callExternalFunction (F, ArgVals);
2089     // Simulate a 'ret' instruction of the appropriate type.
2090     popStackAndReturnValueToCaller (F->getReturnType (), Result);
2091     return;
2092   }
2093 
2094   // Get pointers to first LLVM BB & Instruction in function.
2095   StackFrame.CurBB     = F->begin();
2096   StackFrame.CurInst   = StackFrame.CurBB->begin();
2097 
2098   // Run through the function arguments and initialize their values...
2099   assert((ArgVals.size() == F->arg_size() ||
2100          (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
2101          "Invalid number of values passed to function invocation!");
2102 
2103   // Handle non-varargs arguments...
2104   unsigned i = 0;
2105   for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
2106        AI != E; ++AI, ++i)
2107     SetValue(AI, ArgVals[i], StackFrame);
2108 
2109   // Handle varargs arguments...
2110   StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
2111 }
2112 
2113 
2114 void Interpreter::run() {
2115   while (!ECStack.empty()) {
2116     // Interpret a single instruction & increment the "PC".
2117     ExecutionContext &SF = ECStack.back();  // Current stack frame
2118     Instruction &I = *SF.CurInst++;         // Increment before execute
2119 
2120     // Track the number of dynamic instructions executed.
2121     ++NumDynamicInsts;
2122 
2123     DEBUG(dbgs() << "About to interpret: " << I);
2124     visit(I);   // Dispatch to one of the visit* methods...
2125 #if 0
2126     // This is not safe, as visiting the instruction could lower it and free I.
2127 DEBUG(
2128     if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
2129         I.getType() != Type::VoidTy) {
2130       dbgs() << "  --> ";
2131       const GenericValue &Val = SF.Values[&I];
2132       switch (I.getType()->getTypeID()) {
2133       default: llvm_unreachable("Invalid GenericValue Type");
2134       case Type::VoidTyID:    dbgs() << "void"; break;
2135       case Type::FloatTyID:   dbgs() << "float " << Val.FloatVal; break;
2136       case Type::DoubleTyID:  dbgs() << "double " << Val.DoubleVal; break;
2137       case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
2138         break;
2139       case Type::IntegerTyID:
2140         dbgs() << "i" << Val.IntVal.getBitWidth() << " "
2141                << Val.IntVal.toStringUnsigned(10)
2142                << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
2143         break;
2144       }
2145     });
2146 #endif
2147   }
2148 }
2149