//===-- sanitizer_stacktrace_test.cc --------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "gtest/gtest.h"

namespace __sanitizer {

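// Test fixture that builds a small fake call stack of (fp, retaddr) pairs in
// SetUp() and runs the fast, frame-pointer-based unwinder over it, so the
// unwinder can be exercised without depending on the real stack layout.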
class FastUnwindTest : public ::testing::Test {
 protected:
  virtual void SetUp();
  virtual void TearDown();
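
  // Runs the fast (frame-pointer-based) unwinder over the fake stack.
  // Returns false when StackTrace::WillUseFastUnwind(true) says the fast
  // unwinder would not be used, so the caller can skip its checks.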
  bool TryFastUnwind(uptr max_depth) {
    if (!StackTrace::WillUseFastUnwind(true))
      return false;
    trace.Unwind(max_depth, start_pc, (uptr)&fake_stack[0], 0, fake_top,
                 fake_bottom, true);
    return true;
  }

  void *mapping;
  uhwptr *fake_stack;
  const uptr fake_stack_size = 10;
  uhwptr start_pc;
  uhwptr fake_top;
  uhwptr fake_bottom;
  BufferedStackTrace trace;
};

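// Synthesizes a distinct, non-zero fake return address for stack slot |idx|.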
static uptr PC(uptr idx) {
  return (1<<20) + idx;
}

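// Lays out the fake stack in the upper page of a two-page mapping. The lower
// page is made inaccessible so tests such as FPBelowPrevFP can point a frame
// pointer at memory that is unreadable but still inside the stack bounds
// passed to the unwinder.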
void FastUnwindTest::SetUp() {
  size_t ps = GetPageSize();
  mapping = MmapOrDie(2 * ps, "FastUnwindTest");
  MprotectNoAccess((uptr)mapping, ps);

  // Unwinder may peek 1 word down from the starting FP.
  fake_stack = (uhwptr *)((uptr)mapping + ps + sizeof(uhwptr));

  // Fill an array of pointers with fake fp+retaddr pairs.  Frame pointers have
  // even indices.
  for (uptr i = 0; i + 1 < fake_stack_size; i += 2) {
    fake_stack[i] = (uptr)&fake_stack[i+2];  // fp
    fake_stack[i+1] = PC(i + 1); // retaddr
  }
  // Make the last fp point back to fake_stack[0] to terminate the stack trace.
  fake_stack[RoundDownTo(fake_stack_size - 1, 2)] = (uhwptr)&fake_stack[0];

  // Top is two slots past the end because FastUnwindStack subtracts two.
  fake_top = (uhwptr)&fake_stack[fake_stack_size + 2];
  // Bottom is below the start of the fake stack, at the base of the mapping,
  // because FastUnwindStack uses >.
  fake_bottom = (uhwptr)mapping;
  start_pc = PC(0);
}

void FastUnwindTest::TearDown() {
  size_t ps = GetPageSize();
  UnmapOrDie(mapping, 2 * ps);
}

TEST_F(FastUnwindTest, Basic) {
  if (!TryFastUnwind(kStackTraceMax))
    return;
  // Should get all on-stack retaddrs and start_pc.
  EXPECT_EQ(6U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  for (uptr i = 1; i <= 5; i++) {
    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
  }
}

// From: https://github.com/google/sanitizers/issues/162
TEST_F(FastUnwindTest, FramePointerLoop) {
  // Make one fp point to itself.
  fake_stack[4] = (uhwptr)&fake_stack[4];
  if (!TryFastUnwind(kStackTraceMax))
    return;
  // Should get start_pc plus the retaddrs in slots 1, 3, and 5; the
  // self-pointing fp in slot 4 stops further unwinding.
  EXPECT_EQ(4U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  for (uptr i = 1; i <= 3; i++) {
    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
  }
}

TEST_F(FastUnwindTest, MisalignedFramePointer) {
  // Make one fp misaligned.
  fake_stack[4] += 3;
  if (!TryFastUnwind(kStackTraceMax))
    return;
  // Should get start_pc plus the retaddrs in slots 1, 3, and 5; the misaligned
  // fp in slot 4 stops further unwinding.
  EXPECT_EQ(4U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  for (uptr i = 1; i < 4U; i++) {
    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
  }
}

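// With max_depth == 1, only start_pc should be reported, and top_frame_bp
// should record the starting frame pointer.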
TEST_F(FastUnwindTest, OneFrameStackTrace) {
  if (!TryFastUnwind(1))
    return;
  EXPECT_EQ(1U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  EXPECT_EQ((uhwptr)&fake_stack[0], trace.top_frame_bp);
}

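// With max_depth == 0, the unwinder should produce an empty trace and leave
// top_frame_bp unset.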
TEST_F(FastUnwindTest, ZeroFramesStackTrace) {
  if (!TryFastUnwind(0))
    return;
  EXPECT_EQ(0U, trace.size);
  EXPECT_EQ(0U, trace.top_frame_bp);
}

TEST_F(FastUnwindTest, FPBelowPrevFP) {
  // The next FP points to unreadable memory inside the stack limits, but below
  // current FP.
  fake_stack[0] = (uhwptr)&fake_stack[-50];
  fake_stack[1] = PC(1);
  if (!TryFastUnwind(3))
    return;
  EXPECT_EQ(2U, trace.size);
  EXPECT_EQ(PC(0), trace.trace[0]);
  EXPECT_EQ(PC(1), trace.trace[1]);
}

TEST_F(FastUnwindTest, CloseToZeroFrame) {
  // Make one pc a NULL pointer.
  fake_stack[5] = 0x0;
  if (!TryFastUnwind(kStackTraceMax))
    return;
  // The stack should be truncated at the NULL pointer (and not include it).
  EXPECT_EQ(3U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  for (uptr i = 1; i < 3U; i++) {
    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
  }
}

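// The slow unwinder should honor max_depth the same way: a depth of 0 yields
// an empty trace, and a depth of 1 yields only the current pc, with
// top_frame_bp recording the current frame pointer.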
TEST(SlowUnwindTest, ShortStackTrace) {
  if (StackTrace::WillUseFastUnwind(false))
    return;
  BufferedStackTrace stack;
  uptr pc = StackTrace::GetCurrentPc();
  uptr bp = GET_CURRENT_FRAME();
  stack.Unwind(0, pc, bp, 0, 0, 0, false);
  EXPECT_EQ(0U, stack.size);
  EXPECT_EQ(0U, stack.top_frame_bp);
  stack.Unwind(1, pc, bp, 0, 0, 0, false);
  EXPECT_EQ(1U, stack.size);
  EXPECT_EQ(pc, stack.trace[0]);
  EXPECT_EQ(bp, stack.top_frame_bp);
}

}  // namespace __sanitizer