// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/sampler.h"

#if V8_OS_POSIX && !V8_OS_CYGWIN

#define USE_SIGNALS

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>

#if !V8_OS_QNX && !V8_OS_NACL
#include <sys/syscall.h>  // NOLINT
#endif

#if V8_OS_MACOSX
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
    !V8_OS_OPENBSD && !V8_OS_NACL
#include <ucontext.h>
#endif

#include <unistd.h>

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
    (defined(__arm__) || defined(__aarch64__)) && \
    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>  // NOLINT
#endif

#elif V8_OS_WIN || V8_OS_CYGWIN

#include "src/base/win32-headers.h"

#endif

#include "src/v8.h"

#include "src/base/platform/platform.h"
#include "src/cpu-profiler-inl.h"
#include "src/flags.h"
#include "src/frames-inl.h"
#include "src/log.h"
#include "src/simulator.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"


#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)

// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See http://code.google.com/p/android/issues/detail?id=34784

#if defined(__arm__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__aarch64__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint64_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
} mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__i386__)
// x86 version for Android.
typedef struct {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };

#elif defined(__x86_64__)
// x64 version for Android.
typedef struct {
  uint64_t gregs[23];
  void* fpregs;
  uint64_t __reserved1[8];
} mcontext_t;

typedef struct ucontext {
  uint64_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif

#endif  // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)


namespace v8 {
namespace internal {

namespace {

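// Remembers the id of the thread being profiled; shared by both the
// signal-based and the Win32 PlatformData implementations below.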
class PlatformDataCommon : public Malloced {
 public:
  PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
  ThreadId profiled_thread_id() { return profiled_thread_id_; }

 protected:
  ~PlatformDataCommon() {}

 private:
  ThreadId profiled_thread_id_;
};

}  // namespace

#if defined(USE_SIGNALS)

class Sampler::PlatformData : public PlatformDataCommon {
 public:
  PlatformData() : vm_tid_(pthread_self()) {}
  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};

#elif V8_OS_WIN || V8_OS_CYGWIN

// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
// on Win32.

class Sampler::PlatformData : public PlatformDataCommon {
 public:
  // Get a handle to the calling thread. This is the thread that we are
  // going to profile. We need to make a copy of the handle because we are
  // going to use it in the sampler thread. Using GetThreadHandle() will
  // not work in this case. We're using OpenThread because DuplicateHandle
  // for some reason doesn't work in Chrome's sandbox.
  PlatformData()
      : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
                                    THREAD_SUSPEND_RESUME |
                                    THREAD_QUERY_INFORMATION,
                                    false,
                                    GetCurrentThreadId())) {}

  ~PlatformData() {
    if (profiled_thread_ != NULL) {
      CloseHandle(profiled_thread_);
      profiled_thread_ = NULL;
    }
  }

  HANDLE profiled_thread() { return profiled_thread_; }

 private:
  HANDLE profiled_thread_;
};
#endif


#if defined(USE_SIMULATOR)
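// When V8 runs on a simulator (e.g. ARM code on an x64 host), the host
// registers are meaningless for profiling, so pc/sp/fp are read from the
// simulator's register file instead of the interrupted thread context.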
class SimulatorHelper {
 public:
  inline bool Init(Sampler* sampler, Isolate* isolate) {
    simulator_ = isolate->thread_local_top()->simulator_;
    // Check if there is an active simulator.
    return simulator_ != NULL;
  }

  inline void FillRegisters(RegisterState* state) {
#if V8_TARGET_ARCH_ARM
    state->pc = reinterpret_cast<Address>(simulator_->get_pc());
    state->sp = reinterpret_cast<Address>(simulator_->get_register(
        Simulator::sp));
    state->fp = reinterpret_cast<Address>(simulator_->get_register(
        Simulator::r11));
#elif V8_TARGET_ARCH_ARM64
    if (simulator_->sp() == 0 || simulator_->fp() == 0) {
      // It is possible that the simulator is interrupted while it is updating
      // the sp or fp register. The ARM64 simulator does this in two steps:
      // first setting it to zero and then setting it to the new value.
      // Bail out if sp/fp doesn't contain the new value yet.
      return;
    }
    state->pc = reinterpret_cast<Address>(simulator_->pc());
    state->sp = reinterpret_cast<Address>(simulator_->sp());
    state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
    state->pc = reinterpret_cast<Address>(simulator_->get_pc());
    state->sp = reinterpret_cast<Address>(simulator_->get_register(
        Simulator::sp));
    state->fp = reinterpret_cast<Address>(simulator_->get_register(
        Simulator::fp));
#elif V8_TARGET_ARCH_MIPS64
    state->pc = reinterpret_cast<Address>(simulator_->get_pc());
    state->sp = reinterpret_cast<Address>(simulator_->get_register(
        Simulator::sp));
    state->fp = reinterpret_cast<Address>(simulator_->get_register(
        Simulator::fp));
#endif
  }

 private:
  Simulator* simulator_;
};
#endif  // USE_SIMULATOR


#if defined(USE_SIGNALS)

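// Installs and removes the process-wide SIGPROF handler. The handler is
// reference-counted so that it stays installed as long as at least one
// sampler is profiling.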
class SignalHandler : public AllStatic {
 public:
  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
  static void TearDown() { delete mutex_; mutex_ = NULL; }

  static void IncreaseSamplerCount() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (++client_count_ == 1) Install();
  }

  static void DecreaseSamplerCount() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (--client_count_ == 0) Restore();
  }

  static bool Installed() {
    return signal_handler_installed_;
  }

 private:
  static void Install() {
#if !V8_OS_NACL
    struct sigaction sa;
    sa.sa_sigaction = &HandleProfilerSignal;
    sigemptyset(&sa.sa_mask);
#if V8_OS_QNX
    sa.sa_flags = SA_SIGINFO;
#else
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
#endif
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
#endif
  }

  static void Restore() {
#if !V8_OS_NACL
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
#endif
  }

#if !V8_OS_NACL
  static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
#endif
  // Protects the process-wide state below.
  static base::Mutex* mutex_;
  static int client_count_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;
};


base::Mutex* SignalHandler::mutex_ = NULL;
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;


// As Native Client does not support signal handling, profiling is disabled.
#if !V8_OS_NACL
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                         void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UnsafeCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
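  // If the Locker API is in use, only take a sample when the current thread
  // actually holds the isolate's lock.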
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL) return;

  RegisterState state;

#if defined(USE_SIMULATOR)
  SimulatorHelper helper;
  if (!helper.Init(sampler, isolate)) return;
  helper.FillRegisters(&state);
  // It is possible that the simulator is interrupted while it is updating
  // the sp or fp register. The ARM64 simulator does this in two steps:
  // first setting it to zero and then setting it to the new value.
  // Bail out if sp/fp doesn't contain the new value yet.
  if (state.sp == 0 || state.fp == 0) return;
#else
  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if !V8_OS_OPENBSD
  mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
  state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
  state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
  state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
  state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
  state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
    (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  // Old GLibc ARM versions used a gregs[] array to access the register
  // values from mcontext_t.
  state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
  state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
  state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
  state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
  state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
  state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif  // defined(__GLIBC__) && !defined(__UCLIBC__) &&
        // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_ARM64
  state.pc = reinterpret_cast<Address>(mcontext.pc);
  state.sp = reinterpret_cast<Address>(mcontext.sp);
  // FP is an alias for x29.
  state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
  state.pc = reinterpret_cast<Address>(mcontext.pc);
  state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
  state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
  state.pc = reinterpret_cast<Address>(mcontext.pc);
  state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
  state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
#if __DARWIN_UNIX03
  state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
  state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
  state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
#else  // !__DARWIN_UNIX03
  state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
  state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
  state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
  state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
  state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
  state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
#else  // !__DARWIN_UNIX03
  state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
  state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
  state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
#endif  // __DARWIN_UNIX03
#endif  // V8_HOST_ARCH_IA32
#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
  state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
  state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
  state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
  state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
  state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
  state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
  state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
  state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
  state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
  state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
  state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
  state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
  state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
  state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
  state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
  state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
  state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_SOLARIS
  state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
  state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
  state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
#elif V8_OS_QNX
#if V8_HOST_ARCH_IA32
  state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
  state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
  state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
#elif V8_HOST_ARCH_ARM
  state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
  state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
  state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif  // V8_HOST_ARCH_*
#endif  // V8_OS_QNX
#endif  // USE_SIMULATOR
  sampler->SampleStack(state);
}
#endif  // V8_OS_NACL

#endif


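// Background thread shared by all isolates in the process. It wakes up every
// interval_ ms and triggers DoSample() on each active sampler that is
// currently profiling.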
class SamplerThread : public base::Thread {
 public:
  static const int kSamplerThreadStackSize = 64 * KB;

  explicit SamplerThread(int interval)
      : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
        interval_(interval) {}

  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
  static void TearDown() { delete mutex_; mutex_ = NULL; }

  static void AddActiveSampler(Sampler* sampler) {
    bool need_to_start = false;
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (instance_ == NULL) {
      // Start a thread that will send the SIGPROF signal to VM threads
      // when CPU profiling is enabled.
      instance_ = new SamplerThread(sampler->interval());
      need_to_start = true;
    }

    DCHECK(sampler->IsActive());
    DCHECK(!instance_->active_samplers_.Contains(sampler));
    DCHECK(instance_->interval_ == sampler->interval());
    instance_->active_samplers_.Add(sampler);

    if (need_to_start) instance_->StartSynchronously();
  }

  static void RemoveActiveSampler(Sampler* sampler) {
    SamplerThread* instance_to_remove = NULL;
    {
      base::LockGuard<base::Mutex> lock_guard(mutex_);

      DCHECK(sampler->IsActive());
      bool removed = instance_->active_samplers_.RemoveElement(sampler);
      DCHECK(removed);
      USE(removed);

      // We cannot delete the instance immediately as we need to Join() the
      // thread but we are holding mutex_ and the thread may try to acquire it.
      if (instance_->active_samplers_.is_empty()) {
        instance_to_remove = instance_;
        instance_ = NULL;
      }
    }

    if (!instance_to_remove) return;
    instance_to_remove->Join();
    delete instance_to_remove;
  }

  // Implement Thread::Run().
  virtual void Run() {
    while (true) {
      {
        base::LockGuard<base::Mutex> lock_guard(mutex_);
        if (active_samplers_.is_empty()) break;
        // When CPU profiling is enabled, both JavaScript and C++ code is
        // profiled. We must not suspend.
        for (int i = 0; i < active_samplers_.length(); ++i) {
          Sampler* sampler = active_samplers_.at(i);
          if (!sampler->isolate()->IsInitialized()) continue;
          if (!sampler->IsProfiling()) continue;
          sampler->DoSample();
        }
      }
      base::OS::Sleep(interval_);
    }
  }

 private:
  // Protects the process-wide state below.
  static base::Mutex* mutex_;
  static SamplerThread* instance_;

  const int interval_;
  List<Sampler*> active_samplers_;

  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};


base::Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;


//
// StackTracer implementation
//
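// Fills in a TickSample from the sampled register state: records the current
// VM state, detects a top-most external callback, and walks the JS stack
// starting from the captured fp/sp.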
DISABLE_ASAN void TickSample::Init(Isolate* isolate,
                                   const RegisterState& regs) {
  DCHECK(isolate->IsInitialized());
  timestamp = base::TimeTicks::HighResolutionNow();
  pc = regs.pc;
  state = isolate->current_vm_state();

  // Avoid collecting traces while doing GC.
  if (state == GC) return;

  Address js_entry_sp = isolate->js_entry_sp();
  if (js_entry_sp == 0) {
    // Not executing JS now.
    return;
  }

  ExternalCallbackScope* scope = isolate->external_callback_scope();
  Address handler = Isolate::handler(isolate->thread_local_top());
  // If there is a handler on top of the external callback scope then
  // we have already entered JavaScript again and the external callback
  // is not the top function.
  if (scope && scope->scope_address() < handler) {
    external_callback = scope->callback();
    has_external_callback = true;
  } else {
    // Sample a potential return address value for frameless invocation of
    // stubs (we'll figure out later whether this value makes sense).
    tos = Memory::Address_at(regs.sp);
    has_external_callback = false;
  }

  SafeStackFrameIterator it(isolate, regs.fp, regs.sp, js_entry_sp);
  top_frame_type = it.top_frame_type();
  unsigned i = 0;
  while (!it.done() && i < TickSample::kMaxFramesCount) {
    stack[i++] = it.frame()->pc();
    it.Advance();
  }
  frames_count = i;
}


void Sampler::SetUp() {
#if defined(USE_SIGNALS)
  SignalHandler::SetUp();
#endif
  SamplerThread::SetUp();
}


void Sampler::TearDown() {
  SamplerThread::TearDown();
#if defined(USE_SIGNALS)
  SignalHandler::TearDown();
#endif
}


Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      has_processing_thread_(false),
      active_(false),
      is_counting_samples_(false),
      js_and_external_sample_count_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  DCHECK(!IsActive());
  delete data_;
}


void Sampler::Start() {
  DCHECK(!IsActive());
  SetActive(true);
  SamplerThread::AddActiveSampler(this);
}


void Sampler::Stop() {
  DCHECK(IsActive());
  SamplerThread::RemoveActiveSampler(this);
  SetActive(false);
}


void Sampler::IncreaseProfilingDepth() {
  base::NoBarrier_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
  SignalHandler::IncreaseSamplerCount();
#endif
}


void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
  SignalHandler::DecreaseSamplerCount();
#endif
  base::NoBarrier_AtomicIncrement(&profiling_, -1);
}


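// Called with the register state captured by DoSample(); converts it into a
// TickSample and forwards it to the profiler via Tick().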
void Sampler::SampleStack(const RegisterState& state) {
  TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
  TickSample sample_obj;
  if (sample == NULL) sample = &sample_obj;
  sample->Init(isolate_, state);
  if (is_counting_samples_) {
    if (sample->state == JS || sample->state == EXTERNAL) {
      ++js_and_external_sample_count_;
    }
  }
  Tick(sample);
  if (sample != &sample_obj) {
    isolate_->cpu_profiler()->FinishTickSample();
  }
}


#if defined(USE_SIGNALS)

void Sampler::DoSample() {
  if (!SignalHandler::Installed()) return;
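  // Interrupt the profiled VM thread with SIGPROF; HandleProfilerSignal then
  // records its register state and takes the sample on that thread.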
  pthread_kill(platform_data()->vm_tid(), SIGPROF);
}

#elif V8_OS_WIN || V8_OS_CYGWIN

void Sampler::DoSample() {
  HANDLE profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == NULL) return;

#if defined(USE_SIMULATOR)
  SimulatorHelper helper;
  if (!helper.Init(this, isolate())) return;
#endif

  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
  if (SuspendThread(profiled_thread) == kSuspendFailed) return;

  // Context used for sampling the register state of the profiled thread.
  CONTEXT context;
  memset(&context, 0, sizeof(context));
  context.ContextFlags = CONTEXT_FULL;
  if (GetThreadContext(profiled_thread, &context) != 0) {
    RegisterState state;
#if defined(USE_SIMULATOR)
    helper.FillRegisters(&state);
#else
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<Address>(context.Rip);
    state.sp = reinterpret_cast<Address>(context.Rsp);
    state.fp = reinterpret_cast<Address>(context.Rbp);
#else
    state.pc = reinterpret_cast<Address>(context.Eip);
    state.sp = reinterpret_cast<Address>(context.Esp);
    state.fp = reinterpret_cast<Address>(context.Ebp);
#endif
#endif  // USE_SIMULATOR
    SampleStack(state);
  }
  ResumeThread(profiled_thread);
}

#endif  // USE_SIGNALS


} }  // namespace v8::internal