// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif
#if V8_OS_MACOSX
#include <mach/mach_time.h>
#endif

#include <cstring>
#include <ostream>

#if V8_OS_WIN
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

namespace v8 {
namespace base {

TimeDelta TimeDelta::FromDays(int days) {
  return TimeDelta(days * Time::kMicrosecondsPerDay);
}


TimeDelta TimeDelta::FromHours(int hours) {
  return TimeDelta(hours * Time::kMicrosecondsPerHour);
}


TimeDelta TimeDelta::FromMinutes(int minutes) {
  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
}


TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
}


TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
}


TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
}


int TimeDelta::InDays() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}


int TimeDelta::InHours() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}


int TimeDelta::InMinutes() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}


double TimeDelta::InSecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}


int64_t TimeDelta::InSeconds() const {
  return delta_ / Time::kMicrosecondsPerSecond;
}


double TimeDelta::InMillisecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}


int64_t TimeDelta::InMilliseconds() const {
  return delta_ / Time::kMicrosecondsPerMillisecond;
}


int64_t TimeDelta::InNanoseconds() const {
  return delta_ * Time::kNanosecondsPerMicrosecond;
}

#if V8_OS_MACOSX

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK(delta_ >= 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_MACOSX


#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX


#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resamplings of the coarse-grained system clock for this
    // API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    LockGuard<Mutex> lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    LockGuard<Mutex> lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};


static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
                          ThreadSafeInitOnceTrait>::type clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


Time Time::Now() {
  return clock.Pointer()->Now();
}


Time Time::NowFromSystemTime() {
  return clock.Pointer()->NowFromSystemTime();
}

// The difference between the Windows epoch (1601-01-01) and the Unix epoch
// (1970-01-01), in microseconds.
static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);


Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
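  // FILETIME counts 100 ns intervals since the Windows epoch, so dividing the
  // combined 64-bit value by 10 yields microseconds; subtracting the epoch
  // offset then rebases the result onto the Unix epoch.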
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}


FILETIME Time::ToFiletime() const {
  DCHECK(us_ >= 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
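  // Rebase onto the Windows epoch and scale microseconds back up to 100 ns
  // intervals before splitting the value into the two 32-bit FILETIME halves.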
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, NULL);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}


Time Time::NowFromSystemTime() {
  return Now();
}


Time Time::FromTimespec(struct timespec ts) {
  DCHECK(ts.tv_nsec >= 0);
  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}


struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}


Time Time::FromTimeval(struct timeval tv) {
  DCHECK(tv.tv_usec >= 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}


struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#endif  // V8_OS_WIN


Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}


double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}


std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}


#if V8_OS_WIN

class TickClock {
 public:
  virtual ~TickClock() {}
  virtual int64_t Now() = 0;
  virtual bool IsHighResolution() = 0;
};

// Overview of time counters:
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve. However, the CPU counter is unreliable and should not
// be used in production. Its biggest issue is that it is per processor and it
// is not synchronized between processors. Also, on some computers, the
// counters will change frequency due to thermal and power changes, and stop
// in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (100 nanoseconds) time stamp but is comparatively more expensive
// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
// (with some help from ACPI). According to
// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
// in the worst case it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In the best case the HAL may conclude that
// the RDTSC counter runs at a constant frequency and use that instead. On
// multiprocessor machines, it will try to verify that the values returned
// from RDTSC on each processor are consistent with each other, and apply a
// handful of workarounds for known buggy hardware. In other words, QPC is
// supposed to give consistent results on a multiprocessor computer, but in
// practice it is unreliable due to bugs in the BIOS or HAL on some machines,
// especially older ones. With recent HAL updates and newer BIOSes, QPC is
// becoming more reliable, but it should still be used with caution.
//
// (3) System time. The system time provides a low-resolution (typically 10 ms
// to 55 ms) time stamp but is comparatively less expensive to retrieve and
// more reliable.
class HighResolutionTickClock final : public TickClock {
 public:
  explicit HighResolutionTickClock(int64_t ticks_per_second)
      : ticks_per_second_(ticks_per_second) {
    DCHECK_LT(0, ticks_per_second);
  }
  virtual ~HighResolutionTickClock() {}

  int64_t Now() override {
    LARGE_INTEGER now;
    BOOL result = QueryPerformanceCounter(&now);
    DCHECK(result);
    USE(result);

    // Intentionally calculate microseconds in a roundabout manner to avoid
    // overflow and precision issues. Think twice before simplifying!
    int64_t whole_seconds = now.QuadPart / ticks_per_second_;
    int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);

    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
    // will never return 0.
    return ticks + 1;
  }

  bool IsHighResolution() override { return true; }

 private:
  int64_t ticks_per_second_;
};


class RolloverProtectedTickClock final : public TickClock {
 public:
  RolloverProtectedTickClock() : rollover_(0) {}
  virtual ~RolloverProtectedTickClock() {}

  int64_t Now() override {
    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
    // every ~49.7 days. We try to track rollover ourselves, which works if
    // TimeTicks::Now() is called at least once every 24 days.
    // Note that we do not use GetTickCount() here, since timeGetTime() gives
    // more predictable delta values, as described here:
    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
    // timeGetTime() provides 1ms granularity when combined with
    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
    // can use timeBeginPeriod() to increase the resolution.
    // We use a lock-free version because the sampler thread calls this while
    // the rest of the world is stopped, which could otherwise cause a
    // deadlock.
    base::Atomic32 rollover = base::Acquire_Load(&rollover_);
    uint32_t now = static_cast<uint32_t>(timeGetTime());
    if ((now >> 31) != static_cast<uint32_t>(rollover & 1)) {
      base::Release_CompareAndSwap(&rollover_, rollover, rollover + 1);
      ++rollover;
    }
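    // rollover_ counts elapsed 2^31 ms (~24.8 day) half-periods of the 32-bit
    // tick counter; splicing it with the current reading yields a
    // monotonically increasing 64-bit millisecond count.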
    uint64_t ms = (static_cast<uint64_t>(rollover) << 31) | now;
    return static_cast<int64_t>(ms * Time::kMicrosecondsPerMillisecond);
  }

  bool IsHighResolution() override { return false; }

 private:
  base::Atomic32 rollover_;
};


static LazyStaticInstance<RolloverProtectedTickClock,
                          DefaultConstructTrait<RolloverProtectedTickClock>,
                          ThreadSafeInitOnceTrait>::type tick_clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


struct CreateHighResTickClockTrait {
  static TickClock* Create() {
    // Check if the installed hardware supports a high-resolution performance
    // counter; if not, fall back to the low-resolution tick clock.
    LARGE_INTEGER ticks_per_second;
    if (!QueryPerformanceFrequency(&ticks_per_second)) {
      return tick_clock.Pointer();
    }

    // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is
    // unreliable, so fall back to the low-resolution tick clock.
    CPU cpu;
    if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
      return tick_clock.Pointer();
    }

    return new HighResolutionTickClock(ticks_per_second.QuadPart);
  }
};


static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
                           ThreadSafeInitOnceTrait>::type high_res_tick_clock =
    LAZY_DYNAMIC_INSTANCE_INITIALIZER;


TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


TimeTicks TimeTicks::HighResolutionNow() {
  // Make sure we never return 0 here.
  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return high_res_tick_clock.Pointer()->IsHighResolution();
}


// static
TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }


// static
bool TimeTicks::KernelTimestampAvailable() { return false; }

#else  // V8_OS_WIN

TimeTicks TimeTicks::Now() {
  return HighResolutionNow();
}


TimeTicks TimeTicks::HighResolutionNow() {
  int64_t ticks;
#if V8_OS_MACOSX
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
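  // mach_absolute_time() returns ticks; multiplying by numer / denom converts
  // ticks to nanoseconds. Dividing by kNanosecondsPerMicrosecond first (rather
  // than last) keeps the intermediate product smaller, at the cost of a little
  // precision.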
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
  struct timespec ts;
  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
  DCHECK_EQ(0, result);
  USE(result);
  ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
           ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
#endif  // V8_OS_MACOSX
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return true;
}


#if V8_OS_LINUX

class KernelTimestampClock {
 public:
  KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
    clock_fd_ = open(kTraceClockDevice, O_RDONLY);
    if (clock_fd_ == -1) {
      return;
    }
    clock_id_ = get_clockid(clock_fd_);
  }

  virtual ~KernelTimestampClock() {
    if (clock_fd_ != -1) {
      close(clock_fd_);
    }
  }

  int64_t Now() {
    if (clock_id_ == kClockInvalid) {
      return 0;
    }

    struct timespec ts;

    clock_gettime(clock_id_, &ts);
    return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
  }

  bool Available() { return clock_id_ != kClockInvalid; }

 private:
  static const clockid_t kClockInvalid = -1;
  static const char kTraceClockDevice[];
  static const uint64_t kNsecPerSec = 1000000000;

  int clock_fd_;
  clockid_t clock_id_;

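  // Derive a dynamic POSIX clock id from the open file descriptor. This is
  // the kernel's FD_TO_CLOCKID() encoding: the fd is bitwise-negated, shifted
  // left by three, and tagged with CLOCKFD (3) in the low bits.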
  static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
};


// Timestamp module name
const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";

#else

class KernelTimestampClock {
 public:
  KernelTimestampClock() {}

  int64_t Now() { return 0; }
  bool Available() { return false; }
};

#endif  // V8_OS_LINUX

static LazyStaticInstance<KernelTimestampClock,
                          DefaultConstructTrait<KernelTimestampClock>,
                          ThreadSafeInitOnceTrait>::type kernel_tick_clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


// static
TimeTicks TimeTicks::KernelTimestampNow() {
  return TimeTicks(kernel_tick_clock.Pointer()->Now());
}


// static
bool TimeTicks::KernelTimestampAvailable() {
  return kernel_tick_clock.Pointer()->Available();
}

#endif  // V8_OS_WIN

}  // namespace base
}  // namespace v8