/**************************************************************************
 *
 * Copyright 2008-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * OS independent time-manipulation functions.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "os_time.h"
#include "detect_os.h"

#include "util/u_atomic.h"

#if DETECT_OS_UNIX
#  include <unistd.h> /* usleep */
#  include <time.h> /* timespec */
#  include <sys/time.h> /* timeval */
#  include <sched.h> /* sched_yield */
#  include <errno.h>
#elif DETECT_OS_WINDOWS
#  include <windows.h>
#else
#  error Unsupported OS
#endif


53 int64_t
os_time_get_nano(void)54 os_time_get_nano(void)
55 {
56 #if DETECT_OS_LINUX || DETECT_OS_BSD
57 
58    struct timespec tv;
59    clock_gettime(CLOCK_MONOTONIC, &tv);
60    return tv.tv_nsec + tv.tv_sec*INT64_C(1000000000);
61 
62 #elif DETECT_OS_UNIX
63 
64    struct timeval tv;
65    gettimeofday(&tv, NULL);
66    return tv.tv_usec*INT64_C(1000) + tv.tv_sec*INT64_C(1000000000);
67 
68 #elif DETECT_OS_WINDOWS
69 
70    static LARGE_INTEGER frequency;
71    LARGE_INTEGER counter;
72    int64_t secs, nanosecs;
73    if(!frequency.QuadPart)
74       QueryPerformanceFrequency(&frequency);
75    QueryPerformanceCounter(&counter);
76    /* Compute seconds and nanoseconds parts separately to
77     * reduce severity of precision loss.
78     */
79    secs = counter.QuadPart / frequency.QuadPart;
80    nanosecs = (counter.QuadPart % frequency.QuadPart) * INT64_C(1000000000)
81       / frequency.QuadPart;
82    return secs*INT64_C(1000000000) + nanosecs;
83 
84 #else
85 
86 #error Unsupported OS
87 
88 #endif
89 }



93 void
os_time_sleep(int64_t usecs)94 os_time_sleep(int64_t usecs)
95 {
96 #if DETECT_OS_LINUX
97    struct timespec time;
98    time.tv_sec = usecs / 1000000;
99    time.tv_nsec = (usecs % 1000000) * 1000;
100    while (clock_nanosleep(CLOCK_MONOTONIC, 0, &time, &time) == EINTR);
101 
102 #elif DETECT_OS_UNIX
103    usleep(usecs);
104 
105 #elif DETECT_OS_WINDOWS
106    DWORD dwMilliseconds = (DWORD) ((usecs + 999) / 1000);
107    /* Avoid Sleep(O) as that would cause to sleep for an undetermined duration */
108    if (dwMilliseconds) {
109       Sleep(dwMilliseconds);
110    }
111 #else
112 #  error Unsupported OS
113 #endif
114 }



118 int64_t
os_time_get_absolute_timeout(uint64_t timeout)119 os_time_get_absolute_timeout(uint64_t timeout)
120 {
121    int64_t time, abs_timeout;
122 
123    /* Also check for the type upper bound. */
124    if (timeout == OS_TIMEOUT_INFINITE || timeout > INT64_MAX)
125       return OS_TIMEOUT_INFINITE;
126 
127    time = os_time_get_nano();
128    abs_timeout = time + (int64_t)timeout;
129 
130    /* Check for overflow. */
131    if (abs_timeout < time)
132       return OS_TIMEOUT_INFINITE;
133 
134    return abs_timeout;
135 }


138 bool
os_wait_until_zero(volatile int * var,uint64_t timeout)139 os_wait_until_zero(volatile int *var, uint64_t timeout)
140 {
141    if (!p_atomic_read(var))
142       return true;
143 
144    if (!timeout)
145       return false;
146 
147    if (timeout == OS_TIMEOUT_INFINITE) {
148       while (p_atomic_read(var)) {
149 #if DETECT_OS_UNIX
150          sched_yield();
151 #endif
152       }
153       return true;
154    }
155    else {
156       int64_t start_time = os_time_get_nano();
157       int64_t end_time = start_time + timeout;
158 
159       while (p_atomic_read(var)) {
160          if (os_time_timeout(start_time, end_time, os_time_get_nano()))
161             return false;
162 
163 #if DETECT_OS_UNIX
164          sched_yield();
165 #endif
166       }
167       return true;
168    }
169 }


172 bool
os_wait_until_zero_abs_timeout(volatile int * var,int64_t timeout)173 os_wait_until_zero_abs_timeout(volatile int *var, int64_t timeout)
174 {
175    if (!p_atomic_read(var))
176       return true;
177 
178    if (timeout == OS_TIMEOUT_INFINITE)
179       return os_wait_until_zero(var, OS_TIMEOUT_INFINITE);
180 
181    while (p_atomic_read(var)) {
182       if (os_time_get_nano() >= timeout)
183          return false;
184 
185 #if DETECT_OS_UNIX
186       sched_yield();
187 #endif
188    }
189    return true;
190 }