// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem;
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::ptr;
use std::time::Duration;

use libc::{c_void, eventfd, read, write, POLLIN};
use serde::{Deserialize, Serialize};

use crate::{
    duration_to_timespec, errno_result, AsRawDescriptor, FromRawDescriptor, IntoRawDescriptor,
    RawDescriptor, Result, SafeDescriptor,
};

/// A safe wrapper around a Linux eventfd (man 2 eventfd).
///
/// An eventfd is useful because it is sendable across processes and can be used for signaling in
/// and out of the KVM API. It can also be polled like any other file descriptor.
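///
/// # Example
///
/// A minimal usage sketch, marked `ignore` because the import path depends on how consumers
/// name this crate:
///
/// ```ignore
/// let evt = EventFd::new().unwrap();
/// // Add 1 to the eventfd's internal count.
/// evt.write(1).unwrap();
/// // read() returns the accumulated count (1 here) and resets it to zero.
/// assert_eq!(evt.read().unwrap(), 1);
/// ```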
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct EventFd {
    event_handle: SafeDescriptor,
}

/// Wrapper around the return value of doing a read on an EventFd which distinguishes between
/// getting a valid count of the number of times the eventfd has been written to and timing out
/// waiting for the count to be non-zero.
#[derive(Debug, PartialEq, Eq)]
pub enum EventReadResult {
    Count(u64),
    Timeout,
}

impl EventFd {
    /// Creates a new blocking EventFd with an initial value of 0.
    pub fn new() -> Result<EventFd> {
        // This is safe because eventfd merely allocates an eventfd for our process and we handle
        // the error case.
        let ret = unsafe { eventfd(0, 0) };
        if ret < 0 {
            return errno_result();
        }
        // This is safe because we checked ret for success and know the kernel gave us an fd that we
        // own.
        Ok(EventFd {
            event_handle: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
        })
    }

    /// Adds `v` to the eventfd's count, blocking until this won't overflow the count.
    pub fn write(&self, v: u64) -> Result<()> {
        // This is safe because we made this fd and the pointer we pass cannot overflow because we
        // pass the syscall's size parameter correctly.
        let ret = unsafe {
            write(
                self.as_raw_fd(),
                &v as *const u64 as *const c_void,
                mem::size_of::<u64>(),
            )
        };
        if ret <= 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Blocks until the eventfd's count is non-zero, then resets the count to zero.
    pub fn read(&self) -> Result<u64> {
        let mut buf: u64 = 0;
        let ret = unsafe {
            // This is safe because we made this fd and the pointer we pass cannot overflow because
            // we pass the syscall's size parameter correctly.
            read(
                self.as_raw_fd(),
                &mut buf as *mut u64 as *mut c_void,
                mem::size_of::<u64>(),
            )
        };
        if ret <= 0 {
            return errno_result();
        }
        Ok(buf)
    }

    /// Blocks for a maximum of `timeout` duration until the eventfd's count is non-zero. If
    /// a timeout does not occur, the count is returned as an `EventReadResult::Count(count)`
    /// and the count is reset to 0. If a timeout does occur, this function returns
    /// `EventReadResult::Timeout`.
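    ///
    /// # Example
    ///
    /// A sketch of handling both outcomes; marked `ignore` because the import path depends on
    /// how consumers name this crate:
    ///
    /// ```ignore
    /// let mut evt = EventFd::new().unwrap();
    /// match evt.read_timeout(Duration::from_millis(10)).unwrap() {
    ///     EventReadResult::Count(n) => println!("eventfd was written {} time(s)", n),
    ///     EventReadResult::Timeout => println!("count stayed zero for 10ms"),
    /// }
    /// ```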
    pub fn read_timeout(&mut self, timeout: Duration) -> Result<EventReadResult> {
        let mut pfd = libc::pollfd {
            fd: self.as_raw_descriptor(),
            events: POLLIN,
            revents: 0,
        };
        let timeoutspec: libc::timespec = duration_to_timespec(timeout);
        // Safe because this only modifies |pfd| and we check the return value.
        let ret = unsafe {
            libc::ppoll(
                &mut pfd as *mut libc::pollfd,
                1,
                &timeoutspec,
                ptr::null_mut(),
            )
        };
        if ret < 0 {
            return errno_result();
        }

        // No returned events (revents) means we timed out.
        if pfd.revents == 0 {
            return Ok(EventReadResult::Timeout);
        }

        let mut buf = 0u64;
        // This is safe because we made this fd and the pointer we pass cannot overflow because
        // we pass the syscall's size parameter correctly.
        let ret = unsafe {
            libc::read(
                self.as_raw_descriptor(),
                &mut buf as *mut _ as *mut c_void,
                mem::size_of::<u64>(),
            )
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(EventReadResult::Count(buf))
    }

    /// Clones this EventFd, internally creating a new file descriptor. The new EventFd will share
    /// the same underlying count within the kernel.
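    ///
    /// # Example
    ///
    /// Because both descriptors refer to the same kernel count, a write through one is visible
    /// to a read through the other (sketch; `ignore`d for the same import-path reason as the
    /// other examples):
    ///
    /// ```ignore
    /// let evt = EventFd::new().unwrap();
    /// let evt_clone = evt.try_clone().unwrap();
    /// evt.write(7).unwrap();
    /// assert_eq!(evt_clone.read().unwrap(), 7);
    /// ```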
    pub fn try_clone(&self) -> Result<EventFd> {
        self.event_handle
            .try_clone()
            .map(|event_handle| EventFd { event_handle })
    }
}

impl AsRawFd for EventFd {
    fn as_raw_fd(&self) -> RawFd {
        self.event_handle.as_raw_fd()
    }
}

impl AsRawDescriptor for EventFd {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.event_handle.as_raw_descriptor()
    }
}

impl FromRawFd for EventFd {
    unsafe fn from_raw_fd(fd: RawFd) -> Self {
        EventFd {
            event_handle: SafeDescriptor::from_raw_descriptor(fd),
        }
    }
}

impl IntoRawFd for EventFd {
    fn into_raw_fd(self) -> RawFd {
        self.event_handle.into_raw_descriptor()
    }
}

/// An `EventFd` wrapper which triggers when it goes out of scope.
///
/// If the underlying `EventFd` fails to trigger during drop, a panic is triggered instead.
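///
/// # Example
///
/// A sketch of using the drop-trigger as a completion signal; marked `ignore` because the
/// import path depends on how consumers name this crate:
///
/// ```ignore
/// let scoped = ScopedEvent::new().unwrap();
/// let waiter = scoped.try_clone().unwrap(); // derefs to EventFd; the clone shares the count
/// drop(scoped);                             // Drop writes 1 to the eventfd
/// assert_eq!(waiter.read().unwrap(), 1);
/// ```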
pub struct ScopedEvent(EventFd);

impl ScopedEvent {
    /// Creates a new `ScopedEvent` which triggers when it goes out of scope.
    pub fn new() -> Result<ScopedEvent> {
        Ok(EventFd::new()?.into())
    }
}

impl From<EventFd> for ScopedEvent {
    fn from(e: EventFd) -> Self {
        Self(e)
    }
}

impl From<ScopedEvent> for EventFd {
    fn from(scoped_event: ScopedEvent) -> Self {
        // Rust doesn't allow moving out of types with a Drop implementation, so we have to use
        // something that copies instead of moves. This is safe because we prevent the drop of
        // `scoped_event` using `mem::forget`, so the underlying `EventFd` will not experience a
        // double-drop.
        let evt = unsafe { ptr::read(&scoped_event.0) };
        mem::forget(scoped_event);
        evt
    }
}

impl Deref for ScopedEvent {
    type Target = EventFd;

    fn deref(&self) -> &EventFd {
        &self.0
    }
}

impl Drop for ScopedEvent {
    fn drop(&mut self) {
        self.write(1).expect("failed to trigger scoped event");
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new() {
        EventFd::new().unwrap();
    }

    #[test]
    fn read_write() {
        let evt = EventFd::new().unwrap();
        evt.write(55).unwrap();
        assert_eq!(evt.read(), Ok(55));
    }

    #[test]
    fn clone() {
        let evt = EventFd::new().unwrap();
        let evt_clone = evt.try_clone().unwrap();
        evt.write(923).unwrap();
        assert_eq!(evt_clone.read(), Ok(923));
    }

    #[test]
    fn scoped_event() {
        let scoped_evt = ScopedEvent::new().unwrap();
        let evt_clone: EventFd = scoped_evt.try_clone().unwrap();
        drop(scoped_evt);
        assert_eq!(evt_clone.read(), Ok(1));
    }

    #[test]
    fn eventfd_from_scoped_event() {
        let scoped_evt = ScopedEvent::new().unwrap();
        let evt: EventFd = scoped_evt.into();
        evt.write(1).unwrap();
    }

    #[test]
    fn timeout() {
        let mut evt = EventFd::new().expect("failed to create eventfd");
        assert_eq!(
            evt.read_timeout(Duration::from_millis(1))
                .expect("failed to read from eventfd with timeout"),
            EventReadResult::Timeout
        );
    }
}