use crate::io::driver::{Handle, Interest, ReadyEvent, Registration};

use mio::unix::SourceFd;
use std::io;
use std::os::unix::io::{AsRawFd, RawFd};
use std::{task::Context, task::Poll};

/// Associates an IO object backed by a Unix file descriptor with the tokio
/// reactor, allowing for readiness to be polled. The file descriptor must be of
/// a type that can be used with the OS polling facilities (i.e., `poll`, `epoll`,
/// `kqueue`, etc.), such as a network socket or pipe, and the file descriptor
/// must have the nonblocking mode set to true.
///
/// Creating an AsyncFd registers the file descriptor with the current tokio
/// Reactor, allowing you to directly await the file descriptor being readable
/// or writable. Once registered, the file descriptor remains registered until
/// the AsyncFd is dropped.
///
/// The AsyncFd takes ownership of an arbitrary object to represent the IO
/// object. It is intended that this object will handle closing the file
/// descriptor when it is dropped, avoiding resource leaks and ensuring that the
/// AsyncFd can clean up the registration before closing the file descriptor.
/// The [`AsyncFd::into_inner`] function can be used to extract the inner object
/// to retake control from the tokio IO reactor.
///
/// The inner object is required to implement [`AsRawFd`]. This file descriptor
/// must not change while [`AsyncFd`] owns the inner object, i.e. the
/// [`AsRawFd::as_raw_fd`] method on the inner type must always return the same
/// file descriptor when called multiple times. Failure to uphold this results
/// in unspecified behavior in the IO driver, which may include breaking
/// notifications for other sockets/etc.
///
/// Polling for readiness is done by calling the async functions [`readable`]
/// and [`writable`]. These functions complete when the associated readiness
/// condition is observed. Any number of tasks can query the same `AsyncFd` in
/// parallel, on the same or different conditions.
///
/// On some platforms, the readiness detecting mechanism relies on
/// edge-triggered notifications. This means that the OS will only notify Tokio
/// when the file descriptor transitions from not-ready to ready. For this to
/// work you should first try to read or write and only poll for readiness
/// if that fails with an error of [`std::io::ErrorKind::WouldBlock`].
///
/// Tokio internally tracks when it has received a ready notification, and when
/// readiness checking functions like [`readable`] and [`writable`] are called,
/// if the readiness flag is set, these async functions will complete
/// immediately. This however does mean that it is critical to ensure that this
/// ready flag is cleared when (and only when) the file descriptor ceases to be
/// ready. The [`AsyncFdReadyGuard`] returned from readiness checking functions
/// serves this function; after calling a readiness-checking async function,
/// you must use this [`AsyncFdReadyGuard`] to signal to tokio whether the file
/// descriptor is no longer in a ready state.
///
/// ## Use with a poll-based API
///
/// In some cases it may be desirable to use `AsyncFd` from APIs similar to
/// [`TcpStream::poll_read_ready`]. The [`AsyncFd::poll_read_ready`] and
/// [`AsyncFd::poll_write_ready`] functions are provided for this purpose.
/// Because these functions don't create a future to hold their state, they have
/// the limitation that only one task can wait on each direction (read or write)
/// at a time.
///
/// # Examples
///
/// This example shows how to make a [`std::net::TcpStream`] asynchronous using
/// `AsyncFd`. It implements the read operation as an async fn and the write
/// operation through the `AsyncWrite` trait, to show both approaches.
///
/// ```no_run
/// use futures::ready;
/// use std::io::{self, Read, Write};
/// use std::net::TcpStream;
/// use std::pin::Pin;
/// use std::task::{Context, Poll};
/// use tokio::io::AsyncWrite;
/// use tokio::io::unix::AsyncFd;
///
/// pub struct AsyncTcpStream {
///     inner: AsyncFd<TcpStream>,
/// }
///
/// impl AsyncTcpStream {
///     pub fn new(tcp: TcpStream) -> io::Result<Self> {
///         Ok(Self {
///             inner: AsyncFd::new(tcp)?,
///         })
///     }
///
///     pub async fn read(&self, out: &mut [u8]) -> io::Result<usize> {
///         loop {
///             let mut guard = self.inner.readable().await?;
///
///             match guard.try_io(|inner| inner.get_ref().read(out)) {
///                 Ok(result) => return result,
///                 Err(_would_block) => continue,
///             }
///         }
///     }
/// }
///
/// impl AsyncWrite for AsyncTcpStream {
///     fn poll_write(
///         self: Pin<&mut Self>,
///         cx: &mut Context<'_>,
///         buf: &[u8]
///     ) -> Poll<io::Result<usize>> {
///         loop {
///             let mut guard = ready!(self.inner.poll_write_ready(cx))?;
///
///             match guard.try_io(|inner| inner.get_ref().write(buf)) {
///                 Ok(result) => return Poll::Ready(result),
///                 Err(_would_block) => continue,
///             }
///         }
///     }
///
///     fn poll_flush(
///         self: Pin<&mut Self>,
///         cx: &mut Context<'_>,
///     ) -> Poll<io::Result<()>> {
///         // tcp flush is a no-op
///         Poll::Ready(Ok(()))
///     }
///
///     fn poll_shutdown(
///         self: Pin<&mut Self>,
///         cx: &mut Context<'_>,
///     ) -> Poll<io::Result<()>> {
///         self.inner.get_ref().shutdown(std::net::Shutdown::Write)?;
///         Poll::Ready(Ok(()))
///     }
/// }
/// ```
///
/// [`readable`]: method@Self::readable
/// [`writable`]: method@Self::writable
/// [`AsyncFdReadyGuard`]: struct@self::AsyncFdReadyGuard
/// [`TcpStream::poll_read_ready`]: method@crate::net::TcpStream::poll_read_ready
pub struct AsyncFd<T: AsRawFd> {
    registration: Registration,
    inner: Option<T>,
}

/// Represents an IO-ready event detected on a particular file descriptor that
/// has not yet been acknowledged. This is a `must_use` structure to help ensure
/// that you do not forget to explicitly clear (or not clear) the event.
///
/// This type exposes an immutable reference to the underlying IO object.
#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"]
pub struct AsyncFdReadyGuard<'a, T: AsRawFd> {
    async_fd: &'a AsyncFd<T>,
    event: Option<ReadyEvent>,
}

/// Represents an IO-ready event detected on a particular file descriptor that
/// has not yet been acknowledged. This is a `must_use` structure to help ensure
/// that you do not forget to explicitly clear (or not clear) the event.
///
/// This type exposes a mutable reference to the underlying IO object.
#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"]
pub struct AsyncFdReadyMutGuard<'a, T: AsRawFd> {
    async_fd: &'a mut AsyncFd<T>,
    event: Option<ReadyEvent>,
}

const ALL_INTEREST: Interest = Interest::READABLE.add(Interest::WRITABLE);

impl<T: AsRawFd> AsyncFd<T> {
    #[inline]
    /// Creates an AsyncFd backed by (and taking ownership of) an object
    /// implementing [`AsRawFd`]. The backing file descriptor is cached at the
    /// time of creation.
    ///
    /// This method must be called in the context of a tokio runtime.
    pub fn new(inner: T) -> io::Result<Self>
    where
        T: AsRawFd,
    {
        Self::with_interest(inner, ALL_INTEREST)
    }

    #[inline]
    /// Creates a new `AsyncFd`, as with [`AsyncFd::new`], but with the additional
    /// ability to customize the interest, specifying whether the file descriptor
    /// should be polled for read readiness, write readiness, or both.
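    ///
    /// # Examples
    ///
    /// A minimal sketch of registering only read interest. The nonblocking
    /// `TcpListener` is purely illustrative, and, as with [`AsyncFd::new`], the
    /// call must be made from within a tokio runtime context (elided here).
    ///
    /// ```no_run
    /// use std::net::TcpListener;
    /// use tokio::io::Interest;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// # fn dox() -> std::io::Result<()> {
    /// let listener = TcpListener::bind("127.0.0.1:0")?;
    /// listener.set_nonblocking(true)?;
    ///
    /// // Only read readiness will be tracked for this file descriptor.
    /// let _async_fd = AsyncFd::with_interest(listener, Interest::READABLE)?;
    /// # Ok(())
    /// # }
    /// ```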
    pub fn with_interest(inner: T, interest: Interest) -> io::Result<Self>
    where
        T: AsRawFd,
    {
        Self::new_with_handle_and_interest(inner, Handle::current(), interest)
    }

    pub(crate) fn new_with_handle_and_interest(
        inner: T,
        handle: Handle,
        interest: Interest,
    ) -> io::Result<Self> {
        let fd = inner.as_raw_fd();

        let registration =
            Registration::new_with_interest_and_handle(&mut SourceFd(&fd), interest, handle)?;

        Ok(AsyncFd {
            registration,
            inner: Some(inner),
        })
    }

    /// Returns a shared reference to the backing object of this [`AsyncFd`]
    #[inline]
    pub fn get_ref(&self) -> &T {
        self.inner.as_ref().unwrap()
    }

    /// Returns a mutable reference to the backing object of this [`AsyncFd`]
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.inner.as_mut().unwrap()
    }

    fn take_inner(&mut self) -> Option<T> {
        let fd = self.inner.as_ref().map(AsRawFd::as_raw_fd);

        if let Some(fd) = fd {
            let _ = self.registration.deregister(&mut SourceFd(&fd));
        }

        self.inner.take()
    }

    /// Deregisters this file descriptor and returns ownership of the backing
    /// object.
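    ///
    /// # Examples
    ///
    /// A brief sketch of reclaiming the inner object; the `TcpStream` is only
    /// illustrative, and the surrounding runtime setup is elided.
    ///
    /// ```no_run
    /// use std::net::TcpStream;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// # fn dox() -> std::io::Result<()> {
    /// let stream = TcpStream::connect("127.0.0.1:8080")?;
    /// stream.set_nonblocking(true)?;
    ///
    /// let async_fd = AsyncFd::new(stream)?;
    ///
    /// // Deregister from the reactor and take the `TcpStream` back.
    /// let _stream: TcpStream = async_fd.into_inner();
    /// # Ok(())
    /// # }
    /// ```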
    pub fn into_inner(mut self) -> T {
        self.take_inner().unwrap()
    }

    /// Polls for read readiness.
    ///
    /// If the file descriptor is not currently ready for reading, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for reading, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_read_ready`] or
    /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_write_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`readable`] is not feasible. Where possible, using [`readable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`readable`]: method@Self::readable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
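    ///
    /// # Examples
    ///
    /// A sketch of a poll-style receive function. The nonblocking `UdpSocket`
    /// is illustrative, and `futures::ready!` is used only to propagate
    /// `Poll::Pending`.
    ///
    /// ```no_run
    /// use futures::ready;
    /// use std::io;
    /// use std::net::UdpSocket;
    /// use std::task::{Context, Poll};
    /// use tokio::io::unix::AsyncFd;
    ///
    /// fn poll_recv(
    ///     async_fd: &AsyncFd<UdpSocket>,
    ///     cx: &mut Context<'_>,
    ///     buf: &mut [u8],
    /// ) -> Poll<io::Result<usize>> {
    ///     loop {
    ///         // Register the waker and bail out with `Pending` if not ready.
    ///         let mut guard = ready!(async_fd.poll_read_ready(cx))?;
    ///
    ///         match guard.try_io(|inner| inner.get_ref().recv(buf)) {
    ///             Ok(result) => return Poll::Ready(result),
    ///             // A spurious readiness event; the guard has cleared it.
    ///             Err(_would_block) => continue,
    ///         }
    ///     }
    /// }
    /// ```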
    pub fn poll_read_ready<'a>(
        &'a self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyGuard<'a, T>>> {
        let event = ready!(self.registration.poll_read_ready(cx))?;

        Ok(AsyncFdReadyGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    /// Polls for read readiness.
    ///
    /// If the file descriptor is not currently ready for reading, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for reading, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_read_ready`] or
    /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_write_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`readable`] is not feasible. Where possible, using [`readable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`readable`]: method@Self::readable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_read_ready_mut<'a>(
        &'a mut self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyMutGuard<'a, T>>> {
        let event = ready!(self.registration.poll_read_ready(cx))?;

        Ok(AsyncFdReadyMutGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    /// Polls for write readiness.
    ///
    /// If the file descriptor is not currently ready for writing, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for writing, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_write_ready`] or
    /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_read_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`writable`] is not feasible. Where possible, using [`writable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut
    /// [`writable`]: method@Self::writable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_write_ready<'a>(
        &'a self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyGuard<'a, T>>> {
        let event = ready!(self.registration.poll_write_ready(cx))?;

        Ok(AsyncFdReadyGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    /// Polls for write readiness.
    ///
    /// If the file descriptor is not currently ready for writing, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for writing, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_write_ready`] or
    /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_read_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`writable`] is not feasible. Where possible, using [`writable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut
    /// [`writable`]: method@Self::writable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_write_ready_mut<'a>(
        &'a mut self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyMutGuard<'a, T>>> {
        let event = ready!(self.registration.poll_write_ready(cx))?;

        Ok(AsyncFdReadyMutGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    async fn readiness(&self, interest: Interest) -> io::Result<AsyncFdReadyGuard<'_, T>> {
        let event = self.registration.readiness(interest).await?;

        Ok(AsyncFdReadyGuard {
            async_fd: self,
            event: Some(event),
        })
    }

    async fn readiness_mut(
        &mut self,
        interest: Interest,
    ) -> io::Result<AsyncFdReadyMutGuard<'_, T>> {
        let event = self.registration.readiness(interest).await?;

        Ok(AsyncFdReadyMutGuard {
            async_fd: self,
            event: Some(event),
        })
    }

    /// Waits for the file descriptor to become readable, returning an
    /// [`AsyncFdReadyGuard`] that must be dropped to resume read-readiness
    /// polling.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
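    ///
    /// # Examples
    ///
    /// A sketch of a read loop over a nonblocking `UdpSocket`; the socket and
    /// its setup are assumed to exist elsewhere.
    ///
    /// ```no_run
    /// use std::io;
    /// use std::net::UdpSocket;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// async fn recv(async_fd: &AsyncFd<UdpSocket>, buf: &mut [u8]) -> io::Result<usize> {
    ///     loop {
    ///         // Wait until the fd is believed to be readable.
    ///         let mut guard = async_fd.readable().await?;
    ///
    ///         // On `WouldBlock` the guard clears the readiness flag and we
    ///         // simply wait for the next notification.
    ///         match guard.try_io(|inner| inner.get_ref().recv(buf)) {
    ///             Ok(result) => return result,
    ///             Err(_would_block) => continue,
    ///         }
    ///     }
    /// }
    /// ```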
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn readable<'a>(&'a self) -> io::Result<AsyncFdReadyGuard<'a, T>> {
        self.readiness(Interest::READABLE).await
    }

    /// Waits for the file descriptor to become readable, returning an
    /// [`AsyncFdReadyMutGuard`] that must be dropped to resume read-readiness
    /// polling.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
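    ///
    /// # Examples
    ///
    /// A sketch using an IO type whose read path needs `&mut` access; the
    /// nonblocking `UnixStream` is purely illustrative.
    ///
    /// ```no_run
    /// use std::io::{self, Read};
    /// use std::os::unix::net::UnixStream;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// async fn read(async_fd: &mut AsyncFd<UnixStream>, buf: &mut [u8]) -> io::Result<usize> {
    ///     loop {
    ///         let mut guard = async_fd.readable_mut().await?;
    ///
    ///         // The mut guard hands out `&mut AsyncFd`, so `get_mut` is
    ///         // available inside the closure.
    ///         match guard.try_io(|inner| inner.get_mut().read(buf)) {
    ///             Ok(result) => return result,
    ///             Err(_would_block) => continue,
    ///         }
    ///     }
    /// }
    /// ```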
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn readable_mut<'a>(&'a mut self) -> io::Result<AsyncFdReadyMutGuard<'a, T>> {
        self.readiness_mut(Interest::READABLE).await
    }

    /// Waits for the file descriptor to become writable, returning an
    /// [`AsyncFdReadyGuard`] that must be dropped to resume write-readiness
    /// polling.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
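    ///
    /// # Examples
    ///
    /// A sketch of a send loop over a connected, nonblocking `UdpSocket`; the
    /// socket setup is assumed to have happened elsewhere.
    ///
    /// ```no_run
    /// use std::io;
    /// use std::net::UdpSocket;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// async fn send(async_fd: &AsyncFd<UdpSocket>, buf: &[u8]) -> io::Result<usize> {
    ///     loop {
    ///         let mut guard = async_fd.writable().await?;
    ///
    ///         match guard.try_io(|inner| inner.get_ref().send(buf)) {
    ///             Ok(result) => return result,
    ///             Err(_would_block) => continue,
    ///         }
    ///     }
    /// }
    /// ```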
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn writable<'a>(&'a self) -> io::Result<AsyncFdReadyGuard<'a, T>> {
        self.readiness(Interest::WRITABLE).await
    }

    /// Waits for the file descriptor to become writable, returning an
    /// [`AsyncFdReadyMutGuard`] that must be dropped to resume write-readiness
    /// polling.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn writable_mut<'a>(&'a mut self) -> io::Result<AsyncFdReadyMutGuard<'a, T>> {
        self.readiness_mut(Interest::WRITABLE).await
    }
}

impl<T: AsRawFd> AsRawFd for AsyncFd<T> {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_ref().unwrap().as_raw_fd()
    }
}

impl<T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFd<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AsyncFd")
            .field("inner", &self.inner)
            .finish()
    }
}

impl<T: AsRawFd> Drop for AsyncFd<T> {
    fn drop(&mut self) {
        let _ = self.take_inner();
    }
}

impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> {
    /// Indicates to tokio that the file descriptor is no longer ready. The
    /// internal readiness flag will be cleared, and tokio will wait for the
    /// next edge-triggered readiness notification from the OS.
    ///
    /// It is critical that this function not be called unless your code
    /// _actually observes_ that the file descriptor is _not_ ready. Do not call
    /// it simply because, for example, a read succeeded; it should be called
    /// when a read is observed to block.
    ///
    /// [`drop`]: method@std::mem::drop
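    ///
    /// # Examples
    ///
    /// A sketch of handling readiness by hand; the nonblocking `UdpSocket` is
    /// illustrative, and [`try_io`] usually expresses this pattern more
    /// concisely.
    ///
    /// ```no_run
    /// use std::io::{self, ErrorKind};
    /// use std::net::UdpSocket;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// async fn recv(async_fd: &AsyncFd<UdpSocket>, buf: &mut [u8]) -> io::Result<usize> {
    ///     loop {
    ///         let mut guard = async_fd.readable().await?;
    ///
    ///         match async_fd.get_ref().recv(buf) {
    ///             Ok(n) => return Ok(n),
    ///             // The fd was not actually ready: clear the readiness flag
    ///             // and wait for the next OS notification.
    ///             Err(e) if e.kind() == ErrorKind::WouldBlock => guard.clear_ready(),
    ///             Err(e) => return Err(e),
    ///         }
    ///     }
    /// }
    /// ```
    ///
    /// [`try_io`]: method@Self::try_io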
    pub fn clear_ready(&mut self) {
        if let Some(event) = self.event.take() {
            self.async_fd.registration.clear_readiness(event);
        }
    }

    /// This method should be invoked when you intentionally want to keep the
    /// ready flag asserted.
    ///
    /// While this function is itself a no-op, it satisfies the `#[must_use]`
    /// constraint on the [`AsyncFdReadyGuard`] type.
    pub fn retain_ready(&mut self) {
        // no-op
    }

    /// Performs the provided IO operation.
    ///
    /// If `f` returns a [`WouldBlock`] error, the readiness state associated
    /// with this file descriptor is cleared, and the method returns a
    /// [`TryIoError`]. You will typically need to poll the `AsyncFd` again when
    /// this happens.
    ///
    /// This method helps ensure that the readiness state of the underlying file
    /// descriptor remains in sync with the tokio-side readiness state, by
    /// clearing the tokio-side state only when a [`WouldBlock`] condition
    /// occurs. It is the responsibility of the caller to ensure that `f`
    /// returns [`WouldBlock`] only if the file descriptor that originated this
    /// `AsyncFdReadyGuard` no longer expresses the readiness state that was queried to
    /// create this `AsyncFdReadyGuard`.
    ///
    /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
    // Alias for old name in 0.x
    #[cfg_attr(docsrs, doc(alias = "with_io"))]
    pub fn try_io<R>(
        &mut self,
        f: impl FnOnce(&AsyncFd<Inner>) -> io::Result<R>,
    ) -> Result<io::Result<R>, TryIoError> {
        let result = f(self.async_fd);

        if let Err(e) = result.as_ref() {
            if e.kind() == io::ErrorKind::WouldBlock {
                self.clear_ready();
            }
        }

        match result {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => Err(TryIoError(())),
            result => Ok(result),
        }
    }
}

impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> {
    /// Indicates to tokio that the file descriptor is no longer ready. The
    /// internal readiness flag will be cleared, and tokio will wait for the
    /// next edge-triggered readiness notification from the OS.
    ///
    /// It is critical that this function not be called unless your code
    /// _actually observes_ that the file descriptor is _not_ ready. Do not call
    /// it simply because, for example, a read succeeded; it should be called
    /// when a read is observed to block.
    ///
    /// [`drop`]: method@std::mem::drop
    pub fn clear_ready(&mut self) {
        if let Some(event) = self.event.take() {
            self.async_fd.registration.clear_readiness(event);
        }
    }

    /// This method should be invoked when you intentionally want to keep the
    /// ready flag asserted.
    ///
    /// While this function is itself a no-op, it satisfies the `#[must_use]`
    /// constraint on the [`AsyncFdReadyMutGuard`] type.
    pub fn retain_ready(&mut self) {
        // no-op
    }

    /// Performs the provided IO operation.
    ///
    /// If `f` returns a [`WouldBlock`] error, the readiness state associated
    /// with this file descriptor is cleared, and the method returns a
    /// [`TryIoError`]. You will typically need to poll the `AsyncFd` again when
    /// this happens.
    ///
    /// This method helps ensure that the readiness state of the underlying file
    /// descriptor remains in sync with the tokio-side readiness state, by
    /// clearing the tokio-side state only when a [`WouldBlock`] condition
    /// occurs. It is the responsibility of the caller to ensure that `f`
    /// returns [`WouldBlock`] only if the file descriptor that originated this
    /// `AsyncFdReadyMutGuard` no longer expresses the readiness state that was
    /// queried to create this `AsyncFdReadyMutGuard`.
    ///
    /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
    pub fn try_io<R>(
        &mut self,
        f: impl FnOnce(&mut AsyncFd<Inner>) -> io::Result<R>,
    ) -> Result<io::Result<R>, TryIoError> {
        let result = f(&mut self.async_fd);

        if let Err(e) = result.as_ref() {
            if e.kind() == io::ErrorKind::WouldBlock {
                self.clear_ready();
            }
        }

        match result {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => Err(TryIoError(())),
            result => Ok(result),
        }
    }
}

impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyGuard<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ReadyGuard")
            .field("async_fd", &self.async_fd)
            .finish()
    }
}

impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyMutGuard<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MutReadyGuard")
            .field("async_fd", &self.async_fd)
            .finish()
    }
}

/// The error type returned by [`try_io`].
///
/// This error indicates that the IO resource returned a [`WouldBlock`] error.
///
/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
/// [`try_io`]: method@AsyncFdReadyGuard::try_io
#[derive(Debug)]
pub struct TryIoError(());