author     mo khan <mo@mokhan.ca>  2025-07-02 18:36:06 -0600
committer  mo khan <mo@mokhan.ca>  2025-07-02 18:36:06 -0600
commit     8cdfa445d6629ffef4cb84967ff7017654045bc2 (patch)
tree       22f0b0907c024c78d26a731e2e1f5219407d8102 /vendor/mio/src/sys
parent     4351c74c7c5f97156bc94d3a8549b9940ac80e3f (diff)
chore: add vendor directory
Diffstat (limited to 'vendor/mio/src/sys')
-rw-r--r--  vendor/mio/src/sys/mod.rs  82
-rw-r--r--  vendor/mio/src/sys/shell/mod.rs  105
-rw-r--r--  vendor/mio/src/sys/shell/selector.rs  122
-rw-r--r--  vendor/mio/src/sys/shell/tcp.rs  31
-rw-r--r--  vendor/mio/src/sys/shell/udp.rs  11
-rw-r--r--  vendor/mio/src/sys/shell/uds.rs  44
-rw-r--r--  vendor/mio/src/sys/shell/waker.rs  16
-rw-r--r--  vendor/mio/src/sys/unix/mod.rs  164
-rw-r--r--  vendor/mio/src/sys/unix/net.rs  215
-rw-r--r--  vendor/mio/src/sys/unix/pipe.rs  619
-rw-r--r--  vendor/mio/src/sys/unix/selector/epoll.rs  231
-rw-r--r--  vendor/mio/src/sys/unix/selector/kqueue.rs  894
-rw-r--r--  vendor/mio/src/sys/unix/selector/poll.rs  749
-rw-r--r--  vendor/mio/src/sys/unix/selector/stateless_io_source.rs  50
-rw-r--r--  vendor/mio/src/sys/unix/sourcefd.rs  121
-rw-r--r--  vendor/mio/src/sys/unix/tcp.rs  136
-rw-r--r--  vendor/mio/src/sys/unix/udp.rs  36
-rw-r--r--  vendor/mio/src/sys/unix/uds/datagram.rs  25
-rw-r--r--  vendor/mio/src/sys/unix/uds/listener.rs  121
-rw-r--r--  vendor/mio/src/sys/unix/uds/mod.rs  177
-rw-r--r--  vendor/mio/src/sys/unix/uds/stream.rs  25
-rw-r--r--  vendor/mio/src/sys/unix/waker/eventfd.rs  88
-rw-r--r--  vendor/mio/src/sys/unix/waker/kqueue.rs  28
-rw-r--r--  vendor/mio/src/sys/unix/waker/pipe.rs  82
-rw-r--r--  vendor/mio/src/sys/wasi/mod.rs  370
-rw-r--r--  vendor/mio/src/sys/windows/afd.rs  243
-rw-r--r--  vendor/mio/src/sys/windows/event.rs  169
-rw-r--r--  vendor/mio/src/sys/windows/handle.rs  30
-rw-r--r--  vendor/mio/src/sys/windows/io_status_block.rs  40
-rw-r--r--  vendor/mio/src/sys/windows/iocp.rs  282
-rw-r--r--  vendor/mio/src/sys/windows/mod.rs  154
-rw-r--r--  vendor/mio/src/sys/windows/named_pipe.rs  1060
-rw-r--r--  vendor/mio/src/sys/windows/net.rs  111
-rw-r--r--  vendor/mio/src/sys/windows/overlapped.rs  35
-rw-r--r--  vendor/mio/src/sys/windows/selector.rs  741
-rw-r--r--  vendor/mio/src/sys/windows/tcp.rs  66
-rw-r--r--  vendor/mio/src/sys/windows/udp.rs  46
-rw-r--r--  vendor/mio/src/sys/windows/waker.rs  29
38 files changed, 7548 insertions, 0 deletions
diff --git a/vendor/mio/src/sys/mod.rs b/vendor/mio/src/sys/mod.rs
new file mode 100644
index 00000000..8bfbdd9b
--- /dev/null
+++ b/vendor/mio/src/sys/mod.rs
@@ -0,0 +1,82 @@
+//! Module with system-specific types.
+//!
+//! Required types:
+//!
+//! * `Event`: a type alias for the system-specific event, e.g. `kevent` or
+//! `epoll_event`.
+//! * `event`: a module with various helper functions for `Event`, see
+//! [`crate::event::Event`] for the required functions.
+//! * `Events`: collection of `Event`s, see [`crate::Events`].
+//! * `IoSourceState`: state for the `IoSource` type.
+//! * `Selector`: selector used to register event sources and poll for events,
+//! see [`crate::Poll`] and [`crate::Registry`] for required methods.
+//! * `tcp` and `udp` modules: see the [`crate::net`] module.
+//! * `Waker`: see [`crate::Waker`].
+
+cfg_os_poll! {
+ macro_rules! debug_detail {
+ (
+ $type: ident ($event_type: ty), $test: path,
+ $($(#[$target: meta])* $libc: ident :: $flag: ident),+ $(,)*
+ ) => {
+ struct $type($event_type);
+
+ impl fmt::Debug for $type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut written_one = false;
+ $(
+ $(#[$target])*
+ #[allow(clippy::bad_bit_mask)] // Apparently some flags are zero.
+ {
+ // Windows doesn't use `libc` but the `afd` module.
+ if $test(&self.0, &$libc :: $flag) {
+ if !written_one {
+ write!(f, "{}", stringify!($flag))?;
+ written_one = true;
+ } else {
+ write!(f, "|{}", stringify!($flag))?;
+ }
+ }
+ }
+ )+
+ if !written_one {
+ write!(f, "(empty)")
+ } else {
+ Ok(())
+ }
+ }
+ }
+ };
+ }
+}
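+
+// A hedged usage sketch (mirroring the invocations in the selector modules
+// below; `check_flag`, `flags`, and `f` are hypothetical): the macro
+// generates a tuple struct whose `Debug` impl prints the names of all
+// matching flags separated by `|`, or `(empty)` when none match.
+//
+// debug_detail!(FlagsDetails(u32), check_flag, libc::EV_ADD, libc::EV_EOF);
+// write!(f, "{:?}", FlagsDetails(flags))?;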
+
+#[cfg(any(unix, target_os = "hermit"))]
+cfg_os_poll! {
+ mod unix;
+ #[allow(unused_imports)]
+ pub use self::unix::*;
+}
+
+#[cfg(windows)]
+cfg_os_poll! {
+ mod windows;
+ pub use self::windows::*;
+}
+
+#[cfg(target_os = "wasi")]
+cfg_os_poll! {
+ mod wasi;
+ pub(crate) use self::wasi::*;
+}
+
+cfg_not_os_poll! {
+ mod shell;
+ pub(crate) use self::shell::*;
+
+ #[cfg(unix)]
+ cfg_any_os_ext! {
+ mod unix;
+ #[cfg(feature = "os-ext")]
+ pub use self::unix::SourceFd;
+ }
+}
diff --git a/vendor/mio/src/sys/shell/mod.rs b/vendor/mio/src/sys/shell/mod.rs
new file mode 100644
index 00000000..aa1c6220
--- /dev/null
+++ b/vendor/mio/src/sys/shell/mod.rs
@@ -0,0 +1,105 @@
+macro_rules! os_required {
+ () => {
+ panic!("mio must be compiled with `os-poll` to run.")
+ };
+}
+
+mod selector;
+pub(crate) use self::selector::{event, Event, Events, Selector};
+
+#[cfg(not(target_os = "wasi"))]
+mod waker;
+#[cfg(not(target_os = "wasi"))]
+pub(crate) use self::waker::Waker;
+
+cfg_net! {
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+ #[cfg(unix)]
+ pub(crate) mod uds;
+}
+
+cfg_io_source! {
+ use std::io;
+ #[cfg(any(unix))]
+ use std::os::fd::RawFd;
+ // TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+ // can use `std::os::fd` and be merged with the above.
+ #[cfg(target_os = "hermit")]
+ use std::os::hermit::io::RawFd;
+ #[cfg(windows)]
+ use std::os::windows::io::RawSocket;
+
+ #[cfg(any(windows, unix, target_os = "hermit"))]
+ use crate::{Registry, Token, Interest};
+
+ pub(crate) struct IoSourceState;
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+ }
+
+ #[cfg(any(unix, target_os = "hermit"))]
+ impl IoSourceState {
+ pub fn register(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ _: RawFd,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn reregister(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ _: RawFd,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn deregister(&mut self, _: &Registry, _: RawFd) -> io::Result<()> {
+ os_required!()
+ }
+ }
+
+ #[cfg(windows)]
+ impl IoSourceState {
+ pub fn register(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ _: RawSocket,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn reregister(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ os_required!()
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/shell/selector.rs b/vendor/mio/src/sys/shell/selector.rs
new file mode 100644
index 00000000..83456ef8
--- /dev/null
+++ b/vendor/mio/src/sys/shell/selector.rs
@@ -0,0 +1,122 @@
+use std::io;
+#[cfg(unix)]
+use std::os::fd::{AsRawFd, RawFd};
+use std::time::Duration;
+
+pub type Event = usize;
+
+pub type Events = Vec<Event>;
+
+#[derive(Debug)]
+pub struct Selector {}
+
+impl Selector {
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ os_required!();
+ }
+
+ pub fn select(&self, _: &mut Events, _: Option<Duration>) -> io::Result<()> {
+ os_required!();
+ }
+}
+
+#[cfg(unix)]
+cfg_any_os_ext! {
+ use crate::{Interest, Token};
+
+ impl Selector {
+ pub fn register(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn reregister(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn deregister(&self, _: RawFd) -> io::Result<()> {
+ os_required!();
+ }
+ }
+}
+
+#[cfg(target_os = "wasi")]
+cfg_any_os_ext! {
+ use crate::{Interest, Token};
+
+ impl Selector {
+ pub fn register(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn reregister(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn deregister(&self, _: wasi::Fd) -> io::Result<()> {
+ os_required!();
+ }
+ }
+}
+
+cfg_io_source! {
+ #[cfg(debug_assertions)]
+ impl Selector {
+ pub fn id(&self) -> usize {
+ os_required!();
+ }
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ os_required!()
+ }
+}
+
+#[allow(clippy::trivially_copy_pass_by_ref)]
+pub mod event {
+ use crate::sys::Event;
+ use crate::Token;
+ use std::fmt;
+
+ pub fn token(_: &Event) -> Token {
+ os_required!();
+ }
+
+ pub fn is_readable(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_writable(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_error(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_read_closed(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_write_closed(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_priority(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn debug_details(_: &mut fmt::Formatter<'_>, _: &Event) -> fmt::Result {
+ os_required!();
+ }
+}
diff --git a/vendor/mio/src/sys/shell/tcp.rs b/vendor/mio/src/sys/shell/tcp.rs
new file mode 100644
index 00000000..b61a4ffc
--- /dev/null
+++ b/vendor/mio/src/sys/shell/tcp.rs
@@ -0,0 +1,31 @@
+use std::io;
+use std::net::{self, SocketAddr};
+
+#[cfg(not(target_os = "wasi"))]
+pub(crate) fn new_for_addr(_: SocketAddr) -> io::Result<i32> {
+ os_required!();
+}
+
+#[cfg(not(target_os = "wasi"))]
+pub(crate) fn bind(_: &net::TcpListener, _: SocketAddr) -> io::Result<()> {
+ os_required!();
+}
+
+#[cfg(not(target_os = "wasi"))]
+pub(crate) fn connect(_: &net::TcpStream, _: SocketAddr) -> io::Result<()> {
+ os_required!();
+}
+
+#[cfg(not(target_os = "wasi"))]
+pub(crate) fn listen(_: &net::TcpListener, _: u32) -> io::Result<()> {
+ os_required!();
+}
+
+#[cfg(any(unix, target_os = "hermit"))]
+pub(crate) fn set_reuseaddr(_: &net::TcpListener, _: bool) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn accept(_: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ os_required!();
+}
diff --git a/vendor/mio/src/sys/shell/udp.rs b/vendor/mio/src/sys/shell/udp.rs
new file mode 100644
index 00000000..6a48b694
--- /dev/null
+++ b/vendor/mio/src/sys/shell/udp.rs
@@ -0,0 +1,11 @@
+#![cfg(not(target_os = "wasi"))]
+use std::io;
+use std::net::{self, SocketAddr};
+
+pub fn bind(_: SocketAddr) -> io::Result<net::UdpSocket> {
+ os_required!()
+}
+
+pub(crate) fn only_v6(_: &net::UdpSocket) -> io::Result<bool> {
+ os_required!()
+}
diff --git a/vendor/mio/src/sys/shell/uds.rs b/vendor/mio/src/sys/shell/uds.rs
new file mode 100644
index 00000000..446781ae
--- /dev/null
+++ b/vendor/mio/src/sys/shell/uds.rs
@@ -0,0 +1,44 @@
+pub(crate) mod datagram {
+ use std::io;
+ use std::os::unix::net::{self, SocketAddr};
+
+ pub(crate) fn bind_addr(_: &SocketAddr) -> io::Result<net::UnixDatagram> {
+ os_required!()
+ }
+
+ pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
+ os_required!()
+ }
+
+ pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
+ os_required!()
+ }
+}
+
+pub(crate) mod listener {
+ use std::io;
+ use std::os::unix::net::{self, SocketAddr};
+
+ use crate::net::UnixStream;
+
+ pub(crate) fn bind_addr(_: &SocketAddr) -> io::Result<net::UnixListener> {
+ os_required!()
+ }
+
+ pub(crate) fn accept(_: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
+ os_required!()
+ }
+}
+
+pub(crate) mod stream {
+ use std::io;
+ use std::os::unix::net::{self, SocketAddr};
+
+ pub(crate) fn connect_addr(_: &SocketAddr) -> io::Result<net::UnixStream> {
+ os_required!()
+ }
+
+ pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
+ os_required!()
+ }
+}
diff --git a/vendor/mio/src/sys/shell/waker.rs b/vendor/mio/src/sys/shell/waker.rs
new file mode 100644
index 00000000..bbdd7c33
--- /dev/null
+++ b/vendor/mio/src/sys/shell/waker.rs
@@ -0,0 +1,16 @@
+use crate::sys::Selector;
+use crate::Token;
+use std::io;
+
+#[derive(Debug)]
+pub struct Waker {}
+
+impl Waker {
+ pub fn new(_: &Selector, _: Token) -> io::Result<Waker> {
+ os_required!();
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ os_required!();
+ }
+}
diff --git a/vendor/mio/src/sys/unix/mod.rs b/vendor/mio/src/sys/unix/mod.rs
new file mode 100644
index 00000000..6c0c8850
--- /dev/null
+++ b/vendor/mio/src/sys/unix/mod.rs
@@ -0,0 +1,164 @@
+/// Helper macro to execute a system call that returns an `io::Result`.
+//
+// This macro must be defined before any module that uses it.
+#[allow(unused_macros)]
+macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{
+ #[allow(unused_unsafe)]
+ let res = unsafe { libc::$fn($($arg, )*) };
+ if res < 0 {
+ Err(std::io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+}
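+
+// A minimal usage sketch (illustrative, not part of the vendored source):
+// wrapping `close(2)` so a negative return value becomes the last OS error.
+//
+// fn close_fd(fd: std::os::fd::RawFd) -> std::io::Result<()> {
+// syscall!(close(fd)).map(|_| ())
+// }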
+
+cfg_os_poll! {
+ #[cfg_attr(all(
+ not(mio_unsupported_force_poll_poll),
+ any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "redox",
+ )
+ ), path = "selector/epoll.rs")]
+ #[cfg_attr(all(
+ not(mio_unsupported_force_poll_poll),
+ any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ )
+ ), path = "selector/kqueue.rs")]
+ #[cfg_attr(any(
+ mio_unsupported_force_poll_poll,
+ target_os = "aix",
+ target_os = "espidf",
+ target_os = "fuchsia",
+ target_os = "haiku",
+ target_os = "hermit",
+ target_os = "hurd",
+ target_os = "nto",
+ target_os = "solaris",
+ target_os = "vita",
+ ), path = "selector/poll.rs")]
+ mod selector;
+ pub(crate) use self::selector::*;
+
+ #[cfg_attr(all(
+ not(mio_unsupported_force_waker_pipe),
+ any(
+ target_os = "android",
+ target_os = "espidf",
+ target_os = "fuchsia",
+ target_os = "hermit",
+ target_os = "illumos",
+ target_os = "linux",
+ )
+ ), path = "waker/eventfd.rs")]
+ #[cfg_attr(all(
+ not(mio_unsupported_force_waker_pipe),
+ not(mio_unsupported_force_poll_poll), // `kqueue(2)` based waker doesn't work with `poll(2)`.
+ any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ )
+ ), path = "waker/kqueue.rs")]
+ #[cfg_attr(any(
+ // NOTE: also add to the list for the `pipe` module below.
+ mio_unsupported_force_waker_pipe,
+ all(
+ // `kqueue(2)` based waker doesn't work with `poll(2)`.
+ mio_unsupported_force_poll_poll,
+ any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ),
+ ),
+ target_os = "aix",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "hurd",
+ target_os = "netbsd",
+ target_os = "nto",
+ target_os = "openbsd",
+ target_os = "redox",
+ target_os = "solaris",
+ target_os = "vita",
+ ), path = "waker/pipe.rs")]
+ mod waker;
+ // NOTE: the `Waker` type is expected in the selector module as the
+ // `poll(2)` implementation needs to do some special stuff.
+
+ mod sourcefd;
+ #[cfg(feature = "os-ext")]
+ pub use self::sourcefd::SourceFd;
+
+ cfg_net! {
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+ #[cfg(not(target_os = "hermit"))]
+ pub(crate) mod uds;
+ }
+
+ #[cfg(all(
+ any(
+ // For the public `pipe` module, must match `cfg_os_ext` macro.
+ feature = "os-ext",
+ // For the `Waker` type based on a pipe.
+ mio_unsupported_force_waker_pipe,
+ all(
+ // `kqueue(2)` based waker doesn't work with `poll(2)`.
+ mio_unsupported_force_poll_poll,
+ any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ),
+ ),
+ // NOTE: also add to the list for the `pipe` module below.
+ target_os = "aix",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "hurd",
+ target_os = "netbsd",
+ target_os = "nto",
+ target_os = "openbsd",
+ target_os = "redox",
+ target_os = "solaris",
+ target_os = "vita",
+ ),
+ // Hermit doesn't support pipes.
+ not(target_os = "hermit"),
+ ))]
+ pub(crate) mod pipe;
+}
+
+cfg_not_os_poll! {
+ cfg_any_os_ext! {
+ mod sourcefd;
+ #[cfg(feature = "os-ext")]
+ pub use self::sourcefd::SourceFd;
+ }
+}
diff --git a/vendor/mio/src/sys/unix/net.rs b/vendor/mio/src/sys/unix/net.rs
new file mode 100644
index 00000000..76451e9a
--- /dev/null
+++ b/vendor/mio/src/sys/unix/net.rs
@@ -0,0 +1,215 @@
+use std::io;
+use std::mem::size_of;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: libc::c_int) -> io::Result<libc::c_int> {
+ let domain = match addr {
+ SocketAddr::V4(..) => libc::AF_INET,
+ SocketAddr::V6(..) => libc::AF_INET6,
+ };
+
+ new_socket(domain, socket_type)
+}
+
+/// Create a new non-blocking socket.
+pub(crate) fn new_socket(domain: libc::c_int, socket_type: libc::c_int) -> io::Result<libc::c_int> {
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "hurd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "solaris",
+ target_os = "hermit",
+ ))]
+ let socket_type = socket_type | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+ #[cfg(target_os = "nto")]
+ let socket_type = socket_type | libc::SOCK_CLOEXEC;
+
+ let socket = syscall!(socket(domain, socket_type, 0))?;
+
+ // Mimic `libstd` and set `SO_NOSIGPIPE` on Apple systems.
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ if let Err(err) = syscall!(setsockopt(
+ socket,
+ libc::SOL_SOCKET,
+ libc::SO_NOSIGPIPE,
+ &1 as *const libc::c_int as *const libc::c_void,
+ size_of::<libc::c_int>() as libc::socklen_t
+ )) {
+ let _ = syscall!(close(socket));
+ return Err(err);
+ }
+
+ // Darwin (and some other platforms) don't have SOCK_NONBLOCK or SOCK_CLOEXEC.
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "nto",
+ ))]
+ {
+ if let Err(err) = syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK)) {
+ let _ = syscall!(close(socket));
+ return Err(err);
+ }
+ #[cfg(not(any(target_os = "espidf", target_os = "vita", target_os = "nto")))]
+ if let Err(err) = syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC)) {
+ let _ = syscall!(close(socket));
+ return Err(err);
+ }
+ }
+
+ Ok(socket)
+}
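+
+// A hedged usage sketch (illustrative only): creating a non-blocking,
+// close-on-exec TCP socket for an address, roughly what the `tcp` module
+// does with the helpers above.
+//
+// let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
+// let fd = new_ip_socket(addr, libc::SOCK_STREAM)?;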
+
+/// A type with the same memory layout as `libc::sockaddr`. Used to convert
+/// Rust-level `SocketAddr*` types into their system representation. The
+/// benefit of this type over `libc::sockaddr_storage` is that it is exactly
+/// as large as it needs to be and can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: libc::sockaddr_in,
+ v6: libc::sockaddr_in6,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const libc::sockaddr {
+ self as *const _ as *const libc::sockaddr
+ }
+}
+
+/// Converts a Rust `SocketAddr` into the system representation.
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, libc::socklen_t) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+ // `s_addr` is stored as big-endian on all machines and the octet
+ // array is already in big-endian order, so the native-endian
+ // conversion is used to ensure the value is never byte-swapped.
+ let sin_addr = libc::in_addr {
+ s_addr: u32::from_ne_bytes(addr.ip().octets()),
+ };
+
+ let sockaddr_in = libc::sockaddr_in {
+ sin_family: libc::AF_INET as libc::sa_family_t,
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ #[cfg(not(any(target_os = "haiku", target_os = "vita")))]
+ sin_zero: [0; 8],
+ #[cfg(target_os = "haiku")]
+ sin_zero: [0; 24],
+ #[cfg(target_os = "vita")]
+ sin_zero: [0; 6],
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "haiku",
+ target_os = "hurd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "hermit",
+ target_os = "nto",
+ ))]
+ sin_len: 0,
+ #[cfg(target_os = "vita")]
+ sin_vport: addr.port().to_be(),
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ let socklen = size_of::<libc::sockaddr_in>() as libc::socklen_t;
+ (sockaddr, socklen)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sockaddr_in6 = libc::sockaddr_in6 {
+ sin6_family: libc::AF_INET6 as libc::sa_family_t,
+ sin6_port: addr.port().to_be(),
+ sin6_addr: libc::in6_addr {
+ s6_addr: addr.ip().octets(),
+ },
+ sin6_flowinfo: addr.flowinfo(),
+ sin6_scope_id: addr.scope_id(),
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "haiku",
+ target_os = "hurd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "nto",
+ target_os = "hermit",
+ ))]
+ sin6_len: 0,
+ #[cfg(target_os = "vita")]
+ sin6_vport: addr.port().to_be(),
+ #[cfg(any(target_os = "illumos", target_os = "solaris"))]
+ __sin6_src_id: 0,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ let socklen = size_of::<libc::sockaddr_in6>() as libc::socklen_t;
+ (sockaddr, socklen)
+ }
+ }
+}
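+
+// A hedged usage sketch (illustrative only; `addr` and `fd` are hypothetical):
+// passing the converted address to `connect(2)`. The returned union must be
+// kept alive while the pointer from `as_ptr` is in use.
+//
+// let (raw_addr, raw_addr_length) = socket_addr(&addr);
+// syscall!(connect(fd, raw_addr.as_ptr(), raw_addr_length))?;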
+
+/// Converts a `libc::sockaddr` compatible struct into a native Rust `SocketAddr`.
+///
+/// # Safety
+///
+/// `storage` must have the `ss_family` field correctly initialized and must
+/// point to an initialized `sockaddr_in` or `sockaddr_in6`.
+pub(crate) unsafe fn to_socket_addr(
+ storage: *const libc::sockaddr_storage,
+) -> io::Result<SocketAddr> {
+ match (*storage).ss_family as libc::c_int {
+ libc::AF_INET => {
+ // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in.
+ let addr: &libc::sockaddr_in = &*(storage as *const libc::sockaddr_in);
+ let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes());
+ let port = u16::from_be(addr.sin_port);
+ Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
+ }
+ libc::AF_INET6 => {
+ // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6.
+ let addr: &libc::sockaddr_in6 = &*(storage as *const libc::sockaddr_in6);
+ let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr);
+ let port = u16::from_be(addr.sin6_port);
+ Ok(SocketAddr::V6(SocketAddrV6::new(
+ ip,
+ port,
+ addr.sin6_flowinfo,
+ addr.sin6_scope_id,
+ )))
+ }
+ _ => Err(io::ErrorKind::InvalidInput.into()),
+ }
+}
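+
+// A hedged usage sketch (illustrative only; `fd` is a hypothetical bound
+// socket): recovering the local address via `getsockname(2)`.
+//
+// let mut storage: libc::sockaddr_storage = unsafe { std::mem::zeroed() };
+// let mut length = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
+// syscall!(getsockname(fd, &mut storage as *mut _ as *mut _, &mut length))?;
+// let addr = unsafe { to_socket_addr(&storage) }?;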
diff --git a/vendor/mio/src/sys/unix/pipe.rs b/vendor/mio/src/sys/unix/pipe.rs
new file mode 100644
index 00000000..0a3be9af
--- /dev/null
+++ b/vendor/mio/src/sys/unix/pipe.rs
@@ -0,0 +1,619 @@
+//! Unix pipe.
+//!
+//! See the [`new`] function for documentation.
+
+use std::io;
+use std::os::fd::RawFd;
+
+pub(crate) fn new_raw() -> io::Result<[RawFd; 2]> {
+ let mut fds: [RawFd; 2] = [-1, -1];
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "hurd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "illumos",
+ target_os = "redox",
+ target_os = "solaris",
+ target_os = "vita",
+ ))]
+ unsafe {
+ if libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) != 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "haiku",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "nto",
+ ))]
+ unsafe {
+ // For platforms that don't have `pipe2(2)` we need to manually set the
+ // correct flags on the file descriptor.
+ if libc::pipe(fds.as_mut_ptr()) != 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ for fd in &fds {
+ if libc::fcntl(*fd, libc::F_SETFL, libc::O_NONBLOCK) != 0
+ || libc::fcntl(*fd, libc::F_SETFD, libc::FD_CLOEXEC) != 0
+ {
+ let err = io::Error::last_os_error();
+ // Don't leak file descriptors. Can't handle closing error though.
+ let _ = libc::close(fds[0]);
+ let _ = libc::close(fds[1]);
+ return Err(err);
+ }
+ }
+ }
+
+ Ok(fds)
+}
+
+cfg_os_ext! {
+use std::fs::File;
+use std::io::{IoSlice, IoSliceMut, Read, Write};
+use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd};
+use std::process::{ChildStderr, ChildStdin, ChildStdout};
+
+use crate::io_source::IoSource;
+use crate::{event, Interest, Registry, Token};
+
+/// Create a new non-blocking Unix pipe.
+///
+/// This is a wrapper around Unix's [`pipe(2)`] system call and can be used
+/// as an inter-process or inter-thread communication channel.
+///
+/// This channel may be created before forking the process, with one end then
+/// used in each process, e.g. the parent process keeps the sending end to
+/// send commands to the child process.
+///
+/// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html
+///
+/// # Events
+///
+/// The [`Sender`] can be registered with [`WRITABLE`] interest to receive
+/// [writable events] and the [`Receiver`] with [`READABLE`] interest. Once
+/// data is written to the `Sender` the `Receiver` will receive a
+/// [readable event].
+///
+/// In addition to those events, events will also be generated if the other
+/// side is dropped. To check if the `Sender` is dropped, check
+/// [`is_read_closed`] on events for the `Receiver`; if it returns true, the
+/// `Sender` was dropped. On the `Sender` end, check [`is_write_closed`]; if
+/// it returns true, the `Receiver` was dropped. Also see the second example
+/// below.
+///
+/// [`WRITABLE`]: Interest::WRITABLE
+/// [writable events]: event::Event::is_writable
+/// [`READABLE`]: Interest::READABLE
+/// [readable event]: event::Event::is_readable
+/// [`is_read_closed`]: event::Event::is_read_closed
+/// [`is_write_closed`]: event::Event::is_write_closed
+///
+/// # Deregistering
+///
+/// Both `Sender` and `Receiver` will deregister themselves when dropped,
+/// **iff** the file descriptors are not duplicated (via [`dup(2)`]).
+///
+/// [`dup(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/dup.html
+///
+/// # Examples
+///
+/// Simple example that writes data into the sending end and reads it from
+/// the receiving end.
+///
+/// ```
+/// use std::io::{self, Read, Write};
+///
+/// use mio::{Poll, Events, Interest, Token};
+/// use mio::unix::pipe;
+///
+/// // Unique tokens for the two ends of the channel.
+/// const PIPE_RECV: Token = Token(0);
+/// const PIPE_SEND: Token = Token(1);
+///
+/// # fn main() -> io::Result<()> {
+/// // Create our `Poll` instance and the `Events` container.
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(8);
+///
+/// // Create a new pipe.
+/// let (mut sender, mut receiver) = pipe::new()?;
+///
+/// // Register both ends of the channel.
+/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
+/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
+///
+/// const MSG: &[u8; 11] = b"Hello world";
+///
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in events.iter() {
+/// match event.token() {
+/// PIPE_SEND => sender.write(MSG)
+/// .and_then(|n| if n != MSG.len() {
+/// // We'll consider a short write an error in this
+/// // example. NOTE: we can't use `write_all` with
+/// // non-blocking I/O.
+/// Err(io::ErrorKind::WriteZero.into())
+/// } else {
+/// Ok(())
+/// })?,
+/// PIPE_RECV => {
+/// let mut buf = [0; 11];
+/// let n = receiver.read(&mut buf)?;
+/// println!("received: {:?}", &buf[0..n]);
+/// assert_eq!(n, MSG.len());
+/// assert_eq!(&buf, &*MSG);
+/// return Ok(());
+/// },
+/// _ => unreachable!(),
+/// }
+/// }
+/// }
+/// # }
+/// ```
+///
+/// Example that receives an event once the `Sender` is dropped.
+///
+/// ```
+/// # use std::io;
+/// #
+/// # use mio::{Poll, Events, Interest, Token};
+/// # use mio::unix::pipe;
+/// #
+/// # const PIPE_RECV: Token = Token(0);
+/// # const PIPE_SEND: Token = Token(1);
+/// #
+/// # fn main() -> io::Result<()> {
+/// // Same setup as in the example above.
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(8);
+///
+/// let (mut sender, mut receiver) = pipe::new()?;
+///
+/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
+/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
+///
+/// // Drop the sender.
+/// drop(sender);
+///
+/// poll.poll(&mut events, None)?;
+///
+/// for event in events.iter() {
+/// match event.token() {
+/// PIPE_RECV if event.is_read_closed() => {
+/// // Detected that the sender was dropped.
+/// println!("Sender dropped!");
+/// return Ok(());
+/// },
+/// _ => unreachable!(),
+/// }
+/// }
+/// # unreachable!();
+/// # }
+/// ```
+pub fn new() -> io::Result<(Sender, Receiver)> {
+ let fds = new_raw()?;
+ // SAFETY: `new_raw` initialised the `fds` above.
+ let r = unsafe { Receiver::from_raw_fd(fds[0]) };
+ let w = unsafe { Sender::from_raw_fd(fds[1]) };
+ Ok((w, r))
+}
+
+/// Sending end of a Unix pipe.
+///
+/// See [`new`] for documentation, including examples.
+#[derive(Debug)]
+pub struct Sender {
+ inner: IoSource<File>,
+}
+
+impl Sender {
+ /// Set the `Sender` into or out of non-blocking mode.
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ set_nonblocking(self.inner.as_raw_fd(), nonblocking)
+ }
+
+ /// Execute an I/O operation ensuring that the pipe receives more events
+ /// if it hits a [`WouldBlock`] error.
+ ///
+ /// # Notes
+ ///
+ /// This method is required to be called for **all** I/O operations to
+ /// ensure the user will receive events once the pipe is ready again after
+ /// returning a [`WouldBlock`] error.
+ ///
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use std::io;
+ /// use std::os::fd::AsRawFd;
+ /// use mio::unix::pipe;
+ ///
+ /// let (sender, receiver) = pipe::new()?;
+ ///
+ /// // Wait until the sender is writable...
+ ///
+ /// // Write to the sender using a direct libc call, of course the
+ /// // `io::Write` implementation would be easier to use.
+ /// let buf = b"hello";
+ /// let n = sender.try_io(|| {
+ /// let buf_ptr = &buf as *const _ as *const _;
+ /// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) };
+ /// if res != -1 {
+ /// Ok(res as usize)
+ /// } else {
+ /// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure
+ /// // should return `WouldBlock` error.
+ /// Err(io::Error::last_os_error())
+ /// }
+ /// })?;
+ /// eprintln!("write {} bytes", n);
+ ///
+ /// // Wait until the receiver is readable...
+ ///
+ /// // Read from the receiver using a direct libc call, of course the
+ /// // `io::Read` implementation would be easier to use.
+ /// let mut buf = [0; 512];
+ /// let n = receiver.try_io(|| {
+ /// let buf_ptr = &mut buf as *mut _ as *mut _;
+ /// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) };
+ /// if res != -1 {
+ /// Ok(res as usize)
+ /// } else {
+ /// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure
+ /// // should return `WouldBlock` error.
+ /// Err(io::Error::last_os_error())
+ /// }
+ /// })?;
+ /// eprintln!("read {} bytes", n);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
+ where
+ F: FnOnce() -> io::Result<T>,
+ {
+ self.inner.do_io(|_| f())
+ }
+}
+
+impl event::Source for Sender {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl Write for Sender {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|mut sender| sender.write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|mut sender| sender.write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|mut sender| sender.flush())
+ }
+}
+
+impl Write for &Sender {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|mut sender| sender.write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|mut sender| sender.write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|mut sender| sender.flush())
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStdin> for Sender {
+ fn from(stdin: ChildStdin) -> Sender {
+ // Safety: `ChildStdin` is guaranteed to be a valid file descriptor.
+ unsafe { Sender::from_raw_fd(stdin.into_raw_fd()) }
+ }
+}
+
+impl FromRawFd for Sender {
+ unsafe fn from_raw_fd(fd: RawFd) -> Sender {
+ Sender {
+ inner: IoSource::new(File::from_raw_fd(fd)),
+ }
+ }
+}
+
+impl AsRawFd for Sender {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Sender {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl From<Sender> for OwnedFd {
+ fn from(sender: Sender) -> Self {
+ sender.inner.into_inner().into()
+ }
+}
+
+impl AsFd for Sender {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.inner.as_fd()
+ }
+}
+
+impl From<OwnedFd> for Sender {
+ fn from(fd: OwnedFd) -> Self {
+ Sender {
+ inner: IoSource::new(File::from(fd)),
+ }
+ }
+}
+
+/// Receiving end of a Unix pipe.
+///
+/// See [`new`] for documentation, including examples.
+#[derive(Debug)]
+pub struct Receiver {
+ inner: IoSource<File>,
+}
+
+impl Receiver {
+ /// Set the `Receiver` into or out of non-blocking mode.
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ set_nonblocking(self.inner.as_raw_fd(), nonblocking)
+ }
+
+ /// Execute an I/O operation ensuring that the pipe receives more events
+ /// if it hits a [`WouldBlock`] error.
+ ///
+ /// # Notes
+ ///
+ /// This method is required to be called for **all** I/O operations to
+ /// ensure the user will receive events once the pipe is ready again after
+ /// returning a [`WouldBlock`] error.
+ ///
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use std::io;
+ /// use std::os::fd::AsRawFd;
+ /// use mio::unix::pipe;
+ ///
+ /// let (sender, receiver) = pipe::new()?;
+ ///
+ /// // Wait until the sender is writable...
+ ///
+ /// // Write to the sender using a direct libc call, of course the
+ /// // `io::Write` implementation would be easier to use.
+ /// let buf = b"hello";
+ /// let n = sender.try_io(|| {
+ /// let buf_ptr = &buf as *const _ as *const _;
+ /// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) };
+ /// if res != -1 {
+ /// Ok(res as usize)
+ /// } else {
+ /// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure
+ /// // should return `WouldBlock` error.
+ /// Err(io::Error::last_os_error())
+ /// }
+ /// })?;
+ /// eprintln!("write {} bytes", n);
+ ///
+ /// // Wait until the receiver is readable...
+ ///
+ /// // Read from the receiver using a direct libc call, of course the
+ /// // `io::Read` implementation would be easier to use.
+ /// let mut buf = [0; 512];
+ /// let n = receiver.try_io(|| {
+ /// let buf_ptr = &mut buf as *mut _ as *mut _;
+ /// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) };
+ /// if res != -1 {
+ /// Ok(res as usize)
+ /// } else {
+ /// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure
+ /// // should return `WouldBlock` error.
+ /// Err(io::Error::last_os_error())
+ /// }
+ /// })?;
+ /// eprintln!("read {} bytes", n);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
+ where
+ F: FnOnce() -> io::Result<T>,
+ {
+ self.inner.do_io(|_| f())
+ }
+}
+
+impl event::Source for Receiver {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl Read for Receiver {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|mut receiver| receiver.read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|mut receiver| receiver.read_vectored(bufs))
+ }
+}
+
+impl Read for &Receiver {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|mut receiver| receiver.read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|mut receiver| receiver.read_vectored(bufs))
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStdout> for Receiver {
+ fn from(stdout: ChildStdout) -> Receiver {
+ // Safety: `ChildStdout` is guaranteed to be a valid file descriptor.
+ unsafe { Receiver::from_raw_fd(stdout.into_raw_fd()) }
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStderr> for Receiver {
+ fn from(stderr: ChildStderr) -> Receiver {
+ // Safety: `ChildStderr` is guaranteed to be a valid file descriptor.
+ unsafe { Receiver::from_raw_fd(stderr.into_raw_fd()) }
+ }
+}
+
+impl IntoRawFd for Receiver {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for Receiver {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for Receiver {
+ unsafe fn from_raw_fd(fd: RawFd) -> Receiver {
+ Receiver {
+ inner: IoSource::new(File::from_raw_fd(fd)),
+ }
+ }
+}
+
+impl From<Receiver> for OwnedFd {
+ fn from(receiver: Receiver) -> Self {
+ receiver.inner.into_inner().into()
+ }
+}
+
+impl AsFd for Receiver {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.inner.as_fd()
+ }
+}
+
+impl From<OwnedFd> for Receiver {
+ fn from(fd: OwnedFd) -> Self {
+ Receiver {
+ inner: IoSource::new(File::from(fd)),
+ }
+ }
+}
+
+#[cfg(not(any(target_os = "aix", target_os = "illumos", target_os = "solaris", target_os = "vita")))]
+fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
+ let value = nonblocking as libc::c_int;
+ if unsafe { libc::ioctl(fd, libc::FIONBIO, &value) } == -1 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+#[cfg(any(target_os = "aix", target_os = "illumos", target_os = "solaris", target_os = "vita"))]
+fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
+ let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) };
+ if flags < 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ let nflags = if nonblocking {
+ flags | libc::O_NONBLOCK
+ } else {
+ flags & !libc::O_NONBLOCK
+ };
+
+ if flags != nflags {
+ if unsafe { libc::fcntl(fd, libc::F_SETFL, nflags) } < 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ Ok(())
+}
+} // `cfg_os_ext!`.
diff --git a/vendor/mio/src/sys/unix/selector/epoll.rs b/vendor/mio/src/sys/unix/selector/epoll.rs
new file mode 100644
index 00000000..082a6587
--- /dev/null
+++ b/vendor/mio/src/sys/unix/selector/epoll.rs
@@ -0,0 +1,231 @@
+use std::os::fd::{AsRawFd, FromRawFd, OwnedFd, RawFd};
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::time::Duration;
+use std::{io, ptr};
+
+use libc::{EPOLLET, EPOLLIN, EPOLLOUT, EPOLLPRI, EPOLLRDHUP};
+
+use crate::{Interest, Token};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ ep: OwnedFd,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // SAFETY: `epoll_create1(2)` ensures the fd is valid.
+ let ep = unsafe { OwnedFd::from_raw_fd(syscall!(epoll_create1(libc::EPOLL_CLOEXEC))?) };
+ Ok(Selector {
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ ep,
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ self.ep.try_clone().map(|ep| Selector {
+ // It's the same selector, so we use the same id.
+ #[cfg(debug_assertions)]
+ id: self.id,
+ ep,
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ let timeout = timeout
+ .map(|to| {
+ // `Duration::as_millis` truncates, so round up. This avoids
+ // turning sub-millisecond timeouts into a zero timeout, unless
+ // the caller explicitly requests that by specifying a zero
+ // timeout.
+ to.checked_add(Duration::from_nanos(999_999))
+ .unwrap_or(to)
+ .as_millis() as libc::c_int
+ })
+ .unwrap_or(-1);
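+ // E.g. a 100 microsecond timeout becomes 1 ms here instead of
+ // 0 ms, while an explicit `Duration::ZERO` still yields 0 ms.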
+
+ events.clear();
+ syscall!(epoll_wait(
+ self.ep.as_raw_fd(),
+ events.as_mut_ptr(),
+ events.capacity() as i32,
+ timeout,
+ ))
+ .map(|n_events| {
+ // This is safe because `epoll_wait` ensures that `n_events` are
+ // assigned.
+ unsafe { events.set_len(n_events as usize) };
+ })
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let mut event = libc::epoll_event {
+ events: interests_to_epoll(interests),
+ u64: usize::from(token) as u64,
+ #[cfg(target_os = "redox")]
+ _pad: 0,
+ };
+
+ let ep = self.ep.as_raw_fd();
+ syscall!(epoll_ctl(ep, libc::EPOLL_CTL_ADD, fd, &mut event)).map(|_| ())
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let mut event = libc::epoll_event {
+ events: interests_to_epoll(interests),
+ u64: usize::from(token) as u64,
+ #[cfg(target_os = "redox")]
+ _pad: 0,
+ };
+
+ let ep = self.ep.as_raw_fd();
+ syscall!(epoll_ctl(ep, libc::EPOLL_CTL_MOD, fd, &mut event)).map(|_| ())
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ let ep = self.ep.as_raw_fd();
+ syscall!(epoll_ctl(ep, libc::EPOLL_CTL_DEL, fd, ptr::null_mut())).map(|_| ())
+ }
+}
+
+cfg_io_source! {
+ impl Selector {
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.ep.as_raw_fd()
+ }
+}
+
+fn interests_to_epoll(interests: Interest) -> u32 {
+ let mut kind = EPOLLET;
+
+ if interests.is_readable() {
+ kind = kind | EPOLLIN | EPOLLRDHUP;
+ }
+
+ if interests.is_writable() {
+ kind |= EPOLLOUT;
+ }
+
+ if interests.is_priority() {
+ kind |= EPOLLPRI;
+ }
+
+ kind as u32
+}
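+
+// For example, `Interest::READABLE | Interest::WRITABLE` maps to
+// `EPOLLET | EPOLLIN | EPOLLRDHUP | EPOLLOUT`: edge-triggered mode is
+// always requested, and `EPOLLRDHUP` is what lets `is_read_closed` detect
+// a peer shutdown without needing a zero-length `read(2)`.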
+
+pub type Event = libc::epoll_event;
+pub type Events = Vec<Event>;
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ pub fn token(event: &Event) -> Token {
+ Token(event.u64 as usize)
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLIN) != 0
+ || (event.events as libc::c_int & libc::EPOLLPRI) != 0
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLOUT) != 0
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLERR) != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ event.events as libc::c_int & libc::EPOLLHUP != 0
+ // Socket has received FIN or called shutdown(SHUT_RD)
+ || (event.events as libc::c_int & libc::EPOLLIN != 0
+ && event.events as libc::c_int & libc::EPOLLRDHUP != 0)
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ event.events as libc::c_int & libc::EPOLLHUP != 0
+ // Unix pipe write end has closed
+ || (event.events as libc::c_int & libc::EPOLLOUT != 0
+ && event.events as libc::c_int & libc::EPOLLERR != 0)
+ // The other side (read end) of a Unix pipe has closed.
+ || event.events as libc::c_int == libc::EPOLLERR
+ }
+
+ pub fn is_priority(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLPRI) != 0
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ // Not supported in the kernel, only in libc.
+ false
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_events(got: &u32, want: &libc::c_int) -> bool {
+ (*got as libc::c_int & want) != 0
+ }
+ debug_detail!(
+ EventsDetails(u32),
+ check_events,
+ libc::EPOLLIN,
+ libc::EPOLLPRI,
+ libc::EPOLLOUT,
+ libc::EPOLLRDNORM,
+ libc::EPOLLRDBAND,
+ libc::EPOLLWRNORM,
+ libc::EPOLLWRBAND,
+ libc::EPOLLMSG,
+ libc::EPOLLERR,
+ libc::EPOLLHUP,
+ libc::EPOLLET,
+ libc::EPOLLRDHUP,
+ libc::EPOLLONESHOT,
+ libc::EPOLLEXCLUSIVE,
+ libc::EPOLLWAKEUP,
+ libc::EPOLL_CLOEXEC,
+ );
+
+ // Can't reference fields in packed structures.
+ let e_u64 = event.u64;
+ f.debug_struct("epoll_event")
+ .field("events", &EventsDetails(event.events))
+ .field("u64", &e_u64)
+ .finish()
+ }
+}
+
+// No special requirement from the implementation around waking.
+pub(crate) use crate::sys::unix::waker::Waker;
+
+cfg_io_source! {
+ mod stateless_io_source;
+ pub(crate) use stateless_io_source::IoSourceState;
+}
diff --git a/vendor/mio/src/sys/unix/selector/kqueue.rs b/vendor/mio/src/sys/unix/selector/kqueue.rs
new file mode 100644
index 00000000..f31db35f
--- /dev/null
+++ b/vendor/mio/src/sys/unix/selector/kqueue.rs
@@ -0,0 +1,894 @@
+use crate::{Interest, Token};
+use std::mem::{self, MaybeUninit};
+use std::ops::{Deref, DerefMut};
+use std::os::fd::{AsRawFd, FromRawFd, OwnedFd, RawFd};
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::time::Duration;
+use std::{cmp, io, ptr, slice};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+// Type of the `nchanges` and `nevents` parameters in the `kevent` function.
+#[cfg(not(target_os = "netbsd"))]
+type Count = libc::c_int;
+#[cfg(target_os = "netbsd")]
+type Count = libc::size_t;
+
+// Type of the `filter` field in the `kevent` structure.
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
+type Filter = libc::c_short;
+#[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+))]
+type Filter = i16;
+#[cfg(target_os = "netbsd")]
+type Filter = u32;
+
+// Type of the `flags` field in the `kevent` structure.
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
+type Flags = libc::c_ushort;
+#[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+))]
+type Flags = u16;
+#[cfg(target_os = "netbsd")]
+type Flags = u32;
+
+// Type of the `udata` field in the `kevent` structure.
+#[cfg(not(target_os = "netbsd"))]
+type UData = *mut libc::c_void;
+#[cfg(target_os = "netbsd")]
+type UData = libc::intptr_t;
+
+macro_rules! kevent {
+ ($id: expr, $filter: expr, $flags: expr, $data: expr) => {
+ libc::kevent {
+ ident: $id as libc::uintptr_t,
+ filter: $filter as Filter,
+ flags: $flags,
+ udata: $data as UData,
+ ..unsafe { mem::zeroed() }
+ }
+ };
+}
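+
+// A hedged expansion sketch: `kevent!(fd, libc::EVFILT_READ, flags, token.0)`
+// sets `ident`, `filter`, `flags`, and `udata` explicitly and
+// zero-initialises every remaining `kevent` field via `mem::zeroed()`.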
+
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ kq: OwnedFd,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // SAFETY: `kqueue(2)` ensures the fd is valid.
+ let kq = unsafe { OwnedFd::from_raw_fd(syscall!(kqueue())?) };
+ syscall!(fcntl(kq.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?;
+ Ok(Selector {
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ kq,
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ self.kq.try_clone().map(|kq| Selector {
+ // It's the same selector, so we use the same id.
+ #[cfg(debug_assertions)]
+ id: self.id,
+ kq,
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ let timeout = timeout.map(|to| libc::timespec {
+ tv_sec: cmp::min(to.as_secs(), libc::time_t::MAX as u64) as libc::time_t,
+ // `Duration::subsec_nanos` is guaranteed to be less than one
+ // billion (the number of nanoseconds in a second), making the
+ // cast to i32 safe. The cast itself is needed for platforms
+ // where C's long is only 32 bits.
+ tv_nsec: libc::c_long::from(to.subsec_nanos() as i32),
+ });
+ let timeout = timeout
+ .as_ref()
+ .map(|s| s as *const _)
+ .unwrap_or(ptr::null());
+
+ events.clear();
+ syscall!(kevent(
+ self.kq.as_raw_fd(),
+ ptr::null(),
+ 0,
+ events.as_mut_ptr(),
+ events.capacity() as Count,
+ timeout,
+ ))
+ .map(|n_events| {
+ // This is safe because `kevent` ensures that `n_events` are
+ // assigned.
+ unsafe { events.set_len(n_events as usize) };
+ })
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let flags = libc::EV_CLEAR | libc::EV_RECEIPT | libc::EV_ADD;
+ // At most we need two changes, but maybe we only need 1.
+ let mut changes: [MaybeUninit<libc::kevent>; 2] =
+ [MaybeUninit::uninit(), MaybeUninit::uninit()];
+ let mut n_changes = 0;
+
+ if interests.is_writable() {
+ let kevent = kevent!(fd, libc::EVFILT_WRITE, flags, token.0);
+ changes[n_changes] = MaybeUninit::new(kevent);
+ n_changes += 1;
+ }
+
+ if interests.is_readable() {
+ let kevent = kevent!(fd, libc::EVFILT_READ, flags, token.0);
+ changes[n_changes] = MaybeUninit::new(kevent);
+ n_changes += 1;
+ }
+
+ // Older versions of macOS (OS X 10.11 and 10.10 have been witnessed)
+ // can return EPIPE when registering a pipe file descriptor where the
+ // other end has already disappeared. For example, code that creates a
+ // pipe, closes one of the file descriptors, and then registers the other
+ // end will see an EPIPE returned from `register`.
+ //
+ // It also turns out that kevent will still report events on the file
+ // descriptor, telling us that it's readable/hup at least after we've
+ // done this registration. As a result we just ignore `EPIPE` here
+ // instead of propagating it.
+ //
+ // More info can be found at tokio-rs/mio#582.
+ let changes = unsafe {
+ // This is safe because we ensure that at least `n_changes` are in
+ // the array.
+ slice::from_raw_parts_mut(changes[0].as_mut_ptr(), n_changes)
+ };
+ kevent_register(self.kq.as_raw_fd(), changes, &[libc::EPIPE as i64])
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let flags = libc::EV_CLEAR | libc::EV_RECEIPT;
+ let write_flags = if interests.is_writable() {
+ flags | libc::EV_ADD
+ } else {
+ flags | libc::EV_DELETE
+ };
+ let read_flags = if interests.is_readable() {
+ flags | libc::EV_ADD
+ } else {
+ flags | libc::EV_DELETE
+ };
+
+ let mut changes: [libc::kevent; 2] = [
+ kevent!(fd, libc::EVFILT_WRITE, write_flags, token.0),
+ kevent!(fd, libc::EVFILT_READ, read_flags, token.0),
+ ];
+
+ // Since there is no way to check with which interests the fd was
+ // registered, we modify both the read and write filters, adding each
+ // when required and removing it otherwise, ignoring the ENOENT error
+ // when it comes up. ENOENT informs us that a filter we're trying to
+ // remove wasn't there in the first place, but we don't really care
+ // since our goal is accomplished.
+ //
+ // For the explanation of ignoring `EPIPE` see `register`.
+ kevent_register(
+ self.kq.as_raw_fd(),
+ &mut changes,
+ &[libc::ENOENT as i64, libc::EPIPE as i64],
+ )
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ let flags = libc::EV_DELETE | libc::EV_RECEIPT;
+ let mut changes: [libc::kevent; 2] = [
+ kevent!(fd, libc::EVFILT_WRITE, flags, 0),
+ kevent!(fd, libc::EVFILT_READ, flags, 0),
+ ];
+
+ // Since there is no way to check with which interests the fd was
+ // registered, we remove both filters (readable and writable) and ignore
+ // the ENOENT error when it comes up. ENOENT informs us that the filter
+ // wasn't there in the first place, but we don't really care since our
+ // goal is to remove it.
+ kevent_register(self.kq.as_raw_fd(), &mut changes, &[libc::ENOENT as i64])
+ }
+
+ // Used by `Waker`.
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ pub fn setup_waker(&self, token: Token) -> io::Result<()> {
+ // First attempt to accept user space notifications.
+ let mut kevent = kevent!(
+ 0,
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_CLEAR | libc::EV_RECEIPT,
+ token.0
+ );
+
+ let kq = self.kq.as_raw_fd();
+ syscall!(kevent(kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
+ if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
+ Err(io::Error::from_raw_os_error(kevent.data as i32))
+ } else {
+ Ok(())
+ }
+ })
+ }
+
+ // Used by `Waker`.
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ pub fn wake(&self, token: Token) -> io::Result<()> {
+ let mut kevent = kevent!(
+ 0,
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_RECEIPT,
+ token.0
+ );
+ kevent.fflags = libc::NOTE_TRIGGER;
+
+ let kq = self.kq.as_raw_fd();
+ syscall!(kevent(kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
+ if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
+ Err(io::Error::from_raw_os_error(kevent.data as i32))
+ } else {
+ Ok(())
+ }
+ })
+ }
+}
+
+/// Register `changes` with the kqueue `kq`.
+fn kevent_register(
+ kq: RawFd,
+ changes: &mut [libc::kevent],
+ ignored_errors: &[i64],
+) -> io::Result<()> {
+ syscall!(kevent(
+ kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ptr::null(),
+ ))
+ .map(|_| ())
+ .or_else(|err| {
+ // According to the manual page of FreeBSD: "When kevent() call fails
+ // with EINTR error, all changes in the changelist have been applied",
+ // so we can safely ignore it.
+ if err.raw_os_error() == Some(libc::EINTR) {
+ Ok(())
+ } else {
+ Err(err)
+ }
+ })
+ .and_then(|()| check_errors(changes, ignored_errors))
+}
+
+/// Check all events for possible errors; returns the first error found.
+fn check_errors(events: &[libc::kevent], ignored_errors: &[i64]) -> io::Result<()> {
+ for event in events {
+ // We can't use references to packed structures (when checking the
+ // ignored errors), so we need to copy the data out before use.
+ let data = event.data as _;
+ // Check for the error flag, the actual error will be in the `data`
+ // field.
+ if (event.flags & libc::EV_ERROR != 0) && data != 0 && !ignored_errors.contains(&data) {
+ return Err(io::Error::from_raw_os_error(data as i32));
+ }
+ }
+ Ok(())
+}
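+
+// For example, a change rejected with `EPIPE` comes back with `EV_ERROR`
+// set in `flags` and `EPIPE` in `data`; `register` passes
+// `&[libc::EPIPE as i64]` as `ignored_errors`, so that case is not
+// reported as a failure (see the comment in `register` above).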
+
+cfg_io_source! {
+ #[cfg(debug_assertions)]
+ impl Selector {
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.kq.as_raw_fd()
+ }
+}
+
+pub type Event = libc::kevent;
+pub struct Events(Vec<libc::kevent>);
+
+impl Events {
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events(Vec::with_capacity(capacity))
+ }
+}
+
+impl Deref for Events {
+ type Target = Vec<libc::kevent>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for Events {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+// `Events` cannot derive `Send` or `Sync` because of the
+// `udata: *mut ::c_void` field in `libc::kevent`. However, `Events`'s public
+// API treats the `udata` field as a `uintptr_t` which is `Send`. `Sync` is
+// safe because with an `events: &Events` value, the only access to the `udata`
+// field is through `fn token(event: &Event)` which cannot mutate the field.
+unsafe impl Send for Events {}
+unsafe impl Sync for Events {}
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ use super::{Filter, Flags};
+
+ pub fn token(event: &Event) -> Token {
+ Token(event.udata as usize)
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ event.filter == libc::EVFILT_READ || {
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ // Used by the `Waker`. On platforms that use `eventfd` or a Unix
+ // pipe it will emit a readable event, so we fake that here as
+ // well.
+ {
+ event.filter == libc::EVFILT_USER
+ }
+ #[cfg(not(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ )))]
+ {
+ false
+ }
+ }
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ event.filter == libc::EVFILT_WRITE
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.flags & libc::EV_ERROR) != 0 ||
+ // When the read end of the socket is closed, EV_EOF is set on
+ // flags, and fflags contains the error if there is one.
+ (event.flags & libc::EV_EOF) != 0 && event.fflags != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ event.filter == libc::EVFILT_READ && event.flags & libc::EV_EOF != 0
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ event.filter == libc::EVFILT_WRITE && event.flags & libc::EV_EOF != 0
+ }
+
+ pub fn is_priority(_: &Event) -> bool {
+ // kqueue doesn't have priority indicators.
+ false
+ }
+
+ #[allow(unused_variables)] // `event` is not used on some platforms.
+ pub fn is_aio(event: &Event) -> bool {
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ {
+ event.filter == libc::EVFILT_AIO
+ }
+ #[cfg(not(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ )))]
+ {
+ false
+ }
+ }
+
+ #[allow(unused_variables)] // `event` is only used on FreeBSD.
+ pub fn is_lio(event: &Event) -> bool {
+ #[cfg(target_os = "freebsd")]
+ {
+ event.filter == libc::EVFILT_LIO
+ }
+ #[cfg(not(target_os = "freebsd"))]
+ {
+ false
+ }
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ debug_detail!(
+ FilterDetails(Filter),
+ PartialEq::eq,
+ libc::EVFILT_READ,
+ libc::EVFILT_WRITE,
+ libc::EVFILT_AIO,
+ libc::EVFILT_VNODE,
+ libc::EVFILT_PROC,
+ libc::EVFILT_SIGNAL,
+ libc::EVFILT_TIMER,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_PROCDESC,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::EVFILT_FS,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_LIO,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::EVFILT_USER,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_SENDFILE,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_EMPTY,
+ #[cfg(target_os = "dragonfly")]
+ libc::EVFILT_EXCEPT,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::EVFILT_MACHPORT,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::EVFILT_VM,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flag(got: &Flags, want: &Flags) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(Flags),
+ check_flag,
+ libc::EV_ADD,
+ libc::EV_DELETE,
+ libc::EV_ENABLE,
+ libc::EV_DISABLE,
+ libc::EV_ONESHOT,
+ libc::EV_CLEAR,
+ libc::EV_RECEIPT,
+ libc::EV_DISPATCH,
+ #[cfg(target_os = "freebsd")]
+ libc::EV_DROP,
+ libc::EV_FLAG1,
+ libc::EV_ERROR,
+ libc::EV_EOF,
+ // Not stable across OS versions on OpenBSD.
+ #[cfg(not(target_os = "openbsd"))]
+ libc::EV_SYSFLAGS,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::EV_FLAG0,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::EV_POLL,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::EV_OOBAND,
+ #[cfg(target_os = "dragonfly")]
+ libc::EV_NODATA,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_fflag(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FflagsDetails(u32),
+ check_fflag,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_TRIGGER,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_FFNOP,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_FFAND,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_FFOR,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_FFCOPY,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_FFCTRLMASK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ ))]
+ libc::NOTE_FFLAGSMASK,
+ libc::NOTE_LOWAT,
+ libc::NOTE_DELETE,
+ libc::NOTE_WRITE,
+ #[cfg(target_os = "dragonfly")]
+ libc::NOTE_OOB,
+ #[cfg(target_os = "openbsd")]
+ libc::NOTE_EOF,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXTEND,
+ libc::NOTE_ATTRIB,
+ libc::NOTE_LINK,
+ libc::NOTE_RENAME,
+ libc::NOTE_REVOKE,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_NONE,
+ #[cfg(any(target_os = "openbsd"))]
+ libc::NOTE_TRUNCATE,
+ libc::NOTE_EXIT,
+ libc::NOTE_FORK,
+ libc::NOTE_EXEC,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_SIGNAL,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXITSTATUS,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXIT_DETAIL,
+ libc::NOTE_PDATAMASK,
+ libc::NOTE_PCTRLMASK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ ))]
+ libc::NOTE_TRACK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ ))]
+ libc::NOTE_TRACKERR,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ ))]
+ libc::NOTE_CHILD,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXIT_DETAIL_MASK,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXIT_DECRYPTFAIL,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXIT_MEMORY,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_EXIT_CSERROR,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_VM_PRESSURE,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_VM_PRESSURE_TERMINATE,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_VM_PRESSURE_SUDDEN_TERMINATE,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_VM_ERROR,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_SECONDS,
+ #[cfg(any(target_os = "freebsd"))]
+ libc::NOTE_MSECONDS,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_USECONDS,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_NSECONDS,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_ABSOLUTE,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_LEEWAY,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_CRITICAL,
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos"
+ ))]
+ libc::NOTE_BACKGROUND,
+ );
+
+ // Can't reference fields in packed structures.
+ let ident = event.ident;
+ let data = event.data;
+ let udata = event.udata;
+ f.debug_struct("kevent")
+ .field("ident", &ident)
+ .field("filter", &FilterDetails(event.filter))
+ .field("flags", &FlagsDetails(event.flags))
+ .field("fflags", &FflagsDetails(event.fflags))
+ .field("data", &data)
+ .field("udata", &udata)
+ .finish()
+ }
+}
+
+// No special requirement from the implementation around waking.
+pub(crate) use crate::sys::unix::waker::Waker;
+
+cfg_io_source! {
+ mod stateless_io_source;
+ pub(crate) use stateless_io_source::IoSourceState;
+}
+
+#[test]
+#[cfg(feature = "os-ext")]
+fn does_not_register_rw() {
+ use crate::unix::SourceFd;
+    use crate::{Interest, Poll, Token};
+
+ let kq = unsafe { libc::kqueue() };
+ let mut kqf = SourceFd(&kq);
+ let poll = Poll::new().unwrap();
+
+    // Registering the kqueue fd will fail if write interest is requested (on
+    // anything but some versions of macOS).
+ poll.registry()
+ .register(&mut kqf, Token(1234), Interest::READABLE)
+ .unwrap();
+}
diff --git a/vendor/mio/src/sys/unix/selector/poll.rs b/vendor/mio/src/sys/unix/selector/poll.rs
new file mode 100644
index 00000000..ac282c62
--- /dev/null
+++ b/vendor/mio/src/sys/unix/selector/poll.rs
@@ -0,0 +1,749 @@
+// This implementation is based on the one in the `polling` crate.
+// Thanks to https://github.com/Kestrer for the original implementation!
+// Permission to use this code has been granted by original author:
+// https://github.com/tokio-rs/mio/pull/1602#issuecomment-1218441031
+
+use std::collections::HashMap;
+use std::fmt::{Debug, Formatter};
+#[cfg(not(target_os = "hermit"))]
+use std::os::fd::{AsRawFd, RawFd};
+// TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+// can use `std::os::fd` and be merged with the above.
+#[cfg(target_os = "hermit")]
+use std::os::hermit::io::{AsRawFd, RawFd};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::{Arc, Condvar, Mutex};
+use std::time::Duration;
+use std::{cmp, fmt, io};
+
+use crate::sys::unix::waker::Waker as WakerInternal;
+use crate::{Interest, Token};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+#[derive(Debug)]
+pub struct Selector {
+ state: Arc<SelectorState>,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ let state = SelectorState::new()?;
+
+ Ok(Selector {
+ state: Arc::new(state),
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ let state = self.state.clone();
+
+ Ok(Selector { state })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.state.select(events, timeout)
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ self.state.register(fd, token, interests)
+ }
+
+ #[allow(dead_code)]
+ pub(crate) fn register_internal(
+ &self,
+ fd: RawFd,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<Arc<RegistrationRecord>> {
+ self.state.register_internal(fd, token, interests)
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ self.state.reregister(fd, token, interests)
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ self.state.deregister(fd)
+ }
+
+ pub fn wake(&self, token: Token) -> io::Result<()> {
+ self.state.wake(token)
+ }
+
+ cfg_io_source! {
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.state.id
+ }
+ }
+}
+
+/// Interface to poll.
+#[derive(Debug)]
+struct SelectorState {
+ /// File descriptors to poll.
+ fds: Mutex<Fds>,
+
+ /// File descriptors which will be removed before the next poll call.
+ ///
+ /// When a file descriptor is deregistered while a poll is running, we need to filter
+ /// out all removed descriptors after that poll is finished running.
+ pending_removal: Mutex<Vec<RawFd>>,
+
+    /// Token associated with a `Waker` that has recently asked to wake. On
+    /// any wakeup we synthetically add the pending token, if one is set, to
+    /// the list of emitted events.
+ pending_wake_token: Mutex<Option<Token>>,
+
+    /// Data is written to this to wake up the current instance of `select`, which happens when
+    /// the user notifies it (in which case `pending_wake_token` will have been set) or when an
+    /// operation needs to occur (in which case `waiting_operations` will have been incremented).
+ notify_waker: WakerInternal,
+
+    /// The number of operations (`register`, `reregister` or `deregister`) currently waiting on
+    /// the mutex to become free. When this is nonzero, `select` must be suspended until it
+    /// reaches zero again.
+ waiting_operations: AtomicUsize,
+    /// The condition variable that gets notified when `waiting_operations` reaches zero.
+ ///
+ /// This is used with the `fds` mutex.
+ operations_complete: Condvar,
+
+ /// This selectors id.
+ #[cfg(debug_assertions)]
+ #[allow(dead_code)]
+ id: usize,
+}
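+
+// A sketch of the handshake between `modify_fds` and `select` implied by the
+// fields above (modifier thread on the left, polling thread on the right):
+//
+//     modifier thread                      polling thread
+//     ---------------                      --------------
+//     waiting_operations += 1
+//     notify_waker.wake()            -->   poll(2) returns (waker fd readable)
+//     lock `fds`, mutate poll_fds          sees waiting_operations > 0, waits
+//     waiting_operations -= 1              on operations_complete
+//     operations_complete.notify_one() --> re-checks the counter, polls again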
+
+/// The file descriptors polled by a `SelectorState`.
+#[derive(Debug, Clone)]
+struct Fds {
+ /// The list of `pollfds` taken by poll.
+ ///
+ /// The first file descriptor is always present and is used to notify the poller.
+ poll_fds: Vec<PollFd>,
+ /// The map of each file descriptor to data associated with it. This does not include the file
+ /// descriptors created by the internal notify waker.
+ fd_data: HashMap<RawFd, FdData>,
+}
+
+/// Transparent wrapper around `libc::pollfd`, used to support `Debug` derives without adding the
+/// `extra_traits` feature of `libc`.
+#[repr(transparent)]
+#[derive(Clone)]
+struct PollFd(libc::pollfd);
+
+impl Debug for PollFd {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("pollfd")
+ .field("fd", &self.0.fd)
+ .field("events", &self.0.events)
+ .field("revents", &self.0.revents)
+ .finish()
+ }
+}
+
+/// Data associated with a file descriptor in a poller.
+#[derive(Debug, Clone)]
+struct FdData {
+ /// The index into `poll_fds` this file descriptor is.
+ poll_fds_index: usize,
+ /// The key of the `Event` associated with this file descriptor.
+ token: Token,
+ /// Used to communicate with IoSourceState when we need to internally deregister
+ /// based on a closed fd.
+ shared_record: Arc<RegistrationRecord>,
+}
+
+impl SelectorState {
+ pub fn new() -> io::Result<SelectorState> {
+ let notify_waker = WakerInternal::new_unregistered()?;
+
+ Ok(Self {
+ fds: Mutex::new(Fds {
+ poll_fds: vec![PollFd(libc::pollfd {
+ fd: notify_waker.as_raw_fd(),
+ events: libc::POLLIN,
+ revents: 0,
+ })],
+ fd_data: HashMap::new(),
+ }),
+ pending_removal: Mutex::new(Vec::new()),
+ pending_wake_token: Mutex::new(None),
+ notify_waker,
+ waiting_operations: AtomicUsize::new(0),
+ operations_complete: Condvar::new(),
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ events.clear();
+
+ let mut fds = self.fds.lock().unwrap();
+
+        // Keep track of fds that receive POLLHUP or POLLERR (i.e. won't receive further
+        // events) and internally deregister them before they are externally deregistered.
+        // See `IoSourceState` below for how the external deregister call is handled
+        // when this state occurs.
+ let mut closed_raw_fds = Vec::new();
+
+ loop {
+ // Complete all current operations.
+ loop {
+ if self.waiting_operations.load(Ordering::SeqCst) == 0 {
+ break;
+ }
+
+ fds = self.operations_complete.wait(fds).unwrap();
+ }
+
+ // Perform the poll.
+ trace!("Polling on {:?}", &fds);
+ let num_events = poll(&mut fds.poll_fds, timeout)?;
+ trace!("Poll finished: {:?}", &fds);
+
+ if num_events == 0 {
+ return Ok(());
+ }
+
+ let waker_events = fds.poll_fds[0].0.revents;
+ let notified = waker_events != 0;
+ let mut num_fd_events = if notified { num_events - 1 } else { num_events };
+
+ let pending_wake_token = self.pending_wake_token.lock().unwrap().take();
+
+ if notified {
+ self.notify_waker.ack_and_reset();
+ if pending_wake_token.is_some() {
+ num_fd_events += 1;
+ }
+ }
+
+            // We now check whether this poll was performed with descriptors which were
+            // pending for removal, and filter out any matching ones.
+ let mut pending_removal_guard = self.pending_removal.lock().unwrap();
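+            // Take ownership of the pending-removal list, leaving an empty
+            // one behind, so the lock is released before we scan the events.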
+ let mut pending_removal = std::mem::replace(pending_removal_guard.as_mut(), Vec::new());
+ drop(pending_removal_guard);
+
+ // Store the events if there were any.
+ if num_fd_events > 0 {
+ let fds = &mut *fds;
+
+ events.reserve(num_fd_events);
+
+ // Add synthetic events we picked up from calls to wake()
+ if let Some(pending_wake_token) = pending_wake_token {
+ events.push(Event {
+ token: pending_wake_token,
+ events: waker_events,
+ });
+ }
+
+ for fd_data in fds.fd_data.values_mut() {
+ let PollFd(poll_fd) = &mut fds.poll_fds[fd_data.poll_fds_index];
+
+ if pending_removal.contains(&poll_fd.fd) {
+ // Fd was removed while poll was running
+ continue;
+ }
+
+ if poll_fd.revents != 0 {
+ // Store event
+ events.push(Event {
+ token: fd_data.token,
+ events: poll_fd.revents,
+ });
+
+ if poll_fd.revents & (libc::POLLHUP | libc::POLLERR) != 0 {
+ pending_removal.push(poll_fd.fd);
+ closed_raw_fds.push(poll_fd.fd);
+ }
+
+                        // Remove the interest which just got triggered; the IoSourceState's
+                        // do_io wrapper used with this selector will add the interest back
+                        // using reregister.
+ poll_fd.events &= !poll_fd.revents;
+
+ // Minor optimization to potentially avoid looping n times where n is the
+ // number of input fds (i.e. we might loop between m and n times where m is
+ // the number of fds with revents != 0).
+ if events.len() == num_fd_events {
+ break;
+ }
+ }
+ }
+
+ break; // No more polling.
+ }
+
+            // If we didn't break above, it means we were woken up internally (for example to add an fd), so we poll again.
+ }
+
+ drop(fds);
+ let _ = self.deregister_all(&closed_raw_fds);
+
+ Ok(())
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ self.register_internal(fd, token, interests).map(|_| ())
+ }
+
+ pub fn register_internal(
+ &self,
+ fd: RawFd,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<Arc<RegistrationRecord>> {
+ #[cfg(debug_assertions)]
+ if fd == self.notify_waker.as_raw_fd() {
+ return Err(io::Error::from(io::ErrorKind::InvalidInput));
+ }
+
+ // We must handle the unlikely case that the following order of operations happens:
+ //
+ // register(1 as RawFd)
+ // deregister(1 as RawFd)
+ // register(1 as RawFd)
+ // <poll happens>
+ //
+ // Fd's pending removal only get cleared when poll has been run. It is possible that
+ // between registering and deregistering and then _again_ registering the file descriptor
+ // poll never gets called, thus the fd stays stuck in the pending removal list.
+ //
+ // To avoid this scenario we remove an fd from pending removals when registering it.
+ let mut pending_removal = self.pending_removal.lock().unwrap();
+ if let Some(idx) = pending_removal.iter().position(|&pending| pending == fd) {
+ pending_removal.swap_remove(idx);
+ }
+ drop(pending_removal);
+
+ self.modify_fds(|fds| {
+ if fds.fd_data.contains_key(&fd) {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered this `Registry` \
+ (an old file descriptor might have been closed without deregistration)",
+ ));
+ }
+
+ let poll_fds_index = fds.poll_fds.len();
+ let record = Arc::new(RegistrationRecord::new());
+ fds.fd_data.insert(
+ fd,
+ FdData {
+ poll_fds_index,
+ token,
+ shared_record: record.clone(),
+ },
+ );
+
+ fds.poll_fds.push(PollFd(libc::pollfd {
+ fd,
+ events: interests_to_poll(interests),
+ revents: 0,
+ }));
+
+ Ok(record)
+ })
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ self.modify_fds(|fds| {
+ let data = fds.fd_data.get_mut(&fd).ok_or(io::ErrorKind::NotFound)?;
+ data.token = token;
+ let poll_fds_index = data.poll_fds_index;
+ fds.poll_fds[poll_fds_index].0.events = interests_to_poll(interests);
+
+ Ok(())
+ })
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ self.deregister_all(&[fd])
+ .map_err(|_| io::ErrorKind::NotFound)?;
+ Ok(())
+ }
+
+    /// Perform a modification on `fds`, interrupting the current caller of `select` if it's running.
+ fn modify_fds<T>(&self, f: impl FnOnce(&mut Fds) -> T) -> T {
+ self.waiting_operations.fetch_add(1, Ordering::SeqCst);
+
+        // Wake up the current caller of `select` if there is one.
+ let sent_notification = self.notify_waker.wake().is_ok();
+
+ let mut fds = self.fds.lock().unwrap();
+
+        // If there was no caller of `select`, our notification was not consumed, so drain it.
+ if sent_notification {
+ self.notify_waker.ack_and_reset();
+ }
+
+ let res = f(&mut *fds);
+
+ if self.waiting_operations.fetch_sub(1, Ordering::SeqCst) == 1 {
+ self.operations_complete.notify_one();
+ }
+
+ res
+ }
+
+    /// Optimized version of [`Self::deregister`] which handles multiple removals
+    /// at once. Returns `Ok` if all removals were performed and `Err` if any
+    /// entry was not found.
+ fn deregister_all(&self, targets: &[RawFd]) -> Result<(), ()> {
+ if targets.is_empty() {
+ return Ok(());
+ }
+
+ let mut pending_removal = self.pending_removal.lock().unwrap();
+ pending_removal.extend(targets);
+ drop(pending_removal);
+
+ self.modify_fds(|fds| {
+ let mut all_successful = true;
+
+ for target in targets {
+ match fds.fd_data.remove(target).ok_or(()) {
+ Ok(data) => {
+ data.shared_record.mark_unregistered();
+ fds.poll_fds.swap_remove(data.poll_fds_index);
+ if let Some(swapped_pollfd) = fds.poll_fds.get(data.poll_fds_index) {
+ fds.fd_data
+ .get_mut(&swapped_pollfd.0.fd)
+ .unwrap()
+ .poll_fds_index = data.poll_fds_index;
+ }
+ }
+ Err(_) => all_successful = false,
+ }
+ }
+
+ if all_successful {
+ Ok(())
+ } else {
+ Err(())
+ }
+ })
+ }
+
+ pub fn wake(&self, token: Token) -> io::Result<()> {
+ self.pending_wake_token.lock().unwrap().replace(token);
+ self.notify_waker.wake()
+ }
+}
+
+/// Shared record between IoSourceState and SelectorState that allows us to internally
+/// deregister partially or fully closed fds (i.e. when we get POLLHUP or POLLERR) without
+/// confusing IoSourceState into trying to deregister twice. This isn't strictly
+/// required, as deregister is technically idempotent, but without it debugging
+/// is confusing: we would see imbalanced calls to register/deregister and
+/// superfluous NotFound errors.
+#[derive(Debug)]
+pub(crate) struct RegistrationRecord {
+ is_unregistered: AtomicBool,
+}
+
+impl RegistrationRecord {
+ pub fn new() -> Self {
+ Self {
+ is_unregistered: AtomicBool::new(false),
+ }
+ }
+
+ pub fn mark_unregistered(&self) {
+ self.is_unregistered.store(true, Ordering::Relaxed);
+ }
+
+ #[allow(dead_code)]
+ pub fn is_registered(&self) -> bool {
+ !self.is_unregistered.load(Ordering::Relaxed)
+ }
+}
+
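+// `POLLRDHUP` (Linux-only) is set when the peer has closed its write half of
+// the connection; other platforms get a zero shim below, so `READ_EVENTS`
+// degrades to plain `POLLIN` there.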
+#[cfg(target_os = "linux")]
+const POLLRDHUP: libc::c_short = libc::POLLRDHUP;
+#[cfg(not(target_os = "linux"))]
+const POLLRDHUP: libc::c_short = 0;
+
+const READ_EVENTS: libc::c_short = libc::POLLIN | POLLRDHUP;
+
+const WRITE_EVENTS: libc::c_short = libc::POLLOUT;
+
+const PRIORITY_EVENTS: libc::c_short = libc::POLLPRI;
+
+/// Get the input poll events for the given event.
+fn interests_to_poll(interest: Interest) -> libc::c_short {
+ let mut kind = 0;
+
+ if interest.is_readable() {
+ kind |= READ_EVENTS;
+ }
+
+ if interest.is_writable() {
+ kind |= WRITE_EVENTS;
+ }
+
+ if interest.is_priority() {
+ kind |= PRIORITY_EVENTS;
+ }
+
+ kind
+}
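+
+// For example, `interests_to_poll(Interest::READABLE | Interest::WRITABLE)`
+// evaluates to `POLLIN | POLLRDHUP | POLLOUT` on Linux, and to
+// `POLLIN | POLLOUT` elsewhere (where `POLLRDHUP` is the zero shim above).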
+
+/// Helper function to call poll.
+fn poll(fds: &mut [PollFd], timeout: Option<Duration>) -> io::Result<usize> {
+ loop {
+        // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
+        // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32-bit
+        // architectures. The magic number is the same constant used by libuv.
+ #[cfg(target_pointer_width = "32")]
+ const MAX_SAFE_TIMEOUT: u128 = 1789569;
+ #[cfg(not(target_pointer_width = "32"))]
+ const MAX_SAFE_TIMEOUT: u128 = libc::c_int::MAX as u128;
+
+ let timeout = timeout
+ .map(|to| {
+ // `Duration::as_millis` truncates, so round up. This avoids
+ // turning sub-millisecond timeouts into a zero timeout, unless
+ // the caller explicitly requests that by specifying a zero
+ // timeout.
+ let to_ms = to
+ .checked_add(Duration::from_nanos(999_999))
+ .unwrap_or(to)
+ .as_millis();
+ cmp::min(MAX_SAFE_TIMEOUT, to_ms) as libc::c_int
+ })
+ .unwrap_or(-1);
+
+ let res = syscall!(poll(
+ fds.as_mut_ptr() as *mut libc::pollfd,
+ fds.len() as libc::nfds_t,
+ timeout,
+ ));
+
+ match res {
+ Ok(num_events) => break Ok(num_events as usize),
+ // poll returns EAGAIN if we can retry it.
+ Err(e) if e.raw_os_error() == Some(libc::EAGAIN) => continue,
+ Err(e) => return Err(e),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct Event {
+ token: Token,
+ events: libc::c_short,
+}
+
+pub type Events = Vec<Event>;
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ use super::POLLRDHUP;
+
+ pub fn token(event: &Event) -> Token {
+ event.token
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ (event.events & libc::POLLIN) != 0 || (event.events & libc::POLLPRI) != 0
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ (event.events & libc::POLLOUT) != 0
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.events & libc::POLLERR) != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ (event.events & libc::POLLHUP) != 0
+ // Socket has received FIN or called shutdown(SHUT_RD)
+ || (event.events & POLLRDHUP) != 0
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ (event.events & libc::POLLHUP) != 0
+ // Unix pipe write end has closed
+ || ((event.events & libc::POLLOUT) != 0 && (event.events & libc::POLLERR) != 0)
+ // The other side (read end) of a Unix pipe has closed.
+ || (event.events == libc::POLLERR)
+ }
+
+ pub fn is_priority(event: &Event) -> bool {
+ (event.events & libc::POLLPRI) != 0
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ // Not supported in the kernel, only in libc.
+ false
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_events(got: &libc::c_short, want: &libc::c_short) -> bool {
+ (*got & want) != 0
+ }
+ debug_detail!(
+ EventsDetails(libc::c_short),
+ check_events,
+ libc::POLLIN,
+ libc::POLLPRI,
+ libc::POLLOUT,
+ libc::POLLRDNORM,
+ libc::POLLRDBAND,
+ libc::POLLWRNORM,
+ libc::POLLWRBAND,
+ libc::POLLERR,
+ libc::POLLHUP,
+ );
+
+ f.debug_struct("poll_event")
+ .field("token", &event.token)
+ .field("events", &EventsDetails(event.events))
+ .finish()
+ }
+}
+
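+/// Waker for the `poll(2)` selector.
+///
+/// Waking stores the token in the selector's `pending_wake_token` and writes
+/// to its internal notify waker; `select` then surfaces a synthetic readable
+/// event for that token.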
+#[derive(Debug)]
+pub(crate) struct Waker {
+ selector: Selector,
+ token: Token,
+}
+
+impl Waker {
+ pub(crate) fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ Ok(Waker {
+ selector: selector.try_clone()?,
+ token,
+ })
+ }
+
+ pub(crate) fn wake(&self) -> io::Result<()> {
+ self.selector.wake(self.token)
+ }
+}
+
+cfg_io_source! {
+ use crate::Registry;
+
+ struct InternalState {
+ selector: Selector,
+ token: Token,
+ interests: Interest,
+ fd: RawFd,
+ shared_record: Arc<RegistrationRecord>,
+ }
+
+ impl Drop for InternalState {
+ fn drop(&mut self) {
+ if self.shared_record.is_registered() {
+ let _ = self.selector.deregister(self.fd);
+ }
+ }
+ }
+
+ pub(crate) struct IoSourceState {
+ inner: Option<Box<InternalState>>,
+ }
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState { inner: None }
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ let result = f(io);
+
+ if let Err(err) = &result {
+ if err.kind() == io::ErrorKind::WouldBlock {
+ self.inner.as_ref().map_or(Ok(()), |state| {
+ state
+ .selector
+ .reregister(state.fd, state.token, state.interests)
+ })?;
+ }
+ }
+
+ result
+ }
+
+ pub fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ fd: RawFd,
+ ) -> io::Result<()> {
+ if self.inner.is_some() {
+ Err(io::ErrorKind::AlreadyExists.into())
+ } else {
+ let selector = registry.selector().try_clone()?;
+
+ selector.register_internal(fd, token, interests).map(move |shared_record| {
+ let state = InternalState {
+ selector,
+ token,
+ interests,
+ fd,
+ shared_record,
+ };
+
+ self.inner = Some(Box::new(state));
+ })
+ }
+ }
+
+ pub fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ fd: RawFd,
+ ) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => registry
+ .selector()
+ .reregister(fd, token, interests)
+ .map(|()| {
+ state.token = token;
+ state.interests = interests;
+ }),
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+
+ pub fn deregister(&mut self, registry: &Registry, fd: RawFd) -> io::Result<()> {
+ if let Some(state) = self.inner.take() {
+                // Marking unregistered will short-circuit the drop behaviour of calling
+                // deregister, so the call to deregister below is strictly required.
+ state.shared_record.mark_unregistered();
+ }
+
+ registry.selector().deregister(fd)
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/unix/selector/stateless_io_source.rs b/vendor/mio/src/sys/unix/selector/stateless_io_source.rs
new file mode 100644
index 00000000..e9050c9f
--- /dev/null
+++ b/vendor/mio/src/sys/unix/selector/stateless_io_source.rs
@@ -0,0 +1,50 @@
+//! Neither `kqueue(2)` nor `epoll(2)` needs to hold any user-space state.
+
+use std::io;
+use std::os::fd::RawFd;
+
+use crate::{Interest, Registry, Token};
+
+pub(crate) struct IoSourceState;
+
+impl IoSourceState {
+ pub(crate) fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub(crate) fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+
+ pub(crate) fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ fd: RawFd,
+ ) -> io::Result<()> {
+ // Pass through, we don't have any state.
+ registry.selector().register(fd, token, interests)
+ }
+
+ pub(crate) fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ fd: RawFd,
+ ) -> io::Result<()> {
+ // Pass through, we don't have any state.
+ registry.selector().reregister(fd, token, interests)
+ }
+
+ pub(crate) fn deregister(&mut self, registry: &Registry, fd: RawFd) -> io::Result<()> {
+ // Pass through, we don't have any state.
+ registry.selector().deregister(fd)
+ }
+}
diff --git a/vendor/mio/src/sys/unix/sourcefd.rs b/vendor/mio/src/sys/unix/sourcefd.rs
new file mode 100644
index 00000000..1cac89ab
--- /dev/null
+++ b/vendor/mio/src/sys/unix/sourcefd.rs
@@ -0,0 +1,121 @@
+use std::io;
+#[cfg(not(target_os = "hermit"))]
+use std::os::fd::RawFd;
+// TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+// can use `std::os::fd` and be merged with the above.
+#[cfg(target_os = "hermit")]
+use std::os::hermit::io::RawFd;
+
+use crate::{event, Interest, Registry, Token};
+
+/// Adapter for [`RawFd`] providing an [`event::Source`] implementation.
+///
+/// `SourceFd` enables registering any FD-backed type with [`Poll`].
+///
+/// While only implementations for TCP and UDP are provided, Mio supports
+/// registering any FD that can be registered with the underlying OS selector.
+/// `SourceFd` provides the necessary bridge.
+///
+/// Note that `SourceFd` takes a `&RawFd`. This is because `SourceFd` **does
+/// not** take ownership of the FD. Specifically, it will not manage any
+/// lifecycle related operations, such as closing the FD on drop. It is expected
+/// that the `SourceFd` is constructed right before a call to
+/// [`Registry::register`]. See the examples for more detail.
+///
+/// [`event::Source`]: ../event/trait.Source.html
+/// [`Poll`]: ../struct.Poll.html
+/// [`Registry::register`]: ../struct.Registry.html#method.register
+///
+/// # Examples
+///
+/// Basic usage.
+///
+#[cfg_attr(
+ all(feature = "os-poll", feature = "net", feature = "os-ext"),
+ doc = "```"
+)]
+#[cfg_attr(
+ not(all(feature = "os-poll", feature = "net", feature = "os-ext")),
+ doc = "```ignore"
+)]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Interest, Poll, Token};
+/// use mio::unix::SourceFd;
+///
+/// use std::os::fd::AsRawFd;
+/// use std::net::TcpListener;
+///
+/// // Bind a std listener
+/// let listener = TcpListener::bind("127.0.0.1:0")?;
+///
+/// let poll = Poll::new()?;
+///
+/// // Register the listener
+/// poll.registry().register(
+/// &mut SourceFd(&listener.as_raw_fd()),
+/// Token(0),
+/// Interest::READABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Implementing [`event::Source`] for a custom type backed by a [`RawFd`].
+///
+#[cfg_attr(all(feature = "os-poll", feature = "os-ext"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "os-ext")), doc = "```ignore")]
+/// use mio::{event, Interest, Registry, Token};
+/// use mio::unix::SourceFd;
+///
+/// use std::os::fd::RawFd;
+/// use std::io;
+///
+/// # #[allow(dead_code)]
+/// pub struct MyIo {
+/// fd: RawFd,
+/// }
+///
+/// impl event::Source for MyIo {
+/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// SourceFd(&self.fd).register(registry, token, interests)
+/// }
+///
+/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// SourceFd(&self.fd).reregister(registry, token, interests)
+/// }
+///
+/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+/// SourceFd(&self.fd).deregister(registry)
+/// }
+/// }
+/// ```
+#[derive(Debug)]
+pub struct SourceFd<'a>(pub &'a RawFd);
+
+impl<'a> event::Source for SourceFd<'a> {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ registry.selector().register(*self.0, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ registry.selector().reregister(*self.0, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ registry.selector().deregister(*self.0)
+ }
+}
diff --git a/vendor/mio/src/sys/unix/tcp.rs b/vendor/mio/src/sys/unix/tcp.rs
new file mode 100644
index 00000000..1d1965d9
--- /dev/null
+++ b/vendor/mio/src/sys/unix/tcp.rs
@@ -0,0 +1,136 @@
+use std::convert::TryInto;
+use std::io;
+use std::mem::{size_of, MaybeUninit};
+use std::net::{self, SocketAddr};
+#[cfg(not(target_os = "hermit"))]
+use std::os::fd::{AsRawFd, FromRawFd};
+// TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+// can use `std::os::fd` and be merged with the above.
+#[cfg(target_os = "hermit")]
+use std::os::hermit::io::{AsRawFd, FromRawFd};
+
+use crate::sys::unix::net::{new_socket, socket_addr, to_socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<libc::c_int> {
+ let domain = match address {
+ SocketAddr::V4(_) => libc::AF_INET,
+ SocketAddr::V6(_) => libc::AF_INET6,
+ };
+ new_socket(domain, libc::SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+
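+    // On a non-blocking socket `connect(2)` normally returns `EINPROGRESS`:
+    // the connection is established asynchronously and completion is later
+    // signalled by the socket becoming writable, so it is not treated as an
+    // error here.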
+ match syscall!(connect(
+ socket.as_raw_fd(),
+ raw_addr.as_ptr(),
+ raw_addr_length
+ )) {
+ Err(err) if err.raw_os_error() != Some(libc::EINPROGRESS) => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ let backlog = backlog.try_into().unwrap_or(i32::MAX);
+ syscall!(listen(socket.as_raw_fd(), backlog))?;
+ Ok(())
+}
+
+pub(crate) fn set_reuseaddr(socket: &net::TcpListener, reuseaddr: bool) -> io::Result<()> {
+ let val: libc::c_int = i32::from(reuseaddr);
+ syscall!(setsockopt(
+ socket.as_raw_fd(),
+ libc::SOL_SOCKET,
+ libc::SO_REUSEADDR,
+ &val as *const libc::c_int as *const libc::c_void,
+ size_of::<libc::c_int>() as libc::socklen_t,
+ ))?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ let mut addr: MaybeUninit<libc::sockaddr_storage> = MaybeUninit::uninit();
+ let mut length = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
+
+ // On platforms that support it we can use `accept4(2)` to set `NONBLOCK`
+ // and `CLOEXEC` in the call to accept the connection.
+ #[cfg(any(
+ // Android x86's seccomp profile forbids calls to `accept4(2)`
+ // See https://github.com/tokio-rs/mio/issues/1445 for details
+ all(not(target_arch="x86"), target_os = "android"),
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "hurd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "solaris",
+ ))]
+ let stream = {
+ syscall!(accept4(
+ listener.as_raw_fd(),
+ addr.as_mut_ptr() as *mut _,
+ &mut length,
+ libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK,
+ ))
+ .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
+ }?;
+
+ // But not all platforms have the `accept4(2)` call. Luckily BSD (derived)
+ // OSs inherit the non-blocking flag from the listener, so we just have to
+ // set `CLOEXEC`.
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "haiku",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "redox",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "hermit",
+ target_os = "nto",
+ all(target_arch = "x86", target_os = "android"),
+ ))]
+ let stream = {
+ syscall!(accept(
+ listener.as_raw_fd(),
+ addr.as_mut_ptr() as *mut _,
+ &mut length
+ ))
+ .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
+ .and_then(|s| {
+ #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
+ syscall!(fcntl(s.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?;
+
+ // See https://github.com/tokio-rs/mio/issues/1450
+ #[cfg(any(
+ all(target_arch = "x86", target_os = "android"),
+ target_os = "aix",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "hermit",
+ target_os = "nto",
+ ))]
+ syscall!(fcntl(s.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK))?;
+
+ Ok(s)
+ })
+ }?;
+
+    // This is safe because the `accept` calls above ensure the address is
+    // initialised.
+ unsafe { to_socket_addr(addr.as_ptr()) }.map(|addr| (stream, addr))
+}
diff --git a/vendor/mio/src/sys/unix/udp.rs b/vendor/mio/src/sys/unix/udp.rs
new file mode 100644
index 00000000..80a09f63
--- /dev/null
+++ b/vendor/mio/src/sys/unix/udp.rs
@@ -0,0 +1,36 @@
+use std::io;
+use std::mem;
+use std::net::{self, SocketAddr};
+#[cfg(not(target_os = "hermit"))]
+use std::os::fd::{AsRawFd, FromRawFd};
+// TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+// can use `std::os::fd` and be merged with the above.
+#[cfg(target_os = "hermit")]
+use std::os::hermit::io::{AsRawFd, FromRawFd};
+
+use crate::sys::unix::net::{new_ip_socket, socket_addr};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+ let fd = new_ip_socket(addr, libc::SOCK_DGRAM)?;
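+    // Wrap the fd in a `UdpSocket` immediately so that it is closed if the
+    // `bind` call below fails.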
+ let socket = unsafe { net::UdpSocket::from_raw_fd(fd) };
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(bind(fd, raw_addr.as_ptr(), raw_addr_length))?;
+
+ Ok(socket)
+}
+
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: libc::c_int = 0;
+ let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
+
+ syscall!(getsockopt(
+ socket.as_raw_fd(),
+ libc::IPPROTO_IPV6,
+ libc::IPV6_V6ONLY,
+ &mut optval as *mut _ as *mut _,
+ &mut optlen,
+ ))?;
+
+ Ok(optval != 0)
+}
diff --git a/vendor/mio/src/sys/unix/uds/datagram.rs b/vendor/mio/src/sys/unix/uds/datagram.rs
new file mode 100644
index 00000000..2010e8eb
--- /dev/null
+++ b/vendor/mio/src/sys/unix/uds/datagram.rs
@@ -0,0 +1,25 @@
+use std::io;
+use std::os::fd::{AsRawFd, FromRawFd};
+use std::os::unix::net::{self, SocketAddr};
+
+use crate::sys::unix::net::new_socket;
+use crate::sys::unix::uds::unix_addr;
+
+pub(crate) fn bind_addr(address: &SocketAddr) -> io::Result<net::UnixDatagram> {
+ let socket = unbound()?;
+
+ let (unix_address, addrlen) = unix_addr(address);
+ let sockaddr = &unix_address as *const libc::sockaddr_un as *const libc::sockaddr;
+ syscall!(bind(socket.as_raw_fd(), sockaddr, addrlen))?;
+
+ Ok(socket)
+}
+
+pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
+ let fd = new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+ Ok(unsafe { net::UnixDatagram::from_raw_fd(fd) })
+}
+
+pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
+ super::pair(libc::SOCK_DGRAM)
+}
diff --git a/vendor/mio/src/sys/unix/uds/listener.rs b/vendor/mio/src/sys/unix/uds/listener.rs
new file mode 100644
index 00000000..5b4219a2
--- /dev/null
+++ b/vendor/mio/src/sys/unix/uds/listener.rs
@@ -0,0 +1,121 @@
+use std::ffi::OsStr;
+use std::os::fd::{AsRawFd, FromRawFd};
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::net::{self, SocketAddr};
+use std::path::Path;
+use std::{io, mem};
+
+use crate::net::UnixStream;
+use crate::sys::unix::net::new_socket;
+use crate::sys::unix::uds::{path_offset, unix_addr};
+
+pub(crate) fn bind_addr(address: &SocketAddr) -> io::Result<net::UnixListener> {
+ let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let socket = unsafe { net::UnixListener::from_raw_fd(fd) };
+
+ let (unix_address, addrlen) = unix_addr(address);
+ let sockaddr = &unix_address as *const libc::sockaddr_un as *const libc::sockaddr;
+ syscall!(bind(fd, sockaddr, addrlen))?;
+ syscall!(listen(fd, 1024))?;
+
+ Ok(socket)
+}
+
+pub(crate) fn accept(listener: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
+ // SAFETY: `libc::sockaddr_un` zero filled is properly initialized.
+ //
+ // `0` is a valid value for `sockaddr_un::sun_family`; it is
+ // `libc::AF_UNSPEC`.
+ //
+ // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
+ // abstract path.
+ let mut sockaddr = unsafe { mem::zeroed::<libc::sockaddr_un>() };
+
+ let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
+
+ #[cfg(not(any(
+ target_os = "aix",
+ target_os = "haiku",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "redox",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "nto",
+ // Android x86's seccomp profile forbids calls to `accept4(2)`
+ // See https://github.com/tokio-rs/mio/issues/1445 for details
+ all(target_arch = "x86", target_os = "android"),
+ )))]
+ let socket = {
+ let flags = libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+ syscall!(accept4(
+ listener.as_raw_fd(),
+ &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
+ &mut socklen,
+ flags
+ ))
+ .map(|socket| unsafe { net::UnixStream::from_raw_fd(socket) })
+ };
+
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "haiku",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "redox",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "nto",
+ all(target_arch = "x86", target_os = "android")
+ ))]
+ let socket = syscall!(accept(
+ listener.as_raw_fd(),
+ &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
+ &mut socklen,
+ ))
+ .and_then(|socket| {
+ // Ensure the socket is closed if either of the `fcntl` calls
+ // error below.
+ let s = unsafe { net::UnixStream::from_raw_fd(socket) };
+ #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
+ syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC))?;
+
+ // See https://github.com/tokio-rs/mio/issues/1450
+ #[cfg(any(
+ all(target_arch = "x86", target_os = "android"),
+ target_os = "espidf",
+ target_os = "vita",
+ target_os = "nto",
+ ))]
+ syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))?;
+
+ Ok(s)
+ });
+
+ let socket = socket.map(UnixStream::from_std)?;
+
+ #[allow(unused_mut)] // See below.
+ let mut path_len = socklen as usize - path_offset(&sockaddr);
+    // On FreeBSD and Darwin, accept returns a length of 14/16 but an unnamed
+    // (all zero) address. Map that to a length of 0 to match other OSes.
+ if sockaddr.sun_path[0] == 0 {
+ path_len = 0;
+ }
+ // SAFETY: going from i8 to u8 is fine in this context.
+ let mut path =
+ unsafe { &*(&sockaddr.sun_path[..path_len] as *const [libc::c_char] as *const [u8]) };
+ // Remove last null as `SocketAddr::from_pathname` doesn't accept it.
+ if let Some(0) = path.last() {
+ path = &path[..path.len() - 1];
+ }
+ let address = SocketAddr::from_pathname(Path::new(OsStr::from_bytes(path)))?;
+ Ok((socket, address))
+}
diff --git a/vendor/mio/src/sys/unix/uds/mod.rs b/vendor/mio/src/sys/unix/uds/mod.rs
new file mode 100644
index 00000000..0c61db94
--- /dev/null
+++ b/vendor/mio/src/sys/unix/uds/mod.rs
@@ -0,0 +1,177 @@
+#[cfg(target_os = "android")]
+use std::os::android::net::SocketAddrExt;
+#[cfg(target_os = "linux")]
+use std::os::linux::net::SocketAddrExt;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::FromRawFd;
+use std::os::unix::net::SocketAddr;
+use std::{io, mem, ptr};
+
+pub(crate) mod datagram;
+pub(crate) mod listener;
+pub(crate) mod stream;
+
+const UNNAMED_ADDRESS: &[u8] = &[];
+
+/// Get the `sun_path` field offset of `sockaddr_un` for the target OS.
+///
+/// On Linux this is equal to `size_of::<sa_family_t>()`, but some other
+/// implementations include additional fields before `sun_path`, so computing
+/// the offset describes the size of the address structure more portably.
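+///
+/// For example, on Linux `sa_family_t` is 2 bytes, so this returns 2 and a
+/// pathname address for `"/tmp/sock"` occupies `2 + 9 + 1` bytes (offset,
+/// path, trailing null) of the structure.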
+fn path_offset(sockaddr: &libc::sockaddr_un) -> usize {
+ let base = sockaddr as *const _ as usize;
+ let path = &sockaddr.sun_path as *const _ as usize;
+ path - base
+}
+
+/// Converts a Rust `SocketAddr` into the system representation.
+fn unix_addr(address: &SocketAddr) -> (libc::sockaddr_un, libc::socklen_t) {
+ // SAFETY: `libc::sockaddr_un` zero filled is properly initialized.
+ //
+ // `0` is a valid value for `sockaddr_un::sun_family`; it is
+ // `libc::AF_UNSPEC`.
+ //
+ // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
+ // abstract path.
+ let mut sockaddr = unsafe { mem::zeroed::<libc::sockaddr_un>() };
+
+ sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ #[allow(unused_mut)] // Only used with abstract namespaces.
+ let mut offset = 0;
+ let addr = match address.as_pathname() {
+ Some(path) => path.as_os_str().as_bytes(),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ None => match address.as_abstract_name() {
+ Some(name) => {
+ offset += 1;
+ name
+ }
+ None => UNNAMED_ADDRESS,
+ },
+ #[cfg(not(any(target_os = "android", target_os = "linux")))]
+ None => UNNAMED_ADDRESS,
+ };
+
+ // SAFETY: `addr` and `sockaddr.sun_path` are not overlapping and both point
+ // to valid memory.
+ // SAFETY: since `addr` is a valid Unix address, it must not be larger than
+ // `SUN_LEN` bytes, thus we won't overwrite the size of sockaddr.sun_path.
+ // SAFETY: null byte is already written because we zeroed the address above.
+ debug_assert!(offset + addr.len() <= sockaddr.sun_path.len());
+ unsafe {
+ ptr::copy_nonoverlapping(
+ addr.as_ptr(),
+ sockaddr.sun_path.as_mut_ptr().add(offset).cast(),
+ addr.len(),
+ )
+ };
+
+ let mut addrlen = path_offset(&sockaddr) + addr.len();
+ // +1 for null byte at the end of the path, not needed for abstract
+ // namespaces (which start with a null byte).
+ match addr.first() {
+ Some(&0) | None => {}
+ Some(_) => addrlen += 1,
+ }
+
+ // SAFETY: the length is fine to cast to `socklen_t` as it's 32 bits and the
+ // address can be at most `SUN_LEN` bytes.
+ (sockaddr, addrlen as _)
+}
+
+fn pair<T>(flags: libc::c_int) -> io::Result<(T, T)>
+where
+ T: FromRawFd,
+{
+ #[cfg(not(any(
+ target_os = "aix",
+ target_os = "haiku",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "nto",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ )))]
+ let flags = flags | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+
+ let mut fds = [-1; 2];
+ syscall!(socketpair(libc::AF_UNIX, flags, 0, fds.as_mut_ptr()))?;
+ let pair = unsafe { (T::from_raw_fd(fds[0]), T::from_raw_fd(fds[1])) };
+
+ // Darwin (and others) doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
+ //
+    // In order to set those flags, additional `fcntl` system calls must be
+    // performed. If an `fcntl` failed after the raw sockets were created, the
+    // file descriptors would leak. Creating `pair` (owned types) above ensures
+    // that on error the file descriptors are closed.
+ #[cfg(any(
+ target_os = "aix",
+ target_os = "haiku",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "nto",
+ target_os = "tvos",
+ target_os = "visionos",
+ target_os = "watchos",
+ target_os = "espidf",
+ target_os = "vita",
+ ))]
+ {
+ syscall!(fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK))?;
+ #[cfg(not(any(target_os = "espidf", target_os = "vita", target_os = "nto")))]
+ syscall!(fcntl(fds[0], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ syscall!(fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK))?;
+ #[cfg(not(any(target_os = "espidf", target_os = "vita", target_os = "nto")))]
+ syscall!(fcntl(fds[1], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ }
+
+ Ok(pair)
+}
+
+#[cfg(test)]
+mod tests {
+ use std::os::unix::net::SocketAddr;
+ use std::path::Path;
+ use std::str;
+
+ use super::{path_offset, unix_addr};
+
+ #[test]
+ fn pathname_address() {
+ const PATH: &str = "./foo/bar.txt";
+ const PATH_LEN: usize = 13;
+
+ // Pathname addresses do have a null terminator, so `socklen` is
+ // expected to be `PATH_LEN` + `offset` + 1.
+ let address = SocketAddr::from_pathname(Path::new(PATH)).unwrap();
+ let (sockaddr, actual) = unix_addr(&address);
+ let offset = path_offset(&sockaddr);
+ let expected = PATH_LEN + offset + 1;
+ assert_eq!(expected as libc::socklen_t, actual)
+ }
+
+ #[test]
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ fn abstract_address() {
+ #[cfg(target_os = "android")]
+ use std::os::android::net::SocketAddrExt;
+ #[cfg(target_os = "linux")]
+ use std::os::linux::net::SocketAddrExt;
+
+ const PATH: &[u8] = &[0, 116, 111, 107, 105, 111];
+ const PATH_LEN: usize = 6;
+
+ // Abstract addresses do not have a null terminator, so `socklen` is
+ // expected to be `PATH_LEN` + `offset`.
+ let address = SocketAddr::from_abstract_name(PATH).unwrap();
+ let (sockaddr, actual) = unix_addr(&address);
+ let offset = path_offset(&sockaddr);
+ let expected = PATH_LEN + offset;
+ assert_eq!(expected as libc::socklen_t, actual)
+ }
+}
diff --git a/vendor/mio/src/sys/unix/uds/stream.rs b/vendor/mio/src/sys/unix/uds/stream.rs
new file mode 100644
index 00000000..dd2b2cab
--- /dev/null
+++ b/vendor/mio/src/sys/unix/uds/stream.rs
@@ -0,0 +1,25 @@
+use std::io;
+use std::os::fd::FromRawFd;
+use std::os::unix::net::{self, SocketAddr};
+
+use crate::sys::unix::net::new_socket;
+use crate::sys::unix::uds::unix_addr;
+
+pub(crate) fn connect_addr(address: &SocketAddr) -> io::Result<net::UnixStream> {
+ let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let socket = unsafe { net::UnixStream::from_raw_fd(fd) };
+
+ let (unix_address, addrlen) = unix_addr(address);
+ let sockaddr = &unix_address as *const libc::sockaddr_un as *const libc::sockaddr;
+ match syscall!(connect(fd, sockaddr, addrlen)) {
+ Ok(_) => {}
+ Err(ref err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ Ok(socket)
+}
+
+pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
+ super::pair(libc::SOCK_STREAM)
+}
diff --git a/vendor/mio/src/sys/unix/waker/eventfd.rs b/vendor/mio/src/sys/unix/waker/eventfd.rs
new file mode 100644
index 00000000..af185efd
--- /dev/null
+++ b/vendor/mio/src/sys/unix/waker/eventfd.rs
@@ -0,0 +1,88 @@
+use std::fs::File;
+use std::io::{self, Read, Write};
+#[cfg(not(target_os = "hermit"))]
+use std::os::fd::{AsRawFd, FromRawFd, RawFd};
+// TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+// can use `std::os::fd` and be merged with the above.
+#[cfg(target_os = "hermit")]
+use std::os::hermit::io::{AsRawFd, FromRawFd, RawFd};
+
+use crate::sys::Selector;
+use crate::{Interest, Token};
+
+/// Waker backed by `eventfd`.
+///
+/// `eventfd` is effectively a 64-bit counter. All writes must be 8 bytes
+/// (64 bits) and are interpreted (native endian) as a 64-bit unsigned
+/// integer that is added to the count. Reads must also be 8 bytes; they
+/// return the count and reset it to 0.
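+///
+/// For example, two `wake` calls (each writing `1u64`) followed by a single
+/// read return `2` and reset the counter to zero, which is why the single
+/// read in `ack_and_reset` drains any number of queued wakes.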
+#[derive(Debug)]
+pub(crate) struct Waker {
+ fd: File,
+}
+
+impl Waker {
+ #[allow(dead_code)] // Not used by the `poll(2)` implementation.
+ pub(crate) fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ let waker = Waker::new_unregistered()?;
+ selector.register(waker.fd.as_raw_fd(), token, Interest::READABLE)?;
+ Ok(waker)
+ }
+
+ pub(crate) fn new_unregistered() -> io::Result<Waker> {
+ #[cfg(not(target_os = "espidf"))]
+ let flags = libc::EFD_CLOEXEC | libc::EFD_NONBLOCK;
+ // ESP-IDF is EFD_NONBLOCK by default and errors if you try to pass this flag.
+ #[cfg(target_os = "espidf")]
+ let flags = 0;
+ let fd = syscall!(eventfd(0, flags))?;
+ let file = unsafe { File::from_raw_fd(fd) };
+ Ok(Waker { fd: file })
+ }
+
+ #[allow(clippy::unused_io_amount)] // Don't care about partial writes.
+ pub(crate) fn wake(&self) -> io::Result<()> {
+ // The epoll emulation on some illumos systems currently requires
+ // the eventfd to be read before an edge-triggered read event is
+ // generated.
+ // See https://www.illumos.org/issues/16700.
+ #[cfg(target_os = "illumos")]
+ self.reset()?;
+
+ let buf: [u8; 8] = 1u64.to_ne_bytes();
+ match (&self.fd).write(&buf) {
+ Ok(_) => Ok(()),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ // Writing only blocks if the counter is going to overflow.
+ // So we'll reset the counter to 0 and wake it again.
+ self.reset()?;
+ self.wake()
+ }
+ Err(err) => Err(err),
+ }
+ }
+
+ #[allow(dead_code)] // Only used by the `poll(2)` implementation.
+ pub(crate) fn ack_and_reset(&self) {
+ let _ = self.reset();
+ }
+
+    /// Reset the eventfd object; this only needs to be called if `wake` fails.
+ #[allow(clippy::unused_io_amount)] // Don't care about partial reads.
+ fn reset(&self) -> io::Result<()> {
+ let mut buf: [u8; 8] = 0u64.to_ne_bytes();
+ match (&self.fd).read(&mut buf) {
+ Ok(_) => Ok(()),
+ // If the `Waker` hasn't been awoken yet this will return a
+ // `WouldBlock` error which we can safely ignore.
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl AsRawFd for Waker {
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd.as_raw_fd()
+ }
+}
diff --git a/vendor/mio/src/sys/unix/waker/kqueue.rs b/vendor/mio/src/sys/unix/waker/kqueue.rs
new file mode 100644
index 00000000..46a0a1ee
--- /dev/null
+++ b/vendor/mio/src/sys/unix/waker/kqueue.rs
@@ -0,0 +1,28 @@
+use std::io;
+
+use crate::sys::Selector;
+use crate::Token;
+
+/// Waker backed by kqueue user space notifications (`EVFILT_USER`).
+///
+/// The implementation is fairly simple: first the kqueue must be set up to
+/// receive waker events, which is done by calling `Selector.setup_waker`.
+/// Next we need access to the kqueue, so we duplicate its file descriptor.
+/// After that, waking is as simple as adding an event to the kqueue.
+#[derive(Debug)]
+pub(crate) struct Waker {
+ selector: Selector,
+ token: Token,
+}
+
+impl Waker {
+ pub(crate) fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ let selector = selector.try_clone()?;
+ selector.setup_waker(token)?;
+ Ok(Waker { selector, token })
+ }
+
+ pub(crate) fn wake(&self) -> io::Result<()> {
+ self.selector.wake(self.token)
+ }
+}
diff --git a/vendor/mio/src/sys/unix/waker/pipe.rs b/vendor/mio/src/sys/unix/waker/pipe.rs
new file mode 100644
index 00000000..dc54744d
--- /dev/null
+++ b/vendor/mio/src/sys/unix/waker/pipe.rs
@@ -0,0 +1,82 @@
+use std::fs::File;
+use std::io::{self, Read, Write};
+#[cfg(not(target_os = "hermit"))]
+use std::os::fd::{AsRawFd, FromRawFd, RawFd};
+// TODO: once <https://github.com/rust-lang/rust/issues/126198> is fixed this
+// can use `std::os::fd` and be merged with the above.
+#[cfg(target_os = "hermit")]
+use std::os::hermit::io::{AsRawFd, FromRawFd, RawFd};
+
+use crate::sys::unix::pipe;
+use crate::sys::Selector;
+use crate::{Interest, Token};
+
+/// Waker backed by a unix pipe.
+///
+/// Waker controls both the sending and receiving ends and empties the pipe
+/// if writing to it (waking) fails.
+#[derive(Debug)]
+pub(crate) struct Waker {
+ sender: File,
+ receiver: File,
+}
+
+impl Waker {
+ #[allow(dead_code)] // Not used by the `poll(2)` implementation.
+ pub(crate) fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ let waker = Waker::new_unregistered()?;
+ selector.register(waker.receiver.as_raw_fd(), token, Interest::READABLE)?;
+ Ok(waker)
+ }
+
+ pub(crate) fn new_unregistered() -> io::Result<Waker> {
+ let [receiver, sender] = pipe::new_raw()?;
+ let sender = unsafe { File::from_raw_fd(sender) };
+ let receiver = unsafe { File::from_raw_fd(receiver) };
+ Ok(Waker { sender, receiver })
+ }
+
+ pub(crate) fn wake(&self) -> io::Result<()> {
+ // The epoll emulation on some illumos systems currently requires
+ // the pipe buffer to be completely empty for an edge-triggered
+ // wakeup on the pipe read side.
+ // See https://www.illumos.org/issues/13436.
+ #[cfg(target_os = "illumos")]
+ self.empty();
+
+ match (&self.sender).write(&[1]) {
+ Ok(_) => Ok(()),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ // The reading end is full so we'll empty the buffer and try
+ // again.
+ self.empty();
+ self.wake()
+ }
+ Err(ref err) if err.kind() == io::ErrorKind::Interrupted => self.wake(),
+ Err(err) => Err(err),
+ }
+ }
+
+ #[allow(dead_code)] // Only used by the `poll(2)` implementation.
+ pub(crate) fn ack_and_reset(&self) {
+ self.empty();
+ }
+
+    /// Empty the pipe's buffer; this only needs to be called if `wake` fails.
+ /// This ignores any errors.
+ fn empty(&self) {
+ let mut buf = [0; 4096];
+ loop {
+ match (&self.receiver).read(&mut buf) {
+ Ok(n) if n > 0 => continue,
+ _ => return,
+ }
+ }
+ }
+}
+
+impl AsRawFd for Waker {
+ fn as_raw_fd(&self) -> RawFd {
+ self.receiver.as_raw_fd()
+ }
+}
diff --git a/vendor/mio/src/sys/wasi/mod.rs b/vendor/mio/src/sys/wasi/mod.rs
new file mode 100644
index 00000000..e1169d28
--- /dev/null
+++ b/vendor/mio/src/sys/wasi/mod.rs
@@ -0,0 +1,370 @@
+//! # Notes
+//!
+//! The current implementation is somewhat limited. The `Waker` is not
+//! implemented, as at the time of writing there is no way to wake up a
+//! thread blocked in a call to `poll_oneoff`.
+//!
+//! Furthermore the (re/de)register functions don't work while concurrently
+//! polling, as both registering and polling require a lock on the
+//! `subscriptions`.
+//!
+//! Finally `Selector::try_clone`, required by `Registry::try_clone`, doesn't
+//! work. However, this could be implemented using an `Arc`.
+//!
+//! In summary, this only (barely) works using a single thread.
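+//!
+//! A minimal single-threaded usage sketch (illustrative only):
+//!
+//! ```ignore
+//! let mut poll = mio::Poll::new()?;
+//! let mut events = mio::Events::with_capacity(8);
+//! // Register, poll, and handle events all on the same thread.
+//! poll.poll(&mut events, Some(std::time::Duration::from_millis(100)))?;
+//! for event in events.iter() {
+//!     println!("readiness for {:?}", event.token());
+//! }
+//! ```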
+
+use std::cmp::min;
+use std::io;
+#[cfg(all(feature = "net", debug_assertions))]
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+#[cfg(feature = "net")]
+use crate::{Interest, Token};
+
+cfg_net! {
+ pub(crate) mod tcp {
+ use std::io;
+ use std::net::{self, SocketAddr};
+
+ pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ let (stream, addr) = listener.accept()?;
+ stream.set_nonblocking(true)?;
+ Ok((stream, addr))
+ }
+ }
+}
+
+/// Unique id for use as `SelectorId`.
+#[cfg(all(debug_assertions, feature = "net"))]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+pub(crate) struct Selector {
+ #[cfg(all(debug_assertions, feature = "net"))]
+ id: usize,
+    /// Subscriptions (read events) we're interested in.
+ subscriptions: Arc<Mutex<Vec<wasi::Subscription>>>,
+}
+
+impl Selector {
+ pub(crate) fn new() -> io::Result<Selector> {
+ Ok(Selector {
+ #[cfg(all(debug_assertions, feature = "net"))]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ subscriptions: Arc::new(Mutex::new(Vec::new())),
+ })
+ }
+
+ #[cfg(all(debug_assertions, feature = "net"))]
+ pub(crate) fn id(&self) -> usize {
+ self.id
+ }
+
+ pub(crate) fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ events.clear();
+
+ let mut subscriptions = self.subscriptions.lock().unwrap();
+
+        // If we want to use a timeout with `poll_oneoff()` we need to add
+        // another subscription to the list.
+ if let Some(timeout) = timeout {
+ subscriptions.push(timeout_subscription(timeout));
+ }
+
+ // `poll_oneoff` needs the same number of events as subscriptions.
+ let length = subscriptions.len();
+ events.reserve(length);
+
+ debug_assert!(events.capacity() >= length);
+ #[cfg(debug_assertions)]
+ if length == 0 {
+ warn!(
+ "calling mio::Poll::poll with empty subscriptions, this likely not what you want"
+ );
+ }
+
+ let res = unsafe { wasi::poll_oneoff(subscriptions.as_ptr(), events.as_mut_ptr(), length) };
+
+ // Remove the timeout subscription we possibly added above.
+ if timeout.is_some() {
+ let timeout_sub = subscriptions.pop();
+ debug_assert_eq!(
+ timeout_sub.unwrap().u.tag,
+ wasi::EVENTTYPE_CLOCK.raw(),
+ "failed to remove timeout subscription"
+ );
+ }
+
+ drop(subscriptions); // Unlock.
+
+ match res {
+ Ok(n_events) => {
+ // Safety: `poll_oneoff` initialises the `events` for us.
+ unsafe { events.set_len(n_events) };
+
+ // Remove the timeout event.
+ if timeout.is_some() {
+ if let Some(index) = events.iter().position(is_timeout_event) {
+ events.swap_remove(index);
+ }
+ }
+
+ check_errors(&events)
+ }
+ Err(err) => Err(io_err(err)),
+ }
+ }
+
+ pub(crate) fn try_clone(&self) -> io::Result<Selector> {
+ Ok(Selector {
+ #[cfg(all(debug_assertions, feature = "net"))]
+ id: self.id,
+ subscriptions: self.subscriptions.clone(),
+ })
+ }
+
+ #[cfg(feature = "net")]
+ pub(crate) fn register(
+ &self,
+ fd: wasi::Fd,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ let mut subscriptions = self.subscriptions.lock().unwrap();
+
+ if interests.is_writable() {
+ let subscription = wasi::Subscription {
+ userdata: token.0 as wasi::Userdata,
+ u: wasi::SubscriptionU {
+ tag: wasi::EVENTTYPE_FD_WRITE.raw(),
+ u: wasi::SubscriptionUU {
+ fd_write: wasi::SubscriptionFdReadwrite {
+ file_descriptor: fd,
+ },
+ },
+ },
+ };
+ subscriptions.push(subscription);
+ }
+
+ if interests.is_readable() {
+ let subscription = wasi::Subscription {
+ userdata: token.0 as wasi::Userdata,
+ u: wasi::SubscriptionU {
+ tag: wasi::EVENTTYPE_FD_READ.raw(),
+ u: wasi::SubscriptionUU {
+ fd_read: wasi::SubscriptionFdReadwrite {
+ file_descriptor: fd,
+ },
+ },
+ },
+ };
+ subscriptions.push(subscription);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(feature = "net")]
+ pub(crate) fn reregister(
+ &self,
+ fd: wasi::Fd,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.deregister(fd)
+ .and_then(|()| self.register(fd, token, interests))
+ }
+
+ #[cfg(feature = "net")]
+ pub(crate) fn deregister(&self, fd: wasi::Fd) -> io::Result<()> {
+ let mut subscriptions = self.subscriptions.lock().unwrap();
+
+ let predicate = |subscription: &wasi::Subscription| {
+ // Safety: `subscription.u.tag` defines the type of the union in
+ // `subscription.u.u`.
+ match subscription.u.tag {
+ t if t == wasi::EVENTTYPE_FD_WRITE.raw() => unsafe {
+ subscription.u.u.fd_write.file_descriptor == fd
+ },
+ t if t == wasi::EVENTTYPE_FD_READ.raw() => unsafe {
+ subscription.u.u.fd_read.file_descriptor == fd
+ },
+ _ => false,
+ }
+ };
+
+ let mut ret = Err(io::ErrorKind::NotFound.into());
+
+ while let Some(index) = subscriptions.iter().position(predicate) {
+ subscriptions.swap_remove(index);
+ ret = Ok(())
+ }
+
+ ret
+ }
+}
+
+/// Token used to add a timeout subscription, also used when removing it again.
+const TIMEOUT_TOKEN: wasi::Userdata = wasi::Userdata::MAX;
+
+/// Returns a `wasi::Subscription` for `timeout`.
+fn timeout_subscription(timeout: Duration) -> wasi::Subscription {
+ wasi::Subscription {
+ userdata: TIMEOUT_TOKEN,
+ u: wasi::SubscriptionU {
+ tag: wasi::EVENTTYPE_CLOCK.raw(),
+ u: wasi::SubscriptionUU {
+ clock: wasi::SubscriptionClock {
+ id: wasi::CLOCKID_MONOTONIC,
+ // Timestamp is in nanoseconds.
+ timeout: min(wasi::Timestamp::MAX as u128, timeout.as_nanos())
+ as wasi::Timestamp,
+ // Give the implementation another millisecond to coalesce
+ // events.
+ precision: Duration::from_millis(1).as_nanos() as wasi::Timestamp,
+ // Zero means the `timeout` is considered relative to the
+ // current time.
+ flags: 0,
+ },
+ },
+ },
+ }
+}
+
+fn is_timeout_event(event: &wasi::Event) -> bool {
+ event.type_ == wasi::EVENTTYPE_CLOCK && event.userdata == TIMEOUT_TOKEN
+}
+
+/// Check all events for possible errors, returning the first error found.
+fn check_errors(events: &[Event]) -> io::Result<()> {
+ for event in events {
+ if event.error != wasi::ERRNO_SUCCESS {
+ return Err(io_err(event.error));
+ }
+ }
+ Ok(())
+}
+
+/// Convert `wasi::Errno` into an `io::Error`.
+fn io_err(errno: wasi::Errno) -> io::Error {
+ // TODO: check if this is valid.
+ io::Error::from_raw_os_error(errno.raw() as i32)
+}
+
+pub(crate) type Events = Vec<Event>;
+
+pub(crate) type Event = wasi::Event;
+
+pub(crate) mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ pub(crate) fn token(event: &Event) -> Token {
+ Token(event.userdata as usize)
+ }
+
+ pub(crate) fn is_readable(event: &Event) -> bool {
+ event.type_ == wasi::EVENTTYPE_FD_READ
+ }
+
+ pub(crate) fn is_writable(event: &Event) -> bool {
+ event.type_ == wasi::EVENTTYPE_FD_WRITE
+ }
+
+ pub(crate) fn is_error(_: &Event) -> bool {
+        // Not supported? It could be that `wasi::Event.error` could be used
+        // for this, but the docs say `error that occurred while processing
+        // the subscription request`, so it's already checked in
+        // `Selector::select`.
+ false
+ }
+
+ pub(crate) fn is_read_closed(event: &Event) -> bool {
+ event.type_ == wasi::EVENTTYPE_FD_READ
+ // Safety: checked the type of the union above.
+ && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0
+ }
+
+ pub(crate) fn is_write_closed(event: &Event) -> bool {
+ event.type_ == wasi::EVENTTYPE_FD_WRITE
+ // Safety: checked the type of the union above.
+ && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0
+ }
+
+ pub(crate) fn is_priority(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub(crate) fn is_aio(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub(crate) fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub(crate) fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ debug_detail!(
+ TypeDetails(wasi::Eventtype),
+ PartialEq::eq,
+ wasi::EVENTTYPE_CLOCK,
+ wasi::EVENTTYPE_FD_READ,
+ wasi::EVENTTYPE_FD_WRITE,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flag(got: &wasi::Eventrwflags, want: &wasi::Eventrwflags) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ EventrwflagsDetails(wasi::Eventrwflags),
+ check_flag,
+ wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP,
+ );
+
+ struct EventFdReadwriteDetails(wasi::EventFdReadwrite);
+
+ impl fmt::Debug for EventFdReadwriteDetails {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("EventFdReadwrite")
+ .field("nbytes", &self.0.nbytes)
+ .field("flags", &EventrwflagsDetails(self.0.flags))
+ .finish()
+ }
+ }
+
+ f.debug_struct("Event")
+ .field("userdata", &event.userdata)
+ .field("error", &event.error)
+ .field("type", &TypeDetails(event.type_))
+ .field("fd_readwrite", &EventFdReadwriteDetails(event.fd_readwrite))
+ .finish()
+ }
+}
+
+cfg_os_poll! {
+ cfg_io_source! {
+ pub(crate) struct IoSourceState;
+
+ impl IoSourceState {
+ pub(crate) fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub(crate) fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/windows/afd.rs b/vendor/mio/src/sys/windows/afd.rs
new file mode 100644
index 00000000..bd929354
--- /dev/null
+++ b/vendor/mio/src/sys/windows/afd.rs
@@ -0,0 +1,243 @@
+use std::ffi::c_void;
+use std::fmt;
+use std::fs::File;
+use std::io;
+use std::mem::size_of;
+use std::os::windows::io::AsRawHandle;
+
+use windows_sys::Wdk::Storage::FileSystem::NtCancelIoFileEx;
+use windows_sys::Wdk::System::IO::NtDeviceIoControlFile;
+use windows_sys::Win32::Foundation::{
+ RtlNtStatusToDosError, HANDLE, NTSTATUS, STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS,
+};
+use windows_sys::Win32::System::IO::{IO_STATUS_BLOCK, IO_STATUS_BLOCK_0};
+
+const IOCTL_AFD_POLL: u32 = 0x00012024;
+
+/// Winsock2 AFD driver instance.
+///
+/// All operations are unsafe because the `IO_STATUS_BLOCK` parameter is used
+/// by the AFD driver while the operation is `STATUS_PENDING`, before the I/O
+/// completion port returns its result.
+#[derive(Debug)]
+pub struct Afd {
+ fd: File,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct AfdPollHandleInfo {
+ pub handle: HANDLE,
+ pub events: u32,
+ pub status: NTSTATUS,
+}
+
+unsafe impl Send for AfdPollHandleInfo {}
+
+#[repr(C)]
+pub struct AfdPollInfo {
+ pub timeout: i64,
+    // Can only have the value 1.
+ pub number_of_handles: u32,
+ pub exclusive: u32,
+ pub handles: [AfdPollHandleInfo; 1],
+}
+
+impl fmt::Debug for AfdPollInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AfdPollInfo").finish()
+ }
+}
+
+impl Afd {
+ /// Poll `Afd` instance with `AfdPollInfo`.
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the memory of the `IO_STATUS_BLOCK` is still used by the `Afd` instance while `Ok(false)` (`STATUS_PENDING`) is returned.
+    /// `iosb` must remain untouched for the entire time the operation is in flight; only the `cancel` method may touch it.
+    /// So be careful not to call `poll` again while a poll is still pending.
+    /// On error the caller should deallocate the overlapped value to prevent a memory leak.
+ pub unsafe fn poll(
+ &self,
+ info: &mut AfdPollInfo,
+ iosb: *mut IO_STATUS_BLOCK,
+ overlapped: *mut c_void,
+ ) -> io::Result<bool> {
+ let info_ptr = info as *mut _ as *mut c_void;
+ (*iosb).Anonymous.Status = STATUS_PENDING;
+ let status = NtDeviceIoControlFile(
+ self.fd.as_raw_handle() as HANDLE,
+ std::ptr::null_mut(),
+ None,
+ overlapped,
+ iosb,
+ IOCTL_AFD_POLL,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ );
+ match status {
+ STATUS_SUCCESS => Ok(true),
+ STATUS_PENDING => Ok(false),
+ _ => Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ )),
+ }
+ }
+
+ /// Cancel previous polled request of `Afd`.
+ ///
+    /// `iosb` must have been used by `poll` first for `cancel` to be valid.
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the memory of the `IO_STATUS_BLOCK` is still used by the `Afd` instance while `Ok(false)` (`STATUS_PENDING`) is returned.
+    /// Use it only while the request is still being polled, so that you have a valid `IO_STATUS_BLOCK` to use.
+    /// The caller should NOT deallocate the overlapped value after `cancel`, to prevent a double free.
+ pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> {
+ if (*iosb).Anonymous.Status != STATUS_PENDING {
+ return Ok(());
+ }
+
+ let mut cancel_iosb = IO_STATUS_BLOCK {
+ Anonymous: IO_STATUS_BLOCK_0 { Status: 0 },
+ Information: 0,
+ };
+ let status = NtCancelIoFileEx(self.fd.as_raw_handle() as HANDLE, iosb, &mut cancel_iosb);
+ if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND {
+ return Ok(());
+ }
+ Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ ))
+ }
+}
+
+cfg_io_source! {
+ use std::mem::zeroed;
+ use std::os::windows::io::{FromRawHandle, RawHandle};
+ use std::ptr::null_mut;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ use windows_sys::Wdk::Foundation::OBJECT_ATTRIBUTES;
+ use windows_sys::Wdk::Storage::FileSystem::{NtCreateFile, FILE_OPEN};
+ use windows_sys::Win32::Foundation::{INVALID_HANDLE_VALUE, UNICODE_STRING};
+ use windows_sys::Win32::Storage::FileSystem::{
+ SetFileCompletionNotificationModes, FILE_SHARE_READ, FILE_SHARE_WRITE, SYNCHRONIZE,
+ };
+ use windows_sys::Win32::System::WindowsProgramming::FILE_SKIP_SET_EVENT_ON_HANDLE;
+
+ use super::iocp::CompletionPort;
+
+ const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES {
+ Length: size_of::<OBJECT_ATTRIBUTES>() as u32,
+ RootDirectory: null_mut(),
+ ObjectName: &AFD_OBJ_NAME as *const _ as *mut _,
+ Attributes: 0,
+ SecurityDescriptor: null_mut(),
+ SecurityQualityOfService: null_mut(),
+ };
+
+ const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING {
+ Length: (AFD_HELPER_NAME.len() * size_of::<u16>()) as u16,
+ MaximumLength: (AFD_HELPER_NAME.len() * size_of::<u16>()) as u16,
+ Buffer: AFD_HELPER_NAME.as_ptr() as *mut _,
+ };
+
+ const AFD_HELPER_NAME: &[u16] = &[
+ '\\' as _,
+ 'D' as _,
+ 'e' as _,
+ 'v' as _,
+ 'i' as _,
+ 'c' as _,
+ 'e' as _,
+ '\\' as _,
+ 'A' as _,
+ 'f' as _,
+ 'd' as _,
+ '\\' as _,
+ 'M' as _,
+ 'i' as _,
+ 'o' as _
+ ];
+
+ static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0);
+
+ impl AfdPollInfo {
+ pub fn zeroed() -> AfdPollInfo {
+ unsafe { zeroed() }
+ }
+ }
+
+ impl Afd {
+ /// Create new Afd instance.
+ pub(crate) fn new(cp: &CompletionPort) -> io::Result<Afd> {
+ let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE;
+ let mut iosb = IO_STATUS_BLOCK {
+ Anonymous: IO_STATUS_BLOCK_0 { Status: 0 },
+ Information: 0,
+ };
+
+ unsafe {
+ let status = NtCreateFile(
+ &mut afd_helper_handle as *mut _,
+ SYNCHRONIZE,
+ &AFD_HELPER_ATTRIBUTES as *const _ as *mut _,
+ &mut iosb,
+ null_mut(),
+ 0,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ FILE_OPEN,
+ 0,
+ null_mut(),
+ 0,
+ );
+ if status != STATUS_SUCCESS {
+ let raw_err = io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ );
+ let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err);
+ return Err(io::Error::new(raw_err.kind(), msg));
+ }
+ let fd = File::from_raw_handle(afd_helper_handle as RawHandle);
+ // Increment by 2 to reserve space for other types of handles.
+ // Non-AFD types (currently only NamedPipe), use odd numbered
+ // tokens. This allows the selector to differentiate between them
+ // and dispatch events accordingly.
+ let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2;
+ let afd = Afd { fd };
+ cp.add_handle(token, &afd.fd)?;
+ match SetFileCompletionNotificationModes(
+ afd_helper_handle,
+ FILE_SKIP_SET_EVENT_ON_HANDLE as u8 // This is just 2, so fits in u8
+ ) {
+ 0 => Err(io::Error::last_os_error()),
+ _ => Ok(afd),
+ }
+ }
+ }
+ }
+}
+
+pub const POLL_RECEIVE: u32 = 0b0_0000_0001;
+pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010;
+pub const POLL_SEND: u32 = 0b0_0000_0100;
+pub const POLL_DISCONNECT: u32 = 0b0_0000_1000;
+pub const POLL_ABORT: u32 = 0b0_0001_0000;
+pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000;
+// Not used, as it is indicated on every event while a connection is
+// connected, not just the first time the connection is established.
+// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece.
+pub const POLL_CONNECT: u32 = 0b0_0100_0000;
+pub const POLL_ACCEPT: u32 = 0b0_1000_0000;
+pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000;
+
+pub const KNOWN_EVENTS: u32 = POLL_RECEIVE
+ | POLL_RECEIVE_EXPEDITED
+ | POLL_SEND
+ | POLL_DISCONNECT
+ | POLL_ABORT
+ | POLL_LOCAL_CLOSE
+ | POLL_ACCEPT
+ | POLL_CONNECT_FAIL;
diff --git a/vendor/mio/src/sys/windows/event.rs b/vendor/mio/src/sys/windows/event.rs
new file mode 100644
index 00000000..66656d0e
--- /dev/null
+++ b/vendor/mio/src/sys/windows/event.rs
@@ -0,0 +1,169 @@
+use std::fmt;
+
+use super::afd;
+use super::iocp::CompletionStatus;
+use crate::Token;
+
+#[derive(Clone)]
+pub struct Event {
+ pub flags: u32,
+ pub data: u64,
+}
+
+pub fn token(event: &Event) -> Token {
+ Token(event.data as usize)
+}
+
+impl Event {
+ pub(super) fn new(token: Token) -> Event {
+ Event {
+ flags: 0,
+ data: usize::from(token) as u64,
+ }
+ }
+
+ pub(super) fn set_readable(&mut self) {
+ self.flags |= afd::POLL_RECEIVE
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn set_writable(&mut self) {
+ self.flags |= afd::POLL_SEND;
+ }
+
+ pub(super) fn from_completion_status(status: &CompletionStatus) -> Event {
+ Event {
+ flags: status.bytes_transferred(),
+ data: status.token() as u64,
+ }
+ }
+
+ pub(super) fn to_completion_status(&self) -> CompletionStatus {
+ CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut())
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn to_completion_status_with_overlapped(
+ &self,
+ overlapped: *mut super::Overlapped,
+ ) -> CompletionStatus {
+ CompletionStatus::new(self.flags, self.data as usize, overlapped)
+ }
+}
+
+pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE
+ | afd::POLL_DISCONNECT
+ | afd::POLL_ACCEPT
+ | afd::POLL_ABORT
+ | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL;
+pub(crate) const READ_CLOSED_FLAGS: u32 =
+ afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+
+pub fn is_readable(event: &Event) -> bool {
+ event.flags & READABLE_FLAGS != 0
+}
+
+pub fn is_writable(event: &Event) -> bool {
+ event.flags & WRITABLE_FLAGS != 0
+}
+
+pub fn is_error(event: &Event) -> bool {
+ event.flags & ERROR_FLAGS != 0
+}
+
+pub fn is_read_closed(event: &Event) -> bool {
+ event.flags & READ_CLOSED_FLAGS != 0
+}
+
+pub fn is_write_closed(event: &Event) -> bool {
+ event.flags & WRITE_CLOSED_FLAGS != 0
+}
+
+pub fn is_priority(event: &Event) -> bool {
+ event.flags & afd::POLL_RECEIVE_EXPEDITED != 0
+}
+
+pub fn is_aio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flags(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(u32),
+ check_flags,
+ afd::POLL_RECEIVE,
+ afd::POLL_RECEIVE_EXPEDITED,
+ afd::POLL_SEND,
+ afd::POLL_DISCONNECT,
+ afd::POLL_ABORT,
+ afd::POLL_LOCAL_CLOSE,
+ afd::POLL_CONNECT,
+ afd::POLL_ACCEPT,
+ afd::POLL_CONNECT_FAIL,
+ );
+
+ f.debug_struct("event")
+ .field("flags", &FlagsDetails(event.flags))
+ .field("data", &event.data)
+ .finish()
+}
+
+pub struct Events {
+ /// Raw I/O event completions are filled in here by the call to `get_many`
+ /// on the completion port above. These are then processed to run callbacks
+ /// which figure out what to do after the event is done.
+ pub statuses: Box<[CompletionStatus]>,
+
+    /// Literal events returned upwards to the `EventLoop` by `get`. This file
+    /// doesn't really modify this (except for the waker); instead almost all
+ /// events are filled in by the `ReadinessQueue` from the `poll` module.
+ pub events: Vec<Event>,
+}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ // Note that it's possible for the output `events` to grow beyond the
+ // capacity as it can also include deferred events, but that's certainly
+ // not the end of the world!
+ Events {
+ statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(),
+ events: Vec::with_capacity(cap),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<&Event> {
+ self.events.get(idx)
+ }
+
+ pub fn clear(&mut self) {
+ self.events.clear();
+ for status in self.statuses.iter_mut() {
+ *status = CompletionStatus::zero();
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/windows/handle.rs b/vendor/mio/src/sys/windows/handle.rs
new file mode 100644
index 00000000..5b9ac0b6
--- /dev/null
+++ b/vendor/mio/src/sys/windows/handle.rs
@@ -0,0 +1,30 @@
+use std::os::windows::io::RawHandle;
+use windows_sys::Win32::Foundation::{CloseHandle, HANDLE};
+
+/// Wrapper around a Windows HANDLE so that we close it upon drop in all scenarios
+#[derive(Debug)]
+pub struct Handle(HANDLE);
+
+impl Handle {
+ #[inline]
+ pub fn new(handle: HANDLE) -> Self {
+ Self(handle)
+ }
+
+ pub fn raw(&self) -> HANDLE {
+ self.0
+ }
+
+ pub fn into_raw(self) -> RawHandle {
+ let ret = self.0;
+ // This is super important so that drop is not called!
+ std::mem::forget(self);
+ ret as RawHandle
+ }
+}
+
+impl Drop for Handle {
+ fn drop(&mut self) {
+ unsafe { CloseHandle(self.0) };
+ }
+}
diff --git a/vendor/mio/src/sys/windows/io_status_block.rs b/vendor/mio/src/sys/windows/io_status_block.rs
new file mode 100644
index 00000000..bd2a6dcf
--- /dev/null
+++ b/vendor/mio/src/sys/windows/io_status_block.rs
@@ -0,0 +1,40 @@
+use std::fmt;
+use std::ops::{Deref, DerefMut};
+
+use windows_sys::Win32::System::IO::IO_STATUS_BLOCK;
+
+pub struct IoStatusBlock(IO_STATUS_BLOCK);
+
+cfg_io_source! {
+ use windows_sys::Win32::System::IO::IO_STATUS_BLOCK_0;
+
+ impl IoStatusBlock {
+ pub fn zeroed() -> Self {
+ Self(IO_STATUS_BLOCK {
+ Anonymous: IO_STATUS_BLOCK_0 { Status: 0 },
+ Information: 0,
+ })
+ }
+ }
+}
+
+unsafe impl Send for IoStatusBlock {}
+
+impl Deref for IoStatusBlock {
+ type Target = IO_STATUS_BLOCK;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for IoStatusBlock {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl fmt::Debug for IoStatusBlock {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IoStatusBlock").finish()
+ }
+}
diff --git a/vendor/mio/src/sys/windows/iocp.rs b/vendor/mio/src/sys/windows/iocp.rs
new file mode 100644
index 00000000..0e1d17cf
--- /dev/null
+++ b/vendor/mio/src/sys/windows/iocp.rs
@@ -0,0 +1,282 @@
+//! Bindings to IOCP, I/O Completion Ports
+
+use super::{Handle, Overlapped};
+use std::cmp;
+use std::fmt;
+use std::io;
+use std::mem;
+use std::os::windows::io::*;
+use std::ptr::null_mut;
+use std::time::Duration;
+
+use windows_sys::Win32::Foundation::{HANDLE, INVALID_HANDLE_VALUE};
+use windows_sys::Win32::System::IO::{
+ CreateIoCompletionPort, GetQueuedCompletionStatusEx, PostQueuedCompletionStatus, OVERLAPPED,
+ OVERLAPPED_ENTRY,
+};
+
+/// A handle to a Windows I/O Completion Port.
+#[derive(Debug)]
+pub(crate) struct CompletionPort {
+ handle: Handle,
+}
+
+// SAFETY: `Handle`s are, in general, not thread-safe. However, we only use
+// `Handle`s for resources that are thread-safe in `CompletionPort`.
+unsafe impl Send for CompletionPort {}
+
+// SAFETY: `Handle`s are, in general, not thread-safe. However, we only use
+// `Handle`s for resources that are thread-safe in `CompletionPort`.
+unsafe impl Sync for CompletionPort {}
+
+/// A status message received from an I/O completion port.
+///
+/// These statuses can be created via the `new` or `empty` constructors and then
+/// provided to a completion port, or they are read out of a completion port.
+/// The fields of each status are read through its accessor methods.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct CompletionStatus(OVERLAPPED_ENTRY);
+
+impl fmt::Debug for CompletionStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "CompletionStatus(OVERLAPPED_ENTRY)")
+ }
+}
+
+unsafe impl Send for CompletionStatus {}
+unsafe impl Sync for CompletionStatus {}
+
+impl CompletionPort {
+ /// Creates a new I/O completion port with the specified concurrency value.
+ ///
+ /// The number of threads given corresponds to the level of concurrency
+ /// allowed for threads associated with this port. Consult the Windows
+ /// documentation for more information about this value.
+ pub fn new(threads: u32) -> io::Result<CompletionPort> {
+ let ret = unsafe { CreateIoCompletionPort(INVALID_HANDLE_VALUE, null_mut(), 0, threads) };
+ if ret.is_null() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(CompletionPort {
+ handle: Handle::new(ret),
+ })
+ }
+ }
+
+ /// Associates a new `HANDLE` to this I/O completion port.
+ ///
+ /// This function will associate the given handle to this port with the
+ /// given `token` to be returned in status messages whenever it receives a
+ /// notification.
+ ///
+ /// Any object which is convertible to a `HANDLE` via the `AsRawHandle`
+ /// trait can be provided to this function, such as `std::fs::File` and
+ /// friends.
+ #[cfg(any(feature = "net", feature = "os-ext"))]
+ pub fn add_handle<T: AsRawHandle + ?Sized>(&self, token: usize, t: &T) -> io::Result<()> {
+ let ret = unsafe {
+ CreateIoCompletionPort(t.as_raw_handle() as HANDLE, self.handle.raw(), token, 0)
+ };
+ if ret.is_null() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Dequeues a number of completion statuses from this I/O completion port.
+ ///
+ /// This function is the same as `get` except that it may return more than
+ /// one status. A buffer of "zero" statuses is provided (the contents are
+ /// not read) and then on success this function will return a sub-slice of
+ /// statuses which represent those which were dequeued from this port. This
+ /// function does not wait to fill up the entire list of statuses provided.
+ ///
+ /// Like with `get`, a timeout may be specified for this operation.
+ pub fn get_many<'a>(
+ &self,
+ list: &'a mut [CompletionStatus],
+ timeout: Option<Duration>,
+ ) -> io::Result<&'a mut [CompletionStatus]> {
+ debug_assert_eq!(
+ mem::size_of::<CompletionStatus>(),
+ mem::size_of::<OVERLAPPED_ENTRY>()
+ );
+ let mut removed = 0;
+ let timeout = duration_millis(timeout);
+ let len = cmp::min(list.len(), u32::MAX as usize) as u32;
+ let ret = unsafe {
+ GetQueuedCompletionStatusEx(
+ self.handle.raw(),
+ list.as_ptr() as *mut _,
+ len,
+ &mut removed,
+ timeout,
+ 0,
+ )
+ };
+
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(&mut list[..removed as usize])
+ }
+ }
+
+ /// Posts a new completion status onto this I/O completion port.
+ ///
+ /// This function will post the given status, with custom parameters, to the
+ /// port. Threads blocked in `get` or `get_many` will eventually receive
+ /// this status.
+ pub fn post(&self, status: CompletionStatus) -> io::Result<()> {
+ let ret = unsafe {
+ PostQueuedCompletionStatus(
+ self.handle.raw(),
+ status.0.dwNumberOfBytesTransferred,
+ status.0.lpCompletionKey,
+ status.0.lpOverlapped,
+ )
+ };
+
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl AsRawHandle for CompletionPort {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.handle.raw() as RawHandle
+ }
+}
+
+impl FromRawHandle for CompletionPort {
+ unsafe fn from_raw_handle(handle: RawHandle) -> CompletionPort {
+ CompletionPort {
+ handle: Handle::new(handle as HANDLE),
+ }
+ }
+}
+
+impl IntoRawHandle for CompletionPort {
+ fn into_raw_handle(self) -> RawHandle {
+ self.handle.into_raw()
+ }
+}
+
+impl CompletionStatus {
+ /// Creates a new completion status with the provided parameters.
+ ///
+ /// This function is useful when creating a status to send to a port with
+ /// the `post` method. The parameters are opaquely passed through and not
+ /// interpreted by the system at all.
+ pub(crate) fn new(bytes: u32, token: usize, overlapped: *mut Overlapped) -> Self {
+ CompletionStatus(OVERLAPPED_ENTRY {
+ dwNumberOfBytesTransferred: bytes,
+ lpCompletionKey: token,
+ lpOverlapped: overlapped as *mut _,
+ Internal: 0,
+ })
+ }
+
+ /// Creates a new borrowed completion status from the borrowed
+ /// `OVERLAPPED_ENTRY` argument provided.
+ ///
+ /// This method will wrap the `OVERLAPPED_ENTRY` in a `CompletionStatus`,
+ /// returning the wrapped structure.
+ #[cfg(feature = "os-ext")]
+ pub fn from_entry(entry: &OVERLAPPED_ENTRY) -> &Self {
+ // Safety: CompletionStatus is repr(transparent) w/ OVERLAPPED_ENTRY, so
+ // a reference to one is guaranteed to be layout compatible with the
+ // reference to another.
+ unsafe { &*(entry as *const _ as *const _) }
+ }
+
+ /// Creates a new "zero" completion status.
+ ///
+ /// This function is useful when creating a stack buffer or vector of
+ /// completion statuses to be passed to the `get_many` function.
+ pub fn zero() -> Self {
+ Self::new(0, 0, null_mut())
+ }
+
+ /// Returns the number of bytes that were transferred for the I/O operation
+ /// associated with this completion status.
+ pub fn bytes_transferred(&self) -> u32 {
+ self.0.dwNumberOfBytesTransferred
+ }
+
+ /// Returns the completion key value associated with the file handle whose
+ /// I/O operation has completed.
+ ///
+ /// A completion key is a per-handle key that is specified when it is added
+ /// to an I/O completion port via `add_handle` or `add_socket`.
+ pub fn token(&self) -> usize {
+ self.0.lpCompletionKey as usize
+ }
+
+ /// Returns a pointer to the `Overlapped` structure that was specified when
+ /// the I/O operation was started.
+ pub fn overlapped(&self) -> *mut OVERLAPPED {
+ self.0.lpOverlapped
+ }
+
+ /// Returns a pointer to the internal `OVERLAPPED_ENTRY` object.
+ pub fn entry(&self) -> &OVERLAPPED_ENTRY {
+ &self.0
+ }
+}
+
+#[inline]
+fn duration_millis(dur: Option<Duration>) -> u32 {
+ if let Some(dur) = dur {
+ // `Duration::as_millis` truncates, so round up. This avoids
+ // turning sub-millisecond timeouts into a zero timeout, unless
+ // the caller explicitly requests that by specifying a zero
+ // timeout.
+ let dur_ms = dur
+ .checked_add(Duration::from_nanos(999_999))
+ .unwrap_or(dur)
+ .as_millis();
+ cmp::min(dur_ms, u32::MAX as u128) as u32
+ } else {
+ u32::MAX
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{CompletionPort, CompletionStatus};
+
+ #[test]
+ fn is_send_sync() {
+ fn is_send_sync<T: Send + Sync>() {}
+ is_send_sync::<CompletionPort>();
+ }
+
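+    // Added sketch (not part of the vendored crate): checks the rounding
+    // behaviour documented on `duration_millis`.
+    #[test]
+    fn duration_millis_rounds_up() {
+        use super::duration_millis;
+        use std::time::Duration;
+        // Sub-millisecond timeouts round up to 1ms instead of becoming zero.
+        assert_eq!(duration_millis(Some(Duration::from_nanos(1))), 1);
+        // An explicit zero timeout stays zero.
+        assert_eq!(duration_millis(Some(Duration::ZERO)), 0);
+        // No timeout maps to an infinite wait.
+        assert_eq!(duration_millis(None), u32::MAX);
+    }
+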
+ #[test]
+ fn get_many() {
+ let c = CompletionPort::new(1).unwrap();
+
+ c.post(CompletionStatus::new(1, 2, 3 as *mut _)).unwrap();
+ c.post(CompletionStatus::new(4, 5, 6 as *mut _)).unwrap();
+
+ let mut s = vec![CompletionStatus::zero(); 4];
+ {
+ let s = c.get_many(&mut s, None).unwrap();
+ assert_eq!(s.len(), 2);
+ assert_eq!(s[0].bytes_transferred(), 1);
+ assert_eq!(s[0].token(), 2);
+ assert_eq!(s[0].overlapped(), 3 as *mut _);
+ assert_eq!(s[1].bytes_transferred(), 4);
+ assert_eq!(s[1].token(), 5);
+ assert_eq!(s[1].overlapped(), 6 as *mut _);
+ }
+ assert_eq!(s[2].bytes_transferred(), 0);
+ assert_eq!(s[2].token(), 0);
+ assert_eq!(s[2].overlapped(), 0 as *mut _);
+ }
+}
diff --git a/vendor/mio/src/sys/windows/mod.rs b/vendor/mio/src/sys/windows/mod.rs
new file mode 100644
index 00000000..89d74b1a
--- /dev/null
+++ b/vendor/mio/src/sys/windows/mod.rs
@@ -0,0 +1,154 @@
+mod afd;
+
+pub mod event;
+pub use event::{Event, Events};
+
+mod handle;
+use handle::Handle;
+
+mod io_status_block;
+mod iocp;
+
+mod overlapped;
+use overlapped::Overlapped;
+
+mod selector;
+pub use selector::Selector;
+
+// Macros must be defined before the modules that use them
+cfg_net! {
+ /// Helper macro to execute a system call that returns an `io::Result`.
+ macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{
+ let res = unsafe { $fn($($arg, )*) };
+ if $err_test(&res, &$err_value) {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+ }
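+
+    // Example use (illustrative): `WSAStartup` returns non-zero on failure,
+    // so a result not equal to zero is treated as an error:
+    //
+    //     syscall!(WSAStartup(0x202, &mut data), PartialEq::ne, 0)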
+
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+
+ pub use selector::{SelectorInner, SockState};
+}
+
+cfg_os_ext! {
+ pub(crate) mod named_pipe;
+}
+
+mod waker;
+pub(crate) use waker::Waker;
+
+cfg_io_source! {
+ use std::io;
+ use std::os::windows::io::RawSocket;
+ use std::pin::Pin;
+ use std::sync::{Arc, Mutex};
+
+ use crate::{Interest, Registry, Token};
+
+ struct InternalState {
+ selector: Arc<SelectorInner>,
+ token: Token,
+ interests: Interest,
+ sock_state: Pin<Arc<Mutex<SockState>>>,
+ }
+
+ impl Drop for InternalState {
+ fn drop(&mut self) {
+ let mut sock_state = self.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ }
+
+ pub struct IoSourceState {
+ // This is `None` if the socket has not yet been registered.
+ //
+ // We box the internal state to not increase the size on the stack as the
+ // type might move around a lot.
+ inner: Option<Box<InternalState>>,
+ }
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState { inner: None }
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ let result = f(io);
+ if let Err(ref e) = result {
+ if e.kind() == io::ErrorKind::WouldBlock {
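+                    // The source hit `WouldBlock`: re-arm it with the
+                    // selector so the next readiness event is delivered.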
+ self.inner.as_ref().map_or(Ok(()), |state| {
+ state
+ .selector
+ .reregister(state.sock_state.clone(), state.token, state.interests)
+ })?;
+ }
+ }
+ result
+ }
+
+ pub fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ socket: RawSocket,
+ ) -> io::Result<()> {
+ if self.inner.is_some() {
+ Err(io::ErrorKind::AlreadyExists.into())
+ } else {
+ registry
+ .selector()
+ .register(socket, token, interests)
+ .map(|state| {
+ self.inner = Some(Box::new(state));
+ })
+ }
+ }
+
+ pub fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ registry
+ .selector()
+ .reregister(state.sock_state.clone(), token, interests)
+ .map(|()| {
+ state.token = token;
+ state.interests = interests;
+ })
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ {
+ let mut sock_state = state.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ self.inner = None;
+ Ok(())
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/windows/named_pipe.rs b/vendor/mio/src/sys/windows/named_pipe.rs
new file mode 100644
index 00000000..dce2b667
--- /dev/null
+++ b/vendor/mio/src/sys/windows/named_pipe.rs
@@ -0,0 +1,1060 @@
+use std::ffi::OsStr;
+use std::io::{self, Read, Write};
+use std::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle};
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::sync::{Arc, Mutex};
+use std::{fmt, mem, slice};
+
+use windows_sys::Win32::Foundation::{
+ ERROR_BROKEN_PIPE, ERROR_IO_INCOMPLETE, ERROR_IO_PENDING, ERROR_NO_DATA, ERROR_PIPE_CONNECTED,
+ ERROR_PIPE_LISTENING, HANDLE, INVALID_HANDLE_VALUE,
+};
+use windows_sys::Win32::Storage::FileSystem::{
+ ReadFile, WriteFile, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, PIPE_ACCESS_DUPLEX,
+};
+use windows_sys::Win32::System::Pipes::{
+ ConnectNamedPipe, CreateNamedPipeW, DisconnectNamedPipe, PIPE_TYPE_BYTE,
+ PIPE_UNLIMITED_INSTANCES,
+};
+use windows_sys::Win32::System::IO::{
+ CancelIoEx, GetOverlappedResult, OVERLAPPED, OVERLAPPED_ENTRY,
+};
+
+use crate::event::Source;
+use crate::sys::windows::iocp::{CompletionPort, CompletionStatus};
+use crate::sys::windows::{Event, Handle, Overlapped};
+use crate::Registry;
+use crate::{Interest, Token};
+
+/// Non-blocking windows named pipe.
+///
+/// This structure internally contains a `HANDLE` which represents the named
+/// pipe, and also maintains state associated with the mio event loop and active
+/// I/O operations that have been scheduled to translate IOCP to a readiness
+/// model.
+///
+/// Note, IOCP is a *completion* based model whereas mio is a *readiness* based
+/// model. To bridge this, `NamedPipe` performs internal buffering. Writes are
+/// written to an internal buffer and the buffer is submitted to IOCP. IOCP
+/// reads are submitted using internal buffers and `NamedPipe::read` reads from
+/// this internal buffer.
+///
+/// # Trait implementations
+///
+/// The `Read` and `Write` traits are implemented for `NamedPipe` and for
+/// `&NamedPipe`. This means a named pipe can be read from and written to
+/// concurrently, through both owned and borrowed references. Typically a named
+/// pipe needs to be connected to a client before it can be read or written,
+/// however.
+///
+/// Note that for I/O operations on a named pipe to succeed, the named pipe
+/// needs to be associated with an event loop. Until this happens all I/O
+/// operations will return a "would block" error.
+///
+/// # Managing connections
+///
+/// The `NamedPipe` type supports a `connect` method to connect to a client and
+/// a `disconnect` method to disconnect from that client. These two methods only
+/// work once a named pipe is associated with an event loop.
+///
+/// The `connect` method will succeed asynchronously and a completion can be
+/// detected once the object receives a writable notification.
+///
+/// # Named pipe clients
+///
+/// Currently, to create a client of a named pipe server, you can use the
+/// `OpenOptions` type in the standard library to create a `File` that connects
+/// to a named pipe. Afterwards you can use the `into_raw_handle` method coupled
+/// with the `NamedPipe::from_raw_handle` method to convert that to a named pipe
+/// that can operate asynchronously. Don't forget to pass the
+/// `FILE_FLAG_OVERLAPPED` flag when opening the `File`.
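+///
+/// A minimal client-side sketch (illustrative only; error handling omitted):
+///
+/// ```ignore
+/// use std::fs::OpenOptions;
+/// use std::os::windows::fs::OpenOptionsExt;
+/// use std::os::windows::io::{FromRawHandle, IntoRawHandle};
+///
+/// const FILE_FLAG_OVERLAPPED: u32 = 0x4000_0000;
+///
+/// let file = OpenOptions::new()
+///     .read(true)
+///     .write(true)
+///     .custom_flags(FILE_FLAG_OVERLAPPED)
+///     .open(r"\\.\pipe\mio-example")?;
+/// let pipe = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };
+/// ```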
+pub struct NamedPipe {
+ inner: Arc<Inner>,
+}
+
+/// # Notes
+///
+/// The memory layout of this structure must be fixed as the
+/// `ptr_from_*_overlapped` methods depend on it, see the `ptr_from` test.
+#[repr(C)]
+struct Inner {
+ // NOTE: careful modifying the order of these three fields, the `ptr_from_*`
+ // methods depend on the layout!
+ connect: Overlapped,
+ read: Overlapped,
+ write: Overlapped,
+ event: Overlapped,
+ // END NOTE.
+ handle: Handle,
+ connecting: AtomicBool,
+ io: Mutex<Io>,
+ pool: Mutex<BufferPool>,
+}
+
+// SAFETY: `Handle`s are, in general, not thread-safe. However, we only use
+// `Handle`s for resources that are thread-safe in `Inner`.
+unsafe impl Send for Inner {}
+
+// SAFETY: `Handle`s are, in general, not thread-safe. However, we only use
+// `Handle`s for resources that are thread-safe in `Inner`.
+unsafe impl Sync for Inner {}
+
+impl Inner {
+ /// Converts a pointer to `Inner.connect` to a pointer to `Inner`.
+ ///
+ /// # Unsafety
+ ///
+ /// Caller must ensure `ptr` is pointing to `Inner.connect`.
+ unsafe fn ptr_from_conn_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+        // `connect` is the first field, so the pointers are the same.
+ ptr.cast()
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.read`.
+ unsafe fn ptr_from_read_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `read` is after `connect: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(1) as *const Inner
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.write`.
+ unsafe fn ptr_from_write_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `write` is after `connect: Overlapped` and `read: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(2) as *const Inner
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.event`.
+ unsafe fn ptr_from_event_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `event` is after `connect: Overlapped`, `read: Overlapped`, and `write: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(3) as *const Inner
+ }
+
+ /// Issue a connection request with the specified overlapped operation.
+ ///
+ /// This function will issue a request to connect a client to this server,
+ /// returning immediately after starting the overlapped operation.
+ ///
+ /// If this function immediately succeeds then `Ok(true)` is returned. If
+ /// the overlapped operation is enqueued and pending, then `Ok(false)` is
+ /// returned. Otherwise an error is returned indicating what went wrong.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the
+ /// `overlapped` pointer is valid until the end of the I/O operation. The
+ /// kernel also requires that `overlapped` is unique for this I/O operation
+ /// and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that this pointer is
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
+ pub unsafe fn connect_overlapped(&self, overlapped: *mut OVERLAPPED) -> io::Result<bool> {
+ if ConnectNamedPipe(self.handle.raw(), overlapped) != 0 {
+ return Ok(true);
+ }
+
+ let err = io::Error::last_os_error();
+
+ match err.raw_os_error().map(|e| e as u32) {
+ Some(ERROR_PIPE_CONNECTED) => Ok(true),
+ Some(ERROR_NO_DATA) => Ok(true),
+ Some(ERROR_IO_PENDING) => Ok(false),
+ _ => Err(err),
+ }
+ }
+
+ /// Disconnects this named pipe from any connected client.
+ pub fn disconnect(&self) -> io::Result<()> {
+ if unsafe { DisconnectNamedPipe(self.handle.raw()) } == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Issues an overlapped read operation to occur on this pipe.
+ ///
+ /// This function will issue an asynchronous read to occur in an overlapped
+ /// fashion, returning immediately. The `buf` provided will be filled in
+    /// with data and the request is tracked by the `overlapped` structure
+    /// provided.
+ ///
+ /// If the operation succeeds immediately, `Ok(Some(n))` is returned where
+ /// `n` is the number of bytes read. If an asynchronous operation is
+ /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred
+ /// it is returned.
+ ///
+ /// When this operation completes (or if it completes immediately), another
+ /// mechanism must be used to learn how many bytes were transferred (such as
+    /// looking at the field in the IOCP status message).
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the kernel requires that the `buf` and
+    /// `overlapped` pointers be valid until the end of the I/O operation.
+ /// The kernel also requires that `overlapped` is unique for this I/O
+ /// operation and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that the pointers are
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
+ pub unsafe fn read_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let len = std::cmp::min(buf.len(), u32::MAX as usize) as u32;
+ let res = ReadFile(
+ self.handle.raw(),
+ buf.as_mut_ptr() as *mut _,
+ len,
+ std::ptr::null_mut(),
+ overlapped,
+ );
+ if res == 0 {
+ let err = io::Error::last_os_error();
+ if err.raw_os_error() != Some(ERROR_IO_PENDING as i32) {
+ return Err(err);
+ }
+ }
+
+ let mut bytes = 0;
+ let res = GetOverlappedResult(self.handle.raw(), overlapped, &mut bytes, 0);
+ if res == 0 {
+ let err = io::Error::last_os_error();
+ if err.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) {
+ Ok(None)
+ } else {
+ Err(err)
+ }
+ } else {
+ Ok(Some(bytes as usize))
+ }
+ }
+
+ /// Issues an overlapped write operation to occur on this pipe.
+ ///
+ /// This function will issue an asynchronous write to occur in an overlapped
+    /// fashion, returning immediately. The data in the `buf` provided will be
+    /// written and the request is tracked by the `overlapped` structure
+    /// provided.
+ ///
+ /// If the operation succeeds immediately, `Ok(Some(n))` is returned where
+ /// `n` is the number of bytes written. If an asynchronous operation is
+ /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred
+ /// it is returned.
+ ///
+ /// When this operation completes (or if it completes immediately), another
+ /// mechanism must be used to learn how many bytes were transferred (such as
+    /// looking at the field in the IOCP status message).
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the kernel requires that the `buf` and
+    /// `overlapped` pointers be valid until the end of the I/O operation.
+ /// The kernel also requires that `overlapped` is unique for this I/O
+ /// operation and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that the pointers are
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
+ pub unsafe fn write_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let len = std::cmp::min(buf.len(), u32::MAX as usize) as u32;
+ let res = WriteFile(
+ self.handle.raw(),
+ buf.as_ptr() as *const _,
+ len,
+ std::ptr::null_mut(),
+ overlapped,
+ );
+ if res == 0 {
+ let err = io::Error::last_os_error();
+ if err.raw_os_error() != Some(ERROR_IO_PENDING as i32) {
+ return Err(err);
+ }
+ }
+
+ let mut bytes = 0;
+ let res = GetOverlappedResult(self.handle.raw(), overlapped, &mut bytes, 0);
+ if res == 0 {
+ let err = io::Error::last_os_error();
+ if err.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) {
+ Ok(None)
+ } else {
+ Err(err)
+ }
+ } else {
+ Ok(Some(bytes as usize))
+ }
+ }
+
+ /// Calls the `GetOverlappedResult` function to get the result of an
+ /// overlapped operation for this handle.
+ ///
+ /// This function takes the `OVERLAPPED` argument which must have been used
+ /// to initiate an overlapped I/O operation, and returns either the
+ /// successful number of bytes transferred during the operation or an error
+ /// if one occurred.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as `overlapped` must have previously been used
+ /// to execute an operation for this handle, and it must also be a valid
+ /// pointer to an `Overlapped` instance.
+ #[inline]
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<usize> {
+ let mut transferred = 0;
+ let r = GetOverlappedResult(self.handle.raw(), overlapped, &mut transferred, 0);
+ if r == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(transferred as usize)
+ }
+ }
+}
+
+#[test]
+fn ptr_from() {
+ use std::mem::ManuallyDrop;
+ use std::ptr;
+
+ let pipe = unsafe { ManuallyDrop::new(NamedPipe::from_raw_handle(ptr::null_mut())) };
+ let inner: &Inner = &pipe.inner;
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_conn_overlapped(&inner.connect as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_conn_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_read_overlapped(&inner.read as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_read_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_write_overlapped(&inner.write as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_write_overlapped` incorrect"
+ );
+}
+
+struct Io {
+ // Uniquely identifies the selector associated with this named pipe
+ cp: Option<Arc<CompletionPort>>,
+ // Token used to identify events
+ token: Option<Token>,
+ read: State,
+ write: State,
+ connect_error: Option<io::Error>,
+}
+
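+// State of an in-flight overlapped read or write: the `Vec<u8>` is the
+// internal buffer and the `usize` an offset into it.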
+#[derive(Debug)]
+enum State {
+ None,
+ Pending(Vec<u8>, usize),
+ Ok(Vec<u8>, usize),
+ Err(io::Error),
+}
+
+// Odd tokens are for named pipes
+static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(1);
+
+fn would_block() -> io::Error {
+ io::ErrorKind::WouldBlock.into()
+}
+
+impl NamedPipe {
+ /// Creates a new named pipe at the specified `addr` given a "reasonable
+ /// set" of initial configuration options.
+ pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> {
+ use std::os::windows::ffi::OsStrExt;
+ let name: Vec<_> = addr.as_ref().encode_wide().chain(Some(0)).collect();
+
+ // Safety: syscall
+ let h = unsafe {
+ CreateNamedPipeW(
+ name.as_ptr(),
+ PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE | FILE_FLAG_OVERLAPPED,
+ PIPE_TYPE_BYTE,
+ PIPE_UNLIMITED_INSTANCES,
+ 65536,
+ 65536,
+ 0,
+ std::ptr::null_mut(),
+ )
+ };
+
+ if h == INVALID_HANDLE_VALUE {
+ Err(io::Error::last_os_error())
+ } else {
+ // Safety: nothing actually unsafe about this. The trait fn includes
+ // `unsafe`.
+ Ok(unsafe { Self::from_raw_handle(h as RawHandle) })
+ }
+ }
+
+ /// Attempts to call `ConnectNamedPipe`, if possible.
+ ///
+ /// This function will attempt to connect this pipe to a client in an
+ /// asynchronous fashion. If the function immediately establishes a
+ /// connection to a client then `Ok(())` is returned. Otherwise if a
+ /// connection attempt was issued and is now in progress then a "would
+ /// block" error is returned.
+ ///
+ /// When the connection is finished then this object will be flagged as
+ /// being ready for a write, or otherwise in the writable state.
+ ///
+ /// # Errors
+ ///
+ /// This function will return a "would block" error if the pipe has not yet
+ /// been registered with an event loop, if the connection operation has
+ /// previously been issued but has not yet completed, or if the connect
+ /// itself was issued and didn't finish immediately.
+ ///
+ /// Normal I/O errors from the call to `ConnectNamedPipe` are returned
+ /// immediately.
+ pub fn connect(&self) -> io::Result<()> {
+ // "Acquire the connecting lock" or otherwise just make sure we're the
+ // only operation that's using the `connect` overlapped instance.
+ if self.inner.connecting.swap(true, SeqCst) {
+ return Err(would_block());
+ }
+
+ // Now that we've flagged ourselves in the connecting state, issue the
+ // connection attempt. Afterwards interpret the return value and set
+ // internal state accordingly.
+ let res = unsafe {
+ let overlapped = self.inner.connect.as_ptr() as *mut _;
+ self.inner.connect_overlapped(overlapped)
+ };
+
+ match res {
+ // The connection operation finished immediately, so let's schedule
+ // reads/writes and such.
+ Ok(true) => {
+ self.inner.connecting.store(false, SeqCst);
+ Inner::post_register(&self.inner, None);
+ Ok(())
+ }
+
+ // If the overlapped operation was successful and didn't finish
+ // immediately then we forget a copy of the arc we hold
+ // internally. This ensures that when the completion status comes
+ // in for the I/O operation finishing it'll have a reference
+ // associated with it and our data will still be valid. The
+ // `connect_done` function will "reify" this forgotten pointer to
+ // drop the refcount on the other side.
+ Ok(false) => {
+ mem::forget(self.inner.clone());
+ Err(would_block())
+ }
+
+ Err(e) => {
+ self.inner.connecting.store(false, SeqCst);
+ Err(e)
+ }
+ }
+ }
+
+ /// Takes any internal error that has happened after the last I/O operation
+ /// which hasn't been retrieved yet.
+ ///
+ /// This is particularly useful when detecting failed attempts to `connect`.
+    /// Once a completed `connect` flags this pipe as writable, callers
+ /// must invoke this method to determine whether the connection actually
+ /// succeeded. If this function returns `None` then a client is connected,
+ /// otherwise it returns an error of what happened and a client shouldn't be
+ /// connected.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ Ok(self.inner.io.lock().unwrap().connect_error.take())
+ }
+
+ /// Disconnects this named pipe from a connected client.
+ ///
+ /// This function will disconnect the pipe from a connected client, if any,
+ /// transitively calling the `DisconnectNamedPipe` function.
+ ///
+ /// After a `disconnect` is issued, then a `connect` may be called again to
+ /// connect to another client.
+ pub fn disconnect(&self) -> io::Result<()> {
+ self.inner.disconnect()
+ }
+}
+
+impl FromRawHandle for NamedPipe {
+ unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe {
+ NamedPipe {
+ inner: Arc::new(Inner {
+ handle: Handle::new(handle as HANDLE),
+ connect: Overlapped::new(connect_done),
+ connecting: AtomicBool::new(false),
+ read: Overlapped::new(read_done),
+ write: Overlapped::new(write_done),
+ event: Overlapped::new(event_done),
+ io: Mutex::new(Io {
+ cp: None,
+ token: None,
+ read: State::None,
+ write: State::None,
+ connect_error: None,
+ }),
+ pool: Mutex::new(BufferPool::with_capacity(2)),
+ }),
+ }
+ }
+}
+
+impl Read for NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ <&NamedPipe as Read>::read(&mut &*self, buf)
+ }
+}
+
+impl Write for NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ <&NamedPipe as Write>::write(&mut &*self, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ <&NamedPipe as Write>::flush(&mut &*self)
+ }
+}
+
+impl<'a> Read for &'a NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut state = self.inner.io.lock().unwrap();
+
+ if state.token.is_none() {
+ return Err(would_block());
+ }
+
+ match mem::replace(&mut state.read, State::None) {
+            // In theory not possible with `token` checked above,
+            // but return a would-block error for now.
+ State::None => Err(would_block()),
+
+ // A read is in flight, still waiting for it to finish
+ State::Pending(buf, amt) => {
+ state.read = State::Pending(buf, amt);
+ Err(would_block())
+ }
+
+ // We previously read something into `data`, try to copy out some
+ // data. If we copy out all the data schedule a new read and
+ // otherwise store the buffer to get read later.
+ State::Ok(data, cur) => {
+ let n = {
+ let mut remaining = &data[cur..];
+ remaining.read(buf)?
+ };
+ let next = cur + n;
+ if next != data.len() {
+ state.read = State::Ok(data, next);
+ } else {
+ self.inner.put_buffer(data);
+ Inner::schedule_read(&self.inner, &mut state, None);
+ }
+ Ok(n)
+ }
+
+ // Looks like an in-flight read hit an error, return that here while
+ // we schedule a new one.
+ State::Err(e) => {
+ Inner::schedule_read(&self.inner, &mut state, None);
+ if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
+ Ok(0)
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Write for &'a NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // Make sure there's no writes pending
+ let mut io = self.inner.io.lock().unwrap();
+
+ if io.token.is_none() {
+ return Err(would_block());
+ }
+
+ match io.write {
+ State::None => {}
+ State::Err(_) => match mem::replace(&mut io.write, State::None) {
+ State::Err(e) => return Err(e),
+ // `io` is locked, so this branch is unreachable
+ _ => unreachable!(),
+ },
+ // any other state should be handled in `write_done`
+ _ => {
+ return Err(would_block());
+ }
+ }
+
+ // Move `buf` onto the heap and fire off the write
+ let mut owned_buf = self.inner.get_buffer();
+ owned_buf.extend(buf);
+ match Inner::maybe_schedule_write(&self.inner, owned_buf, 0, &mut io)? {
+            // Some bytes were written immediately
+            Some(n) => Ok(n),
+            // The write operation was enqueued for the whole buffer
+ None => Ok(buf.len()),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Source for NamedPipe {
+ fn register(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, false)?;
+
+ if io.token.is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a `Registry`",
+ ));
+ }
+
+ if io.cp.is_none() {
+ let selector = registry.selector();
+
+ io.cp = Some(selector.clone_port());
+
+ let inner_token = NEXT_TOKEN.fetch_add(2, Relaxed) + 2;
+ selector.inner.cp.add_handle(inner_token, self)?;
+ }
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn reregister(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ if io.token.is_none() {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ));
+ }
+
+ io.token = None;
+ Ok(())
+ }
+}
+
+impl AsRawHandle for NamedPipe {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.handle.raw() as RawHandle
+ }
+}
+
+impl fmt::Debug for NamedPipe {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.handle.fmt(f)
+ }
+}
+
+impl Drop for NamedPipe {
+ fn drop(&mut self) {
+ // Cancel pending reads/connects, but don't cancel writes to ensure that
+ // everything is flushed out.
+ unsafe {
+ if self.inner.connecting.load(SeqCst) {
+ drop(cancel(&self.inner.handle, &self.inner.connect));
+ }
+
+ let io = self.inner.io.lock().unwrap();
+ if let State::Pending(..) = io.read {
+ drop(cancel(&self.inner.handle, &self.inner.read));
+ }
+ }
+ }
+}
+
+impl Inner {
+ /// Schedules a read to happen in the background, executing an overlapped
+ /// operation.
+ ///
+ /// This function returns `true` if a normal error happens or if the read
+ /// is scheduled in the background. If the pipe is no longer connected
+ /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is
+ /// scheduled.
+ fn schedule_read(me: &Arc<Inner>, io: &mut Io, events: Option<&mut Vec<Event>>) -> bool {
+ // Check to see if a read is already scheduled/completed
+ match io.read {
+ State::None => {}
+ _ => return true,
+ }
+
+ // Allocate a buffer and schedule the read.
+ let mut buf = me.get_buffer();
+ let e = unsafe {
+ let overlapped = me.read.as_ptr() as *mut _;
+ let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity());
+ me.read_overlapped(slice, overlapped)
+ };
+
+ match e {
+ // See `NamedPipe::connect` above for the rationale behind `forget`
+ Ok(_) => {
+ io.read = State::Pending(buf, 0); // 0 is ignored on read side
+ mem::forget(me.clone());
+ true
+ }
+
+ // If ERROR_PIPE_LISTENING happens then it's not a real read error,
+ // we just need to wait for a connect.
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false,
+
+ // If some other error happened, though, we're now readable to give
+ // out the error.
+ Err(e) => {
+ io.read = State::Err(e);
+ io.notify_readable(me, events);
+ true
+ }
+ }
+ }
+
+    /// Maybe schedules an overlapped write operation.
+    ///
+    /// * `None` means that the overlapped operation was enqueued.
+    /// * `Some(n)` means that `n` bytes were written immediately.
+    ///   Note that `write_done` will fire anyway to clean up the state.
+ fn maybe_schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ ) -> io::Result<Option<usize>> {
+ // Very similar to `schedule_read` above, just done for the write half.
+ let e = unsafe {
+ let overlapped = me.write.as_ptr() as *mut _;
+ me.write_overlapped(&buf[pos..], overlapped)
+ };
+
+ // See `connect` above for the rationale behind `forget`
+ match e {
+            // `n` bytes were written immediately
+ Ok(Some(n)) => {
+ io.write = State::Ok(buf, pos);
+ mem::forget(me.clone());
+ Ok(Some(n))
+ }
+            // The write operation was enqueued
+ Ok(None) => {
+ io.write = State::Pending(buf, pos);
+ mem::forget(me.clone());
+ Ok(None)
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ events: Option<&mut Vec<Event>>,
+ ) {
+ match Inner::maybe_schedule_write(me, buf, pos, io) {
+ Ok(Some(_)) => {
+ // immediate result will be handled in `write_done`,
+ // so we'll reinterpret the `Ok` state
+ let state = mem::replace(&mut io.write, State::None);
+ io.write = match state {
+ State::Ok(buf, pos) => State::Pending(buf, pos),
+ // io is locked, so this branch is unreachable
+ _ => unreachable!(),
+ };
+ mem::forget(me.clone());
+ }
+ Ok(None) => (),
+ Err(e) => {
+ io.write = State::Err(e);
+ io.notify_writable(me, events);
+ }
+ }
+ }
+
+ fn post_register(me: &Arc<Inner>, mut events: Option<&mut Vec<Event>>) {
+ let mut io = me.io.lock().unwrap();
+ #[allow(clippy::needless_option_as_deref)]
+ if Inner::schedule_read(me, &mut io, events.as_deref_mut()) {
+ if let State::None = io.write {
+ io.notify_writable(me, events);
+ }
+ }
+ }
+
+ fn get_buffer(&self) -> Vec<u8> {
+ self.pool.lock().unwrap().get(4 * 1024)
+ }
+
+ fn put_buffer(&self, buf: Vec<u8>) {
+ self.pool.lock().unwrap().put(buf)
+ }
+}
+
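+/// Cancels an in-flight overlapped operation on `handle` via `CancelIoEx`.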
+unsafe fn cancel(handle: &Handle, overlapped: &Overlapped) -> io::Result<()> {
+ let ret = CancelIoEx(handle.raw(), overlapped.as_ptr());
+ // `CancelIoEx` returns 0 on error:
+ // https://docs.microsoft.com/en-us/windows/win32/fileio/cancelioex-func
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+fn connect_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `connect` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_conn_overlapped(status.overlapped())) };
+
+    // Flag ourselves as no longer using the `connect` overlapped instance.
+ let prev = me.connecting.swap(false, SeqCst);
+ assert!(prev, "NamedPipe was not previously connecting");
+
+ // Stash away our connect error if one happened
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ unsafe {
+ match me.result(status.overlapped()) {
+ Ok(n) => debug_assert_eq!(n, 0),
+ Err(e) => me.io.lock().unwrap().connect_error = Some(e),
+ }
+ }
+
+ // We essentially just finished a registration, so kick off a
+ // read and register write readiness.
+ Inner::post_register(&me, events);
+}
+
+fn read_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+    // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_read` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_read_overlapped(status.overlapped())) };
+
+ // Move from the `Pending` to `Ok` state.
+ let mut io = me.io.lock().unwrap();
+ let mut buf = match mem::replace(&mut io.read, State::None) {
+ State::Pending(buf, _) => buf,
+ _ => unreachable!(),
+ };
+ unsafe {
+ match me.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ buf.set_len(status.bytes_transferred() as usize);
+ io.read = State::Ok(buf, 0);
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.read = State::Err(e);
+ }
+ }
+ }
+
+ // Flag our readiness that we've got data.
+ io.notify_readable(&me, events);
+}
+
+fn write_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_write` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_write_overlapped(status.overlapped())) };
+
+ // Make the state change out of `Pending`. If we wrote the entire buffer
+ // then we're writable again and otherwise we schedule another write.
+ let mut io = me.io.lock().unwrap();
+ let (buf, pos) = match mem::replace(&mut io.write, State::None) {
+        // `Ok` here means that the operation completed immediately and
+        // `bytes_transferred` has already been reported to the client.
+ State::Ok(..) => {
+ io.notify_writable(&me, events);
+ return;
+ }
+ State::Pending(buf, pos) => (buf, pos),
+ _ => unreachable!(),
+ };
+
+ unsafe {
+ match me.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ let new_pos = pos + (status.bytes_transferred() as usize);
+ if new_pos == buf.len() {
+ me.put_buffer(buf);
+ io.notify_writable(&me, events);
+ } else {
+ Inner::schedule_write(&me, buf, new_pos, &mut io, events);
+ }
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.write = State::Err(e);
+ io.notify_writable(&me, events);
+ }
+ }
+ }
+}
+
+fn event_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+    // `Io::schedule_event` below.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_event_overlapped(status.overlapped())) };
+
+ let io = me.io.lock().unwrap();
+
+ // Make sure the I/O handle is still registered with the selector
+ if io.token.is_some() {
+        // This method is also called during `SelectorInner::drop` to perform
+ // cleanup. In this case, `events` is `None` and we don't need to track
+ // the event.
+ if let Some(events) = events {
+ let mut ev = Event::from_completion_status(&status);
+ // Reverse the `.data` alteration done in `schedule_event`. This
+ // alteration was done so the selector recognized the event as one from
+ // a named pipe.
+ ev.data >>= 1;
+ events.push(ev);
+ }
+ }
+}
+
+impl Io {
+ fn check_association(&self, registry: &Registry, required: bool) -> io::Result<()> {
+ match self.cp {
+ Some(ref cp) if !registry.selector().same_port(cp) => Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a different `Registry`",
+ )),
+ None if required => Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ )),
+ _ => Ok(()),
+ }
+ }
+
+ fn notify_readable(&self, me: &Arc<Inner>, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_readable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ self.schedule_event(me, ev);
+ }
+ }
+ }
+
+ fn notify_writable(&self, me: &Arc<Inner>, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_writable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ self.schedule_event(me, ev);
+ }
+ }
+ }
+
+ fn schedule_event(&self, me: &Arc<Inner>, mut event: Event) {
+ // Alter the token so that the selector will identify the IOCP event as
+ // one for a named pipe. This will be reversed in `event_done`
+ //
+ // `data` for named pipes is an auto-incrementing counter. Because
+ // `data` is `u64` we do not risk losing the most-significant bit
+ // (unless a user creates 2^62 named pipes during the lifetime of the
+ // process).
+ event.data <<= 1;
+ event.data += 1;
+
+ let completion_status =
+ event.to_completion_status_with_overlapped(me.event.as_ptr() as *mut _);
+
+ match self.cp.as_ref().unwrap().post(completion_status) {
+ Ok(_) => {
+ // Increase the ref count of `Inner` for the completion event.
+ mem::forget(me.clone());
+ }
+ Err(_) => {
+ // Nothing to do here
+ }
+ }
+ }
+}
+
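+/// A small pool of reusable byte buffers, used to avoid reallocating the
+/// scratch buffers for overlapped read and write operations.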
+struct BufferPool {
+ pool: Vec<Vec<u8>>,
+}
+
+impl BufferPool {
+ fn with_capacity(cap: usize) -> BufferPool {
+ BufferPool {
+ pool: Vec::with_capacity(cap),
+ }
+ }
+
+ fn get(&mut self, default_cap: usize) -> Vec<u8> {
+ self.pool
+ .pop()
+ .unwrap_or_else(|| Vec::with_capacity(default_cap))
+ }
+
+ fn put(&mut self, mut buf: Vec<u8>) {
+ if self.pool.len() < self.pool.capacity() {
+ unsafe {
+ buf.set_len(0);
+ }
+ self.pool.push(buf);
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/windows/net.rs b/vendor/mio/src/sys/windows/net.rs
new file mode 100644
index 00000000..5cc23533
--- /dev/null
+++ b/vendor/mio/src/sys/windows/net.rs
@@ -0,0 +1,111 @@
+use std::io;
+use std::mem;
+use std::net::SocketAddr;
+use std::sync::Once;
+
+use windows_sys::Win32::Networking::WinSock::{
+ closesocket, ioctlsocket, socket, AF_INET, AF_INET6, FIONBIO, IN6_ADDR, IN6_ADDR_0,
+ INVALID_SOCKET, IN_ADDR, IN_ADDR_0, SOCKADDR, SOCKADDR_IN, SOCKADDR_IN6, SOCKADDR_IN6_0,
+ SOCKET,
+};
+
+/// Initialise the network stack for Windows.
+fn init() {
+ static INIT: Once = Once::new();
+ INIT.call_once(|| {
+        // Let the standard library call `WSAStartup` for us; we can't do it
+ // ourselves because otherwise using any type in `std::net` would panic
+ // when it tries to call `WSAStartup` a second time.
+ drop(std::net::UdpSocket::bind("127.0.0.1:0"));
+ });
+}
+
+/// Create a new non-blocking socket.
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: i32) -> io::Result<SOCKET> {
+ let domain = match addr {
+ SocketAddr::V4(..) => AF_INET,
+ SocketAddr::V6(..) => AF_INET6,
+ };
+
+ new_socket(domain.into(), socket_type)
+}
+
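+/// Create a new non-blocking socket for the given `domain` and `socket_type`,
+/// initialising the network stack on first use.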
+pub(crate) fn new_socket(domain: u32, socket_type: i32) -> io::Result<SOCKET> {
+ init();
+
+ let socket = syscall!(
+ socket(domain as i32, socket_type, 0),
+ PartialEq::eq,
+ INVALID_SOCKET
+ )?;
+
+ if let Err(err) = syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0) {
+ let _ = unsafe { closesocket(socket) };
+ return Err(err);
+ }
+
+ Ok(socket as SOCKET)
+}
+
+/// A type with the same memory layout as `SOCKADDR`, used to convert Rust-level
+/// `SocketAddr*` types into their system representation. The benefit of this
+/// specific type over `SOCKADDR_STORAGE` is that it is exactly as large as it
+/// needs to be, and it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: SOCKADDR_IN,
+ v6: SOCKADDR_IN6,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const SOCKADDR {
+ self as *const _ as *const SOCKADDR
+ }
+}
+
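+/// Converts a `SocketAddr` into its C representation, returning it along with
+/// the length to pass to calls such as `bind` and `connect`.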
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, i32) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+            // `s_addr` is stored as big-endian on all machines and the octet
+            // array is already in big-endian order, so the native-endian
+            // conversion is used to avoid any byte swapping.
+ let sin_addr = unsafe {
+ let mut s_un = mem::zeroed::<IN_ADDR_0>();
+ s_un.S_addr = u32::from_ne_bytes(addr.ip().octets());
+ IN_ADDR { S_un: s_un }
+ };
+
+ let sockaddr_in = SOCKADDR_IN {
+ sin_family: AF_INET as u16, // 1
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ (sockaddr, mem::size_of::<SOCKADDR_IN>() as i32)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sin6_addr = unsafe {
+ let mut u = mem::zeroed::<IN6_ADDR_0>();
+ u.Byte = addr.ip().octets();
+ IN6_ADDR { u }
+ };
+ let u = unsafe {
+ let mut u = mem::zeroed::<SOCKADDR_IN6_0>();
+ u.sin6_scope_id = addr.scope_id();
+ u
+ };
+
+ let sockaddr_in6 = SOCKADDR_IN6 {
+ sin6_family: AF_INET6 as u16, // 23
+ sin6_port: addr.port().to_be(),
+ sin6_addr,
+ sin6_flowinfo: addr.flowinfo(),
+ Anonymous: u,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ (sockaddr, mem::size_of::<SOCKADDR_IN6>() as i32)
+ }
+ }
+}
diff --git a/vendor/mio/src/sys/windows/overlapped.rs b/vendor/mio/src/sys/windows/overlapped.rs
new file mode 100644
index 00000000..d1456ded
--- /dev/null
+++ b/vendor/mio/src/sys/windows/overlapped.rs
@@ -0,0 +1,35 @@
+use crate::sys::windows::Event;
+
+use std::cell::UnsafeCell;
+use std::fmt;
+
+use windows_sys::Win32::System::IO::{OVERLAPPED, OVERLAPPED_ENTRY};
+
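+/// An `OVERLAPPED` paired with the callback to run when its I/O operation
+/// completes. The `OVERLAPPED` must remain the first field of this `#[repr(C)]`
+/// struct so that a pointer to the `OVERLAPPED` returned by the completion
+/// port can be cast back to an `Overlapped` to recover the callback.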
+#[repr(C)]
+pub(crate) struct Overlapped {
+ inner: UnsafeCell<OVERLAPPED>,
+ pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>),
+}
+
+#[cfg(feature = "os-ext")]
+impl Overlapped {
+ pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>)) -> Overlapped {
+ Overlapped {
+ inner: UnsafeCell::new(unsafe { std::mem::zeroed() }),
+ callback: cb,
+ }
+ }
+
+ pub(crate) fn as_ptr(&self) -> *const OVERLAPPED {
+ self.inner.get()
+ }
+}
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Overlapped").finish()
+ }
+}
+
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
diff --git a/vendor/mio/src/sys/windows/selector.rs b/vendor/mio/src/sys/windows/selector.rs
new file mode 100644
index 00000000..aaf77c29
--- /dev/null
+++ b/vendor/mio/src/sys/windows/selector.rs
@@ -0,0 +1,741 @@
+use super::afd::{self, Afd, AfdPollInfo};
+use super::io_status_block::IoStatusBlock;
+use super::Event;
+use crate::sys::Events;
+
+cfg_net! {
+ use crate::sys::event::{
+ ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS,
+ };
+ use crate::Interest;
+}
+
+use super::iocp::{CompletionPort, CompletionStatus};
+use std::collections::VecDeque;
+use std::ffi::c_void;
+use std::io;
+use std::marker::PhantomPinned;
+use std::os::windows::io::RawSocket;
+use std::pin::Pin;
+#[cfg(debug_assertions)]
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use windows_sys::Win32::Foundation::{
+ ERROR_INVALID_HANDLE, ERROR_IO_PENDING, HANDLE, STATUS_CANCELLED, WAIT_TIMEOUT,
+};
+use windows_sys::Win32::System::IO::OVERLAPPED;
+
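+/// A group of `Afd` handles shared between sockets. Each handle is shared by
+/// up to `POLL_GROUP_MAX_GROUP_SIZE` sockets before a new one is allocated;
+/// handles no longer used by any socket are dropped in `release_unused_afd`.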
+#[derive(Debug)]
+struct AfdGroup {
+ #[cfg_attr(not(feature = "net"), allow(dead_code))]
+ cp: Arc<CompletionPort>,
+ afd_group: Mutex<Vec<Arc<Afd>>>,
+}
+
+impl AfdGroup {
+ pub fn new(cp: Arc<CompletionPort>) -> AfdGroup {
+ AfdGroup {
+ afd_group: Mutex::new(Vec::new()),
+ cp,
+ }
+ }
+
+ pub fn release_unused_afd(&self) {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ afd_group.retain(|g| Arc::strong_count(g) > 1);
+ }
+}
+
+cfg_io_source! {
+ const POLL_GROUP_MAX_GROUP_SIZE: usize = 32;
+
+ impl AfdGroup {
+ pub fn acquire(&self) -> io::Result<Arc<Afd>> {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ if afd_group.len() == 0 {
+ self._alloc_afd_group(&mut afd_group)?;
+ } else {
+ // + 1 reference in Vec
+ if Arc::strong_count(afd_group.last().unwrap()) > POLL_GROUP_MAX_GROUP_SIZE {
+ self._alloc_afd_group(&mut afd_group)?;
+ }
+ }
+
+ match afd_group.last() {
+ Some(arc) => Ok(arc.clone()),
+ None => unreachable!(
+ "Cannot acquire afd, {:#?}, afd_group: {:#?}",
+ self, afd_group
+ ),
+ }
+ }
+
+ fn _alloc_afd_group(&self, afd_group: &mut Vec<Arc<Afd>>) -> io::Result<()> {
+ let afd = Afd::new(&self.cp)?;
+ let arc = Arc::new(afd);
+ afd_group.push(arc);
+ Ok(())
+ }
+ }
+}
+
+#[derive(Debug)]
+enum SockPollStatus {
+ Idle,
+ Pending,
+ Cancelled,
+}
+
+#[derive(Debug)]
+pub struct SockState {
+ iosb: IoStatusBlock,
+ poll_info: AfdPollInfo,
+ afd: Arc<Afd>,
+
+ base_socket: RawSocket,
+
+ user_evts: u32,
+ pending_evts: u32,
+
+ user_data: u64,
+
+ poll_status: SockPollStatus,
+ delete_pending: bool,
+
+ // last raw os error
+ error: Option<i32>,
+
+ _pinned: PhantomPinned,
+}
+
+impl SockState {
+ fn update(&mut self, self_arc: &Pin<Arc<Mutex<SockState>>>) -> io::Result<()> {
+ assert!(!self.delete_pending);
+
+ // make sure to reset previous error before a new update
+ self.error = None;
+
+ if let SockPollStatus::Pending = self.poll_status {
+ if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 {
+ /* All the events the user is interested in are already being monitored by
+ * the pending poll operation. It might spuriously complete because of an
+ * event that we're no longer interested in; when that happens we'll submit
+ * a new poll operation with the updated event mask. */
+ } else {
+ /* A poll operation is already pending, but it's not monitoring for all the
+ * events that the user is interested in. Therefore, cancel the pending
+                 * poll operation; when we receive its completion packet, a new poll
+ * operation will be submitted with the correct event mask. */
+ if let Err(e) = self.cancel() {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ return Ok(());
+ }
+ } else if let SockPollStatus::Cancelled = self.poll_status {
+ /* The poll operation has already been cancelled, we're still waiting for
+ * it to return. For now, there's nothing that needs to be done. */
+ } else if let SockPollStatus::Idle = self.poll_status {
+ /* No poll operation is pending; start one. */
+ self.poll_info.exclusive = 0;
+ self.poll_info.number_of_handles = 1;
+ self.poll_info.timeout = i64::MAX;
+ self.poll_info.handles[0].handle = self.base_socket as HANDLE;
+ self.poll_info.handles[0].status = 0;
+ self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE;
+
+ // Increase the ref count as the memory will be used by the kernel.
+ let overlapped_ptr = into_overlapped(self_arc.clone());
+
+ let result = unsafe {
+ self.afd
+ .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr)
+ };
+ if let Err(e) = result {
+ let code = e.raw_os_error().unwrap();
+ if code == ERROR_IO_PENDING as i32 {
+ /* Overlapped poll operation in progress; this is expected. */
+ } else {
+ // Since the operation failed it means the kernel won't be
+ // using the memory any more.
+ drop(from_overlapped(overlapped_ptr as *mut _));
+ if code == ERROR_INVALID_HANDLE as i32 {
+ /* Socket closed; it'll be dropped. */
+ self.mark_delete();
+ return Ok(());
+ } else {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ }
+ }
+
+ self.poll_status = SockPollStatus::Pending;
+ self.pending_evts = self.user_evts;
+ } else {
+ unreachable!("Invalid poll status during update, {:#?}", self)
+ }
+
+ Ok(())
+ }
+
+ fn cancel(&mut self) -> io::Result<()> {
+ match self.poll_status {
+ SockPollStatus::Pending => {}
+ _ => unreachable!("Invalid poll status during cancel, {:#?}", self),
+ };
+ unsafe {
+ self.afd.cancel(&mut *self.iosb)?;
+ }
+ self.poll_status = SockPollStatus::Cancelled;
+ self.pending_evts = 0;
+ Ok(())
+ }
+
+    // This function is called from overlapped completion handling, where `self` is
+    // accessed through an `Arc<Mutex<SockState>>`. Watch out for reference counting.
+ fn feed_event(&mut self) -> Option<Event> {
+ self.poll_status = SockPollStatus::Idle;
+ self.pending_evts = 0;
+
+ let mut afd_events = 0;
+        // We use the status info in the IO_STATUS_BLOCK to determine the
+        // socket poll status; it is unsafe to read it through a raw pointer,
+        // hence the `unsafe` block.
+ unsafe {
+ if self.delete_pending {
+ return None;
+ } else if self.iosb.Anonymous.Status == STATUS_CANCELLED {
+ /* The poll request was cancelled by CancelIoEx. */
+ } else if self.iosb.Anonymous.Status < 0 {
+ /* The overlapped request itself failed in an unexpected way. */
+ afd_events = afd::POLL_CONNECT_FAIL;
+ } else if self.poll_info.number_of_handles < 1 {
+ /* This poll operation succeeded but didn't report any socket events. */
+ } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 {
+ /* The poll operation reported that the socket was closed. */
+ self.mark_delete();
+ return None;
+ } else {
+ afd_events = self.poll_info.handles[0].events;
+ }
+ }
+
+ afd_events &= self.user_evts;
+
+ if afd_events == 0 {
+ return None;
+ }
+
+        // In mio we have to simulate edge-triggered behavior to match the API usage.
+        // The strategy is to intercept all user reads/writes that could return
+        // `WouldBlock`, then reregister the socket to reset the interests.
+ self.user_evts &= !afd_events;
+
+ Some(Event {
+ data: self.user_data,
+ flags: afd_events,
+ })
+ }
+
+ pub fn is_pending_deletion(&self) -> bool {
+ self.delete_pending
+ }
+
+ pub fn mark_delete(&mut self) {
+ if !self.delete_pending {
+ if let SockPollStatus::Pending = self.poll_status {
+ drop(self.cancel());
+ }
+
+ self.delete_pending = true;
+ }
+ }
+
+ fn has_error(&self) -> bool {
+ self.error.is_some()
+ }
+}
+
+cfg_io_source! {
+ impl SockState {
+ fn new(raw_socket: RawSocket, afd: Arc<Afd>) -> io::Result<SockState> {
+ Ok(SockState {
+ iosb: IoStatusBlock::zeroed(),
+ poll_info: AfdPollInfo::zeroed(),
+ afd,
+ base_socket: get_base_socket(raw_socket)?,
+ user_evts: 0,
+ pending_evts: 0,
+ user_data: 0,
+ poll_status: SockPollStatus::Idle,
+ delete_pending: false,
+ error: None,
+ _pinned: PhantomPinned,
+ })
+ }
+
+        /// Returns true if the socket needs to be added to the update queue, false otherwise.
+ fn set_event(&mut self, ev: Event) -> bool {
+ /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */
+ let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT;
+
+ self.user_evts = events;
+ self.user_data = ev.data;
+
+ (events & !self.pending_evts) != 0
+ }
+ }
+}
+
+impl Drop for SockState {
+ fn drop(&mut self) {
+ self.mark_delete();
+ }
+}
+
+/// Converts the pointer to a `SockState` into a raw pointer.
+/// To revert see `from_overlapped`.
+fn into_overlapped(sock_state: Pin<Arc<Mutex<SockState>>>) -> *mut c_void {
+ let overlapped_ptr: *const Mutex<SockState> =
+ unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) };
+ overlapped_ptr as *mut _
+}
+
+/// Convert a raw overlapped pointer into a reference to `SockState`.
+/// Reverts `into_overlapped`.
+fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin<Arc<Mutex<SockState>>> {
+ let sock_ptr: *const Mutex<SockState> = ptr as *const _;
+ unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) }
+}
+
+/// Each `Selector` has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc. when they are first
+/// registered with the `Selector`. If a type that was previously associated
+/// with a `Selector` attempts to register itself with a different `Selector`,
+/// the operation will return an error. This matches Windows behavior.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
+
+/// Windows implementation of `sys::Selector`.
+///
+/// Edge-triggered event notification is simulated by resetting the internal
+/// event flag of each socket state (`SockState`) and re-arming the events by
+/// intercepting all requests that could otherwise return
+/// `io::ErrorKind::WouldBlock`.
+///
+/// This selector currently only supports sockets, because the `Afd` driver is
+/// winsock2 specific.
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ pub(super) inner: Arc<SelectorInner>,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ SelectorInner::new().map(|inner| {
+ #[cfg(debug_assertions)]
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+ Selector {
+ #[cfg(debug_assertions)]
+ id,
+ inner: Arc::new(inner),
+ }
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ Ok(Selector {
+ #[cfg(debug_assertions)]
+ id: self.id,
+ inner: Arc::clone(&self.inner),
+ })
+ }
+
+ /// # Safety
+ ///
+ /// This requires a mutable reference to self because only a single thread
+ /// can poll IOCP at a time.
+ pub fn select(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.inner.select(events, timeout)
+ }
+
+ pub(super) fn clone_port(&self) -> Arc<CompletionPort> {
+ self.inner.cp.clone()
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn same_port(&self, other: &Arc<CompletionPort>) -> bool {
+ Arc::ptr_eq(&self.inner.cp, other)
+ }
+}
+
+cfg_io_source! {
+ use super::InternalState;
+ use crate::Token;
+
+ impl Selector {
+ pub(super) fn register(
+ &self,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ SelectorInner::register(&self.inner, socket, token, interests)
+ }
+
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(state, token, interests)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct SelectorInner {
+ pub(super) cp: Arc<CompletionPort>,
+ update_queue: Mutex<VecDeque<Pin<Arc<Mutex<SockState>>>>>,
+ afd_group: AfdGroup,
+ is_polling: AtomicBool,
+}
+
+// Thread safety is ensured by taking locks manually.
+unsafe impl Sync for SelectorInner {}
+
+impl SelectorInner {
+ pub fn new() -> io::Result<SelectorInner> {
+ CompletionPort::new(0).map(|cp| {
+ let cp = Arc::new(cp);
+ let cp_afd = Arc::clone(&cp);
+
+ SelectorInner {
+ cp,
+ update_queue: Mutex::new(VecDeque::new()),
+ afd_group: AfdGroup::new(cp_afd),
+ is_polling: AtomicBool::new(false),
+ }
+ })
+ }
+
+ /// # Safety
+ ///
+    /// May only be called via `Selector::select`.
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ events.clear();
+
+ if timeout.is_none() {
+ loop {
+ let len = self.select2(&mut events.statuses, &mut events.events, None)?;
+ if len == 0 {
+ continue;
+ }
+ break Ok(());
+ }
+ } else {
+ self.select2(&mut events.statuses, &mut events.events, timeout)?;
+ Ok(())
+ }
+ }
+
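+    /// Flushes any pending socket updates, then dequeues completion statuses
+    /// into `statuses` and converts them into `events`, returning the number
+    /// of events produced.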
+ pub fn select2(
+ &self,
+ statuses: &mut [CompletionStatus],
+ events: &mut Vec<Event>,
+ timeout: Option<Duration>,
+ ) -> io::Result<usize> {
+ assert!(!self.is_polling.swap(true, Ordering::AcqRel));
+
+ unsafe { self.update_sockets_events() }?;
+
+ let result = self.cp.get_many(statuses, timeout);
+
+ self.is_polling.store(false, Ordering::Relaxed);
+
+ match result {
+ Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }),
+ Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0),
+ Err(e) => Err(e),
+ }
+ }
+
+ unsafe fn update_sockets_events(&self) -> io::Result<()> {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for sock in update_queue.iter_mut() {
+ let mut sock_internal = sock.lock().unwrap();
+ if !sock_internal.is_pending_deletion() {
+ sock_internal.update(sock)?;
+ }
+ }
+
+        // Remove all sockets that have no error; those still have an AFD poll
+        // operation pending.
+ update_queue.retain(|sock| sock.lock().unwrap().has_error());
+
+ self.afd_group.release_unused_afd();
+ Ok(())
+ }
+
+    // Returns the number of `iocp_events` processed rather than the events themselves.
+ unsafe fn feed_events(
+ &self,
+ events: &mut Vec<Event>,
+ iocp_events: &[CompletionStatus],
+ ) -> usize {
+ let mut n = 0;
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ events.push(Event::from_completion_status(iocp_event));
+ n += 1;
+ continue;
+ } else if iocp_event.token() % 2 == 1 {
+ // Handle is a named pipe. This could be extended to be any non-AFD event.
+ let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback;
+
+ let len = events.len();
+ callback(iocp_event.entry(), Some(events));
+ n += events.len() - len;
+ continue;
+ }
+
+ let sock_state = from_overlapped(iocp_event.overlapped());
+ let mut sock_guard = sock_state.lock().unwrap();
+ if let Some(e) = sock_guard.feed_event() {
+ events.push(e);
+ n += 1;
+ }
+
+ if !sock_guard.is_pending_deletion() {
+ update_queue.push_back(sock_state.clone());
+ }
+ }
+ self.afd_group.release_unused_afd();
+ n
+ }
+}
+
+cfg_io_source! {
+ use std::mem::size_of;
+ use std::ptr::null_mut;
+
+ use windows_sys::Win32::Networking::WinSock::{
+ WSAGetLastError, WSAIoctl, SIO_BASE_HANDLE, SIO_BSP_HANDLE,
+ SIO_BSP_HANDLE_POLL, SIO_BSP_HANDLE_SELECT, SOCKET_ERROR,
+ };
+
+
+ impl SelectorInner {
+ fn register(
+ this: &Arc<Self>,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ let flags = interests_to_afd_flags(interests);
+
+ let sock = {
+ let sock = this._alloc_sock_for_rawsocket(socket)?;
+ let event = Event {
+ flags,
+ data: token.0 as u64,
+ };
+ sock.lock().unwrap().set_event(event);
+ sock
+ };
+
+ let state = InternalState {
+ selector: this.clone(),
+ token,
+ interests,
+ sock_state: sock.clone(),
+ };
+
+ this.queue_state(sock);
+ unsafe { this.update_sockets_events_if_polling()? };
+
+ Ok(state)
+ }
+
+ // Directly accessed in `IoSourceState::do_io`.
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ {
+ let event = Event {
+ flags: interests_to_afd_flags(interests),
+ data: token.0 as u64,
+ };
+
+ state.lock().unwrap().set_event(event);
+ }
+
+ // FIXME: a sock which has_error true should not be re-added to
+ // the update queue because it's already there.
+ self.queue_state(state);
+ unsafe { self.update_sockets_events_if_polling() }
+ }
+
+ /// This function is called by register() and reregister() to start an
+ /// IOCTL_AFD_POLL operation corresponding to the registered events, but
+ /// only if necessary.
+ ///
+ /// Since it is not possible to modify or synchronously cancel an AFD_POLL
+ /// operation, and there can be only one active AFD_POLL operation per
+ /// (socket, completion port) pair at any time, it is expensive to change
+ /// a socket's event registration after it has been submitted to the kernel.
+ ///
+ /// Therefore, if no other threads are polling when interest in a socket
+ /// event is (re)registered, the socket is added to the 'update queue', but
+ /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred
+ /// until just before the GetQueuedCompletionStatusEx() syscall is made.
+ ///
+ /// However, when another thread is already blocked on
+ /// GetQueuedCompletionStatusEx() we tell the kernel about the registered
+ /// socket event(s) immediately.
+ unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> {
+ if self.is_polling.load(Ordering::Acquire) {
+ self.update_sockets_events()
+ } else {
+ Ok(())
+ }
+ }
+
+ fn queue_state(&self, sock_state: Pin<Arc<Mutex<SockState>>>) {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ update_queue.push_back(sock_state);
+ }
+
+ fn _alloc_sock_for_rawsocket(
+ &self,
+ raw_socket: RawSocket,
+ ) -> io::Result<Pin<Arc<Mutex<SockState>>>> {
+ let afd = self.afd_group.acquire()?;
+ Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?)))
+ }
+ }
+
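+    /// Queries the base service-provider handle for `raw_socket` using the
+    /// given `ioctl` via `WSAIoctl`.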
+ fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result<RawSocket, i32> {
+ let mut base_socket: RawSocket = 0;
+ let mut bytes: u32 = 0;
+ unsafe {
+ if WSAIoctl(
+ raw_socket as usize,
+ ioctl,
+ null_mut(),
+ 0,
+ &mut base_socket as *mut _ as *mut c_void,
+ size_of::<RawSocket>() as u32,
+ &mut bytes,
+ null_mut(),
+ None,
+ ) != SOCKET_ERROR
+ {
+ Ok(base_socket)
+ } else {
+ Err(WSAGetLastError())
+ }
+ }
+ }
+
+ fn get_base_socket(raw_socket: RawSocket) -> io::Result<RawSocket> {
+ let res = try_get_base_socket(raw_socket, SIO_BASE_HANDLE);
+ if let Ok(base_socket) = res {
+ return Ok(base_socket);
+ }
+
+ // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore
+ // it should not fail as long as `raw_socket` is a valid socket. See
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls.
+ // However, at least one known LSP deliberately breaks it, so we try
+ // some alternative IOCTLs, starting with the most appropriate one.
+ for &ioctl in &[
+ SIO_BSP_HANDLE_SELECT,
+ SIO_BSP_HANDLE_POLL,
+ SIO_BSP_HANDLE,
+ ] {
+ if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) {
+ // Since we know now that we're dealing with an LSP (otherwise
+ // SIO_BASE_HANDLE wouldn't have failed), only return any result
+ // when it is different from the original `raw_socket`.
+ if base_socket != raw_socket {
+ return Ok(base_socket);
+ }
+ }
+ }
+
+ // If the alternative IOCTLs also failed, return the original error.
+ let os_error = res.unwrap_err();
+ let err = io::Error::from_raw_os_error(os_error);
+ Err(err)
+ }
+}
+
+impl Drop for SelectorInner {
+ fn drop(&mut self) {
+ loop {
+ let events_num: usize;
+ let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024];
+
+ let result = self
+ .cp
+ .get_many(&mut statuses, Some(std::time::Duration::from_millis(0)));
+ match result {
+ Ok(iocp_events) => {
+ events_num = iocp_events.iter().len();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ // Custom event
+ } else if iocp_event.token() % 2 == 1 {
+ // Named pipe, dispatch the event so it can release resources
+ let callback = unsafe {
+ (*(iocp_event.overlapped() as *mut super::Overlapped)).callback
+ };
+
+ callback(iocp_event.entry(), None);
+ } else {
+ // drain sock state to release memory of Arc reference
+ let _sock_state = from_overlapped(iocp_event.overlapped());
+ }
+ }
+ }
+
+ Err(_) => {
+ break;
+ }
+ }
+
+ if events_num == 0 {
+                // All completion statuses have been drained; stop looping.
+ break;
+ }
+ }
+
+ self.afd_group.release_unused_afd();
+ }
+}
+
+cfg_net! {
+ fn interests_to_afd_flags(interests: Interest) -> u32 {
+ let mut flags = 0;
+
+ if interests.is_readable() {
+ flags |= READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ if interests.is_writable() {
+ flags |= WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ flags
+ }
+}
diff --git a/vendor/mio/src/sys/windows/tcp.rs b/vendor/mio/src/sys/windows/tcp.rs
new file mode 100644
index 00000000..4f77d5d6
--- /dev/null
+++ b/vendor/mio/src/sys/windows/tcp.rs
@@ -0,0 +1,66 @@
+use std::io;
+use std::net::{self, SocketAddr};
+use std::os::windows::io::AsRawSocket;
+
+use windows_sys::Win32::Networking::WinSock::{self, SOCKET, SOCKET_ERROR, SOCK_STREAM};
+
+use crate::sys::windows::net::{new_ip_socket, socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<SOCKET> {
+ new_ip_socket(address, SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ use WinSock::bind;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ bind(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ use WinSock::connect;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ let res = syscall!(
+ connect(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ );
+
+ match res {
+ Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ use std::convert::TryInto;
+ use WinSock::listen;
+
+ let backlog = backlog.try_into().unwrap_or(i32::MAX);
+ syscall!(
+ listen(socket.as_raw_socket() as _, backlog),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ // The non-blocking state of `listener` is inherited. See
+ // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks.
+ listener.accept()
+}
diff --git a/vendor/mio/src/sys/windows/udp.rs b/vendor/mio/src/sys/windows/udp.rs
new file mode 100644
index 00000000..87e269fa
--- /dev/null
+++ b/vendor/mio/src/sys/windows/udp.rs
@@ -0,0 +1,46 @@
+use std::io;
+use std::mem::{self, MaybeUninit};
+use std::net::{self, SocketAddr};
+use std::os::windows::io::{AsRawSocket, FromRawSocket};
+use std::os::windows::raw::SOCKET as StdSocket; // windows-sys uses usize, stdlib uses u32/u64.
+
+use crate::sys::windows::net::{new_ip_socket, socket_addr};
+use windows_sys::Win32::Networking::WinSock::{
+ bind as win_bind, getsockopt, IPPROTO_IPV6, IPV6_V6ONLY, SOCKET_ERROR, SOCK_DGRAM,
+};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+ let raw_socket = new_ip_socket(addr, SOCK_DGRAM)?;
+ let socket = unsafe { net::UdpSocket::from_raw_socket(raw_socket as StdSocket) };
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ win_bind(raw_socket, raw_addr.as_ptr(), raw_addr_length),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+
+ Ok(socket)
+}
+
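+/// Returns whether the `IPV6_V6ONLY` option is set on `socket`.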
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: MaybeUninit<i32> = MaybeUninit::uninit();
+ let mut optlen = mem::size_of::<i32>() as i32;
+
+ syscall!(
+ getsockopt(
+ socket.as_raw_socket() as usize,
+ IPPROTO_IPV6 as i32,
+ IPV6_V6ONLY as i32,
+ optval.as_mut_ptr().cast(),
+ &mut optlen,
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+
+ debug_assert_eq!(optlen as usize, mem::size_of::<i32>());
+ // Safety: `getsockopt` initialised `optval` for us.
+ let optval = unsafe { optval.assume_init() };
+ Ok(optval != 0)
+}
diff --git a/vendor/mio/src/sys/windows/waker.rs b/vendor/mio/src/sys/windows/waker.rs
new file mode 100644
index 00000000..103aa01a
--- /dev/null
+++ b/vendor/mio/src/sys/windows/waker.rs
@@ -0,0 +1,29 @@
+use crate::sys::windows::Event;
+use crate::sys::windows::Selector;
+use crate::Token;
+
+use super::iocp::CompletionPort;
+use std::io;
+use std::sync::Arc;
+
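+/// Waker backed by the selector's completion port: waking posts a completion
+/// status with a null `OVERLAPPED` pointer, which the selector surfaces as a
+/// readable event for `token`.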
+#[derive(Debug)]
+pub struct Waker {
+ token: Token,
+ port: Arc<CompletionPort>,
+}
+
+impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ Ok(Waker {
+ token,
+ port: selector.clone_port(),
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ let mut ev = Event::new(self.token);
+ ev.set_readable();
+
+ self.port.post(ev.to_completion_status())
+ }
+}