Merge pull request #141 from kevinmehall/io-traits

Add EndpointRead and EndpointWrite
This commit is contained in:
Kevin Mehall 2025-06-15 12:57:32 -06:00 committed by GitHub
commit 8b5dde13f4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 951 additions and 6 deletions

View file

@ -35,10 +35,11 @@ io-kit-sys = "0.4.0"
[target.'cfg(any(target_os="linux", target_os="android", target_os="windows", target_os="macos"))'.dependencies]
blocking = { version = "1.6.1", optional = true }
tokio = { version = "1", optional = true, features = ["rt"] }
futures-io = { version = "0.3", optional = true }
[features]
# Use the `blocking` crate for making blocking IO async
smol = ["dep:blocking"]
smol = ["dep:blocking", "dep:futures-io"]
# Use `tokio`'s IO threadpool for making blocking IO async
tokio = ["dep:tokio"]

87
examples/bulk_io.rs Normal file
View file

@ -0,0 +1,87 @@
use std::{
io::{BufRead, Read, Write},
time::Duration,
};
use nusb::{
transfer::{Bulk, In, Out},
MaybeFuture,
};
// Example: buffered bulk I/O using the `reader`/`writer` adapters.
// NOTE(review): assumes a test device at 59e3:00aa whose interface 1,
// alt setting 1, echoes written data back — confirm against the firmware.
fn main() {
    env_logger::init();

    // Locate the device by vendor/product ID.
    let di = nusb::list_devices()
        .wait()
        .unwrap()
        .find(|d| d.vendor_id() == 0x59e3 && d.product_id() == 0x00aa)
        .expect("device should be connected");
    println!("Device info: {di:?}");

    let device = di.open().wait().unwrap();
    let main_interface = device.claim_interface(0).wait().unwrap();

    // Buffered writer on bulk OUT 0x03: 128-byte transfers, up to 8 queued.
    let mut writer = main_interface
        .endpoint::<Bulk, Out>(0x03)
        .unwrap()
        .writer(128)
        .with_num_transfers(8);

    // Buffered reader on bulk IN 0x83 with 128-byte transfers.
    let mut reader = main_interface
        .endpoint::<Bulk, In>(0x83)
        .unwrap()
        .reader(128);

    // Writes are buffered; `flush` submits pending data, while `flush_end`
    // additionally terminates the stream with a short/zero-length packet.
    writer.write_all(&[1; 16]).unwrap();
    writer.write_all(&[2; 256]).unwrap();
    writer.flush().unwrap();
    writer.write_all(&[3; 64]).unwrap();
    writer.flush_end().unwrap();

    let mut buf = [0; 16];
    reader.read_exact(&mut buf).unwrap();
    let mut buf = [0; 64];
    reader.read_exact(&mut buf).unwrap();
    // Show how much data is already buffered without consuming it.
    dbg!(reader.fill_buf().unwrap().len());

    // Interface 1 (alt setting 1) loops written data back; the assertions
    // below rely on this.
    let echo_interface = device.claim_interface(1).wait().unwrap();
    echo_interface.set_alt_setting(1).wait().unwrap();
    let mut writer = echo_interface
        .endpoint::<Bulk, Out>(0x01)
        .unwrap()
        .writer(64)
        .with_num_transfers(1);
    let mut reader = echo_interface
        .endpoint::<Bulk, In>(0x81)
        .unwrap()
        .reader(64)
        .with_num_transfers(8)
        .with_read_timeout(Duration::from_millis(100));

    // Nothing has been written yet, so the blocking read should time out.
    assert_eq!(
        reader.fill_buf().unwrap_err().kind(),
        std::io::ErrorKind::TimedOut
    );

    // Read message-at-a-time, using short packets as delimiters.
    let mut pkt_reader = reader.until_short_packet();

    writer.write_all(&[1; 16]).unwrap();
    writer.flush_end().unwrap();
    writer.write_all(&[2; 128]).unwrap();
    writer.flush_end().unwrap();

    let mut v = Vec::new();
    pkt_reader.read_to_end(&mut v).unwrap();
    assert_eq!(&v[..], &[1; 16]);
    pkt_reader.consume_end().unwrap();

    let mut v = Vec::new();
    pkt_reader.read_to_end(&mut v).unwrap();
    assert_eq!(&v[..], &[2; 128]);
    pkt_reader.consume_end().unwrap();
}

View file

@ -3,10 +3,11 @@ use crate::{
decode_string_descriptor, validate_string_descriptor, ConfigurationDescriptor,
DeviceDescriptor, InterfaceDescriptor, DESCRIPTOR_TYPE_STRING,
},
io::{EndpointRead, EndpointWrite},
platform,
transfer::{
Buffer, BulkOrInterrupt, Completion, ControlIn, ControlOut, Direction, EndpointDirection,
EndpointType, TransferError,
EndpointType, In, Out, TransferError,
},
ActiveConfigurationError, DeviceInfo, Error, ErrorKind, GetDescriptorError, MaybeFuture, Speed,
};
@ -544,9 +545,9 @@ impl Debug for Interface {
/// a non-blocking operation that adds the operation to the queue. Completed
/// transfers can be popped from the queue synchronously or asynchronously.
///
/// This separation of submission and completion makes the API cancel-safe,
/// and makes it easy to submit multiple transfers at once, regardless of
/// whether you are using asynchronous or blocking waits.
/// This separation of submission and completion makes the API cancel-safe, and
/// makes it easy to submit multiple transfers at once, regardless of whether
/// you are using asynchronous or blocking waits.
///
/// To maximize throughput and minimize latency when streaming data, the host
/// controller needs to attempt a transfer in every possible frame. That
@ -555,6 +556,11 @@ impl Debug for Interface {
/// complete.
///
/// When the `Endpoint` is dropped, any pending transfers are cancelled.
///
/// This type provides a low-level API letting you manage transfers directly.
/// For a higher-level buffered API implementing [`std::io::Read`] and
/// [`std::io::Write`], use the adapters from [`nusb::io`][`crate::io`]. See
/// [`Self::reader`] and [`Self::writer`].
pub struct Endpoint<EpType, Dir> {
backend: platform::Endpoint,
ep_type: PhantomData<EpType>,
@ -592,6 +598,28 @@ impl<EpType: EndpointType, Dir: EndpointDirection> Endpoint<EpType, Dir> {
}
}
impl<EpType: BulkOrInterrupt> Endpoint<EpType, Out> {
    /// Create an [`EndpointWrite`] wrapping the given endpoint to provide a
    /// high-level buffered API implementing [`std::io::Write`] and async
    /// equivalents.
    ///
    /// `buffer_size` is the per-transfer buffer size; it is rounded up to a
    /// multiple of the endpoint's max packet size.
    ///
    /// See [`EndpointWrite::new`][`crate::io::EndpointWrite::new`] for details.
    pub fn writer(self, buffer_size: usize) -> EndpointWrite<EpType> {
        EndpointWrite::new(self, buffer_size)
    }
}
impl<EpType: BulkOrInterrupt> Endpoint<EpType, In> {
    /// Create an [`EndpointRead`] wrapping the given endpoint to provide a
    /// high-level buffered API implementing [`std::io::Read`] and async
    /// equivalents.
    ///
    /// `buffer_size` is the per-transfer buffer size; it is rounded up to a
    /// multiple of the endpoint's max packet size.
    ///
    /// See [`EndpointRead::new`][`crate::io::EndpointRead::new`] for details.
    pub fn reader(self, buffer_size: usize) -> EndpointRead<EpType> {
        EndpointRead::new(self, buffer_size)
    }
}
/// Methods for Bulk and Interrupt endpoints.
impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> Endpoint<EpType, Dir> {
/// Allocate a buffer for use on this endpoint, zero-copy if possible.

41
src/io/mod.rs Normal file
View file

@ -0,0 +1,41 @@
//! Adapters for using [`std::io`] traits and their async equivalents with Bulk
//! and Interrupt endpoints.
//!
//! These types wrap an [`Endpoint`](crate::Endpoint) and manage transfers to
//! provide a higher-level buffered API.
//!
//! ## Examples
//!
//! ### Request-response
//!
//! ```no_run
//! use std::{io::{Read, Write}, time::Duration};
//! use nusb::{self, MaybeFuture, transfer::{Bulk, In, Out}};
//! let device_info = nusb::list_devices().wait().unwrap()
//! .find(|dev| dev.vendor_id() == 0xAAAA && dev.product_id() == 0xBBBB)
//! .expect("device not connected");
//!
//! let device = device_info.open().wait().expect("failed to open device");
//! let interface = device.claim_interface(0).wait().expect("failed to claim interface");
//!
//! let mut tx = interface.endpoint::<Bulk, Out>(0x01).unwrap()
//! .writer(256)
//! .with_num_transfers(4);
//! let mut rx = interface.endpoint::<Bulk, In>(0x81).unwrap()
//! .reader(256)
//! .with_num_transfers(4)
//! .with_read_timeout(Duration::from_secs(1));
//!
//! tx.write_all(&[0x01, 0x02, 0x03]).unwrap();
//! tx.flush_end().unwrap();
//!
//! let mut rx_pkt = rx.until_short_packet();
//! let mut v = Vec::new();
//! rx_pkt.read_to_end(&mut v).unwrap();
//! rx_pkt.consume_end().unwrap();
//! ```
mod read;
pub use read::*;
mod write;
pub use write::*;

470
src/io/read.rs Normal file
View file

@ -0,0 +1,470 @@
use std::{
io::{BufRead, Read},
time::Duration,
};
#[cfg(any(feature = "tokio", feature = "smol"))]
use std::{
pin::Pin,
task::{ready, Context, Poll},
};
use crate::{
transfer::{Buffer, BulkOrInterrupt, In, TransferError},
Endpoint,
};
/// Wrapper for a Bulk or Interrupt IN [`Endpoint`](crate::Endpoint) that
/// implements [`Read`](std::io::Read) and [`BufRead`](std::io::BufRead) and
/// their `tokio` and `smol` async equivalents.
pub struct EndpointRead<EpType: BulkOrInterrupt> {
    endpoint: Endpoint<EpType, In>,
    // Completed transfer currently being consumed, if any.
    reading: Option<ReadBuffer>,
    // Number of transfers to keep pending with the OS (0 = submit on demand).
    num_transfers: usize,
    // Per-transfer buffer size, a multiple of the endpoint's max packet size.
    transfer_size: usize,
    // Timeout for the blocking `Read`/`BufRead` implementations only.
    read_timeout: Duration,
}
// A completed transfer plus the read position within it.
struct ReadBuffer {
    // Read position within `buf`. Set to `usize::MAX` by
    // `clear_short_packet` to mark a short-packet end as consumed.
    pos: usize,
    buf: Buffer,
    // Completion status of the transfer that produced `buf`.
    status: Result<(), TransferError>,
}
impl ReadBuffer {
    // Transfer error to surface, if any; `Cancelled` is filtered out so a
    // cancelled transfer does not fail the read.
    #[inline]
    fn error(&self) -> Option<TransferError> {
        self.status.err().filter(|e| *e != TransferError::Cancelled)
    }

    // True if unread bytes remain, or an error still needs to be reported.
    #[inline]
    fn has_remaining(&self) -> bool {
        self.pos < self.buf.len() || self.error().is_some()
    }

    // Like `has_remaining`, but also true while positioned at the end of a
    // short packet: fewer bytes were received (`buf.len()`) than requested
    // (`buf.requested_len()`) and `clear_short_packet` has not been called.
    #[inline]
    fn has_remaining_or_short_end(&self) -> bool {
        self.pos < self.buf.requested_len() || self.error().is_some()
    }

    // Mark the short-packet end as consumed. The `usize::MAX` sentinel makes
    // both `has_remaining` and `has_remaining_or_short_end` return false.
    #[inline]
    fn clear_short_packet(&mut self) {
        self.pos = usize::MAX
    }

    // Unread portion of the buffer, or the pending error once all data has
    // been consumed.
    #[inline]
    fn remaining(&self) -> Result<&[u8], std::io::Error> {
        let remaining = &self.buf[self.pos..];
        match (remaining.len(), self.error()) {
            (0, Some(e)) => Err(e.into()),
            _ => Ok(remaining),
        }
    }

    // Advance the read position; panics if `len` exceeds the unread bytes.
    // (`saturating_sub` keeps the check valid when `pos` is the sentinel.)
    #[inline]
    fn consume(&mut self, len: usize) {
        let remaining = self.buf.len().saturating_sub(self.pos);
        assert!(len <= remaining, "consumed more than available");
        self.pos += len;
    }
}
/// Copy as many bytes as fit from the front of `src` into the front of
/// `dest`, returning the number of bytes copied.
fn copy_min(dest: &mut [u8], src: &[u8]) -> usize {
    let n = usize::min(dest.len(), src.len());
    let (head, _) = dest.split_at_mut(n);
    head.copy_from_slice(&src[..n]);
    n
}
impl<EpType: BulkOrInterrupt> EndpointRead<EpType> {
    /// Create a new `EndpointRead` wrapping the given endpoint.
    ///
    /// The `transfer_size` parameter is the size of the buffer passed to the OS
    /// for each transfer. It will be rounded up to the next multiple of the
    /// endpoint's max packet size.
    pub fn new(endpoint: Endpoint<EpType, In>, transfer_size: usize) -> Self {
        let packet_size = endpoint.max_packet_size();
        // Round up to a whole number of packets, with a minimum of one packet.
        let transfer_size = (transfer_size.div_ceil(packet_size)).max(1) * packet_size;
        Self {
            endpoint,
            reading: None,
            num_transfers: 0,
            transfer_size,
            read_timeout: Duration::MAX,
        }
    }

    /// Set the number of transfers to maintain pending at all times.
    ///
    /// A value of 0 means that transfers will only be submitted when calling
    /// `read()` or `fill_buf()` and the buffer is empty. To maximize throughput,
    /// a value of 2 or more is recommended for applications that stream data
    /// continuously.
    ///
    /// This submits more transfers when increasing the number, but does not
    /// cancel transfers when decreasing it.
    pub fn set_num_transfers(&mut self, num_transfers: usize) {
        self.num_transfers = num_transfers;
        // Top up the queue immediately so the new level takes effect now.
        while self.endpoint.pending() < num_transfers {
            let buf = self.endpoint.allocate(self.transfer_size);
            self.endpoint.submit(buf);
        }
    }

    /// Set the number of transfers to maintain pending at all times.
    ///
    /// See [Self::set_num_transfers] -- this is for method chaining with `EndpointRead::new()`.
    pub fn with_num_transfers(mut self, num_transfers: usize) -> Self {
        self.set_num_transfers(num_transfers);
        self
    }

    /// Set the timeout for waiting for a transfer in the blocking `read` APIs.
    ///
    /// This affects the `std::io::Read` and `std::io::BufRead` implementations
    /// only, and not the async trait implementations.
    ///
    /// When a timeout occurs, the call fails but the transfer is not cancelled
    /// and may complete later if the read is retried.
    pub fn set_read_timeout(&mut self, timeout: Duration) {
        self.read_timeout = timeout;
    }

    /// Set the timeout for an individual transfer for the blocking `read` APIs.
    ///
    /// See [Self::set_read_timeout] -- this is for method chaining with `EndpointRead::new()`.
    pub fn with_read_timeout(mut self, timeout: Duration) -> Self {
        self.set_read_timeout(timeout);
        self
    }

    /// Cancel all pending transfers.
    ///
    /// They will be re-submitted on the next read.
    pub fn cancel_all(&mut self) {
        self.endpoint.cancel_all();
    }

    /// Destroy this `EndpointRead` and return the underlying [`Endpoint`].
    ///
    /// Any pending transfers are not cancelled.
    pub fn into_inner(self) -> Endpoint<EpType, In> {
        self.endpoint
    }

    /// Get an [`EndpointReadUntilShortPacket`] adapter that will read only until
    /// the end of a short or zero-length packet.
    ///
    /// Some USB protocols use packets shorter than the endpoint's max packet size
    /// as a delimiter marking the end of a message. By default, [`EndpointRead`]
    /// ignores packet boundaries, but this adapter allows you to observe these
    /// delimiters.
    pub fn until_short_packet(&mut self) -> EndpointReadUntilShortPacket<EpType> {
        EndpointReadUntilShortPacket { reader: self }
    }

    // True if the current buffer has unread bytes or a reportable error.
    #[inline]
    fn has_data(&self) -> bool {
        self.reading.as_ref().is_some_and(|r| r.has_remaining())
    }

    // Like `has_data`, but also true when positioned at the end of a short
    // packet (all received bytes consumed, fewer received than requested).
    #[inline]
    fn has_data_or_short_end(&self) -> bool {
        self.reading
            .as_ref()
            .is_some_and(|r| r.has_remaining_or_short_end())
    }

    // Return the fully-consumed current buffer to the endpoint as a fresh
    // transfer.
    fn resubmit(&mut self) {
        if let Some(c) = self.reading.take() {
            debug_assert!(!c.has_remaining());
            self.endpoint.submit(c.buf);
        }
    }

    // Ensure at least one transfer (or `num_transfers`, if greater) is
    // pending with the OS before waiting.
    fn start_read(&mut self) {
        let t = usize::max(1, self.num_transfers);
        if self.endpoint.pending() < t {
            self.resubmit();
            while self.endpoint.pending() < t {
                let buf = self.endpoint.allocate(self.transfer_size);
                self.endpoint.submit(buf);
            }
        }
    }

    // Unread bytes of the current buffer; panics if no buffer is present
    // (callers check `has_data*` first).
    #[inline]
    fn remaining(&self) -> Result<&[u8], std::io::Error> {
        self.reading.as_ref().unwrap().remaining()
    }

    // Mark `len` bytes of the current buffer as read.
    #[inline]
    fn consume(&mut self, len: usize) {
        if let Some(ref mut c) = self.reading {
            c.consume(len);
        } else {
            assert!(len == 0, "consumed more than available");
        }
    }

    // Blocking: wait (up to `read_timeout`) for the next completed transfer
    // and install it as the current buffer.
    fn wait(&mut self) -> Result<(), std::io::Error> {
        self.start_read();
        let c = self.endpoint.wait_next_complete(self.read_timeout);
        let c = c.ok_or(std::io::Error::new(
            std::io::ErrorKind::TimedOut,
            "timeout waiting for read",
        ))?;
        self.reading = Some(ReadBuffer {
            pos: 0,
            buf: c.buffer,
            status: c.status,
        });
        Ok(())
    }

    // Async: poll for the next completed transfer and install it as the
    // current buffer.
    #[cfg(any(feature = "tokio", feature = "smol"))]
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        self.start_read();
        let c = ready!(self.endpoint.poll_next_complete(cx));
        self.reading = Some(ReadBuffer {
            pos: 0,
            buf: c.buffer,
            status: c.status,
        });
        Poll::Ready(())
    }

    // Async equivalent of `BufRead::fill_buf`: ready once unread bytes (or an
    // error) are available.
    #[cfg(any(feature = "tokio", feature = "smol"))]
    #[inline]
    fn poll_fill_buf(&mut self, cx: &mut Context<'_>) -> Poll<Result<&[u8], std::io::Error>> {
        while !self.has_data() {
            ready!(self.poll(cx));
        }
        Poll::Ready(self.remaining())
    }

    // Like `poll_fill_buf`, but also ready (with an empty slice) at the end
    // of a short packet instead of waiting for more data.
    #[cfg(any(feature = "tokio", feature = "smol"))]
    #[inline]
    fn poll_fill_buf_until_short(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Result<&[u8], std::io::Error>> {
        while !self.has_data_or_short_end() {
            ready!(self.poll(cx));
        }
        Poll::Ready(self.remaining())
    }
}
impl<EpType: BulkOrInterrupt> Read for EndpointRead<EpType> {
    /// Fill `buf` from the internal transfer buffer, blocking (up to the
    /// configured read timeout) until at least one byte is available.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
        let n = copy_min(buf, self.fill_buf()?);
        self.consume(n);
        Ok(n)
    }
}
impl<EpType: BulkOrInterrupt> BufRead for EndpointRead<EpType> {
    /// Wait (up to the configured read timeout) until buffered data or an
    /// error is available, then return the unread bytes.
    #[inline]
    fn fill_buf(&mut self) -> Result<&[u8], std::io::Error> {
        while !self.has_data() {
            self.wait()?;
        }
        self.remaining()
    }

    #[inline]
    fn consume(&mut self, len: usize) {
        // Delegates to the inherent `consume`; inherent methods take
        // precedence over trait methods, so this does not recurse.
        self.consume(len);
    }
}
#[cfg(feature = "tokio")]
impl<EpType: BulkOrInterrupt> tokio::io::AsyncRead for EndpointRead<EpType> {
    /// Copy buffered bytes into `buf`, polling for a completed transfer when
    /// the internal buffer is empty.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        let me = self.get_mut();
        let available = ready!(me.poll_fill_buf(cx))?;
        let n = buf.remaining().min(available.len());
        buf.put_slice(&available[..n]);
        me.consume(n);
        Poll::Ready(Ok(()))
    }
}
#[cfg(feature = "tokio")]
impl<EpType: BulkOrInterrupt> tokio::io::AsyncBufRead for EndpointRead<EpType> {
    /// Delegate to the inherent `poll_fill_buf`.
    fn poll_fill_buf(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<&[u8], std::io::Error>> {
        self.get_mut().poll_fill_buf(cx)
    }

    /// Delegate to the inherent `consume`.
    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.get_mut().consume(amt);
    }
}
#[cfg(feature = "smol")]
impl<EpType: BulkOrInterrupt> futures_io::AsyncRead for EndpointRead<EpType> {
    /// Copy buffered bytes into `buf`, polling for a completed transfer when
    /// the internal buffer is empty.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        let me = self.get_mut();
        let n = copy_min(buf, ready!(me.poll_fill_buf(cx))?);
        me.consume(n);
        Poll::Ready(Ok(n))
    }
}
#[cfg(feature = "smol")]
impl<EpType: BulkOrInterrupt> futures_io::AsyncBufRead for EndpointRead<EpType> {
    /// Delegate to the inherent `poll_fill_buf`.
    fn poll_fill_buf(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<&[u8], std::io::Error>> {
        self.get_mut().poll_fill_buf(cx)
    }

    /// Delegate to the inherent `consume`.
    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.get_mut().consume(amt);
    }
}
/// Adapter for [`EndpointRead`] that ends after a short or zero-length packet.
///
/// This can be obtained from [`EndpointRead::until_short_packet()`]. It does
/// not have any state other than that of the underlying [`EndpointRead`], so
/// dropping and re-creating with another call to
/// [`EndpointRead::until_short_packet()`] has no effect.
pub struct EndpointReadUntilShortPacket<'a, EpType: BulkOrInterrupt> {
    reader: &'a mut EndpointRead<EpType>,
}
impl<EpType: BulkOrInterrupt> EndpointReadUntilShortPacket<'_, EpType> {
    /// Check if the underlying endpoint has reached the end of a short packet.
    ///
    /// Upon reading the end of a short packet, the next `read()` or `fill_buf()`
    /// will return 0 bytes (EOF). To read the next message, call `consume_end()`.
    pub fn is_end(&self) -> bool {
        // At the end of a short packet: all received bytes have been consumed
        // (`!has_remaining`), yet fewer bytes were received than requested
        // (`has_remaining_or_short_end`).
        self.reader
            .reading
            .as_ref()
            .is_some_and(|r| !r.has_remaining() && r.has_remaining_or_short_end())
    }

    /// Consume the end of a short packet.
    ///
    /// Use this after `read()` or `fill_buf()` have returned EOF to reset the reader
    /// to read the next message.
    ///
    /// Returns an error and does nothing if the reader [is not at the end of a short packet](Self::is_end).
    pub fn consume_end(&mut self) -> Result<(), ()> {
        if self.is_end() {
            self.reader.reading.as_mut().unwrap().clear_short_packet();
            Ok(())
        } else {
            Err(())
        }
    }
}
impl<EpType: BulkOrInterrupt> Read for EndpointReadUntilShortPacket<'_, EpType> {
    /// Fill `buf` from the internal transfer buffer, returning 0 (EOF) at the
    /// end of a short or zero-length packet.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
        let n = copy_min(buf, self.fill_buf()?);
        self.reader.consume(n);
        Ok(n)
    }
}
impl<EpType: BulkOrInterrupt> BufRead for EndpointReadUntilShortPacket<'_, EpType> {
    /// Wait until data or a short-packet end is available. Returns an empty
    /// slice (EOF) at the end of a short packet; call
    /// [`EndpointReadUntilShortPacket::consume_end`] to proceed to the next
    /// message.
    #[inline]
    fn fill_buf(&mut self) -> Result<&[u8], std::io::Error> {
        while !self.reader.has_data_or_short_end() {
            self.reader.wait()?;
        }
        self.reader.remaining()
    }

    #[inline]
    fn consume(&mut self, len: usize) {
        // Fix: the branches were inverted, so consuming any bytes after a
        // successful `fill_buf` hit the `len == 0` assertion. When data (or a
        // short-packet end) is available, delegate to the reader, which
        // bounds-checks the amount; only when nothing is buffered must `len`
        // be zero.
        if self.reader.has_data_or_short_end() {
            self.reader.consume(len);
        } else {
            assert!(len == 0, "consumed more than available");
        }
    }
}
#[cfg(feature = "tokio")]
impl<EpType: BulkOrInterrupt> tokio::io::AsyncRead for EndpointReadUntilShortPacket<'_, EpType> {
    /// Copy buffered bytes into `buf`, stopping at a short-packet boundary.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        let me = self.get_mut();
        let available = ready!(me.reader.poll_fill_buf_until_short(cx))?;
        let n = buf.remaining().min(available.len());
        buf.put_slice(&available[..n]);
        me.reader.consume(n);
        Poll::Ready(Ok(()))
    }
}
#[cfg(feature = "tokio")]
impl<EpType: BulkOrInterrupt> tokio::io::AsyncBufRead for EndpointReadUntilShortPacket<'_, EpType> {
    fn poll_fill_buf(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<&[u8], std::io::Error>> {
        // Fix: use the short-packet-aware fill so this adapter reports EOF at
        // a short or zero-length packet, matching its `AsyncRead` and blocking
        // `BufRead` implementations. It previously delegated to the plain
        // `poll_fill_buf`, which ignores packet boundaries.
        Pin::into_inner(self).reader.poll_fill_buf_until_short(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        Pin::into_inner(self).reader.consume(amt);
    }
}
#[cfg(feature = "smol")]
impl<EpType: BulkOrInterrupt> futures_io::AsyncRead for EndpointReadUntilShortPacket<'_, EpType> {
    /// Copy buffered bytes into `buf`, stopping at a short-packet boundary.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        let me = self.get_mut();
        let n = copy_min(buf, ready!(me.reader.poll_fill_buf_until_short(cx))?);
        me.reader.consume(n);
        Poll::Ready(Ok(n))
    }
}
#[cfg(feature = "smol")]
impl<EpType: BulkOrInterrupt> futures_io::AsyncBufRead
    for EndpointReadUntilShortPacket<'_, EpType>
{
    fn poll_fill_buf(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<&[u8], std::io::Error>> {
        // Fix: use the short-packet-aware fill so this adapter reports EOF at
        // a short or zero-length packet, matching its `AsyncRead` and blocking
        // `BufRead` implementations. It previously delegated to the plain
        // `poll_fill_buf`, which ignores packet boundaries.
        Pin::into_inner(self).reader.poll_fill_buf_until_short(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        Pin::into_inner(self).reader.consume(amt);
    }
}

316
src/io/write.rs Normal file
View file

@ -0,0 +1,316 @@
use crate::{
transfer::{Buffer, BulkOrInterrupt, Completion, Out},
Endpoint,
};
use std::{
future::poll_fn,
io::{Error, ErrorKind, Write},
task::{Context, Poll},
time::Duration,
};
#[cfg(any(feature = "tokio", feature = "smol"))]
use std::{pin::Pin, task::ready};
/// Wrapper for a Bulk or Interrupt OUT [`Endpoint`](crate::Endpoint) that
/// implements [`Write`](std::io::Write) and its `tokio` and `smol` async equivalents.
pub struct EndpointWrite<EpType: BulkOrInterrupt> {
    endpoint: Endpoint<EpType, Out>,
    // Partially-filled buffer awaiting more data, if any.
    writing: Option<Buffer>,
    // Per-transfer buffer size, a multiple of the endpoint's max packet size.
    transfer_size: usize,
    // Max transfers queued with the OS before writes block / return Pending.
    num_transfers: usize,
    // Timeout for the blocking `Write` implementation only.
    write_timeout: Duration,
}
impl<EpType: BulkOrInterrupt> EndpointWrite<EpType> {
    /// Create a new `EndpointWrite` wrapping the given endpoint.
    ///
    /// The `transfer_size` parameter is the size of the buffer passed to the OS
    /// for each transfer. It will be rounded up to the next multiple of the
    /// endpoint's max packet size. Data will be buffered and sent in chunks of
    /// this size, unless `flush` or [`submit`](Self::submit) are
    /// called to force sending a partial buffer immediately.
    pub fn new(endpoint: Endpoint<EpType, Out>, transfer_size: usize) -> Self {
        let packet_size = endpoint.max_packet_size();
        // Round up to a whole number of packets, with a minimum of one packet.
        let transfer_size = (transfer_size.div_ceil(packet_size)).max(1) * packet_size;
        Self {
            endpoint,
            writing: None,
            transfer_size,
            num_transfers: 1,
            write_timeout: Duration::MAX,
        }
    }

    /// Set the maximum number of transfers that can be queued with the OS
    /// before backpressure is applied.
    ///
    /// If more than `num_transfers` transfers are pending, calls to `write`
    /// will block or async methods will return `Pending` until a transfer
    /// completes.
    pub fn set_num_transfers(&mut self, num_transfers: usize) {
        self.num_transfers = num_transfers;
    }

    /// Set the maximum number of transfers that can be queued with the OS
    /// before backpressure is applied.
    ///
    /// See [Self::set_num_transfers] -- this is for method chaining with `EndpointWrite::new()`.
    pub fn with_num_transfers(mut self, num_transfers: usize) -> Self {
        self.set_num_transfers(num_transfers);
        self
    }

    /// Set the timeout for a transfer in the blocking `write` APIs.
    ///
    /// This affects the `std::io::Write` implementation only, and not the async
    /// trait implementations.
    ///
    /// When a timeout occurs, writing new data fails but transfers for
    /// previously-written data are not cancelled. The data passed in the failed
    /// `write` call is not written to the buffer, though note that functions
    /// like `write_all` that call `write` multiple times may have successfully
    /// written some of the data.
    pub fn set_write_timeout(&mut self, timeout: Duration) {
        self.write_timeout = timeout;
    }

    /// Set the timeout for an individual transfer for the blocking `write` APIs.
    ///
    /// See [Self::set_write_timeout] -- this is for method chaining with `EndpointWrite::new()`.
    pub fn with_write_timeout(mut self, timeout: Duration) -> Self {
        self.set_write_timeout(timeout);
        self
    }

    /// Destroy this `EndpointWrite` and return the underlying [`Endpoint`].
    ///
    /// Any pending transfers are not cancelled.
    pub fn into_inner(self) -> Endpoint<EpType, Out> {
        self.endpoint
    }

    // Process a completed transfer: recycle its buffer for the next write
    // when useful, and surface any transfer error as `std::io::Error`.
    fn handle_completion(&mut self, c: Completion) -> Result<(), Error> {
        debug_assert_eq!(self.writing.as_ref().map_or(0, |b| b.len()), 0);
        let mut buf = c.buffer;
        // Keep the buffer for reuse unless it is the zero-length ZLP marker
        // (capacity 0) or we are already at the transfer limit.
        if buf.capacity() > 0 && self.endpoint.pending() < self.num_transfers {
            debug_assert!(buf.capacity() == self.transfer_size);
            buf.clear();
            self.writing = Some(buf);
        }
        Ok(c.status?)
    }

    // Blocking: wait (up to `write_timeout`) for one transfer to complete.
    fn wait_one(&mut self) -> Result<(), Error> {
        let t = self.endpoint.wait_next_complete(self.write_timeout);
        let t = t.ok_or_else(|| Error::new(ErrorKind::TimedOut, "write timeout"))?;
        self.handle_completion(t)
    }

    // Async: poll for one transfer to complete.
    fn poll_one(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
        self.endpoint
            .poll_next_complete(cx)
            .map(|c| self.handle_completion(c))
    }

    // Async: buffer `src` into the current transfer, submitting it when full;
    // pending while `num_transfers` transfers are already queued.
    #[cfg(any(feature = "tokio", feature = "smol"))]
    fn poll_write(&mut self, cx: &mut Context<'_>, src: &[u8]) -> Poll<Result<usize, Error>> {
        // Obtain a buffer to append to: reuse the partially-filled one,
        // allocate a fresh one if below the transfer limit, or wait for a
        // completion to recycle a buffer.
        let buf = loop {
            if let Some(buf) = self.writing.as_mut() {
                break buf;
            }
            if self.endpoint.pending() < self.num_transfers {
                self.writing = Some(self.endpoint.allocate(self.transfer_size));
            } else {
                ready!(self.poll_one(cx))?;
            }
        };
        let len = src.len().min(buf.remaining_capacity());
        buf.extend_from_slice(&src[..len]);
        if buf.remaining_capacity() == 0 {
            self.endpoint.submit(self.writing.take().unwrap());
        }
        Poll::Ready(Ok(len))
    }

    /// Submit any buffered data to the OS immediately.
    ///
    /// This submits the current buffer even if it is not full, but does not
    /// wait for the transfer to complete or confirm that it was successful
    /// (see [Write::flush]). If the buffer is empty, this does nothing.
    pub fn submit(&mut self) {
        if self.writing.as_ref().is_some_and(|b| !b.is_empty()) {
            self.endpoint.submit(self.writing.take().unwrap())
        }
    }

    /// Submit any buffered data to the OS immediately, terminating with a short
    /// or zero-length packet.
    ///
    /// Some USB protocols use packets shorter than the endpoint's max packet
    /// size as a delimiter marking the end of a message. This method forces
    /// such a delimiter by adding a zero-length packet if the current buffer is
    /// a multiple of the endpoint's max packet size.
    ///
    /// This does not wait for the transfer to complete or confirm that it was
    /// successful (see [Self::flush_end]). If the buffer is empty, this sends a
    /// zero-length packet.
    pub fn submit_end(&mut self) {
        // An explicit ZLP is needed only when the submitted buffer is a
        // nonzero multiple of the max packet size (an empty buffer is itself
        // a zero-length transfer).
        let zlp = if let Some(t) = self.writing.take() {
            let len = t.len();
            self.endpoint.submit(t);
            len != 0 && len % self.endpoint.max_packet_size() == 0
        } else {
            true
        };
        if zlp {
            self.endpoint.submit(Buffer::new(0));
        }
    }

    /// Submit any buffered data immediately and wait for all pending transfers
    /// to complete or fail.
    fn flush_blocking(&mut self) -> Result<(), Error> {
        self.submit();
        while self.endpoint.pending() > 0 {
            self.wait_one()?;
        }
        Ok(())
    }

    /// Submit any buffered data immediately, terminating with a short or
    /// zero-length packet, and wait for all pending transfers to complete or
    /// fail.
    pub fn flush_end(&mut self) -> Result<(), Error> {
        self.submit_end();
        while self.endpoint.pending() > 0 {
            self.wait_one()?;
        }
        Ok(())
    }

    // Async equivalent of `flush_blocking`.
    #[cfg(any(feature = "tokio", feature = "smol"))]
    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
        self.submit();
        while self.endpoint.pending() > 0 {
            ready!(self.poll_one(cx))?;
        }
        Poll::Ready(Ok(()))
    }

    /// Submit any buffered data immediately, terminating with a short or zero-length
    /// packet, and wait for all pending transfers to complete.
    ///
    /// Async version of [Self::flush_end].
    pub async fn flush_end_async(&mut self) -> Result<(), Error> {
        self.submit_end();
        while self.endpoint.pending() > 0 {
            poll_fn(|cx| self.poll_one(cx)).await?;
        }
        Ok(())
    }
}
impl<EpType: BulkOrInterrupt> Write for EndpointWrite<EpType> {
    /// Write data to the endpoint.
    ///
    /// Data is buffered and not written until the buffer is full or `submit()`
    /// or `flush()` are called. Writing will block if there are already too
    /// many transfers pending, as configured by
    /// [`set_num_transfers`][EndpointWrite::set_num_transfers].
    fn write(&mut self, src: &[u8]) -> std::io::Result<usize> {
        // Obtain a buffer to append to: reuse the partially-filled one,
        // allocate a fresh one while below the transfer limit, or block until
        // a completion recycles a buffer.
        while self.writing.is_none() {
            if self.endpoint.pending() < self.num_transfers {
                self.writing = Some(self.endpoint.allocate(self.transfer_size));
            } else {
                self.wait_one()?;
            }
        }
        let buf = self.writing.as_mut().unwrap();
        let n = src.len().min(buf.remaining_capacity());
        buf.extend_from_slice(&src[..n]);
        // A full buffer is handed to the OS right away.
        if buf.remaining_capacity() == 0 {
            self.endpoint.submit(self.writing.take().unwrap());
        }
        Ok(n)
    }

    /// Submit any buffered data immediately and wait for all pending transfers
    /// to complete or fail.
    fn flush(&mut self) -> std::io::Result<()> {
        self.flush_blocking()
    }
}
#[cfg(feature = "smol")]
impl<EpType: BulkOrInterrupt> futures_io::AsyncWrite for EndpointWrite<EpType> {
    /// Buffer data for the endpoint; returns [`Poll::Pending`] when the
    /// configured number of transfers is already queued
    /// (see [`set_num_transfers`][EndpointWrite::set_num_transfers]).
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        self.get_mut().poll_write(cx, buf)
    }

    /// Submit any buffered data immediately and wait for all pending transfers
    /// to complete or fail.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        self.get_mut().poll_flush(cx)
    }

    /// Closing is implemented as a flush; no other shutdown action is taken.
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        self.get_mut().poll_flush(cx)
    }
}
#[cfg(feature = "tokio")]
impl<EpType: BulkOrInterrupt> tokio::io::AsyncWrite for EndpointWrite<EpType> {
    /// Buffer data for the endpoint; returns [`Poll::Pending`] when the
    /// configured number of transfers is already queued
    /// (see [`set_num_transfers`][EndpointWrite::set_num_transfers]).
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        self.get_mut().poll_write(cx, buf)
    }

    /// Submit any buffered data immediately and wait for all pending transfers
    /// to complete or fail.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        self.get_mut().poll_flush(cx)
    }

    /// Shutdown is implemented as a flush; no other shutdown action is taken.
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        self.get_mut().poll_flush(cx)
    }
}

View file

@ -153,6 +153,8 @@ pub use maybe_future::MaybeFuture;
mod bitset;
pub mod io;
mod error;
pub use error::{ActiveConfigurationError, Error, ErrorKind, GetDescriptorError};

View file

@ -108,7 +108,7 @@ impl EndpointDirection for Out {
}
/// Type-level endpoint transfer type
pub trait EndpointType: private::Sealed + Send + Sync {
pub trait EndpointType: private::Sealed + Send + Sync + Unpin {
/// Runtime transfer type value
const TYPE: TransferType;
}