Expose buffer instead of transfer

This commit is contained in:
Kevin Mehall 2025-03-23 20:49:03 -06:00
parent ba3f7d65cf
commit 992fd16078
17 changed files with 556 additions and 596 deletions

View file

@ -22,7 +22,7 @@ env_logger = "0.10.0"
futures-lite = "1.13.0"
[target.'cfg(any(target_os="linux", target_os="android"))'.dependencies]
rustix = { version = "1.0.1", features = ["fs", "event", "net", "time"] }
rustix = { version = "1.0.1", features = ["fs", "event", "net", "time", "mm"] }
linux-raw-sys = { version = "0.9.2", features = ["ioctl"] }
[target.'cfg(target_os="windows")'.dependencies]

View file

@ -1,6 +1,6 @@
use futures_lite::future::block_on;
use nusb::{
transfer::{Bulk, In, Out},
transfer::{Buffer, Bulk, In, Out},
MaybeFuture,
};
@ -18,20 +18,16 @@ fn main() {
let interface = device.claim_interface(0).wait().unwrap();
let mut ep_out = interface.endpoint::<Bulk, Out>(0x02).unwrap();
let mut ep_in = interface.endpoint::<Bulk, In>(0x81).unwrap();
let mut transfer = ep_out.allocate(64);
transfer.extend_from_slice(&[1, 2, 3, 4, 5]);
ep_out.submit(transfer);
block_on(ep_out.next_complete()).status().unwrap();
ep_out.submit(vec![1, 2, 3, 4, 5].into());
block_on(ep_out.next_complete()).status.unwrap();
loop {
while ep_in.pending() < 8 {
let transfer = ep_in.allocate(256);
ep_in.submit(transfer);
ep_in.submit(Buffer::new(256));
}
let result = block_on(ep_in.next_complete());
println!("{result:?}");
if result.status().is_err() {
if result.status.is_err() {
break;
}
}

View file

@ -5,22 +5,17 @@ use crate::{
},
platform,
transfer::{
BulkOrInterrupt, ControlIn, ControlOut, Direction, EndpointDirection, EndpointType, In,
Out, TransferError,
Buffer, BulkOrInterrupt, ControlIn, ControlOut, Direction, EndpointDirection, EndpointType,
TransferError,
},
util::write_copy_of_slice,
DeviceInfo, Error, MaybeFuture, Speed,
};
use core::slice;
use log::error;
use std::{
fmt::Debug,
future::{poll_fn, Future},
io::ErrorKind,
marker::PhantomData,
mem::MaybeUninit,
num::NonZeroU8,
ops::{Deref, DerefMut},
sync::Arc,
task::{Context, Poll},
time::Duration,
@ -556,79 +551,21 @@ pub struct Endpoint<EpType, Dir> {
ep_dir: PhantomData<Dir>,
}
/// Methods for all endpoints.
impl<EpType: EndpointType, Dir: EndpointDirection> Endpoint<EpType, Dir> {
/// Get the endpoint address.
pub fn endpoint_address(&self) -> u8 {
self.backend.endpoint_address()
}
}
/// Methods for Bulk and Interrupt endpoints.
impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> Endpoint<EpType, Dir> {
/// Get the maximum packet size for this endpoint.
///
/// Transfers can consist of multiple packets, but are split into packets
/// of this size when transmitted.
/// of this size on the bus.
pub fn max_packet_size(&self) -> usize {
self.backend.max_packet_size
}
/// Create a transfer with a buffer of `len` bytes.
///
/// `len` is rounded up to a multiple of `max_packet_size`.
///
/// For an `IN` endpoint, the request length defaults to `len`. For an `OUT`
/// endpoint, `len` is the capacity which can be written to the `Request`
/// before submitting it.
pub fn allocate(&mut self, len: usize) -> Request<EpType, Dir> {
let len = len.div_ceil(self.max_packet_size()) * self.max_packet_size();
Request {
transfer: self.backend.make_transfer(len),
_phantom: PhantomData,
}
}
/// Begin a transfer on the endpoint.
///
/// Submitted transfers are queued and completed in order. Once the transfer
/// completes, it will be returned from [`Self::next_complete`]. Any error
/// in submitting or performing the transfer is deferred until
/// [`next_complete`][`Self::next_complete`].
pub fn submit(&mut self, transfer: Request<EpType, Dir>) {
self.backend.submit(transfer.transfer)
}
/// Return a `Future` that waits for the next pending transfer to complete.
///
/// This future is cancel-safe: it can be cancelled and re-created without
/// side effects, enabling its use in `select!{}` or similar.
///
/// ## Panics
/// * if there are no transfers pending (that is, if [`Self::pending()`]
/// would return 0).
pub fn next_complete(
&mut self,
) -> impl Future<Output = Completion<EpType, Dir>> + Send + Sync + '_ {
poll_fn(|cx| self.poll_next_complete(cx))
}
/// Poll for a pending transfer completion.
///
/// Returns a completed transfer if one is available, or arranges for the
/// context's waker to be notified when a transfer completes.
///
/// ## Panics
/// * if there are no transfers pending (that is, if [`Self::pending()`]
/// would return 0).
pub fn poll_next_complete(&mut self, cx: &mut Context<'_>) -> Poll<Completion<EpType, Dir>> {
self.backend
.poll_next_complete(cx)
.map(|transfer| Completion {
transfer,
_phantom: PhantomData,
})
}
/// Get the number of transfers that have been submitted with `submit` that
/// have not yet been returned from `next_complete`.
pub fn pending(&self) -> usize {
@ -643,6 +580,50 @@ impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> Endpoint<EpType, Dir> {
pub fn cancel_all(&mut self) {
self.backend.cancel_all()
}
}
/// Methods for Bulk and Interrupt endpoints.
impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> Endpoint<EpType, Dir> {
/// Begin a transfer on the endpoint.
///
/// Submitted transfers are queued and completed in order. Once the transfer
/// completes, it will be returned from [`Self::next_complete`]. Any error
/// in submitting or performing the transfer is deferred until
/// [`next_complete`][`Self::next_complete`].
///
/// For an OUT transfer, the buffer's `len` field is the number of bytes
/// initialized, which will be sent to the device.
///
/// For an IN transfer, the buffer's `transfer_len` field is the number of
/// bytes requested. It must be a multiple of the endpoint's [maximum packet
/// size][`Self::max_packet_size`].
pub fn submit(&mut self, buf: Buffer) {
self.backend.submit(buf)
}
/// Return a `Future` that waits for the next pending transfer to complete.
///
/// This future is cancel-safe: it can be cancelled and re-created without
/// side effects, enabling its use in `select!{}` or similar.
///
/// ## Panics
/// * if there are no transfers pending (that is, if [`Self::pending()`]
/// would return 0).
pub fn next_complete(&mut self) -> impl Future<Output = Completion> + Send + Sync + '_ {
poll_fn(|cx| self.poll_next_complete(cx))
}
/// Poll for a pending transfer completion.
///
/// Returns a completed transfer if one is available, or arranges for the
/// context's waker to be notified when a transfer completes.
///
/// ## Panics
/// * if there are no transfers pending (that is, if [`Self::pending()`]
/// would return 0).
pub fn poll_next_complete(&mut self, cx: &mut Context<'_>) -> Poll<Completion> {
self.backend.poll_next_complete(cx)
}
/// Clear the endpoint's halt / stall condition.
///
@ -660,232 +641,32 @@ impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> Endpoint<EpType, Dir> {
}
}
/// A transfer that has not yet been submitted.
///
/// A request contains of a fixed-size buffer and other platform-specific
/// resources used to perform the transfer.
///
/// Create a `Request` with [`Endpoint::allocate`], or turn a [`Completion`]
/// back into a `Request` with [`Completion::reuse`].
pub struct Request<EpType: EndpointType, Dir: EndpointDirection> {
transfer: platform::Transfer,
_phantom: PhantomData<(EpType, Dir)>,
}
impl<EpType: BulkOrInterrupt> Request<EpType, In> {
/// Get the allocated buffer length.
#[inline]
pub fn capacity(&self) -> usize {
self.transfer.buffer().len()
}
/// Get the number of bytes requested by this transfer.
#[inline]
pub fn len(&self) -> usize {
self.transfer.request_len()
}
/// Set the number of bytes requested by this transfer.
///
/// ## Panics
/// * If `len` is greater than the buffer [capacity][`Self::capacity`].
#[inline]
pub fn set_len(&mut self, len: usize) {
assert!(len <= self.capacity());
unsafe {
self.transfer.set_request_len(len);
}
}
}
impl<EpType: BulkOrInterrupt> Debug for Request<EpType, In> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Request")
.field(
"endpoint",
&format_args!("0x{:02X}", self.transfer.endpoint()),
)
.field("len", &self.len())
.finish()
}
}
impl<EpType: BulkOrInterrupt> Request<EpType, Out> {
/// Get the number of initialized bytes which will be sent if the transfer is submitted.
#[inline]
pub fn len(&self) -> usize {
self.transfer.request_len()
}
/// Get the allocated capacity of the buffer.
#[inline]
pub fn capacity(&self) -> usize {
self.transfer.buffer().len()
}
/// Get the number of bytes that can be written to the buffer.
///
/// This is a convenience method for `capacity() - len()`.
#[inline]
pub fn remaining_capacity(&self) -> usize {
self.capacity() - self.len()
}
/// Immutable access to the full allocated buffer, which may be uninitialized.
#[inline]
pub fn buffer(&self) -> &[MaybeUninit<u8>] {
self.transfer.buffer()
}
/// Mutable access to the full allocated buffer, which may be uninitialized.
#[inline]
pub fn buffer_mut(&mut self) -> &mut [MaybeUninit<u8>] {
self.transfer.buffer_mut()
}
/// Set the transfer length, assuming that it has been manually initialized.
///
/// ## Safety
/// * The buffer must be initialized up to `len`.
/// * `len` must be less than or equal to the buffer capacity.
#[inline]
pub unsafe fn set_len(&mut self, len: usize) {
self.transfer.set_request_len(len);
}
/// Clear the data by setting the length to zero.
#[inline]
pub fn clear(&mut self) {
unsafe {
self.set_len(0);
}
}
/// Append a slice of bytes to the transfer.
///
/// ## Panics
/// * If the buffer capacity is exceeded (`len() + slice.len() > capacity()`).
#[inline]
pub fn extend_from_slice<'a>(&mut self, slice: &'a [u8]) {
unsafe {
let prev_len = self.len();
let dest = self
.buffer_mut()
.get_mut(prev_len..prev_len + slice.len())
.expect("capacity exceeded");
write_copy_of_slice(dest, slice);
self.set_len(prev_len + slice.len())
}
}
}
impl<EpType: BulkOrInterrupt> Deref for Request<EpType, Out> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
unsafe { slice::from_raw_parts(self.buffer().as_ptr().cast::<u8>(), self.len()) }
}
}
impl<EpType: BulkOrInterrupt> DerefMut for Request<EpType, Out> {
fn deref_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self.buffer_mut().as_mut_ptr().cast::<u8>(), self.len())
}
}
}
impl<EpType: BulkOrInterrupt> Debug for Request<EpType, Out> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Request")
.field(
"endpoint",
&format_args!("0x{:02X}", self.transfer.endpoint()),
)
.field("len", &self.len())
.field("data", &&self[..])
.finish()
}
}
/// A completed transfer returned from [`Endpoint::next_complete`].
///
/// A transfer can partially complete even in the case of failure or
/// cancellation, thus the [`actual_len`][`Self::actual_len`] may be nonzero
/// even if the [`status`][`Self::status`] is an error.
///
/// An `IN` transfer's received data is accessed by accessing the Completion
/// as a slice of bytes via `Deref`.
pub struct Completion<EpType: EndpointType, D: EndpointDirection> {
transfer: platform::Transfer,
_phantom: PhantomData<(EpType, D)>,
#[derive(Debug)]
pub struct Completion {
/// The transfer buffer.
pub data: Buffer,
/// Status of the transfer.
pub status: Result<(), TransferError>,
}
impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> Completion<EpType, Dir> {
/// Get the status of the transfer.
pub fn status(&self) -> Result<(), TransferError> {
self.transfer.status()
}
/// Get the number of bytes transferred.
pub fn actual_len(&self) -> usize {
self.transfer.actual_len()
}
/// Turn the transfer back into a `Request`, reusing the buffer.
///
/// An `OUT` `Request`'s length is reset to zero so new data can be written to
/// the `Request`. An `IN` `Request`'s length is unchanged.
pub fn reuse(mut self) -> Request<EpType, Dir> {
if Dir::DIR == Direction::In {
unsafe {
self.transfer.set_request_len(0);
}
}
Request {
transfer: self.transfer,
_phantom: PhantomData,
}
}
}
impl<EpType: BulkOrInterrupt, Dir: EndpointDirection> From<Completion<EpType, Dir>>
for Request<EpType, Dir>
{
fn from(value: Completion<EpType, Dir>) -> Self {
value.reuse()
}
}
impl<EpType: BulkOrInterrupt> Debug for Completion<EpType, Out> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Completion")
.field("status", &self.status())
.field("len", &self.actual_len())
.finish()
}
}
impl<EpType: BulkOrInterrupt> Deref for Completion<EpType, In> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
unsafe { slice::from_raw_parts(self.transfer.buffer().as_ptr().cast(), self.actual_len()) }
}
}
impl<EpType: BulkOrInterrupt> Debug for Completion<EpType, In> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Completion")
.field("status", &self.status())
.field("data", &&self[..])
.finish()
impl Completion {
/// Ignore any partial completion, turning `self` into a `Result` containing
/// either the completed buffer for a successful transfer or a
/// `TransferError`.
pub fn into_result(self) -> Result<Buffer, TransferError> {
self.status.map(|()| self.data)
}
}
#[test]
fn assert_send_sync() {
use crate::transfer::{Bulk, Interrupt};
use crate::transfer::{Bulk, In, Interrupt, Out};
fn require_send_sync<T: Send + Sync>() {}
require_send_sync::<Interface>();
@ -894,8 +675,4 @@ fn assert_send_sync() {
require_send_sync::<Endpoint<Bulk, Out>>();
require_send_sync::<Endpoint<Interrupt, In>>();
require_send_sync::<Endpoint<Interrupt, Out>>();
require_send_sync::<Request<Bulk, In>>();
require_send_sync::<Request<Bulk, Out>>();
require_send_sync::<Completion<Bulk, In>>();
require_send_sync::<Completion<Bulk, Out>>();
}

View file

@ -139,11 +139,10 @@ mod platform;
pub mod descriptors;
mod enumeration;
mod util;
pub use enumeration::{BusInfo, DeviceId, DeviceInfo, InterfaceInfo, Speed, UsbControllerType};
mod device;
pub use device::{ClaimEndpointError, Completion, Device, Endpoint, Interface, Request};
pub use device::{ClaimEndpointError, Completion, Device, Endpoint, Interface};
pub mod transfer;

View file

@ -28,11 +28,6 @@ use super::{
usbfs::{self, Urb},
SysfsPath, TransferData,
};
use crate::bitset::EndpointBitSet;
use crate::descriptors::{
parse_concatenated_config_descriptors, ConfigurationDescriptor, DeviceDescriptor,
EndpointDescriptor, TransferType, DESCRIPTOR_LEN_DEVICE,
};
use crate::device::ClaimEndpointError;
use crate::maybe_future::{blocking::Blocking, MaybeFuture};
use crate::transfer::{
@ -41,6 +36,14 @@ use crate::transfer::{
},
request_type, ControlIn, ControlOut, ControlType, Direction, Recipient, TransferError,
};
use crate::{bitset::EndpointBitSet, Completion};
use crate::{
descriptors::{
parse_concatenated_config_descriptors, ConfigurationDescriptor, DeviceDescriptor,
EndpointDescriptor, TransferType, DESCRIPTOR_LEN_DEVICE,
},
transfer::Buffer,
};
use crate::{DeviceInfo, Error, Speed};
#[derive(PartialEq, Eq, PartialOrd, Ord)]
@ -629,6 +632,7 @@ impl LinuxInterface {
}),
max_packet_size,
pending: VecDeque::new(),
idle_transfer: None,
})
}
}
@ -658,6 +662,8 @@ pub(crate) struct LinuxEndpoint {
/// A queue of pending transfers, expected to complete in order
pending: VecDeque<Pending<super::TransferData>>,
idle_transfer: Option<Idle<TransferData>>,
}
struct EndpointInner {
@ -684,26 +690,25 @@ impl LinuxEndpoint {
}
}
pub(crate) fn make_transfer(&mut self, len: usize) -> Idle<TransferData> {
Idle::new(
self.inner.clone(),
super::TransferData::new(self.inner.address, self.inner.ep_type, len),
)
}
pub(crate) fn submit(&mut self, transfer: Idle<TransferData>) {
assert!(
transfer.notify_eq(&self.inner),
"transfer can only be submitted on the same endpoint"
);
pub(crate) fn submit(&mut self, data: Buffer) {
let mut transfer = self.idle_transfer.take().unwrap_or_else(|| {
Idle::new(
self.inner.clone(),
super::TransferData::new(self.inner.address, self.inner.ep_type),
)
});
transfer.set_buffer(data);
self.pending
.push_back(self.inner.interface.device.submit(transfer));
}
pub(crate) fn poll_next_complete(&mut self, cx: &mut Context) -> Poll<Idle<TransferData>> {
pub(crate) fn poll_next_complete(&mut self, cx: &mut Context) -> Poll<Completion> {
self.inner.notify.subscribe(cx);
if let Some(transfer) = take_completed_from_queue(&mut self.pending) {
Poll::Ready(transfer)
if let Some(mut transfer) = take_completed_from_queue(&mut self.pending) {
let status = transfer.status();
let data = transfer.take_buffer();
self.idle_transfer = Some(transfer);
Poll::Ready(Completion { status, data })
} else {
Poll::Pending
}

View file

@ -11,12 +11,11 @@ mod device;
pub(crate) use device::LinuxDevice as Device;
pub(crate) use device::LinuxEndpoint as Endpoint;
pub(crate) use device::LinuxInterface as Interface;
pub(crate) type Transfer = Idle<TransferData>;
mod hotplug;
pub(crate) use hotplug::LinuxHotplugWatch as HotplugWatch;
use crate::transfer::{internal::Idle, TransferError};
use crate::transfer::TransferError;
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct DeviceId {

View file

@ -1,5 +1,5 @@
use std::{
mem::{ManuallyDrop, MaybeUninit},
mem::{self, ManuallyDrop},
ptr::{addr_of_mut, null_mut},
slice,
time::Instant,
@ -7,10 +7,11 @@ use std::{
use rustix::io::Errno;
use crate::descriptors::TransferType;
use crate::transfer::{
internal::Pending, ControlIn, ControlOut, Direction, TransferError, SETUP_PACKET_SIZE,
internal::Pending, Allocator, Buffer, ControlIn, ControlOut, Direction, TransferError,
SETUP_PACKET_SIZE,
};
use crate::{descriptors::TransferType, util::write_copy_of_slice};
use super::{
errno_to_transfer_error,
@ -28,7 +29,8 @@ use super::{
/// `iso_packet_desc` array.
pub struct TransferData {
urb: *mut Urb,
capacity: usize,
capacity: u32,
allocator: Allocator,
pub(crate) deadline: Option<Instant>,
}
@ -36,7 +38,7 @@ unsafe impl Send for TransferData {}
unsafe impl Sync for TransferData {}
impl TransferData {
pub(super) fn new(endpoint: u8, ep_type: TransferType, capacity: usize) -> TransferData {
pub(super) fn new(endpoint: u8, ep_type: TransferType) -> TransferData {
let ep_type = match ep_type {
TransferType::Control => USBDEVFS_URB_TYPE_CONTROL,
TransferType::Interrupt => USBDEVFS_URB_TYPE_INTERRUPT,
@ -44,12 +46,7 @@ impl TransferData {
TransferType::Isochronous => USBDEVFS_URB_TYPE_ISO,
};
let request_len: i32 = match Direction::from_address(endpoint) {
Direction::Out => 0,
Direction::In => capacity.try_into().unwrap(),
};
let mut v = ManuallyDrop::new(Vec::with_capacity(capacity));
let mut empty = ManuallyDrop::new(Vec::new());
TransferData {
urb: Box::into_raw(Box::new(Urb {
@ -57,8 +54,8 @@ impl TransferData {
endpoint,
status: 0,
flags: 0,
buffer: v.as_mut_ptr(),
buffer_length: request_len,
buffer: empty.as_mut_ptr(),
buffer_length: 0,
actual_length: 0,
start_frame: 0,
number_of_packets_or_stream_id: 0,
@ -66,46 +63,67 @@ impl TransferData {
signr: 0,
usercontext: null_mut(),
})),
capacity: v.capacity(),
capacity: 0,
allocator: Allocator::Default,
deadline: None,
}
}
pub(super) fn new_control_out(data: ControlOut) -> TransferData {
let len = SETUP_PACKET_SIZE + data.data.len();
let mut t = TransferData::new(0x00, TransferType::Control, len);
write_copy_of_slice(
&mut t.buffer_mut()[..SETUP_PACKET_SIZE],
&data.setup_packet(),
);
write_copy_of_slice(
&mut t.buffer_mut()[SETUP_PACKET_SIZE..SETUP_PACKET_SIZE + data.data.len()],
&data.data,
);
unsafe {
t.set_request_len(len);
}
let mut t = TransferData::new(0x00, TransferType::Control);
let mut buffer = Buffer::new(SETUP_PACKET_SIZE.checked_add(data.data.len()).unwrap());
buffer.extend_from_slice(&data.setup_packet());
buffer.extend_from_slice(&data.data);
t.set_buffer(buffer);
t
}
pub(super) fn new_control_in(data: ControlIn) -> TransferData {
let len = SETUP_PACKET_SIZE + data.length as usize;
let mut t = TransferData::new(0x80, TransferType::Control, len);
write_copy_of_slice(
&mut t.buffer_mut()[..SETUP_PACKET_SIZE],
&data.setup_packet(),
);
unsafe {
t.set_request_len(len);
}
let mut t = TransferData::new(0x80, TransferType::Control);
let mut buffer = Buffer::new(SETUP_PACKET_SIZE.checked_add(data.length as usize).unwrap());
buffer.extend_from_slice(&data.setup_packet());
t.set_buffer(buffer);
t
}
#[inline]
pub fn endpoint(&self) -> u8 {
unsafe { (*self.urb).endpoint }
pub fn set_buffer(&mut self, buf: Buffer) {
debug_assert!(self.capacity == 0);
let buf = ManuallyDrop::new(buf);
self.capacity = buf.capacity;
self.urb_mut().buffer = buf.ptr;
self.urb_mut().actual_length = 0;
self.urb_mut().buffer_length = match Direction::from_address(self.urb().endpoint) {
Direction::Out => buf.len as i32,
Direction::In => buf.transfer_len as i32,
};
self.allocator = buf.allocator;
}
pub fn take_buffer(&mut self) -> Buffer {
let mut empty = ManuallyDrop::new(Vec::new());
let ptr = mem::replace(&mut self.urb_mut().buffer, empty.as_mut_ptr());
let capacity = mem::replace(&mut self.capacity, 0);
let (len, transfer_len) = match Direction::from_address(self.urb().endpoint) {
Direction::Out => (
self.urb().buffer_length as u32,
self.urb().actual_length as u32,
),
Direction::In => (
self.urb().actual_length as u32,
self.urb().buffer_length as u32,
),
};
self.urb_mut().buffer_length = 0;
self.urb_mut().actual_length = 0;
let allocator = mem::replace(&mut self.allocator, Allocator::Default);
Buffer {
ptr,
len,
transfer_len,
capacity,
allocator,
}
}
#[inline]
@ -123,32 +141,6 @@ impl TransferData {
self.urb
}
#[inline]
pub fn buffer(&self) -> &[MaybeUninit<u8>] {
unsafe { slice::from_raw_parts(self.urb().buffer.cast(), self.capacity) }
}
#[inline]
pub fn buffer_mut(&mut self) -> &mut [MaybeUninit<u8>] {
unsafe { slice::from_raw_parts_mut(self.urb().buffer.cast(), self.capacity) }
}
#[inline]
pub fn request_len(&self) -> usize {
self.urb().buffer_length as usize
}
#[inline]
pub unsafe fn set_request_len(&mut self, len: usize) {
assert!(len <= self.capacity);
self.urb_mut().buffer_length = len.try_into().unwrap();
}
#[inline]
pub fn actual_len(&self) -> usize {
self.urb().actual_length as usize
}
#[inline]
pub fn status(&self) -> Result<(), TransferError> {
if self.urb().status == 0 {
@ -185,7 +177,7 @@ impl Pending<TransferData> {
impl Drop for TransferData {
fn drop(&mut self) {
unsafe {
drop(Vec::from_raw_parts((*self.urb).buffer, 0, self.capacity));
drop(self.take_buffer());
drop(Box::from_raw(self.urb));
}
}

View file

@ -2,6 +2,7 @@ use std::{
collections::VecDeque,
ffi::c_void,
io::ErrorKind,
mem::{self, ManuallyDrop},
sync::{
atomic::{AtomicU8, AtomicUsize, Ordering},
Arc, Mutex,
@ -22,10 +23,9 @@ use crate::{
internal::{
notify_completion, take_completed_from_queue, Idle, Notify, Pending, TransferFuture,
},
ControlIn, ControlOut, Direction, TransferError,
Allocator, Buffer, ControlIn, ControlOut, Direction, TransferError,
},
util::write_copy_of_slice,
DeviceInfo, Error, MaybeFuture, Speed,
Completion, DeviceInfo, Error, MaybeFuture, Speed,
};
use super::{
@ -227,7 +227,10 @@ impl MacDevice {
timeout: Duration,
) -> impl MaybeFuture<Output = Result<Vec<u8>, TransferError>> {
let timeout = timeout.as_millis().try_into().expect("timeout too long");
let t = TransferData::new(0x80, data.length as usize);
let mut v = ManuallyDrop::new(Vec::with_capacity(data.length as usize));
let t = unsafe {
TransferData::from_raw(v.as_mut_ptr(), data.length as u32, v.capacity() as u32)
};
let req = IOUSBDevRequestTO {
bmRequestType: data.request_type(),
@ -241,9 +244,10 @@ impl MacDevice {
noDataTimeout: timeout,
};
TransferFuture::new(t, |t| self.submit_control(t, req)).map(|mut t| {
TransferFuture::new(t, |t| self.submit_control(Direction::In, t, req)).map(|t| {
t.status()?;
Ok(unsafe { t.take_vec() })
let t = ManuallyDrop::new(t);
Ok(unsafe { Vec::from_raw_parts(t.buf, t.actual_len as usize, t.capacity as usize) })
})
}
@ -253,8 +257,9 @@ impl MacDevice {
timeout: Duration,
) -> impl MaybeFuture<Output = Result<(), TransferError>> {
let timeout = timeout.as_millis().try_into().expect("timeout too long");
let mut t = TransferData::new(0, data.data.len());
write_copy_of_slice(t.buffer_mut(), &data.data);
let mut v = ManuallyDrop::new(data.data.to_vec());
let t =
unsafe { TransferData::from_raw(v.as_mut_ptr(), v.len() as u32, v.capacity() as u32) };
let req = IOUSBDevRequestTO {
bmRequestType: data.request_type(),
@ -268,7 +273,7 @@ impl MacDevice {
noDataTimeout: timeout,
};
TransferFuture::new(t, |t| self.submit_control(t, req)).map(|t| {
TransferFuture::new(t, |t| self.submit_control(Direction::Out, t, req)).map(|t| {
t.status()?;
Ok(())
})
@ -276,11 +281,11 @@ impl MacDevice {
fn submit_control(
&self,
dir: Direction,
mut t: Idle<TransferData>,
mut req: IOUSBDevRequestTO,
) -> Pending<TransferData> {
t.actual_len = 0;
let dir = Direction::from_address(t.endpoint_addr);
assert!(req.pData == t.buf.cast());
let t = t.pre_submit();
@ -416,6 +421,7 @@ impl MacInterface {
}),
max_packet_size,
pending: VecDeque::new(),
idle_transfer: None,
})
}
}
@ -437,6 +443,8 @@ pub(crate) struct MacEndpoint {
/// A queue of pending transfers, expected to complete in order
pending: VecDeque<Pending<TransferData>>,
idle_transfer: Option<Idle<TransferData>>,
}
struct EndpointInner {
@ -468,27 +476,28 @@ impl MacEndpoint {
);
}
pub(crate) fn make_transfer(&mut self, len: usize) -> Idle<TransferData> {
Idle::new(
self.inner.clone(),
TransferData::new(self.inner.address, len),
)
}
pub(crate) fn submit(&mut self, buffer: Buffer) {
let mut transfer = self
.idle_transfer
.take()
.unwrap_or_else(|| Idle::new(self.inner.clone(), super::TransferData::new()));
pub(crate) fn submit(&mut self, mut t: Idle<TransferData>) {
assert!(
t.notify_eq(&self.inner),
"transfer can only be submitted on the same endpoint"
);
let endpoint = t.endpoint_addr;
let buffer = ManuallyDrop::new(buffer);
let endpoint = self.inner.address;
let dir = Direction::from_address(endpoint);
let pipe_ref = self.inner.pipe_ref;
let len = t.request_len;
let buf = t.buf;
t.actual_len = 0;
let t = t.pre_submit();
let ptr = t.as_ptr();
transfer.buf = buffer.ptr;
transfer.capacity = buffer.capacity;
transfer.actual_len = 0;
let req_len = match dir {
Direction::Out => buffer.len,
Direction::In => buffer.transfer_len,
};
transfer.requested_len = req_len;
let transfer = transfer.pre_submit();
let ptr = transfer.as_ptr();
let res = unsafe {
match dir {
@ -496,8 +505,8 @@ impl MacEndpoint {
self.inner.interface.interface.raw,
WritePipeAsync(
pipe_ref,
buf as *mut c_void,
u32::try_from(len).expect("request too large"),
buffer.ptr as *mut c_void,
buffer.len,
transfer_callback,
ptr as *mut c_void
)
@ -506,8 +515,8 @@ impl MacEndpoint {
self.inner.interface.interface.raw,
ReadPipeAsync(
pipe_ref,
buf as *mut c_void,
u32::try_from(len).expect("request too large"),
buffer.ptr as *mut c_void,
buffer.transfer_len,
transfer_callback,
ptr as *mut c_void
)
@ -516,9 +525,11 @@ impl MacEndpoint {
};
if res == kIOReturnSuccess {
debug!("Submitted {dir:?} transfer {ptr:?} on endpoint {endpoint:02X}, {len} bytes");
debug!(
"Submitted {dir:?} transfer {ptr:?} of len {req_len} on endpoint {endpoint:02X}"
);
} else {
error!("Failed to submit transfer {ptr:?} on endpoint {endpoint:02X}: {res:x}");
error!("Failed to submit transfer {ptr:?} of len {req_len} on endpoint {endpoint:02X}: {res:x}");
unsafe {
// Complete the transfer in the place of the callback
(*ptr).status = res;
@ -526,13 +537,34 @@ impl MacEndpoint {
}
}
self.pending.push_back(t);
self.pending.push_back(transfer);
}
pub(crate) fn poll_next_complete(&mut self, cx: &mut Context) -> Poll<Idle<TransferData>> {
pub(crate) fn poll_next_complete(&mut self, cx: &mut Context) -> Poll<Completion> {
self.inner.notify.subscribe(cx);
if let Some(transfer) = take_completed_from_queue(&mut self.pending) {
Poll::Ready(transfer)
if let Some(mut transfer) = take_completed_from_queue(&mut self.pending) {
let status = transfer.status();
let mut empty = ManuallyDrop::new(Vec::new());
let ptr = mem::replace(&mut transfer.buf, empty.as_mut_ptr());
let capacity = mem::replace(&mut transfer.capacity, 0);
let (len, transfer_len) = match Direction::from_address(self.inner.address) {
Direction::Out => (transfer.requested_len, transfer.actual_len),
Direction::In => (transfer.actual_len, transfer.requested_len),
};
transfer.requested_len = 0;
transfer.actual_len = 0;
self.idle_transfer = Some(transfer);
let data = Buffer {
ptr,
len,
transfer_len,
capacity,
allocator: Allocator::Default,
};
Poll::Ready(Completion { status, data })
} else {
Poll::Pending
}
@ -573,7 +605,7 @@ impl Drop for EndpointInner {
}
extern "C" fn transfer_callback(refcon: *mut c_void, result: IOReturn, len: *mut c_void) {
let len = len as usize;
let len = len as u32;
let transfer: *mut TransferData = refcon.cast();
debug!("Completion for transfer {transfer:?}, status={result:x}, len={len}");

View file

@ -1,4 +1,4 @@
use crate::transfer::{internal::Idle, TransferError};
use crate::transfer::TransferError;
mod transfer;
use io_kit_sys::ret::IOReturn;
@ -12,7 +12,6 @@ mod device;
pub(crate) use device::MacDevice as Device;
pub(crate) use device::MacEndpoint as Endpoint;
pub(crate) use device::MacInterface as Interface;
pub(crate) type Transfer = Idle<TransferData>;
mod hotplug;
pub(crate) use hotplug::MacHotplugWatch as HotplugWatch;

View file

@ -1,92 +1,41 @@
use std::{
mem::{ManuallyDrop, MaybeUninit},
slice,
};
use std::mem::ManuallyDrop;
use io_kit_sys::ret::{kIOReturnSuccess, IOReturn};
use crate::transfer::{Direction, TransferError};
use super::status_to_transfer_result;
use crate::transfer::TransferError;
pub struct TransferData {
pub(super) endpoint_addr: u8,
pub(super) buf: *mut u8,
pub(super) capacity: usize,
pub(super) request_len: usize,
pub(super) actual_len: usize,
pub(super) capacity: u32,
pub(super) requested_len: u32,
pub(super) actual_len: u32,
pub(super) status: IOReturn,
}
impl Drop for TransferData {
fn drop(&mut self) {
unsafe { drop(Vec::from_raw_parts(self.buf, 0, self.capacity)) }
unsafe { drop(Vec::from_raw_parts(self.buf, 0, self.capacity as usize)) }
}
}
impl TransferData {
pub(super) fn new(endpoint_addr: u8, capacity: usize) -> TransferData {
let request_len = match Direction::from_address(endpoint_addr) {
Direction::Out => 0,
Direction::In => capacity,
};
let mut v = ManuallyDrop::new(Vec::with_capacity(capacity));
pub(super) fn new() -> TransferData {
let mut empty = ManuallyDrop::new(Vec::with_capacity(0));
unsafe { Self::from_raw(empty.as_mut_ptr(), 0, 0) }
}
pub(super) unsafe fn from_raw(buf: *mut u8, requested_len: u32, capacity: u32) -> TransferData {
TransferData {
endpoint_addr,
buf: v.as_mut_ptr(),
capacity: v.capacity(),
buf,
capacity,
requested_len,
actual_len: 0,
request_len,
status: kIOReturnSuccess,
}
}
#[inline]
pub fn endpoint(&self) -> u8 {
self.endpoint_addr
}
#[inline]
pub fn buffer(&self) -> &[MaybeUninit<u8>] {
unsafe { slice::from_raw_parts(self.buf.cast(), self.capacity) }
}
#[inline]
pub fn buffer_mut(&mut self) -> &mut [MaybeUninit<u8>] {
unsafe { slice::from_raw_parts_mut(self.buf.cast(), self.capacity) }
}
#[inline]
pub fn request_len(&self) -> usize {
self.request_len as usize
}
#[inline]
pub unsafe fn set_request_len(&mut self, len: usize) {
assert!(len <= self.capacity);
self.request_len = len;
}
#[inline]
pub fn actual_len(&self) -> usize {
self.actual_len as usize
}
#[inline]
pub fn status(&self) -> Result<(), TransferError> {
status_to_transfer_result(self.status)
}
/// Safety: Must be an IN transfer and must have completed to initialize the buffer
pub unsafe fn take_vec(&mut self) -> Vec<u8> {
let mut n = ManuallyDrop::new(Vec::new());
let v = unsafe { Vec::from_raw_parts(self.buf, self.actual_len as usize, self.capacity) };
self.capacity = n.capacity();
self.buf = n.as_mut_ptr();
self.actual_len = 0;
v
super::status_to_transfer_result(self.status)
}
}

View file

@ -36,10 +36,9 @@ use crate::{
internal::{
notify_completion, take_completed_from_queue, Idle, Notify, Pending, TransferFuture,
},
ControlIn, ControlOut, Direction, Recipient, TransferError,
Buffer, ControlIn, ControlOut, Direction, Recipient, TransferError,
},
util::write_copy_of_slice,
DeviceInfo, Error, MaybeFuture, Speed,
Completion, DeviceInfo, Error, MaybeFuture, Speed,
};
use super::{
@ -411,7 +410,8 @@ impl WindowsInterface {
warn!("WinUSB sends interface number instead of passed `index` when performing a control transfer with `Recipient::Interface`");
}
let t = TransferData::new(0x80, data.length as usize);
let mut t = TransferData::new(0x80);
t.set_buffer(Buffer::new(data.length as usize));
let pkt = WINUSB_SETUP_PACKET {
RequestType: data.request_type(),
@ -423,7 +423,7 @@ impl WindowsInterface {
TransferFuture::new(t, |t| self.submit_control(t, pkt)).map(|mut t| {
t.status()?;
Ok(unsafe { t.take_vec() })
Ok(t.take_buffer().into_vec())
})
}
@ -436,8 +436,8 @@ impl WindowsInterface {
warn!("WinUSB sends interface number instead of passed `index` when performing a control transfer with `Recipient::Interface`");
}
let mut t = TransferData::new(0, data.data.len());
write_copy_of_slice(t.buffer_mut(), &data.data);
let mut t = TransferData::new(0x00);
t.set_buffer(Buffer::from(data.data.to_vec()));
let pkt = WINUSB_SETUP_PACKET {
RequestType: data.request_type(),
@ -523,6 +523,7 @@ impl WindowsInterface {
}),
max_packet_size,
pending: VecDeque::new(),
idle_transfer: None,
})
}
@ -637,6 +638,8 @@ pub(crate) struct WindowsEndpoint {
/// A queue of pending transfers, expected to complete in order
pending: VecDeque<Pending<TransferData>>,
idle_transfer: Option<Idle<TransferData>>,
}
struct EndpointInner {
@ -662,28 +665,22 @@ impl WindowsEndpoint {
}
}
pub(crate) fn make_transfer(&mut self, len: usize) -> Idle<TransferData> {
let t = Idle::new(
self.inner.clone(),
TransferData::new(self.inner.address, len),
);
t
pub(crate) fn submit(&mut self, buffer: Buffer) {
let mut t = self.idle_transfer.take().unwrap_or_else(|| {
Idle::new(self.inner.clone(), TransferData::new(self.inner.address))
});
t.set_buffer(buffer);
let t = self.inner.interface.submit(t);
self.pending.push_back(t);
}
pub(crate) fn submit(&mut self, transfer: Idle<TransferData>) {
assert!(
transfer.notify_eq(&self.inner),
"transfer can only be submitted on the same endpoint"
);
let transfer = self.inner.interface.submit(transfer);
self.pending.push_back(transfer);
}
pub(crate) fn poll_next_complete(&mut self, cx: &mut Context) -> Poll<Idle<TransferData>> {
pub(crate) fn poll_next_complete(&mut self, cx: &mut Context) -> Poll<Completion> {
self.inner.notify.subscribe(cx);
if let Some(transfer) = take_completed_from_queue(&mut self.pending) {
Poll::Ready(transfer)
if let Some(mut transfer) = take_completed_from_queue(&mut self.pending) {
let status = transfer.status();
let data = transfer.take_buffer();
self.idle_transfer = Some(transfer);
Poll::Ready(Completion { status, data })
} else {
Poll::Pending
}

View file

@ -7,7 +7,6 @@ mod device;
pub(crate) use device::WindowsDevice as Device;
pub(crate) use device::WindowsEndpoint as Endpoint;
pub(crate) use device::WindowsInterface as Interface;
pub(crate) type Transfer = Idle<transfer::TransferData>;
mod transfer;
@ -19,5 +18,3 @@ pub(crate) use DevInst as DeviceId;
mod hotplug;
mod util;
pub(crate) use hotplug::WindowsHotplugWatch as HotplugWatch;
use crate::transfer::internal::Idle;

View file

@ -1,7 +1,4 @@
use std::{
mem::{self, ManuallyDrop, MaybeUninit},
slice,
};
use std::mem::{self, ManuallyDrop};
use log::debug;
use windows_sys::Win32::{
@ -13,16 +10,16 @@ use windows_sys::Win32::{
System::IO::OVERLAPPED,
};
use crate::transfer::{internal::notify_completion, Direction, TransferError};
use crate::transfer::{internal::notify_completion, Buffer, Direction, TransferError};
#[repr(C)]
pub struct TransferData {
// first member of repr(C) struct; can cast pointer between types
// overlapped.Internal contains the stauts
// overlapped.Internal contains the status
// overlapped.InternalHigh contains the number of bytes transferred
pub(crate) overlapped: OVERLAPPED,
pub(crate) buf: *mut u8,
pub(crate) capacity: usize,
pub(crate) capacity: u32,
pub(crate) request_len: u32,
pub(crate) endpoint: u8,
}
@ -31,49 +28,18 @@ unsafe impl Send for TransferData {}
unsafe impl Sync for TransferData {}
impl TransferData {
pub(crate) fn new(endpoint: u8, capacity: usize) -> TransferData {
let request_len = match Direction::from_address(endpoint) {
Direction::Out => 0,
Direction::In => capacity.try_into().expect("transfer size must fit in u32"),
};
let mut v = ManuallyDrop::new(Vec::with_capacity(capacity));
pub(crate) fn new(endpoint: u8) -> TransferData {
let mut empty = ManuallyDrop::new(Vec::with_capacity(0));
TransferData {
overlapped: unsafe { mem::zeroed() },
buf: v.as_mut_ptr(),
capacity: v.capacity(),
request_len,
buf: empty.as_mut_ptr(),
capacity: 0,
request_len: 0,
endpoint,
}
}
#[inline]
pub fn endpoint(&self) -> u8 {
self.endpoint
}
#[inline]
pub fn buffer(&self) -> &[MaybeUninit<u8>] {
unsafe { slice::from_raw_parts(self.buf.cast(), self.capacity) }
}
#[inline]
pub fn buffer_mut(&mut self) -> &mut [MaybeUninit<u8>] {
unsafe { slice::from_raw_parts_mut(self.buf.cast(), self.capacity) }
}
#[inline]
pub fn request_len(&self) -> usize {
self.request_len as usize
}
#[inline]
pub unsafe fn set_request_len(&mut self, len: usize) {
assert!(len <= self.capacity);
self.request_len = len.try_into().expect("transfer size must fit in u32");
}
#[inline]
pub fn actual_len(&self) -> usize {
self.overlapped.InternalHigh
@ -93,20 +59,42 @@ impl TransferData {
}
}
/// Safety: Must be an IN transfer and must have completed to initialize the buffer
pub unsafe fn take_vec(&mut self) -> Vec<u8> {
let mut n = ManuallyDrop::new(Vec::new());
let v = unsafe { Vec::from_raw_parts(self.buf, self.actual_len(), self.capacity) };
self.capacity = n.capacity();
self.buf = n.as_mut_ptr();
pub fn set_buffer(&mut self, buf: Buffer) {
debug_assert!(self.capacity == 0);
let buf = ManuallyDrop::new(buf);
self.capacity = buf.capacity;
self.buf = buf.ptr;
self.overlapped.InternalHigh = 0;
v
self.request_len = match Direction::from_address(self.endpoint) {
Direction::Out => buf.len,
Direction::In => buf.transfer_len,
};
}
pub fn take_buffer(&mut self) -> Buffer {
let mut empty = ManuallyDrop::new(Vec::new());
let ptr = mem::replace(&mut self.buf, empty.as_mut_ptr());
let capacity = mem::replace(&mut self.capacity, 0);
let (len, transfer_len) = match Direction::from_address(self.endpoint) {
Direction::Out => (self.request_len as u32, self.overlapped.InternalHigh as u32),
Direction::In => (self.overlapped.InternalHigh as u32, self.request_len as u32),
};
self.request_len = 0;
self.overlapped.InternalHigh = 0;
Buffer {
ptr,
len,
transfer_len,
capacity,
allocator: crate::transfer::Allocator::Default,
}
}
}
impl Drop for TransferData {
fn drop(&mut self) {
unsafe { drop(Vec::from_raw_parts(self.buf, 0, self.capacity)) }
drop(self.take_buffer())
}
}

248
src/transfer/buffer.rs Normal file
View file

@ -0,0 +1,248 @@
use std::{
fmt::Debug,
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
};
/// How a [`Buffer`]'s backing memory was allocated; `Drop` uses this to free
/// the allocation with the matching deallocator.
#[derive(Copy, Clone)]
pub(crate) enum Allocator {
    /// Allocated by the global allocator (via `Vec`); freed with
    /// `Vec::from_raw_parts` + drop.
    Default,
    #[cfg(any(target_os = "linux", target_os = "android"))]
    /// Memory-mapped region for zero-copy I/O; freed with `munmap`.
    Mmap,
}
/// Buffer for bulk and interrupt transfers.
///
/// The fixed-capacity buffer can be backed either by the system allocator or a
/// platform-specific way of allocating memory for zero-copy transfers.
///
/// It has two length fields, and their meaning depends on the transfer
/// direction:
///
/// * For OUT transfers, you fill the buffer with the data prior to submitting
///   it. The `len` field is how many bytes are submitted, and when the buffer
///   is returned on completion, the `transfer_len` field is set to the number
///   of bytes that were actually sent. The `len` field is unmodified; call
///   [`clear()`][Self::clear] when re-using the buffer.
///
/// * For IN transfers, the `transfer_len` field specifies the number of
///   bytes requested from the device. It must be a multiple of the endpoint's
///   maximum packet size. When the transfer is completed, the `len` is set to
///   the number of bytes actually received. The `transfer_len` field is
///   unmodified, so the same buffer can be submitted again to perform another
///   transfer of the same length.
pub struct Buffer {
    /// Data pointer (owned allocation; freed in `Drop` per `allocator`)
    pub(crate) ptr: *mut u8,
    /// Number of initialized bytes at `ptr`
    pub(crate) len: u32,
    /// Requested length for IN transfer or actual length for OUT transfer
    pub(crate) transfer_len: u32,
    /// Size in bytes of the allocation at `ptr`
    pub(crate) capacity: u32,
    /// Whether the system allocator or a special allocator was used
    pub(crate) allocator: Allocator,
}
impl Buffer {
    /// Allocate a new buffer with the default allocator.
    ///
    /// This buffer will not support zero-copy transfers, but can be cheaply
    /// converted to a `Vec<u8>`.
    ///
    /// The passed size will be used as the `transfer_len`, and the `capacity`
    /// will be at least that large.
    ///
    /// ### Panics
    /// * If the requested length is greater than `u32::MAX`.
    #[inline]
    pub fn new(transfer_len: usize) -> Self {
        // ManuallyDrop: ownership of the allocation transfers to `Buffer`,
        // which frees it in `Drop`.
        let mut vec = ManuallyDrop::new(Vec::with_capacity(transfer_len));
        Buffer {
            ptr: vec.as_mut_ptr(),
            len: 0,
            transfer_len: transfer_len.try_into().expect("capacity overflow"),
            capacity: vec.capacity().try_into().expect("capacity overflow"),
            allocator: Allocator::Default,
        }
    }

    /// Get the number of initialized bytes in the buffer.
    ///
    /// For OUT transfers, this is the amount of data written to the buffer which will be sent when the buffer is submitted.
    /// For IN transfers, this is the amount of data received from the device. This length is updated when the transfer is returned.
    #[inline]
    pub fn len(&self) -> usize {
        self.len as usize
    }

    /// Returns `true` if the buffer contains no initialized bytes.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Requested length for IN transfer or actual length for OUT transfer.
    #[inline]
    pub fn transfer_len(&self) -> usize {
        self.transfer_len as usize
    }

    /// Number of allocated bytes.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity as usize
    }

    /// Get the number of bytes that can be written to the buffer.
    ///
    /// This is a convenience method for `capacity() - len()`.
    #[inline]
    pub fn remaining_capacity(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Set the requested length for an IN transfer.
    ///
    /// ### Panics
    /// * If the requested length is greater than the capacity.
    #[inline]
    pub fn set_transfer_len(&mut self, len: usize) {
        assert!(len <= self.capacity as usize, "length exceeds capacity");
        self.transfer_len = len.try_into().expect("transfer_len overflow");
    }

    /// Clear the buffer.
    ///
    /// This sets `len` to 0, but does not change the `capacity` or `transfer_len`.
    /// This is useful for reusing the buffer for a new transfer.
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Extend the buffer by initializing `len` bytes to `value`, and get a
    /// mutable slice to the newly initialized bytes.
    ///
    /// # Panics
    /// * If the resulting length exceeds the buffer's capacity.
    pub fn extend_fill(&mut self, len: usize, value: u8) -> &mut [u8] {
        assert!(len <= self.remaining_capacity(), "length exceeds capacity");
        let start = self.len();
        // SAFETY: `start + len <= capacity` (checked above), so the write stays
        // within the allocation.
        unsafe {
            std::ptr::write_bytes(self.ptr.add(start), value, len);
        }
        self.len += len as u32;
        // SAFETY: the `len` bytes starting at `start` were just initialized above.
        unsafe { std::slice::from_raw_parts_mut(self.ptr.add(start), len) }
    }

    /// Append a slice of bytes to the buffer.
    ///
    /// # Panics
    /// * If the resulting length exceeds the buffer's capacity.
    pub fn extend_from_slice(&mut self, slice: &[u8]) {
        assert!(
            slice.len() <= self.remaining_capacity(),
            "length exceeds capacity"
        );
        // SAFETY: the destination range is within the allocation (checked
        // above), and `slice` is a shared borrow that cannot alias the
        // uninitialized tail of this uniquely-borrowed buffer.
        unsafe {
            std::ptr::copy_nonoverlapping(slice.as_ptr(), self.ptr.add(self.len()), slice.len());
        }
        self.len += slice.len() as u32;
    }

    /// Returns whether the buffer is specially-allocated for zero-copy IO.
    pub fn is_zero_copy(&self) -> bool {
        !matches!(self.allocator, Allocator::Default)
    }

    /// Convert the buffer into a `Vec<u8>`.
    ///
    /// This is zero-cost if the buffer was allocated with the default allocator
    /// (if [`is_zero_copy()`][Self::is_zero_copy] returns false), otherwise it
    /// will copy the data into a new `Vec<u8>`.
    pub fn into_vec(self) -> Vec<u8> {
        match self.allocator {
            Allocator::Default => {
                // ManuallyDrop prevents `Buffer::drop` from freeing the memory
                // whose ownership moves into the returned Vec.
                let buf = ManuallyDrop::new(self);
                // SAFETY: `ptr`/`len`/`capacity` originate from a `Vec<u8>`
                // (see `new` / `From` impls) and the first `len` bytes are
                // initialized.
                unsafe { Vec::from_raw_parts(buf.ptr, buf.len as usize, buf.capacity as usize) }
            }
            #[allow(unreachable_patterns)]
            _ => self[..].to_vec(),
        }
    }
}
// SAFETY: `Buffer` exclusively owns the allocation behind `ptr` (it is the only
// thing that frees it, in `Drop`), so it may be moved to or shared between
// threads like any owning container. NOTE(review): this relies on no other
// alias of `ptr` existing outside this type — confirm for the mmap-backed case.
unsafe impl Send for Buffer {}
unsafe impl Sync for Buffer {}
/// A `Vec<u8>` can be converted to a `Buffer` cheaply.
///
/// The Vec's `len` will be used for both the `len` and `transfer_len`.
impl From<Vec<u8>> for Buffer {
    fn from(vec: Vec<u8>) -> Self {
        // Take ownership of the allocation; `Buffer::drop` frees it later.
        let mut vec = ManuallyDrop::new(vec);
        let len: u32 = vec.len().try_into().expect("len overflow");
        Buffer {
            ptr: vec.as_mut_ptr(),
            len,
            transfer_len: len,
            capacity: vec.capacity().try_into().expect("capacity overflow"),
            allocator: Allocator::Default,
        }
    }
}
/// A `Vec<MaybeUninit<u8>>` can be converted to a `Buffer` cheaply.
///
/// The Vec's `len` will be used for the `transfer_len`, and the `len` will be 0.
impl From<Vec<MaybeUninit<u8>>> for Buffer {
    fn from(vec: Vec<MaybeUninit<u8>>) -> Self {
        // Take ownership of the allocation; `Buffer::drop` frees it later.
        let mut vec = ManuallyDrop::new(vec);
        let transfer_len: u32 = vec.len().try_into().expect("len overflow");
        let capacity: u32 = vec.capacity().try_into().expect("capacity overflow");
        Buffer {
            // The elements are uninitialized, so `len` starts at 0.
            ptr: vec.as_mut_ptr().cast(),
            len: 0,
            transfer_len,
            capacity,
            allocator: Allocator::Default,
        }
    }
}
impl Deref for Buffer {
    type Target = [u8];

    /// Borrow the initialized prefix of the buffer as a byte slice.
    fn deref(&self) -> &[u8] {
        // SAFETY: `len` counts the initialized bytes at `ptr` (a `Buffer`
        // invariant), and the shared borrow of `self` keeps the allocation alive.
        unsafe { std::slice::from_raw_parts(self.ptr, self.len as usize) }
    }
}
impl DerefMut for Buffer {
    /// Mutably borrow the initialized prefix of the buffer as a byte slice.
    fn deref_mut(&mut self) -> &mut [u8] {
        // SAFETY: `len` counts the initialized bytes at `ptr`, and the `&mut
        // self` borrow guarantees unique access for the returned slice's lifetime.
        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len as usize) }
    }
}
impl Debug for Buffer {
    /// Formats the lengths and a hex dump of the initialized bytes
    /// (the uninitialized tail and `capacity` are omitted).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut out = f.debug_struct("Buffer");
        out.field("len", &self.len);
        out.field("transfer_len", &self.transfer_len);
        out.field("data", &format_args!("{:02x?}", &self[..]));
        out.finish()
    }
}
impl Drop for Buffer {
    fn drop(&mut self) {
        // Free the allocation with the same mechanism that created it.
        match self.allocator {
            // SAFETY: `ptr`, `len`, and `capacity` were produced by a `Vec<u8>`
            // whose ownership was taken in a constructor (`new` / `From` impls),
            // so rebuilding the Vec and dropping it is sound.
            Allocator::Default => unsafe {
                drop(Vec::from_raw_parts(
                    self.ptr,
                    self.len as usize,
                    self.capacity as usize,
                ));
            },
            // SAFETY: assumes `ptr`/`capacity` describe exactly the region
            // returned by mmap elsewhere in the crate — TODO(review): confirm
            // the allocation site passes the full mapped length as `capacity`.
            #[cfg(any(target_os = "linux", target_os = "android"))]
            Allocator::Mmap => unsafe {
                rustix::mm::munmap(self.ptr as *mut _, self.capacity as usize).unwrap();
            },
        }
    }
}

View file

@ -106,10 +106,6 @@ impl<P> Idle<P> {
ptr: unsafe { NonNull::new_unchecked(Box::into_raw(self.0)) },
}
}
pub(crate) fn notify_eq<T>(&self, other: &Arc<T>) -> bool {
Arc::as_ptr(&self.0.notify) as *const () == Arc::as_ptr(other) as *const ()
}
}
impl<P> Deref for Idle<P> {

View file

@ -10,6 +10,10 @@ mod control;
pub(crate) use control::{request_type, SETUP_PACKET_SIZE};
pub use control::{ControlIn, ControlOut, ControlType, Direction, Recipient};
mod buffer;
pub(crate) use buffer::Allocator;
pub use buffer::Buffer;
pub(crate) mod internal;
use crate::descriptors::TransferType;
@ -114,4 +118,4 @@ impl EndpointType for Interrupt {
}
impl BulkOrInterrupt for Interrupt {}
pub use crate::device::{Completion, Request};
pub use crate::device::Completion;

View file

@ -1,18 +0,0 @@
use std::mem::MaybeUninit;
/// Copies the elements from `src` to `dest`,
/// returning a mutable reference to the now initialized contents of `dest`.
///
/// Port of the `[MaybeUninit<T>]` method from std, which is not stable yet.
///
/// Panics (via `copy_from_slice`) if `dest` and `src` have different lengths.
pub fn write_copy_of_slice<'a, T>(dest: &'a mut [MaybeUninit<T>], src: &[T]) -> &'a mut [T]
where
    T: Copy,
{
    // SAFETY: `[T]` and `[MaybeUninit<T>]` have the same layout, so viewing the
    // initialized source elements as possibly-uninitialized ones is sound.
    let src_uninit = unsafe { &*(src as *const [T] as *const [MaybeUninit<T>]) };
    dest.copy_from_slice(src_uninit);
    // SAFETY: every element of `dest` was just overwritten with an initialized
    // value copied from `src`, so it may be viewed as `[T]`.
    unsafe { &mut *(dest as *mut [MaybeUninit<T>] as *mut [T]) }
}
}