diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md index a912c67dfdc..903e54c5350 100644 --- a/esp-hal/CHANGELOG.md +++ b/esp-hal/CHANGELOG.md @@ -8,9 +8,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ### Added +- Introduce DMA buffer objects (#1856) ### Changed - Peripheral driver constructors don't take `InterruptHandler`s anymore. Use `set_interrupt_handler` to explicitly set the interrupt handler now. (#1819) +- Migrate SPI driver to use DMA buffer objects (#1856) ### Fixed - Improve error detection in the I2C driver (#1847) diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 289f74ce755..cfa59824f06 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -46,7 +46,13 @@ //! For convenience you can use the [crate::dma_buffers] macro. #![warn(missing_docs)] -use core::{fmt::Debug, marker::PhantomData, ptr::addr_of_mut, sync::atomic::compiler_fence}; +use core::{ + cmp::min, + fmt::Debug, + marker::PhantomData, + ptr::addr_of_mut, + sync::atomic::compiler_fence, +}; bitfield::bitfield! 
{ #[doc(hidden)] @@ -119,14 +125,13 @@ impl DmaDescriptor { } } -use embedded_dma::{ReadBuffer, WriteBuffer}; use enumset::{EnumSet, EnumSetType}; #[cfg(gdma)] pub use self::gdma::*; #[cfg(pdma)] pub use self::pdma::*; -use crate::{interrupt::InterruptHandler, Mode}; +use crate::{interrupt::InterruptHandler, soc::is_slice_in_dram, Mode}; #[cfg(gdma)] mod gdma; @@ -965,6 +970,12 @@ pub trait RxPrivate: crate::private::Sealed { chain: &DescriptorChain, ) -> Result<(), DmaError>; + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + first_desc: *mut DmaDescriptor, + ) -> Result<(), DmaError>; + fn start_transfer(&mut self) -> Result<(), DmaError>; #[cfg(gdma)] @@ -1037,14 +1048,14 @@ where unsafe fn prepare_transfer_without_start( &mut self, - descriptors: &DescriptorChain, + first_desc: *mut DmaDescriptor, peri: DmaPeripheral, ) -> Result<(), DmaError> { compiler_fence(core::sync::atomic::Ordering::SeqCst); R::clear_in_interrupts(); R::reset_in(); - R::set_in_descriptors(descriptors.first() as u32); + R::set_in_descriptors(first_desc as u32); R::set_in_peripheral(peri as u8); Ok(()) @@ -1119,7 +1130,22 @@ where return Err(DmaError::InvalidAlignment); } - self.rx_impl.prepare_transfer_without_start(chain, peri) + self.rx_impl + .prepare_transfer_without_start(chain.first() as _, peri) + } + + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + first_desc: *mut DmaDescriptor, + ) -> Result<(), DmaError> { + // TODO: Figure out burst mode for DmaBuf. 
+ if self.burst_mode { + return Err(DmaError::InvalidAlignment); + } + + self.rx_impl + .prepare_transfer_without_start(first_desc, peri) } fn start_transfer(&mut self) -> Result<(), DmaError> { @@ -1242,6 +1268,12 @@ pub trait TxPrivate: crate::private::Sealed { chain: &DescriptorChain, ) -> Result<(), DmaError>; + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + desc: *mut DmaDescriptor, + ) -> Result<(), DmaError>; + fn start_transfer(&mut self) -> Result<(), DmaError>; fn clear_ch_out_done(&self); @@ -1294,14 +1326,14 @@ where unsafe fn prepare_transfer_without_start( &mut self, - descriptors: &DescriptorChain, + first_desc: *mut DmaDescriptor, peri: DmaPeripheral, ) -> Result<(), DmaError> { compiler_fence(core::sync::atomic::Ordering::SeqCst); R::clear_out_interrupts(); R::reset_out(); - R::set_out_descriptors(descriptors.first() as u32); + R::set_out_descriptors(first_desc as u32); R::set_out_peripheral(peri as u8); Ok(()) @@ -1403,7 +1435,16 @@ where peri: DmaPeripheral, chain: &DescriptorChain, ) -> Result<(), DmaError> { - self.tx_impl.prepare_transfer_without_start(chain, peri) + self.tx_impl + .prepare_transfer_without_start(chain.first() as _, peri) + } + + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + desc: *mut DmaDescriptor, + ) -> Result<(), DmaError> { + self.tx_impl.prepare_transfer_without_start(desc, peri) } fn start_transfer(&mut self) -> Result<(), DmaError> { @@ -1628,6 +1669,538 @@ where } } +/// Error returned from Dma[Tx|Rx|TxRx]Buf operations. +#[derive(Debug)] +pub enum DmaBufError { + /// More descriptors are needed for the buffer size + InsufficientDescriptors, + /// Descriptors or buffers are not located in a supported memory region + UnsupportedMemoryRegion, +} + +/// DMA transmit buffer +/// +/// This is a contiguous buffer linked together by DMA descriptors of length +/// 4092. It can only be used for transmitting data to a peripheral's FIFO. +/// See [DmaRxBuf] for receiving data. 
+#[derive(Debug)] +pub struct DmaTxBuf { + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], +} + +impl DmaTxBuf { + /// Creates a new [DmaTxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle 4092 bytes worth of buffer. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported. + pub fn new( + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + ) -> Result { + let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + if descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + + if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + // Setup size and buffer pointer as these will not change for the remainder of + // this object's lifetime + let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE)); + for (desc, chunk) in chunk_iter { + desc.set_size(chunk.len()); + desc.buffer = chunk.as_mut_ptr(); + } + + let mut buf = Self { + descriptors, + buffer, + }; + buf.set_length(buf.capacity()); + + Ok(buf) + } + + /// Consume the buf, returning the descriptors and buffer. + pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { + (self.descriptors, self.buffer) + } + + /// Returns the size of the underlying buffer + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Return the number of bytes that would be transmitted by this buf. + pub fn len(&self) -> usize { + let mut result = 0; + for desc in self.descriptors.iter() { + result += desc.len(); + if desc.next.is_null() { + break; + } + } + result + } + + /// Reset the descriptors to only transmit `len` amount of bytes from this + /// buf. + /// + /// The number of bytes in data must be less than or equal to the buffer + /// size. 
+ pub fn set_length(&mut self, len: usize) { + if len == 0 { + self.descriptors.fill(DmaDescriptor::EMPTY); + return; + } + + assert!(len <= self.buffer.len()); + + // Get the minimum number of descriptors needed for this length of data. + let descriptor_count = len.div_ceil(CHUNK_SIZE); + let required_descriptors = &mut self.descriptors[0..descriptor_count]; + + // Link up the relevant descriptors. + let mut next = core::ptr::null_mut(); + for desc in required_descriptors.iter_mut().rev() { + desc.next = next; + next = desc; + } + + let mut remaining_length = len; + for desc in required_descriptors.iter_mut() { + // As this is a simple dma buffer implementation we won't + // be making use of this feature. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + let chunk_size = min(remaining_length, desc.flags.size() as usize); + desc.set_length(chunk_size); + remaining_length -= chunk_size; + } + debug_assert_eq!(remaining_length, 0); + + // ESP32-S3: The last descriptor in the linked list must have the EOF bit set, + // otherwise transfers of less than 24 bytes (size of the L1FIFO) don't + // make it to the peripheral and the channel hangs forever. + // As of writing, the TRM or Errata don't mention this. + required_descriptors.last_mut().unwrap().set_suc_eof(true); + } + + /// Fills the TX buffer with the bytes provided in `data` and resets the + /// descriptors to only cover the filled section. + /// + /// The number of bytes in data must be less than or equal to the buffer + /// size. + pub fn fill(&mut self, data: &[u8]) { + self.set_length(data.len()); + self.as_mut_slice()[..data.len()].copy_from_slice(data); + } + + /// Returns the buf as a mutable slice that can be written. + pub fn as_mut_slice(&mut self) -> &mut [u8] { + &mut self.buffer[..] + } + + /// Returns the buf as a slice that can be read. 
+ pub fn as_slice(&self) -> &[u8] { + &self.buffer[..] + } + + pub(crate) fn first(&self) -> *mut DmaDescriptor { + self.descriptors.as_ptr() as _ + } +} + +/// DMA receive buffer +/// +/// This is a contiguous buffer linked together by DMA descriptors of length +/// 4092. It can only be used for receiving data from a peripheral's FIFO. +/// See [DmaTxBuf] for transmitting data. +pub struct DmaRxBuf { + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], +} + +impl DmaRxBuf { + /// Creates a new [DmaRxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle 4092 bytes worth of buffer. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported. + pub fn new( + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + ) -> Result { + let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + if descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + + if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + // Setup size and buffer pointer as these will not change for the remainder of + // this object's lifetime + let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE)); + for (desc, chunk) in chunk_iter { + desc.set_size(chunk.len()); + desc.buffer = chunk.as_mut_ptr(); + } + + let mut buf = Self { + descriptors, + buffer, + }; + + buf.set_length(buf.capacity()); + + Ok(buf) + } + + /// Consume the buf, returning the descriptors and buffer. + pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { + (self.descriptors, self.buffer) + } + + /// Returns the size of the underlying buffer + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Returns the maximum number of bytes that this buf has been configured to + /// receive. 
+ pub fn len(&self) -> usize { + let mut result = 0; + for desc in self.descriptors.iter() { + result += desc.flags.size() as usize; + if desc.next.is_null() { + break; + } + } + result + } + + /// Reset the descriptors to only receive `len` amount of bytes into this + /// buf. + /// + /// The number of bytes in data must be less than or equal to the buffer + /// size. + pub fn set_length(&mut self, len: usize) { + if len == 0 { + self.descriptors.fill(DmaDescriptor::EMPTY); + return; + } + + assert!(len <= self.buffer.len()); + + // Get the minimum number of descriptors needed for this length of data. + let descriptor_count = len.div_ceil(CHUNK_SIZE); + let required_descriptors = &mut self.descriptors[..descriptor_count]; + + // Link up the relevant descriptors. + let mut next = core::ptr::null_mut(); + for desc in required_descriptors.iter_mut().rev() { + desc.next = next; + next = desc; + } + + // Get required part of the buffer. + let required_buf = &self.buffer[..len]; + + let chunk_iter = required_descriptors + .iter_mut() + .zip(required_buf.chunks(CHUNK_SIZE)); + for (desc, chunk) in chunk_iter { + // Clear this to allow hardware to set it when the peripheral returns an EOF + // bit. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + // Clear this to allow hardware to set it when it's + // done receiving data for this descriptor. + desc.set_length(0); + + desc.set_size(chunk.len()); + } + } + + /// Returns the entire underlying buffer as a slice that can be read. + pub fn as_slice(&self) -> &[u8] { + &self.buffer[..] + } + + /// Returns the entire underlying buffer as a mutable slice that can be written. + pub fn as_mut_slice(&mut self) -> &mut [u8] { + &mut self.buffer[..] + } + + /// Return the number of bytes that were received by this buf. 
+ pub fn number_of_received_bytes(&self) -> usize { + let mut result = 0; + for desc in self.descriptors.iter() { + result += desc.len(); + if desc.next.is_null() { + break; + } + } + result + } + + /// Reads the received data into the provided `buf`. + /// + /// If `buf.len()` is less than the amount of received data then only the + /// first `buf.len()` bytes of received data are written into `buf`. + /// + /// Returns the number of bytes written to `buf`. + pub fn read_received_data(&self, buf: &mut [u8]) -> usize { + let mut remaining = &mut buf[..]; + + let mut buffer_offset = 0; + for desc in self.descriptors.iter() { + if remaining.is_empty() { + break; + } + + let amount_to_copy = min(desc.len(), remaining.len()); + + let (to_fill, to_remain) = remaining.split_at_mut(amount_to_copy); + to_fill.copy_from_slice(&self.buffer[buffer_offset..][..amount_to_copy]); + remaining = to_remain; + + if desc.next.is_null() { + break; + } + buffer_offset += desc.flags.size() as usize; + } + + let remaining_bytes = remaining.len(); + buf.len() - remaining_bytes + } + + /// Returns the received data as an iterator of slices. + pub fn received_data(&self) -> impl Iterator { + let mut descriptors = self.descriptors.iter(); + let mut buf = &self.buffer[..]; + + core::iter::from_fn(move || { + let mut chunk_size = 0; + let mut skip_size = 0; + while let Some(desc) = descriptors.next() { + chunk_size += desc.len(); + skip_size += desc.flags.size() as usize; + + // If this is the end of the linked list, we can skip the remaining descriptors. + if desc.next.is_null() { + while descriptors.next().is_some() { + // Drain the iterator so the next call to from_fn returns + // None. + } + break; + } + + // This typically happens when the DMA gets an EOF bit from the peripheral. + // It can also happen if the DMA is restarted. 
+ if desc.len() < desc.flags.size() as usize { + break; + } + } + + if chunk_size == 0 { + return None; + } + + let chunk = &buf[..chunk_size]; + buf = &buf[skip_size..]; + Some(chunk) + }) + } + + pub(crate) fn first(&self) -> *mut DmaDescriptor { + self.descriptors.as_ptr() as _ + } +} + +/// DMA transmit and receive buffer. +/// +/// This is a (single) contiguous buffer linked together by two sets of DMA +/// descriptors of length 4092 each. +/// It can be used for simultaneously transmitting to and receiving from a +/// peripheral's FIFO. These are typically full-duplex transfers. +pub struct DmaTxRxBuf { + tx_descriptors: &'static mut [DmaDescriptor], + rx_descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], +} + +impl DmaTxRxBuf { + /// Creates a new [DmaTxRxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle 4092 bytes worth of buffer. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported. 
+ pub fn new( + tx_descriptors: &'static mut [DmaDescriptor], + rx_descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + ) -> Result<Self, DmaBufError> { + let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + if tx_descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + if rx_descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + + if !is_slice_in_dram(tx_descriptors) + || !is_slice_in_dram(rx_descriptors) + || !is_slice_in_dram(buffer) + { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + // Reset the provided descriptors + tx_descriptors.fill(DmaDescriptor::EMPTY); + rx_descriptors.fill(DmaDescriptor::EMPTY); + + let descriptors = tx_descriptors.iter_mut().zip(rx_descriptors.iter_mut()); + let chunks = buffer.chunks_mut(CHUNK_SIZE); + + for ((tx_desc, rx_desc), chunk) in descriptors.zip(chunks) { + tx_desc.set_size(chunk.len()); + tx_desc.buffer = chunk.as_mut_ptr(); + rx_desc.set_size(chunk.len()); + rx_desc.buffer = chunk.as_mut_ptr(); + } + + let mut buf = Self { + tx_descriptors, + rx_descriptors, + buffer, + }; + buf.set_length(buf.capacity()); + + Ok(buf) + } + + /// Consume the buf, returning the tx descriptors, rx descriptors and + /// buffer. + pub fn split( + self, + ) -> ( + &'static mut [DmaDescriptor], + &'static mut [DmaDescriptor], + &'static mut [u8], + ) { + (self.tx_descriptors, self.rx_descriptors, self.buffer) + } + + /// Return the size of the underlying buffer. + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Returns the entire buf as a slice that can be read. + pub fn as_slice(&self) -> &[u8] { + &self.buffer[..] + } + + /// Returns the entire buf as a slice that can be written. + pub fn as_slice_mut(&mut self) -> &mut [u8] { + &mut self.buffer[..] + } + + /// Reset the descriptors to only transmit/receive `len` amount of bytes + /// with this buf. + /// + /// `len` must be less than or equal to the buffer size. 
+ pub fn set_length(&mut self, len: usize) { + if len == 0 { + self.tx_descriptors.fill(DmaDescriptor::EMPTY); + self.rx_descriptors.fill(DmaDescriptor::EMPTY); + return; + } + + assert!(len <= self.buffer.len()); + + // Get the minimum number of descriptors needed for this length of data. + let descriptor_count = len.div_ceil(CHUNK_SIZE); + + let relevant_tx_descriptors = &mut self.tx_descriptors[..descriptor_count]; + let relevant_rx_descriptors = &mut self.rx_descriptors[..descriptor_count]; + + // Link up the relevant descriptors. + for descriptors in [ + &mut relevant_tx_descriptors[..], + &mut relevant_rx_descriptors[..], + ] { + let mut next = core::ptr::null_mut(); + for desc in descriptors.iter_mut().rev() { + desc.next = next; + next = desc; + } + } + + // Get required part of the buffer. + let required_buf = &self.buffer[..len]; + + for (desc, chunk) in relevant_tx_descriptors + .iter_mut() + .zip(required_buf.chunks(CHUNK_SIZE)) + { + // As this is a simple dma buffer implementation we won't + // be making use of this feature. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + desc.set_length(chunk.len()); + } + relevant_tx_descriptors + .last_mut() + .unwrap() + .set_suc_eof(true); + + for (desc, chunk) in relevant_rx_descriptors + .iter_mut() + .zip(required_buf.chunks(CHUNK_SIZE)) + { + // Clear this to allow hardware to set it when the peripheral returns an EOF + // bit. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + // Clear this to allow hardware to set it when it is + // done receiving data for this descriptor. 
+ desc.set_length(0); + + desc.set_size(chunk.len()); + } + } +} + pub(crate) mod dma_private { use super::*; @@ -1798,226 +2371,6 @@ where } } -/// DMA transaction for TX transfers with moved-in/moved-out peripheral and -/// buffer -#[non_exhaustive] -#[must_use] -pub struct DmaTransferTxOwned -where - I: dma_private::DmaSupportTx, - T: ReadBuffer, -{ - instance: I, - tx_buffer: T, -} - -impl DmaTransferTxOwned -where - I: dma_private::DmaSupportTx, - T: ReadBuffer, -{ - pub(crate) fn new(instance: I, tx_buffer: T) -> Self { - Self { - instance, - tx_buffer, - } - } - - /// Wait for the transfer to finish and return the peripheral and the - /// buffer. - pub fn wait(mut self) -> Result<(I, T), (DmaError, I, T)> { - self.instance.peripheral_wait_dma(true, false); - - let err = self.instance.tx().has_error(); - - // We need to have a `Drop` implementation, because we accept - // managed buffers that can free their memory on drop. Because of that - // we can't move out of the `Transfer`'s fields, so we use `ptr::read` - // and `mem::forget`. - // - // NOTE(unsafe) There is no panic branch between getting the resources - // and forgetting `self`. - - let (instance, tx_buffer) = unsafe { - let instance = core::ptr::read(&self.instance); - let tx_buffer = core::ptr::read(&self.tx_buffer); - core::mem::forget(self); - - (instance, tx_buffer) - }; - - if err { - Err((DmaError::DescriptorError, instance, tx_buffer)) - } else { - Ok((instance, tx_buffer)) - } - } - - /// Check if the transfer is finished. 
- pub fn is_done(&mut self) -> bool { - self.instance.tx().is_done() - } -} - -impl Drop for DmaTransferTxOwned -where - I: dma_private::DmaSupportTx, - T: ReadBuffer, -{ - fn drop(&mut self) { - self.instance.peripheral_wait_dma(true, false); - } -} - -/// DMA transaction for RX transfers with moved-in/moved-out peripheral and -/// buffer -#[non_exhaustive] -#[must_use] -pub struct DmaTransferRxOwned -where - I: dma_private::DmaSupportRx, - R: WriteBuffer, -{ - instance: I, - rx_buffer: R, -} - -impl DmaTransferRxOwned -where - I: dma_private::DmaSupportRx, - R: WriteBuffer, -{ - pub(crate) fn new(instance: I, rx_buffer: R) -> Self { - Self { - instance, - rx_buffer, - } - } - - /// Wait for the transfer to finish and return the peripheral and the - /// buffers. - pub fn wait(mut self) -> Result<(I, R), (DmaError, I, R)> { - self.instance.peripheral_wait_dma(false, true); - - let err = self.instance.rx().has_error(); - - // We need to have a `Drop` implementation, because we accept - // managed buffers that can free their memory on drop. Because of that - // we can't move out of the `Transfer`'s fields, so we use `ptr::read` - // and `mem::forget`. - // - // NOTE(unsafe) There is no panic branch between getting the resources - // and forgetting `self`. - - let (instance, rx_buffer) = unsafe { - let instance = core::ptr::read(&self.instance); - let rx_buffer = core::ptr::read(&self.rx_buffer); - core::mem::forget(self); - - (instance, rx_buffer) - }; - - if err { - Err((DmaError::DescriptorError, instance, rx_buffer)) - } else { - Ok((instance, rx_buffer)) - } - } - - /// Check if the transfer is finished. 
- pub fn is_done(&mut self) -> bool { - self.instance.rx().is_done() - } -} - -impl Drop for DmaTransferRxOwned -where - I: dma_private::DmaSupportRx, - R: WriteBuffer, -{ - fn drop(&mut self) { - self.instance.peripheral_wait_dma(false, true); - } -} - -/// DMA transaction for TX+RX transfers with moved-in/moved-out peripheral and -/// buffers -#[non_exhaustive] -#[must_use] -pub struct DmaTransferTxRxOwned -where - I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, - T: ReadBuffer, - R: WriteBuffer, -{ - instance: I, - tx_buffer: T, - rx_buffer: R, -} - -impl DmaTransferTxRxOwned -where - I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, - T: ReadBuffer, - R: WriteBuffer, -{ - pub(crate) fn new(instance: I, tx_buffer: T, rx_buffer: R) -> Self { - Self { - instance, - tx_buffer, - rx_buffer, - } - } - - /// Wait for the transfer to finish and return the peripheral and the - /// buffers. - #[allow(clippy::type_complexity)] - pub fn wait(mut self) -> Result<(I, T, R), (DmaError, I, T, R)> { - self.instance.peripheral_wait_dma(true, true); - - let err = self.instance.tx().has_error() || self.instance.rx().has_error(); - - // We need to have a `Drop` implementation, because we accept - // managed buffers that can free their memory on drop. Because of that - // we can't move out of the `Transfer`'s fields, so we use `ptr::read` - // and `mem::forget`. - // - // NOTE(unsafe) There is no panic branch between getting the resources - // and forgetting `self`. - - let (instance, tx_buffer, rx_buffer) = unsafe { - let instance = core::ptr::read(&self.instance); - let tx_buffer = core::ptr::read(&self.tx_buffer); - let rx_buffer = core::ptr::read(&self.rx_buffer); - core::mem::forget(self); - - (instance, tx_buffer, rx_buffer) - }; - - if err { - Err((DmaError::DescriptorError, instance, tx_buffer, rx_buffer)) - } else { - Ok((instance, tx_buffer, rx_buffer)) - } - } - - /// Check if the transfer is finished. 
- pub fn is_done(&mut self) -> bool { - self.instance.tx().is_done() && self.instance.rx().is_done() - } -} - -impl Drop for DmaTransferTxRxOwned -where - I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, - T: ReadBuffer, - R: WriteBuffer, -{ - fn drop(&mut self) { - self.instance.peripheral_wait_dma(true, true); - } -} - /// DMA transaction for TX only circular transfers #[non_exhaustive] #[must_use] @@ -2156,10 +2509,6 @@ pub(crate) mod asynch { pub fn new(tx: &'a mut TX) -> Self { Self { tx, _a: () } } - - pub fn tx(&mut self) -> &mut TX { - self.tx - } } impl<'a, TX> core::future::Future for DmaTxFuture<'a, TX> diff --git a/esp-hal/src/soc/mod.rs b/esp-hal/src/soc/mod.rs index 47090437149..ac1f8dbfa46 100644 --- a/esp-hal/src/soc/mod.rs +++ b/esp-hal/src/soc/mod.rs @@ -71,3 +71,10 @@ impl self::efuse::Efuse { pub(crate) fn is_valid_ram_address(address: u32) -> bool { (self::constants::SOC_DRAM_LOW..=self::constants::SOC_DRAM_HIGH).contains(&address) } + +#[allow(unused)] +pub(crate) fn is_slice_in_dram(slice: &[T]) -> bool { + let start = slice.as_ptr() as u32; + let end = start + slice.len() as u32; + self::constants::SOC_DRAM_LOW <= start && end <= self::constants::SOC_DRAM_HIGH +} diff --git a/esp-hal/src/spi/master.rs b/esp-hal/src/spi/master.rs index 63579dfc289..069a2f5c225 100644 --- a/esp-hal/src/spi/master.rs +++ b/esp-hal/src/spi/master.rs @@ -79,7 +79,7 @@ use super::{ }; use crate::{ clock::Clocks, - dma::{DescriptorChain, DmaPeripheral, Rx, Tx}, + dma::{DmaDescriptor, DmaPeripheral, Rx, Tx}, gpio::{InputPin, InputSignal, OutputPin, OutputSignal}, interrupt::InterruptHandler, peripheral::{Peripheral, PeripheralRef}, @@ -839,26 +839,23 @@ where } pub mod dma { - use embedded_dma::{ReadBuffer, WriteBuffer}; + use core::{ + cmp::min, + sync::atomic::{fence, Ordering}, + }; use super::*; + #[cfg(feature = "async")] + use crate::dma::asynch::{DmaRxFuture, DmaTxFuture}; #[cfg(spi3)] use crate::dma::Spi3Peripheral; use crate::{ dma::{ - 
dma_private::{DmaSupport, DmaSupportRx, DmaSupportTx}, Channel, - ChannelRx, - ChannelTx, - DescriptorChain, DmaChannel, - DmaDescriptor, - DmaTransferRx, - DmaTransferRxOwned, - DmaTransferTx, - DmaTransferTxOwned, - DmaTransferTxRx, - DmaTransferTxRxOwned, + DmaRxBuf, + DmaTxBuf, + RxPrivate, Spi2Peripheral, SpiPeripheral, TxPrivate, @@ -877,8 +874,6 @@ pub mod dma { fn with_dma( self, channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI2, C, M, DmaMode>; } @@ -893,8 +888,6 @@ pub mod dma { fn with_dma( self, channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI3, C, M, DmaMode>; } @@ -908,16 +901,12 @@ pub mod dma { fn with_dma( self, mut channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI2, C, M, DmaMode> { channel.tx.init_channel(); // no need to call this for both, TX and RX SpiDma { spi: self.spi, channel, - tx_chain: DescriptorChain::new(tx_descriptors), - rx_chain: DescriptorChain::new(rx_descriptors), _mode: PhantomData, } } @@ -934,16 +923,12 @@ pub mod dma { fn with_dma( self, mut channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI3, C, M, DmaMode> { channel.tx.init_channel(); // no need to call this for both, TX and RX SpiDma { spi: self.spi, channel, - tx_chain: DescriptorChain::new(tx_descriptors), - rx_chain: DescriptorChain::new(rx_descriptors), _mode: PhantomData, } } @@ -959,8 +944,6 @@ pub mod dma { { pub(crate) spi: PeripheralRef<'d, T>, pub(crate) channel: Channel<'d, C, DmaMode>, - tx_chain: DescriptorChain, - rx_chain: DescriptorChain, _mode: PhantomData, } @@ -978,7 +961,7 @@ pub 
mod dma { impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1018,7 +1001,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> crate::private::Sealed for SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1028,7 +1011,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> InterruptConfigurable for SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1041,7 +1024,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1052,64 +1035,84 @@ pub mod dma { } } - impl<'d, T, C, M, DmaMode> DmaSupport for SpiDma<'d, T, C, M, DmaMode> + pub struct SpiDmaTransfer<'d, T, C, M, DmaMode, Buf> where - T: InstanceDma, ChannelRx<'d, C>>, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, DmaMode: Mode, { - fn peripheral_wait_dma(&mut self, _is_tx: bool, _is_rx: bool) { - self.spi.flush().ok(); - } - - fn peripheral_dma_stop(&mut self) { - unreachable!("unsupported") - } + spi_dma: SpiDma<'d, T, C, M, DmaMode>, + dma_buf: Buf, + is_rx: bool, + is_tx: bool, } - impl<'d, T, C, M, DmaMode> DmaSupportTx for SpiDma<'d, T, C, M, DmaMode> + impl<'d, T, C, M, DmaMode, Buf> SpiDmaTransfer<'d, T, C, M, DmaMode, Buf> where - T: InstanceDma, ChannelRx<'d, C>>, + T: Instance, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, DmaMode: Mode, { - type TX = ChannelTx<'d, C>; - - fn tx(&mut self) -> &mut Self::TX { - &mut self.channel.tx + pub fn is_done(&self) -> bool { + if self.is_tx { + if !self.spi_dma.channel.tx.is_done() { + return false; + } + } + if self.spi_dma.spi.busy() { + return false; + } + if self.is_rx { + // If this is an asymmetric transfer and the RX side is 
smaller, the RX channel + // will never be "done" as it won't have enough descriptors/buffer to receive + // the EOF bit from the SPI. So instead the RX channel will hit + // a "descriptor empty" which means the DMA is written as much + // of the received data as possible into the buffer and + // discarded the rest. The user doesn't care about this discarded data. + + if !self.spi_dma.channel.rx.is_done() + && !self.spi_dma.channel.rx.has_dscr_empty_error() + { + return false; + } + } + true } - fn chain(&mut self) -> &mut DescriptorChain { - &mut self.tx_chain + pub fn wait(self) -> (SpiDma<'d, T, C, M, DmaMode>, Buf) { + while !self.is_done() {} + fence(Ordering::Acquire); + (self.spi_dma, self.dma_buf) } } - impl<'d, T, C, M, DmaMode> DmaSupportRx for SpiDma<'d, T, C, M, DmaMode> + #[cfg(feature = "async")] + impl<'d, T, C, M, Buf> SpiDmaTransfer<'d, T, C, M, crate::Async, Buf> where - T: InstanceDma, ChannelRx<'d, C>>, + T: Instance, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, - DmaMode: Mode, { - type RX = ChannelRx<'d, C>; + pub async fn wait_for_done(&mut self) { + if self.is_tx { + let _ = DmaTxFuture::new(&mut self.spi_dma.channel.tx).await; + } - fn rx(&mut self) -> &mut Self::RX { - &mut self.channel.rx - } + // As a future enhancement, setup Spi Future in here as well. - fn chain(&mut self) -> &mut DescriptorChain { - &mut self.rx_chain + if self.is_rx { + let _ = DmaRxFuture::new(&mut self.spi_dma.channel.rx).await; + } } } impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: IsFullDuplex, @@ -1117,207 +1120,137 @@ pub mod dma { { /// Perform a DMA write. /// - /// This will return a [DmaTransferTx]. The maximum amount of data to be - /// sent is 32736 bytes. 
- #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_write<'t, TXBUF>( - &'t mut self, - words: &'t TXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, - { - self.dma_write_start(words)?; - Ok(DmaTransferTx::new(self)) - } - - /// Perform a DMA write. - /// - /// This will return a [DmaTransferTxOwned] owning the buffer and the + /// This will return a [SpiDmaTransfer] owning the buffer and the /// SPI instance. The maximum amount of data to be sent is 32736 /// bytes. #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_write_owned( + pub fn dma_write( mut self, - words: TXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, + buffer: DmaTxBuf, + ) -> Result, (Error, Self, DmaTxBuf)> { - self.dma_write_start(&words)?; - Ok(DmaTransferTxOwned::new(self, words)) - } - - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - fn dma_write_start<'t, TXBUF>(&'t mut self, words: &'t TXBUF) -> Result<(), super::Error> - where - TXBUF: ReadBuffer, - { - let (ptr, len) = unsafe { words.read_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_write = buffer.len(); + if bytes_to_write > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } - unsafe { - self.spi.start_write_bytes_dma( - &mut self.tx_chain, - ptr, - len, - &mut self.channel.tx, - )?; + let result = unsafe { + self.spi + .start_write_bytes_dma(buffer.first(), buffer.len(), &mut self.channel.tx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(()) - } - /// Perform a DMA read. - /// - /// This will return a [DmaTransferRx]. The maximum amount of data to be - /// received is 32736 bytes. 
- #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_read<'t, RXBUF>( - &'t mut self, - words: &'t mut RXBUF, - ) -> Result, super::Error> - where - RXBUF: WriteBuffer, - { - self.dma_read_start(words)?; - Ok(DmaTransferRx::new(self)) + Ok(SpiDmaTransfer { + spi_dma: self, + dma_buf: buffer, + is_tx: true, + is_rx: false, + }) } /// Perform a DMA read. /// - /// This will return a [DmaTransferRxOwned] owning the buffer and + /// This will return a [SpiDmaTransfer] owning the buffer and /// the SPI instance. The maximum amount of data to be /// received is 32736 bytes. #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_read_owned( + pub fn dma_read( mut self, - mut words: RXBUF, - ) -> Result, super::Error> - where - RXBUF: WriteBuffer, - { - self.dma_read_start(&mut words)?; - Ok(DmaTransferRxOwned::new(self, words)) - } - - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - fn dma_read_start<'t, RXBUF>(&'t mut self, words: &'t mut RXBUF) -> Result<(), super::Error> - where - RXBUF: WriteBuffer, + buffer: DmaRxBuf, + ) -> Result, (Error, Self, DmaRxBuf)> { - let (ptr, len) = unsafe { words.write_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_read = buffer.capacity(); + if bytes_to_read > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } - unsafe { - self.spi.start_read_bytes_dma( - &mut self.rx_chain, - ptr, - len, - &mut self.channel.rx, - )?; + let result = unsafe { + self.spi + .start_read_bytes_dma(buffer.first(), bytes_to_read, &mut self.channel.rx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(()) - } - - /// Perform a DMA transfer. - /// - /// This will return a [DmaTransferTxRx]. - /// The maximum amount of data to be sent/received is 32736 bytes. 
- pub fn dma_transfer<'t, TXBUF, RXBUF>( - &'t mut self, - words: &'t TXBUF, - read_buffer: &'t mut RXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, - RXBUF: WriteBuffer, - { - self.dma_transfer_start(words, read_buffer)?; - Ok(DmaTransferTxRx::new(self)) + Ok(SpiDmaTransfer { + spi_dma: self, + dma_buf: buffer, + is_tx: false, + is_rx: true, + }) } /// Perform a DMA transfer /// - /// This will return a [DmaTransferTxRxOwned] owning the buffers and + /// This will return a [SpiDmaTransfer] owning the buffers and /// the SPI instance. The maximum amount of data to be /// sent/received is 32736 bytes. - pub fn dma_transfer_owned( + pub fn dma_transfer( mut self, - words: TXBUF, - mut read_buffer: RXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, - RXBUF: WriteBuffer, - { - self.dma_transfer_start(&words, &mut read_buffer)?; - Ok(DmaTransferTxRxOwned::new(self, words, read_buffer)) - } - - fn dma_transfer_start<'t, TXBUF, RXBUF>( - &'t mut self, - words: &'t TXBUF, - read_buffer: &'t mut RXBUF, - ) -> Result<(), super::Error> - where - TXBUF: ReadBuffer, - RXBUF: WriteBuffer, - { - let (write_ptr, write_len) = unsafe { words.read_buffer() }; - let (read_ptr, read_len) = unsafe { read_buffer.write_buffer() }; - - if write_len > MAX_DMA_SIZE || read_len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); - } - - unsafe { + tx_buffer: DmaTxBuf, + rx_buffer: DmaRxBuf, + ) -> Result< + SpiDmaTransfer<'d, T, C, M, DmaMode, (DmaTxBuf, DmaRxBuf)>, + (Error, Self, DmaTxBuf, DmaRxBuf), + > { + let bytes_to_write = tx_buffer.len(); + let bytes_to_read = rx_buffer.capacity(); + + if bytes_to_write > MAX_DMA_SIZE || bytes_to_read > MAX_DMA_SIZE { + return Err(( + Error::MaxDmaTransferSizeExceeded, + self, + tx_buffer, + rx_buffer, + )); + } + + let result = unsafe { self.spi.start_transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - write_ptr, - write_len, - read_ptr, - read_len, + tx_buffer.first(), + rx_buffer.first(), 
+ bytes_to_write, + bytes_to_read, &mut self.channel.tx, &mut self.channel.rx, - )?; + ) + }; + if let Err(e) = result { + return Err((e, self, tx_buffer, rx_buffer)); } - Ok(()) + Ok(SpiDmaTransfer { + spi_dma: self, + dma_buf: (tx_buffer, rx_buffer), + is_tx: true, + is_rx: true, + }) } } impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: IsHalfDuplex, DmaMode: Mode, { #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn read<'t, RXBUF>( - &'t mut self, + pub fn read( + mut self, data_mode: SpiDataMode, cmd: Command, address: Address, dummy: u8, - buffer: &'t mut RXBUF, - ) -> Result, super::Error> - where - RXBUF: WriteBuffer, + buffer: DmaRxBuf, + ) -> Result, (Error, Self, DmaRxBuf)> { - let (ptr, len) = unsafe { buffer.write_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_read = buffer.capacity(); + if bytes_to_read > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } self.spi.init_half_duplex( @@ -1326,7 +1259,7 @@ pub mod dma { !address.is_none(), false, dummy != 0, - len == 0, + bytes_to_read == 0, ); self.spi .init_spi_data_mode(cmd.mode(), address.mode(), data_mode); @@ -1370,33 +1303,35 @@ pub mod dma { .modify(|_, w| unsafe { w.usr_dummy_cyclelen().bits(dummy - 1) }); } - unsafe { - self.spi.start_read_bytes_dma( - &mut self.rx_chain, - ptr, - len, - &mut self.channel.rx, - )?; + let result = unsafe { + self.spi + .start_read_bytes_dma(buffer.first(), bytes_to_read, &mut self.channel.rx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(DmaTransferRx::new(self)) + + Ok(SpiDmaTransfer { + spi_dma: self, + dma_buf: buffer, + is_tx: false, + is_rx: true, + }) } #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn write<'t, TXBUF>( - &'t mut self, + pub fn write( + mut self, data_mode: SpiDataMode, cmd: Command, 
address: Address, dummy: u8, - buffer: &'t TXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, + buffer: DmaTxBuf, + ) -> Result, (Error, Self, DmaTxBuf)> { - let (ptr, len) = unsafe { buffer.read_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_write = buffer.len(); + if bytes_to_write > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } self.spi.init_half_duplex( @@ -1405,7 +1340,7 @@ pub mod dma { !address.is_none(), false, dummy != 0, - len == 0, + bytes_to_write == 0, ); self.spi .init_spi_data_mode(cmd.mode(), address.mode(), data_mode); @@ -1449,270 +1384,486 @@ pub mod dma { .modify(|_, w| unsafe { w.usr_dummy_cyclelen().bits(dummy - 1) }); } - unsafe { - self.spi.start_write_bytes_dma( - &mut self.tx_chain, - ptr, - len, - &mut self.channel.tx, - )?; + let result = unsafe { + self.spi + .start_write_bytes_dma(buffer.first(), bytes_to_write, &mut self.channel.tx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(DmaTransferTx::new(self)) + + Ok(SpiDmaTransfer { + spi_dma: self, + dma_buf: buffer, + is_tx: true, + is_rx: false, + }) } } - #[cfg(feature = "embedded-hal-02")] - impl<'d, T, C, M, DmaMode> embedded_hal_02::blocking::spi::Transfer - for SpiDma<'d, T, C, M, DmaMode> + pub struct SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, - DmaMode: Mode, { - type Error = super::Error; - - fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { - self.spi.transfer_in_place_dma( - &mut self.tx_chain, - &mut self.rx_chain, - words, - &mut self.channel.tx, - &mut self.channel.rx, - ) - } + spi_dma: Option>, + buffers: Option<(DmaTxBuf, DmaRxBuf)>, } - #[cfg(feature = "embedded-hal-02")] - impl<'d, T, C, M, DmaMode> embedded_hal_02::blocking::spi::Write - for SpiDma<'d, T, C, M, DmaMode> + impl<'d, T, C> SpiDmaBus<'d, T, C> 
where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, - DmaMode: Mode, { - type Error = super::Error; + pub fn new( + spi_dma: SpiDma<'d, T, C, FullDuplexMode, crate::Blocking>, + tx_buffer: DmaTxBuf, + rx_buffer: DmaRxBuf, + ) -> Self { + Self { + spi_dma: Some(spi_dma), + buffers: Some((tx_buffer, rx_buffer)), + } + } + + fn read(&mut self, words: &mut [u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (tx_buf, mut rx_buf) = self.buffers.take().unwrap(); + + for chunk in words.chunks_mut(rx_buf.capacity()) { + rx_buf.set_length(chunk.len()); + + let transfer = match spi_dma.dma_read(rx_buf) { + Ok(transfer) => transfer, + Err((e, spi, rx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx_buf, rx)); + return Err(e); + } + }; + (spi_dma, rx_buf) = transfer.wait(); + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); - fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - self.spi - .write_bytes_dma(&mut self.tx_chain, words, &mut self.channel.tx)?; - self.spi.flush()?; Ok(()) } - } - #[cfg(feature = "embedded-hal-02")] - impl, const SIZE: usize> - embedded_hal_02::blocking::spi::Transfer for crate::FlashSafeDma - { - type Error = T::Error; + fn write(&mut self, words: &[u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (mut tx_buf, rx_buf) = self.buffers.take().unwrap(); - fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { - self.inner.transfer(words) + for chunk in words.chunks(tx_buf.capacity()) { + tx_buf.fill(chunk); + + let transfer = match spi_dma.dma_write(tx_buf) { + Ok(transfer) => transfer, + Err((e, spi, tx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx, rx_buf)); + return Err(e); + } + }; + (spi_dma, tx_buf) = transfer.wait(); + } + + 
self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); + + Ok(()) } - } - #[cfg(feature = "embedded-hal-02")] - impl, const SIZE: usize> - embedded_hal_02::blocking::spi::Write for crate::FlashSafeDma - { - type Error = T::Error; + fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (mut tx_buf, mut rx_buf) = self.buffers.take().unwrap(); - fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&words[0] as *const _ as u32) { - for chunk in words.chunks(SIZE) { - self.buffer[..chunk.len()].copy_from_slice(chunk); - self.inner.write(&self.buffer[..chunk.len()])?; - } + let chunk_size = min(tx_buf.capacity(), rx_buf.capacity()); + + let common_length = min(read.len(), write.len()); + let (read_common, read_remainder) = read.split_at_mut(common_length); + let (write_common, write_remainder) = write.split_at(common_length); + + for (read_chunk, write_chunk) in read_common + .chunks_mut(chunk_size) + .zip(write_common.chunks(chunk_size)) + { + tx_buf.fill(write_chunk); + rx_buf.set_length(read_chunk.len()); + + let transfer = match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => transfer, + Err((e, spi, tx, rx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx, rx)); + return Err(e); + } + }; + (spi_dma, (tx_buf, rx_buf)) = transfer.wait(); + + let bytes_read = rx_buf.read_received_data(read_chunk); + debug_assert_eq!(bytes_read, read_chunk.len()); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); + + if !read_remainder.is_empty() { + self.read(read_remainder) + } else if !write_remainder.is_empty() { + self.write(write_remainder) } else { - self.inner.write(words)?; - }; + Ok(()) + } + } + + fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (mut tx_buf, mut rx_buf) = self.buffers.take().unwrap(); + + 
let chunk_size = min(tx_buf.capacity(), rx_buf.capacity()); + + for chunk in words.chunks_mut(chunk_size) { + tx_buf.fill(chunk); + rx_buf.set_length(chunk.len()); + + let transfer = match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => transfer, + Err((e, spi, tx, rx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx, rx)); + return Err(e); + } + }; + (spi_dma, (tx_buf, rx_buf)) = transfer.wait(); + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); Ok(()) } } #[cfg(feature = "embedded-hal-02")] - impl, const SIZE: usize> - embedded_hal_02::spi::FullDuplex for crate::FlashSafeDma + impl<'d, T, C> embedded_hal_02::blocking::spi::Transfer for SpiDmaBus<'d, T, C> where - Self: embedded_hal_02::blocking::spi::Transfer, - Self: embedded_hal_02::blocking::spi::Write, + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, { - type Error = T::Error; + type Error = super::Error; - fn read(&mut self) -> nb::Result { - use embedded_hal_02::blocking::spi::Transfer; - let mut buf = [0; 1]; - self.transfer(&mut buf)?; - Ok(buf[0]) + fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { + self.transfer_in_place(words)?; + Ok(words) } + } - fn send(&mut self, word: u8) -> nb::Result<(), Self::Error> { - use embedded_hal_02::blocking::spi::Write; - self.write(&[word])?; + #[cfg(feature = "embedded-hal-02")] + impl<'d, T, C> embedded_hal_02::blocking::spi::Write for SpiDmaBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + type Error = super::Error; + + fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { + self.write(words)?; Ok(()) } } #[cfg(feature = "async")] - mod asynch { + pub mod asynch { + use core::{cmp::min, mem::take}; + + use embedded_hal::spi::ErrorType; + use super::*; - impl<'d, T, C, M> embedded_hal_async::spi::SpiBus for SpiDma<'d, T, C, M, 
crate::Async> + #[derive(Default)] + enum State<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, { - async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - let mut future = crate::dma::asynch::DmaRxFuture::new(&mut self.channel.rx); - unsafe { - self.spi.start_read_bytes_dma( - &mut self.rx_chain, - words.as_mut_ptr(), - words.len(), - future.rx(), - )?; - } - future.await?; + Idle( + SpiDma<'d, T, C, FullDuplexMode, crate::Async>, + DmaTxBuf, + DmaRxBuf, + ), + Reading( + SpiDmaTransfer<'d, T, C, FullDuplexMode, crate::Async, DmaRxBuf>, + DmaTxBuf, + ), + Writing( + SpiDmaTransfer<'d, T, C, FullDuplexMode, crate::Async, DmaTxBuf>, + DmaRxBuf, + ), + Transferring( + SpiDmaTransfer<'d, T, C, FullDuplexMode, crate::Async, (DmaTxBuf, DmaRxBuf)>, + ), + #[default] + InUse, + } + + pub struct SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + state: State<'d, T, C>, + } - Ok(()) + impl<'d, T, C> SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + pub fn new( + spi: SpiDma<'d, T, C, FullDuplexMode, crate::Async>, + dma_tx_buf: DmaTxBuf, + dma_rx_buf: DmaRxBuf, + ) -> Self { + Self { + state: State::Idle(spi, dma_tx_buf, dma_rx_buf), + } } - async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - for chunk in words.chunks(MAX_DMA_SIZE) { - let mut future = crate::dma::asynch::DmaTxFuture::new(&mut self.channel.tx); - unsafe { - self.spi.start_write_bytes_dma( - &mut self.tx_chain, - chunk.as_ptr(), - chunk.len(), - future.tx(), - )?; + async fn wait_for_idle( + &mut self, + ) -> ( + SpiDma<'d, T, C, FullDuplexMode, crate::Async>, + DmaTxBuf, + DmaRxBuf, + ) { + match &mut self.state { + State::Idle(_, _, _) => (), + State::Reading(transfer, _) => transfer.wait_for_done().await, + State::Writing(transfer, _) => transfer.wait_for_done().await, + State::Transferring(transfer) 
=> transfer.wait_for_done().await, + State::InUse => unreachable!(), + } + match take(&mut self.state) { + State::Idle(spi, tx_buf, rx_buf) => (spi, tx_buf, rx_buf), + State::Reading(transfer, tx_buf) => { + let (spi, rx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) } - future.await?; - - self.spi.flush()?; + State::Writing(transfer, rx_buf) => { + let (spi, tx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + State::Transferring(transfer) => { + let (spi, (tx_buf, rx_buf)) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + State::InUse => unreachable!(), } - - Ok(()) } + } - async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - let mut idx = 0; - loop { - let write_idx = isize::min(idx, write.len() as isize); - let write_len = usize::min(write.len() - idx as usize, MAX_DMA_SIZE); - - let read_idx = isize::min(idx, read.len() as isize); - let read_len = usize::min(read.len() - idx as usize, MAX_DMA_SIZE); - - let mut tx_future = crate::dma::asynch::DmaTxFuture::new(&mut self.channel.tx); - let mut rx_future = crate::dma::asynch::DmaRxFuture::new(&mut self.channel.rx); - - unsafe { - self.spi.start_transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - write.as_ptr().offset(write_idx), - write_len, - read.as_mut_ptr().offset(read_idx), - read_len, - tx_future.tx(), - rx_future.rx(), - )?; - } - let (tx_res, rx_res) = embassy_futures::join::join(tx_future, rx_future).await; - tx_res?; - rx_res?; - - self.spi.flush()?; + impl<'d, T, C> ErrorType for SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + type Error = Error; + } - idx += MAX_DMA_SIZE as isize; - if idx >= write.len() as isize && idx >= read.len() as isize { - break; - } + impl<'d, T, C> embedded_hal_async::spi::SpiBus for SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { + // Get previous transfer. 
+ let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + for chunk in words.chunks_mut(rx_buf.capacity()) { + rx_buf.set_length(chunk.len()); + + match spi_dma.dma_read(rx_buf) { + Ok(transfer) => { + self.state = State::Reading(transfer, tx_buf); + } + Err((e, spi, rx)) => { + self.state = State::Idle(spi, tx_buf, rx); + return Err(e); + } + }; + + match &mut self.state { + State::Reading(transfer, _) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Reading(transfer, tx_buf) => { + let (spi, rx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); } + self.state = State::Idle(spi_dma, tx_buf, rx_buf); + Ok(()) } - async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - for chunk in words.chunks_mut(MAX_DMA_SIZE) { - let mut tx_future = crate::dma::asynch::DmaTxFuture::new(&mut self.channel.tx); - let mut rx_future = crate::dma::asynch::DmaRxFuture::new(&mut self.channel.rx); - - unsafe { - self.spi.start_transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - chunk.as_ptr(), - chunk.len(), - chunk.as_mut_ptr(), - chunk.len(), - tx_future.tx(), - rx_future.rx(), - )?; - } - - let (tx_res, rx_res) = embassy_futures::join::join(tx_future, rx_future).await; - tx_res?; - rx_res?; - - self.spi.flush()?; + async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { + // Get previous transfer. 
+ let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + for chunk in words.chunks(tx_buf.capacity()) { + tx_buf.fill(chunk); + + match spi_dma.dma_write(tx_buf) { + Ok(transfer) => { + self.state = State::Writing(transfer, rx_buf); + } + Err((e, spi, tx)) => { + self.state = State::Idle(spi, tx, rx_buf); + return Err(e); + } + }; + + match &mut self.state { + State::Writing(transfer, _) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Writing(transfer, rx_buf) => { + let (spi, tx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; } + self.state = State::Idle(spi_dma, tx_buf, rx_buf); + Ok(()) } - async fn flush(&mut self) -> Result<(), Self::Error> { - self.spi.flush() - } - } + async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { + // Get previous transfer. + let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + let chunk_size = min(tx_buf.capacity(), rx_buf.capacity()); + + let common_length = min(read.len(), write.len()); + let (read_common, read_remainder) = read.split_at_mut(common_length); + let (write_common, write_remainder) = write.split_at(common_length); + + for (read_chunk, write_chunk) in read_common + .chunks_mut(chunk_size) + .zip(write_common.chunks(chunk_size)) + { + tx_buf.fill(write_chunk); + rx_buf.set_length(read_chunk.len()); + + match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => { + self.state = State::Transferring(transfer); + } + Err((e, spi, tx, rx)) => { + self.state = State::Idle(spi, tx, rx); + return Err(e); + } + }; + + match &mut self.state { + State::Transferring(transfer) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Transferring(transfer) => { + let (spi, (tx_buf, rx_buf)) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), 
+ }; + + let bytes_read = rx_buf.read_received_data(read_chunk); + assert_eq!(bytes_read, read_chunk.len()); + } - impl embedded_hal_async::spi::SpiBus - for crate::FlashSafeDma - { - async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.read(words).await - } + self.state = State::Idle(spi_dma, tx_buf, rx_buf); - async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&words[0] as *const _ as u32) { - for chunk in words.chunks(SIZE) { - self.buffer[..chunk.len()].copy_from_slice(chunk); - self.inner.write(&self.buffer[..chunk.len()]).await?; - } + if !read_remainder.is_empty() { + self.read(read_remainder).await + } else if !write_remainder.is_empty() { + self.write(write_remainder).await } else { - self.inner.write(words).await?; + Ok(()) } - Ok(()) - } - - async fn flush(&mut self) -> Result<(), Self::Error> { - self.inner.flush().await } async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.transfer_in_place(words).await + // Get previous transfer. 
+ let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + for chunk in words.chunks_mut(tx_buf.capacity()) { + tx_buf.fill(chunk); + rx_buf.set_length(chunk.len()); + + match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => { + self.state = State::Transferring(transfer); + } + Err((e, spi, tx, rx)) => { + self.state = State::Idle(spi, tx, rx); + return Err(e); + } + }; + + match &mut self.state { + State::Transferring(transfer) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Transferring(transfer) => { + let (spi, (tx_buf, rx_buf)) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); + } + + self.state = State::Idle(spi_dma, tx_buf, rx_buf); + + Ok(()) } - async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&write[0] as *const _ as u32) { - for (read, write) in read.chunks_mut(SIZE).zip(write.chunks(SIZE)) { - self.buffer[..write.len()].copy_from_slice(write); - self.inner - .transfer(read, &self.buffer[..write.len()]) - .await?; - } - } else { - self.inner.transfer(read, write).await?; - } + async fn flush(&mut self) -> Result<(), Self::Error> { + // Get previous transfer. 
+ let (spi_dma, tx_buf, rx_buf) = self.wait_for_idle().await; + self.state = State::Idle(spi_dma, tx_buf, rx_buf); Ok(()) } } @@ -1724,116 +1875,41 @@ pub mod dma { use super::*; - impl<'d, T, C, M, DmaMode> ErrorType for SpiDma<'d, T, C, M, DmaMode> + impl<'d, T, C> ErrorType for SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, - DmaMode: Mode, { type Error = Error; } - impl<'d, T, C, M> SpiBus for SpiDma<'d, T, C, M, crate::Blocking> + impl<'d, T, C> SpiBus for SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, { fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.spi.transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - &[], - words, - &mut self.channel.tx, - &mut self.channel.rx, - )?; - self.flush() + self.read(words) } fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - self.spi - .write_bytes_dma(&mut self.tx_chain, words, &mut self.channel.tx)?; - self.flush() + self.write(words) } fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - self.spi.transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - write, - read, - &mut self.channel.tx, - &mut self.channel.rx, - )?; - self.flush() + self.transfer(read, write) } - /// Transfer data in place. - /// - /// Writes data from `words` out on the bus and stores the reply - /// into `words`. A convenient wrapper around - /// [`write`](SpiBus::write), [`flush`](SpiBus::flush) and - /// [`read`](SpiBus::read). 
fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.spi.transfer_in_place_dma( - &mut self.tx_chain, - &mut self.rx_chain, - words, - &mut self.channel.tx, - &mut self.channel.rx, - )?; - self.flush() + self.transfer_in_place(words) } fn flush(&mut self) -> Result<(), Self::Error> { - self.spi.flush() - } - } - - impl ErrorType for crate::FlashSafeDma { - type Error = T::Error; - } - - impl SpiBus for crate::FlashSafeDma { - fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.read(words) - } - - fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&words[0] as *const _ as u32) { - for chunk in words.chunks(SIZE) { - self.buffer[..chunk.len()].copy_from_slice(chunk); - self.inner.write(&self.buffer[..chunk.len()])?; - } - } else { - self.inner.write(words)?; - } + // All operations currently flush so this is no-op. Ok(()) } - - fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&write[0] as *const _ as u32) { - for (read, write) in read.chunks_mut(SIZE).zip(write.chunks(SIZE)) { - self.buffer[..write.len()].copy_from_slice(write); - self.inner.transfer(read, &self.buffer[..write.len()])?; - } - } else { - self.inner.transfer(read, write)?; - } - Ok(()) - } - - fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.transfer_in_place(words) - } - - fn flush(&mut self) -> Result<(), Self::Error> { - self.inner.flush() - } } } } @@ -1938,90 +2014,13 @@ mod ehal1 { } #[doc(hidden)] -pub trait InstanceDma: Instance -where - TX: Tx, - RX: Rx, -{ - fn transfer_in_place_dma<'w>( - &mut self, - tx_chain: &mut DescriptorChain, - rx_chain: &mut DescriptorChain, - words: &'w mut [u8], - tx: &mut TX, - rx: &mut RX, - ) -> Result<&'w [u8], Error> { - for chunk in words.chunks_mut(MAX_DMA_SIZE) { - unsafe { - self.start_transfer_dma( - tx_chain, - rx_chain, - 
chunk.as_ptr(), - chunk.len(), - chunk.as_mut_ptr(), - chunk.len(), - tx, - rx, - )?; - } - - while !tx.is_done() && !rx.is_done() {} - self.flush().unwrap(); - } - - Ok(words) - } - - fn transfer_dma<'w>( - &mut self, - tx_chain: &mut DescriptorChain, - rx_chain: &mut DescriptorChain, - write_buffer: &'w [u8], - read_buffer: &'w mut [u8], - tx: &mut TX, - rx: &mut RX, - ) -> Result<&'w [u8], Error> { - let mut idx = 0; - loop { - let write_idx = isize::min(idx, write_buffer.len() as isize); - let write_len = usize::min(write_buffer.len() - idx as usize, MAX_DMA_SIZE); - - let read_idx = isize::min(idx, read_buffer.len() as isize); - let read_len = usize::min(read_buffer.len() - idx as usize, MAX_DMA_SIZE); - - unsafe { - self.start_transfer_dma( - tx_chain, - rx_chain, - write_buffer.as_ptr().offset(write_idx), - write_len, - read_buffer.as_mut_ptr().offset(read_idx), - read_len, - tx, - rx, - )?; - } - - while !tx.is_done() && !rx.is_done() {} - self.flush().unwrap(); - - idx += MAX_DMA_SIZE as isize; - if idx >= write_buffer.len() as isize && idx >= read_buffer.len() as isize { - break; - } - } - - Ok(read_buffer) - } - +pub trait InstanceDma: Instance { #[allow(clippy::too_many_arguments)] - unsafe fn start_transfer_dma( + unsafe fn start_transfer_dma( &mut self, - tx_chain: &mut DescriptorChain, - rx_chain: &mut DescriptorChain, - write_buffer_ptr: *const u8, + tx_desc: *mut DmaDescriptor, + rx_desc: *mut DmaDescriptor, write_buffer_len: usize, - read_buffer_ptr: *mut u8, read_buffer_len: usize, tx: &mut TX, rx: &mut RX, @@ -2035,15 +2034,13 @@ where self.enable_dma(); self.update(); + self.clear_dma_interrupts(); reset_dma_before_load_dma_dscr(reg_block); - tx_chain.fill_for_tx(false, write_buffer_ptr, write_buffer_len)?; - tx.prepare_transfer_without_start(self.dma_peripheral(), tx_chain) + tx.prepare_transfer(self.dma_peripheral(), tx_desc) .and_then(|_| tx.start_transfer())?; - rx_chain.fill_for_rx(false, read_buffer_ptr, read_buffer_len)?; - 
rx.prepare_transfer_without_start(self.dma_peripheral(), rx_chain) + rx.prepare_transfer(self.dma_peripheral(), rx_desc) .and_then(|_| rx.start_transfer())?; - self.clear_dma_interrupts(); reset_dma_before_usr_cmd(reg_block); reg_block.cmd().modify(|_, w| w.usr().set_bit()); @@ -2051,29 +2048,10 @@ where Ok(()) } - fn write_bytes_dma<'w>( - &mut self, - chain: &mut DescriptorChain, - words: &'w [u8], - tx: &mut TX, - ) -> Result<&'w [u8], Error> { - for chunk in words.chunks(MAX_DMA_SIZE) { - unsafe { - self.start_write_bytes_dma(chain, chunk.as_ptr(), chunk.len(), tx)?; - } - - while !tx.is_done() {} - self.flush().unwrap(); // seems "is_done" doesn't work as intended? - } - - Ok(words) - } - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - unsafe fn start_write_bytes_dma( + unsafe fn start_write_bytes_dma( &mut self, - chain: &mut DescriptorChain, - ptr: *const u8, + first_desc: *mut DmaDescriptor, len: usize, tx: &mut TX, ) -> Result<(), Error> { @@ -2086,13 +2064,10 @@ where self.update(); reset_dma_before_load_dma_dscr(reg_block); - chain.fill_for_tx(false, ptr, len)?; - unsafe { - tx.prepare_transfer_without_start(self.dma_peripheral(), chain) - .and_then(|_| tx.start_transfer())?; - } - self.clear_dma_interrupts(); + + tx.prepare_transfer(self.dma_peripheral(), first_desc)?; + tx.start_transfer()?; reset_dma_before_usr_cmd(reg_block); reg_block.cmd().modify(|_, w| w.usr().set_bit()); @@ -2101,15 +2076,14 @@ where } #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - unsafe fn start_read_bytes_dma( + unsafe fn start_read_bytes_dma( &mut self, - chain: &mut DescriptorChain, - ptr: *mut u8, - len: usize, + desc: *mut DmaDescriptor, + data_length: usize, rx: &mut RX, ) -> Result<(), Error> { let reg_block = self.register_block(); - self.configure_datalen(len as u32 * 8); + self.configure_datalen(data_length as u32 * 8); rx.is_done(); @@ -2117,13 +2091,13 @@ where self.update(); reset_dma_before_load_dma_dscr(reg_block); - chain.fill_for_rx(false, ptr, 
len)?; - rx.prepare_transfer_without_start(self.dma_peripheral(), chain) - .and_then(|_| rx.start_transfer())?; self.clear_dma_interrupts(); reset_dma_before_usr_cmd(reg_block); + rx.prepare_transfer(self.dma_peripheral(), desc)?; + rx.start_transfer()?; + reg_block.cmd().modify(|_, w| w.usr().set_bit()); Ok(()) @@ -2236,20 +2210,10 @@ fn reset_dma_before_load_dma_dscr(reg_block: &RegisterBlock) { }); } -impl InstanceDma for crate::peripherals::SPI2 -where - TX: Tx, - RX: Rx, -{ -} +impl InstanceDma for crate::peripherals::SPI2 {} #[cfg(spi3)] -impl InstanceDma for crate::peripherals::SPI3 -where - TX: Tx, - RX: Rx, -{ -} +impl InstanceDma for crate::peripherals::SPI3 {} #[doc(hidden)] pub trait ExtendedInstance: Instance { diff --git a/examples/src/bin/embassy_spi.rs b/examples/src/bin/embassy_spi.rs index 834ecd3a1bf..38d877b721c 100644 --- a/examples/src/bin/embassy_spi.rs +++ b/examples/src/bin/embassy_spi.rs @@ -24,12 +24,12 @@ use esp_backtrace as _; use esp_hal::{ clock::ClockControl, dma::*, - dma_descriptors, + dma_buffers, gpio::Io, peripherals::Peripherals, prelude::*, spi::{ - master::{prelude::*, Spi}, + master::{dma::asynch::SpiDmaAsyncBus, prelude::*, Spi}, SpiMode, }, system::SystemControl, @@ -73,21 +73,21 @@ async fn main(_spawner: Spawner) { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (descriptors, rx_descriptors) = dma_descriptors!(32000); + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure_for_async(false, DmaPriority::Priority0), - descriptors, - rx_descriptors, - ); + 
.with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0)); + + let mut spi_bus = SpiDmaAsyncBus::new(spi, dma_tx_buf, dma_rx_buf); let send_buffer = [0, 1, 2, 3, 4, 5, 6, 7]; loop { let mut buffer = [0; 8]; esp_println::println!("Sending bytes"); - embedded_hal_async::spi::SpiBus::transfer(&mut spi, &mut buffer, &send_buffer) + embedded_hal_async::spi::SpiBus::transfer(&mut spi_bus, &mut buffer, &send_buffer) .await .unwrap(); esp_println::println!("Bytes received: {:?}", buffer); diff --git a/examples/src/bin/qspi_flash.rs b/examples/src/bin/qspi_flash.rs index cabe617872a..b0aacf8fbb3 100644 --- a/examples/src/bin/qspi_flash.rs +++ b/examples/src/bin/qspi_flash.rs @@ -31,7 +31,7 @@ use esp_backtrace as _; use esp_hal::{ clock::ClockControl, delay::Delay, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf}, dma_buffers, gpio::Io, peripherals::Peripherals, @@ -77,6 +77,8 @@ fn main() -> ! { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(256, 320); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins( @@ -87,30 +89,23 @@ fn main() -> ! 
{ Some(sio3), Some(cs), ) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let delay = Delay::new(&clocks); - // DMA buffer require a static life-time - let (zero_buf, _, _, _) = dma_buffers!(0); - let send = tx_buffer; - let mut receive = rx_buffer; - // write enable + dma_tx_buf.set_length(0); let transfer = spi .write( SpiDataMode::Single, Command::Command8(0x06, SpiDataMode::Single), Address::None, 0, - &zero_buf, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); delay.delay_millis(250); // erase sector @@ -120,10 +115,11 @@ fn main() -> ! { Command::Command8(0x20, SpiDataMode::Single), Address::Address24(0x000000, SpiDataMode::Single), 0, - &zero_buf, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); delay.delay_millis(250); // write enable @@ -133,25 +129,28 @@ fn main() -> ! { Command::Command8(0x06, SpiDataMode::Single), Address::None, 0, - &zero_buf, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); delay.delay_millis(250); // write data / program page - send.fill(b'!'); - send[0..][..5].copy_from_slice(&b"Hello"[..]); + dma_tx_buf.set_length(dma_tx_buf.capacity()); + dma_tx_buf.as_mut_slice().fill(b'!'); + dma_tx_buf.as_mut_slice()[0..][..5].copy_from_slice(&b"Hello"[..]); let transfer = spi .write( SpiDataMode::Quad, Command::Command8(0x32, SpiDataMode::Single), Address::Address24(0x000000, SpiDataMode::Single), 0, - &send, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, _) = transfer.wait(); delay.delay_millis(250); loop { @@ -162,17 +161,18 @@ fn main() -> ! 
{ Command::Command8(0xeb, SpiDataMode::Single), Address::Address32(0x000000 << 8, SpiDataMode::Quad), 4, - &mut receive, + dma_rx_buf, ) + .map_err(|e| e.0) .unwrap(); // here we could do something else while DMA transfer is in progress // the buffers and spi is moved into the transfer and we can get it back via // `wait` - transfer.wait().unwrap(); + (spi, dma_rx_buf) = transfer.wait(); - println!("{:x?}", &receive); - for b in &mut receive.iter() { + println!("{:x?}", dma_rx_buf.as_slice()); + for b in &mut dma_rx_buf.as_slice().iter() { if *b >= 32 && *b <= 127 { print!("{}", *b as char); } else { diff --git a/examples/src/bin/spi_loopback_dma.rs b/examples/src/bin/spi_loopback_dma.rs index 991b856a163..1fb15d4cc17 100644 --- a/examples/src/bin/spi_loopback_dma.rs +++ b/examples/src/bin/spi_loopback_dma.rs @@ -22,7 +22,7 @@ use esp_backtrace as _; use esp_hal::{ clock::ClockControl, delay::Delay, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf}, dma_buffers, gpio::Io, peripherals::Peripherals, @@ -55,32 +55,30 @@ fn main() -> ! 
{ let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let delay = Delay::new(&clocks); - // DMA buffer require a static life-time - let mut send = tx_buffer; - let mut receive = rx_buffer; let mut i = 0; - for (i, v) in send.iter_mut().enumerate() { + for (i, v) in dma_tx_buf.as_slice().iter_mut().enumerate() { *v = (i % 255) as u8; } loop { - send[0] = i; - send[send.len() - 1] = i; + dma_tx_buf.as_mut_slice()[0] = i; + *dma_tx_buf.as_slice().last_mut().unwrap() = i; i = i.wrapping_add(1); - let mut transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); // here we could do something else while DMA transfer is in progress let mut n = 0; // Check is_done until the transfer is almost done (32000 bytes at 100kHz is @@ -90,11 +88,11 @@ fn main() -> ! { n += 1; } - transfer.wait().unwrap(); + (spi, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); println!( "{:x?} .. {:x?}", - &receive[..10], - &receive[receive.len() - 10..] 
+ &dma_rx_buf.as_slice()[..10], + &dma_rx_buf.as_slice().last_chunk::<10>().unwrap() ); delay.delay_millis(250); diff --git a/hil-test/tests/spi_full_duplex_dma.rs b/hil-test/tests/spi_full_duplex_dma.rs index 15d2b2510e0..f6c2e3d7b17 100644 --- a/hil-test/tests/spi_full_duplex_dma.rs +++ b/hil-test/tests/spi_full_duplex_dma.rs @@ -33,6 +33,11 @@ use esp_hal::{ #[embedded_test::tests] mod tests { use defmt::assert_eq; + use embedded_hal::spi::SpiBus; + use esp_hal::{ + dma::{DmaRxBuf, DmaTxBuf}, + spi::master::dma::SpiDmaBus, + }; use super::*; @@ -59,24 +64,21 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - // DMA buffer require a static life-time - let mut send = tx_buffer; - let mut receive = rx_buffer; + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); - send.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]); + dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]); - let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(send, receive); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (_, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); + assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice()); } #[test] @@ -100,24 +102,21 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4, 2); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, 
tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - // DMA buffer require a static life-time - let mut send = tx_buffer; - let mut receive = rx_buffer; + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); - send.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]); + dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]); - let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(send[0..1], receive[0..1]); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (_, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); + assert_eq!(dma_tx_buf.as_slice()[0..1], dma_rx_buf.as_slice()[0..1]); } #[test] @@ -143,33 +142,29 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - // DMA buffer require a static life-time - let mut send = tx_buffer; - let mut receive = rx_buffer; - - send.copy_from_slice(&[0x55u8; 4096]); - for byte in 0..send.len() { - send[byte] = byte as u8; + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + for (i, d) in 
dma_tx_buf.as_mut_slice().iter_mut().enumerate() { + *d = i as _; } - let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(send, receive); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (_, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); + assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice()); } #[test] #[timeout(3)] - fn test_try_using_non_dma_memory_tx_buffer() { - const DMA_BUFFER_SIZE: usize = 4096; + fn test_dma_bus_symmetric_transfer() { + const DMA_BUFFER_SIZE: usize = 4; let peripherals = Peripherals::take(); let system = SystemControl::new(peripherals.SYSTEM); @@ -188,37 +183,27 @@ mod tests { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (_, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); - - let tx_buffer = { - // using `static`, not `static mut`, places the array in .rodata - static TX_BUFFER: [u8; DMA_BUFFER_SIZE] = [42u8; DMA_BUFFER_SIZE]; - unsafe { &mut *(core::ptr::addr_of!(TX_BUFFER) as *mut u8) } - }; + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut receive = rx_buffer; - - assert!(matches!( - spi.dma_transfer(&tx_buffer, &mut receive), - Err(esp_hal::spi::Error::DmaError( - esp_hal::dma::DmaError::UnsupportedMemoryRegion - )) - )); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + let mut spi_bus = SpiDmaBus::new(spi, dma_tx_buf, 
dma_rx_buf); + + let tx_buf = [0xde, 0xad, 0xbe, 0xef]; + let mut rx_buf = [0; 4]; + + spi_bus.transfer(&mut rx_buf, &tx_buf).unwrap(); + + assert_eq!(tx_buf, rx_buf); } #[test] #[timeout(3)] - fn test_try_using_non_dma_memory_rx_buffer() { - const DMA_BUFFER_SIZE: usize = 4096; - + fn test_dma_bus_asymmetric_transfer() { let peripherals = Peripherals::take(); let system = SystemControl::new(peripherals.SYSTEM); let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); @@ -236,34 +221,27 @@ mod tests { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (tx_buffer, tx_descriptors, _, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); - - let rx_buffer = { - // using `static`, not `static mut`, places the array in .rodata - static RX_BUFFER: [u8; DMA_BUFFER_SIZE] = [42u8; DMA_BUFFER_SIZE]; - unsafe { &mut *(core::ptr::addr_of!(RX_BUFFER) as *mut u8) } - }; + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut receive = rx_buffer; - assert!(matches!( - spi.dma_transfer(&tx_buffer, &mut receive), - Err(esp_hal::spi::Error::DmaError( - esp_hal::dma::DmaError::UnsupportedMemoryRegion - )) - )); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + let mut spi_bus = SpiDmaBus::new(spi, dma_tx_buf, dma_rx_buf); + + let tx_buf = [0xde, 0xad, 0xbe, 0xef]; + let mut rx_buf = [0; 4]; + + spi_bus.transfer(&mut rx_buf, &tx_buf).unwrap(); + + assert_eq!(&tx_buf[0..1], &rx_buf[0..1]); } #[test] #[timeout(3)] - fn 
test_symmetric_dma_transfer_owned() { + fn test_dma_bus_symmetric_transfer_huge_buffer() { const DMA_BUFFER_SIZE: usize = 4096; let peripherals = Peripherals::take(); @@ -283,27 +261,21 @@ mod tests { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(40); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - // DMA buffer require a static life-time - let send = tx_buffer; - let receive = rx_buffer; - - send.copy_from_slice(&[0x55u8; 4096]); - for byte in 0..send.len() { - send[byte] = byte as u8; - } + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + let mut spi_bus = SpiDmaBus::new(spi, dma_tx_buf, dma_rx_buf); + + let tx_buf = core::array::from_fn(|i| i as _); + let mut rx_buf = [0; DMA_BUFFER_SIZE]; + + spi_bus.transfer(&mut rx_buf, &tx_buf).unwrap(); - let transfer = spi.dma_transfer_owned(send, receive).unwrap(); - let (_, send, receive) = transfer.wait().unwrap(); - assert_eq!(send, receive); + assert_eq!(tx_buf, rx_buf); } } diff --git a/hil-test/tests/spi_half_duplex_read.rs b/hil-test/tests/spi_half_duplex_read.rs index a3c95dacd5d..3b79521fc62 100644 --- a/hil-test/tests/spi_half_duplex_read.rs +++ b/hil-test/tests/spi_half_duplex_read.rs @@ -21,7 +21,7 @@ use esp_backtrace as _; mod tests { use esp_hal::{ clock::ClockControl, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaRxBuf}, dma_buffers, gpio::{Io, Level, Output}, peripherals::Peripherals, @@ -59,19 +59,13 @@ mod tests { #[cfg(not(any(feature 
= "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (_, tx_descriptors, mut rx_buffer, rx_descriptors) = dma_buffers!(0, DMA_BUFFER_SIZE); + let (buffer, descriptors, _, _) = dma_buffers!(DMA_BUFFER_SIZE, 0); + let mut dma_rx_buf = DmaRxBuf::new(descriptors, buffer).unwrap(); let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_sck(sclk) .with_miso(miso) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - // Fill with neither 0x00 nor 0xFF. - rx_buffer.fill(5); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); // SPI should read '0's from the MISO pin miso_mirror.set_low(); @@ -82,12 +76,13 @@ mod tests { Command::None, Address::None, 0, - &mut rx_buffer, + dma_rx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_rx_buf) = transfer.wait(); - assert_eq!(rx_buffer, &[0x00; DMA_BUFFER_SIZE]); + assert_eq!(dma_rx_buf.as_slice(), &[0x00; DMA_BUFFER_SIZE]); // SPI should read '1's from the MISO pin miso_mirror.set_high(); @@ -98,11 +93,13 @@ mod tests { Command::None, Address::None, 0, - &mut rx_buffer, + dma_rx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); - assert_eq!(rx_buffer, &[0xFF; DMA_BUFFER_SIZE]); + (_, dma_rx_buf) = transfer.wait(); + + assert_eq!(dma_rx_buf.as_slice(), &[0xFF; DMA_BUFFER_SIZE]); } } diff --git a/hil-test/tests/spi_half_duplex_write.rs b/hil-test/tests/spi_half_duplex_write.rs index c5aed0ec133..02e990f7fee 100644 --- a/hil-test/tests/spi_half_duplex_write.rs +++ b/hil-test/tests/spi_half_duplex_write.rs @@ -21,7 +21,7 @@ use esp_backtrace as _; mod tests { use esp_hal::{ clock::ClockControl, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaTxBuf}, dma_buffers, gpio::{Io, Pull}, pcnt::{ @@ -63,16 +63,13 @@ mod tests { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (tx_buffer, tx_descriptors, _, 
rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE, 0); + let (buffer, descriptors, _, _) = dma_buffers!(DMA_BUFFER_SIZE, 0); + let mut dma_tx_buf = DmaTxBuf::new(descriptors, buffer).unwrap(); let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_sck(sclk) .with_mosi(mosi) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let unit = pcnt.unit0; unit.channel0.set_edge_signal(PcntSource::from_pin( @@ -83,7 +80,7 @@ mod tests { .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); // Fill the buffer where each byte 3 pos edges. - tx_buffer.fill(0b0110_1010); + dma_tx_buf.fill(&[0b0110_1010; DMA_BUFFER_SIZE]); let transfer = spi .write( @@ -91,10 +88,11 @@ mod tests { Command::None, Address::None, 0, - &tx_buffer, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); assert_eq!(unit.get_value(), (3 * DMA_BUFFER_SIZE) as _); @@ -104,10 +102,11 @@ mod tests { Command::None, Address::None, 0, - &tx_buffer, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + transfer.wait(); assert_eq!(unit.get_value(), (6 * DMA_BUFFER_SIZE) as _); }