diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md index 09abacb56c1..cc15592ae00 100644 --- a/esp-hal/CHANGELOG.md +++ b/esp-hal/CHANGELOG.md @@ -52,6 +52,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Allow handling interrupts while trying to lock critical section on multi-core chips. (#2197) - Removed the PS-RAM related features, replaced by `quad-psram`/`octal-psram`, `init_psram` takes a configuration parameter, it's now possible to auto-detect PS-RAM size (#2178) - `EspTwaiFrame` constructors now accept any type that converts into `esp_hal::twai::Id` (#2207) +- Change `DmaTxBuf` to support PSRAM on `esp32s3` (#2161) ### Fixed diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index f000c46f49f..7c57073a763 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -205,7 +205,6 @@ where bitfield::bitfield! { #[doc(hidden)] #[derive(Clone, Copy)] - #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct DmaDescriptorFlags(u32); u16; @@ -226,6 +225,20 @@ impl Debug for DmaDescriptorFlags { } } +#[cfg(feature = "defmt")] +impl defmt::Format for DmaDescriptorFlags { + fn format(&self, fmt: defmt::Formatter<'_>) { + defmt::write!( + fmt, + "DmaDescriptorFlags {{ size: {}, length: {}, suc_eof: {}, owner: {} }}", + self.size(), + self.length(), + self.suc_eof(), + if self.owner() { "DMA" } else { "CPU" } + ); + } +} + /// A DMA transfer descriptor. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] @@ -286,6 +299,8 @@ use enumset::{EnumSet, EnumSetType}; pub use self::gdma::*; #[cfg(pdma)] pub use self::pdma::*; +#[cfg(esp32s3)] +use crate::soc::is_slice_in_psram; use crate::{interrupt::InterruptHandler, soc::is_slice_in_dram, Mode}; #[cfg(gdma)] @@ -558,7 +573,7 @@ macro_rules! dma_circular_buffers_chunk_size { macro_rules! 
dma_descriptors_chunk_size { ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{ // these will check for size at compile time - const _: () = ::core::assert!($chunk_size <= 4092, "chunk size must be <= 4092"); + const _: () = ::core::assert!($chunk_size <= 4095, "chunk size must be <= 4095"); const _: () = ::core::assert!($chunk_size > 0, "chunk size must be > 0"); static mut RX_DESCRIPTORS: [$crate::dma::DmaDescriptor; @@ -593,7 +608,7 @@ macro_rules! dma_descriptors_chunk_size { macro_rules! dma_circular_descriptors_chunk_size { ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{ // these will check for size at compile time - const _: () = ::core::assert!($chunk_size <= 4092, "chunk size must be <= 4092"); + const _: () = ::core::assert!($chunk_size <= 4095, "chunk size must be <= 4095"); const _: () = ::core::assert!($chunk_size > 0, "chunk size must be > 0"); const rx_descriptor_len: usize = if $rx_size > $chunk_size * 2 { @@ -620,6 +635,33 @@ macro_rules! dma_circular_descriptors_chunk_size { }; } +/// Convenience macro to create a DmaTxBuf from buffer size. The buffer and +/// descriptors are statically allocated and used to create the `DmaTxBuf`. +/// +/// ## Usage +/// ```rust,no_run +#[doc = crate::before_snippet!()] +/// use esp_hal::dma_tx_buffer; +/// use esp_hal::dma::DmaBufBlkSize; +/// +/// let tx_buf = +/// dma_tx_buffer!(32000); +/// # } +/// ``` +#[macro_export] +macro_rules! 
dma_tx_buffer { + ($tx_size:expr) => {{ + const TX_DESCRIPTOR_LEN: usize = + $crate::dma::DmaTxBuf::compute_descriptor_count($tx_size, None); + $crate::declare_aligned_dma_buffer!(TX_BUFFER, $tx_size); + static mut TX_DESCRIPTORS: [$crate::dma::DmaDescriptor; TX_DESCRIPTOR_LEN] = + [$crate::dma::DmaDescriptor::EMPTY; TX_DESCRIPTOR_LEN]; + let tx_buffer = $crate::as_mut_byte_array!(TX_BUFFER, $tx_size); + let tx_descriptors = unsafe { &mut TX_DESCRIPTORS }; + $crate::dma::DmaTxBuf::new(tx_descriptors, tx_buffer) + }}; +} + /// DMA Errors #[derive(Debug, Clone, Copy, PartialEq)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] @@ -1001,6 +1043,16 @@ pub enum DmaExtMemBKSize { Size64 = 2, } +impl From<DmaBufBlkSize> for DmaExtMemBKSize { + fn from(size: DmaBufBlkSize) -> Self { + match size { + DmaBufBlkSize::Size16 => DmaExtMemBKSize::Size16, + DmaBufBlkSize::Size32 => DmaExtMemBKSize::Size32, + DmaBufBlkSize::Size64 => DmaExtMemBKSize::Size64, + } + } +} + pub(crate) struct TxCircularState { write_offset: usize, write_descr_ptr: *mut DmaDescriptor, @@ -1414,7 +1466,6 @@ where if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { return Err(DmaError::InvalidAlignment); } - // TODO: make this optional? crate::soc::cache_invalidate_addr(des.buffer as u32, des.size() as u32); } } @@ -1632,6 +1683,7 @@ where peri: DmaPeripheral, chain: &DescriptorChain, ) -> Result<(), DmaError> { + // TODO: based on the ESP32-S3 TRM the alignment check is not needed for TX!
// for esp32s3 we check each descriptor buffer that points to psram for // alignment and writeback the cache for that buffer #[cfg(esp32s3)] @@ -1657,7 +1709,19 @@ where buffer: &mut BUF, ) -> Result<(), DmaError> { let preparation = buffer.prepare(); - + cfg_if::cfg_if!( + if #[cfg(esp32s3)] { + if let Some(block_size) = preparation.block_size { + self.set_ext_mem_block_size(block_size.into()); + } + } else { + // we ensure that block_size is some only for PSRAM addresses + if preparation.block_size.is_some() { + return Err(DmaError::UnsupportedMemoryRegion); + } + } + ); + // TODO: Get burst mode from DmaBuf. self.tx_impl .prepare_transfer_without_start(preparation.start, peri) } @@ -1819,6 +1883,10 @@ where /// Holds all the information needed to configure a DMA channel for a transfer. pub struct Preparation { start: *mut DmaDescriptor, + /// block size for PSRAM transfers (TODO: enable burst mode for non external + /// memory?) + #[cfg_attr(not(esp32s3), allow(dead_code))] + block_size: Option<DmaBufBlkSize>, // burst_mode, alignment, check_owner, etc. } @@ -1861,22 +1929,41 @@ pub trait DmaRxBuffer { /// Error returned from Dma[Rx|Tx|RxTx]Buf operations. #[derive(Debug)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum DmaBufError { /// More descriptors are needed for the buffer size InsufficientDescriptors, /// Descriptors or buffers are not located in a supported memory region UnsupportedMemoryRegion, + /// Buffer is not aligned to the required size + InvalidAlignment, + /// Invalid chunk size: must be > 0 and <= 4095 + InvalidChunkSize, +} + +/// DMA buffer alignments +#[derive(Debug, Clone, Copy, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum DmaBufBlkSize { + /// 16 bytes + Size16 = 16, + /// 32 bytes + Size32 = 32, + /// 64 bytes + Size64 = 64, } /// DMA transmit buffer /// /// This is a contiguous buffer linked together by DMA descriptors of length -/// 4092.
It can only be used for transmitting data to a peripheral's FIFO. -/// See [DmaRxBuf] for receiving data. +/// 4095 at most. It can only be used for transmitting data to a peripheral's +/// FIFO. See [DmaRxBuf] for receiving data. #[derive(Debug)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct DmaTxBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], + block_size: Option<DmaBufBlkSize>, } impl DmaTxBuf { @@ -1886,23 +1973,87 @@ impl DmaTxBuf { /// Each descriptor can handle 4092 bytes worth of buffer. /// /// Both the descriptors and buffer must be in DMA-capable memory. - /// Only DRAM is supported. + /// Only DRAM is supported for descriptors. pub fn new( descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result<Self, DmaBufError> { - let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + Self::new_with_block_size(descriptors, buffer, None) + } + + /// Compute max chunk size based on block size + pub const fn compute_chunk_size(block_size: Option<DmaBufBlkSize>) -> usize { + match block_size { + Some(size) => 4096 - size as usize, + #[cfg(esp32)] + None => 4092, // esp32 requires 4 byte alignment + #[cfg(not(esp32))] + None => 4095, + } + } + + /// Compute the number of descriptors required for a given block size and + /// buffer size + pub const fn compute_descriptor_count( + buffer_size: usize, + block_size: Option<DmaBufBlkSize>, + ) -> usize { + buffer_size.div_ceil(Self::compute_chunk_size(block_size)) + } + + /// Creates a new [DmaTxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle at most 4095 bytes worth of buffer. + /// Optionally, a block size can be provided for PSRAM & Burst transfers. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported for descriptors.
+ pub fn new_with_block_size( + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + block_size: Option<DmaBufBlkSize>, + ) -> Result<Self, DmaBufError> { + let chunk_size = Self::compute_chunk_size(block_size); + let min_descriptors = Self::compute_descriptor_count(buffer.len(), block_size); if descriptors.len() < min_descriptors { return Err(DmaBufError::InsufficientDescriptors); } - if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) { + // descriptors are required to be in DRAM + if !is_slice_in_dram(descriptors) { return Err(DmaBufError::UnsupportedMemoryRegion); } + cfg_if::cfg_if! { + if #[cfg(esp32s3)] { + // buffer can be either DRAM or PSRAM (if supported) + if !is_slice_in_dram(buffer) && !is_slice_in_psram(buffer) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + // if it's PSRAM, the block_size/alignment must be specified + if is_slice_in_psram(buffer) && block_size.is_none() { + return Err(DmaBufError::InvalidAlignment); + } + } else { + #[cfg(any(esp32,esp32s2))] + if buffer.len() % 4 != 0 && buffer.as_ptr() as usize % 4 != 0 { + // ESP32 requires word alignment for DMA buffers. + // ESP32-S2 technically supports byte-aligned DMA buffers, but the + // transfer ends up writing out of bounds if the buffer's length + // is 2 or 3 (mod 4).
+ return Err(DmaBufError::InvalidAlignment); + } + // buffer can only be DRAM + if !is_slice_in_dram(buffer) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + } + } + // Setup size and buffer pointer as these will not change for the remainder of // this object's lifetime - let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE)); + let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(chunk_size)); for (desc, chunk) in chunk_iter { desc.set_size(chunk.len()); desc.buffer = chunk.as_mut_ptr(); @@ -1911,9 +2062,13 @@ impl DmaTxBuf { let mut buf = Self { descriptors, buffer, + block_size, }; buf.set_length(buf.capacity()); - + // no need for block size if the buffer is in DRAM + if is_slice_in_dram(buf.buffer) { + buf.block_size = None; + } Ok(buf) } @@ -1949,7 +2104,7 @@ impl DmaTxBuf { assert!(len <= self.buffer.len()); // Get the minimum number of descriptors needed for this length of data. - let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1); + let descriptor_count = len.div_ceil(self.descriptors[0].size()).max(1); let required_descriptors = &mut self.descriptors[0..descriptor_count]; // Link up the relevant descriptors. @@ -2010,8 +2165,19 @@ impl DmaTxBuffer for DmaTxBuf { } } + #[cfg(esp32s3)] + if crate::soc::is_valid_psram_address(self.buffer.as_ptr() as u32) { + unsafe { + crate::soc::cache_writeback_addr( + self.buffer.as_ptr() as u32, + self.buffer.len() as u32, + ) + }; + } + Preparation { start: self.descriptors.as_mut_ptr(), + block_size: self.block_size, } } @@ -2248,6 +2414,7 @@ impl DmaRxBuffer for DmaRxBuf { Preparation { start: self.descriptors.as_mut_ptr(), + block_size: None, } } @@ -2440,6 +2607,7 @@ impl DmaTxBuffer for DmaRxTxBuf { Preparation { start: self.tx_descriptors.as_mut_ptr(), + block_size: None, // TODO: support block size! } } @@ -2469,6 +2637,7 @@ impl DmaRxBuffer for DmaRxTxBuf { Preparation { start: self.rx_descriptors.as_mut_ptr(), + block_size: None, // TODO: support block size! 
} } diff --git a/esp-hal/src/soc/mod.rs b/esp-hal/src/soc/mod.rs index 2338237f515..a24f409bed2 100644 --- a/esp-hal/src/soc/mod.rs +++ b/esp-hal/src/soc/mod.rs @@ -105,6 +105,13 @@ pub(crate) fn is_valid_psram_address(address: u32) -> bool { false } +#[allow(unused)] +pub(crate) fn is_slice_in_psram(slice: &[T]) -> bool { + let start = slice.as_ptr() as u32; + let end = start + slice.len() as u32; + is_valid_psram_address(start) && is_valid_psram_address(end) +} + #[allow(unused)] pub(crate) fn is_valid_memory_address(address: u32) -> bool { is_valid_ram_address(address) || is_valid_psram_address(address) diff --git a/examples/src/bin/spi_loopback_dma_psram.rs b/examples/src/bin/spi_loopback_dma_psram.rs new file mode 100644 index 00000000000..250123752d5 --- /dev/null +++ b/examples/src/bin/spi_loopback_dma_psram.rs @@ -0,0 +1,132 @@ +//! SPI loopback test using DMA - send from PSRAM receive to internal RAM +//! +//! The following wiring is assumed: +//! - SCLK => GPIO42 +//! - MISO => (loopback to MOSI via peripheral_input()) +//! - MOSI => GPIO48 +//! - CS => GPIO38 +//! +//! Depending on your target and the board you are using you have to change the +//! pins. +//! +//! This example transfers data via SPI. +//! Connect MISO and MOSI pins to see the outgoing data is read as incoming +//! data. +//! +//! If your module is quad PSRAM then you need to change the `psram` feature in the +//! in the features line below to `quad-psram`. + +//% FEATURES: esp-hal/log esp-hal/octal-psram +//% CHIPS: esp32s3 + +#![no_std] +#![no_main] + +use esp_backtrace as _; +use esp_hal::{ + delay::Delay, + dma::{Dma, DmaBufBlkSize, DmaPriority, DmaRxBuf, DmaTxBuf}, + gpio::Io, + prelude::*, + spi::{master::Spi, SpiMode}, +}; +extern crate alloc; +use log::*; + +macro_rules! 
dma_alloc_buffer { + ($size:expr, $align:expr) => {{ + let layout = core::alloc::Layout::from_size_align($size, $align).unwrap(); + unsafe { + let ptr = alloc::alloc::alloc(layout); + if ptr.is_null() { + error!("dma_alloc_buffer: alloc failed"); + alloc::alloc::handle_alloc_error(layout); + } + core::slice::from_raw_parts_mut(ptr, $size) + } + }}; +} + +const DMA_BUFFER_SIZE: usize = 8192; +const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size64; +const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; + +#[entry] +fn main() -> ! { + esp_println::logger::init_logger(log::LevelFilter::Info); + info!("Starting SPI loopback test"); + let peripherals = esp_hal::init(esp_hal::Config::default()); + esp_alloc::psram_allocator!(peripherals.PSRAM, esp_hal::psram); + let delay = Delay::new(); + + let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); + let sclk = io.pins.gpio42; + let mosi = io.pins.gpio48; + let miso = mosi.peripheral_input(); + let cs = io.pins.gpio38; + + let dma = Dma::new(peripherals.DMA); + let dma_channel = dma.channel0; + + let (_, tx_descriptors) = + esp_hal::dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); + let tx_buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); + info!( + "TX: {:p} len {} ({} descripters)", + tx_buffer.as_ptr(), + tx_buffer.len(), + tx_descriptors.len() + ); + let mut dma_tx_buf = + DmaTxBuf::new_with_block_size(tx_descriptors, tx_buffer, Some(DMA_ALIGNMENT)).unwrap(); + let (rx_buffer, rx_descriptors, _, _) = esp_hal::dma_buffers!(DMA_BUFFER_SIZE, 0); + info!( + "RX: {:p} len {} ({} descripters)", + rx_buffer.as_ptr(), + rx_buffer.len(), + rx_descriptors.len() + ); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); + let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0) + .with_pins(sclk, mosi, miso, cs) + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + delay.delay_millis(100); // delay to let the above messages display + + 
for (i, v) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() { + *v = (i % 256) as u8; + } + + let mut i = 0; + + loop { + dma_tx_buf.as_mut_slice()[0] = i; + *dma_tx_buf.as_mut_slice().last_mut().unwrap() = i; + i = i.wrapping_add(1); + + let transfer = spi + .dma_transfer(dma_rx_buf, dma_tx_buf) + .map_err(|e| e.0) + .unwrap(); + + (spi, (dma_rx_buf, dma_tx_buf)) = transfer.wait(); + for (i, v) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() { + if dma_rx_buf.as_slice()[i] != *v { + error!( + "Mismatch at index {}: expected {}, got {}", + i, + *v, + dma_rx_buf.as_slice()[i] + ); + break; + } + } + info!( + "{:0x?} .. {:0x?}", + &dma_rx_buf.as_slice()[..10], + &dma_rx_buf.as_slice().last_chunk::<10>().unwrap() + ); + dma_tx_buf.as_mut_slice().reverse(); + delay.delay_millis(1000); + } +} diff --git a/hil-test/Cargo.toml b/hil-test/Cargo.toml index f7f693d2294..44291fef392 100644 --- a/hil-test/Cargo.toml +++ b/hil-test/Cargo.toml @@ -87,6 +87,10 @@ harness = false name = "spi_half_duplex_write" harness = false +[[test]] +name = "spi_half_duplex_write_psram" +harness = false + [[test]] name = "systimer" harness = false @@ -171,6 +175,7 @@ embedded-hal = "1.0.0" embedded-hal-02 = { version = "0.2.7", package = "embedded-hal", features = ["unproven"] } embedded-hal-async = "1.0.0" embedded-hal-nb = { version = "1.0.0", optional = true } +esp-alloc = { path = "../esp-alloc", optional = true } esp-backtrace = { path = "../esp-backtrace", default-features = false, features = ["exception-handler", "panic-handler", "defmt", "semihosting"] } esp-hal = { path = "../esp-hal", features = ["defmt", "digest"], optional = true } esp-hal-embassy = { path = "../esp-hal-embassy", optional = true } @@ -199,7 +204,7 @@ esp-metadata = { version = "0.3.0", path = "../esp-metadata" } [features] default = ["embassy"] -defmt = ["dep:defmt-rtt"] +defmt = ["dep:defmt-rtt", "embedded-test/defmt"] # Device support (required!): esp32 = [ @@ -236,6 +241,7 @@ generic-queue = [ 
integrated-timers = [ "esp-hal-embassy/integrated-timers", ] +octal-psram = ["esp-hal/octal-psram", "dep:esp-alloc"] # https://doc.rust-lang.org/cargo/reference/profiles.html#test # Test and bench profiles inherit from dev and release respectively. diff --git a/hil-test/tests/dma_macros.rs b/hil-test/tests/dma_macros.rs index e3c9502ef7d..bc3a7bbf414 100644 --- a/hil-test/tests/dma_macros.rs +++ b/hil-test/tests/dma_macros.rs @@ -201,4 +201,25 @@ mod tests { compute_circular_size(TX_SIZE, CHUNK_SIZE) ); } + + #[test] + fn test_dma_tx_buffer() { + use esp_hal::dma::{DmaBufError, DmaTxBuf}; + const TX_SIZE: usize = DATA_SIZE; + + fn check(result: Result<DmaTxBuf, DmaBufError>, size: usize) { + match result { + Ok(tx_buf) => { + assert_eq!(tx_buf.len(), size); + } + Err(_) => { + panic!("Failed to create DmaTxBuf"); + } + } + } + check(esp_hal::dma_tx_buffer!(TX_SIZE), TX_SIZE); + check(esp_hal::dma_tx_buffer!(TX_SIZE + 1), TX_SIZE + 1); + check(esp_hal::dma_tx_buffer!(TX_SIZE + 2), TX_SIZE + 2); + check(esp_hal::dma_tx_buffer!(TX_SIZE + 3), TX_SIZE + 3); + } } diff --git a/hil-test/tests/spi_half_duplex_write_psram.rs b/hil-test/tests/spi_half_duplex_write_psram.rs new file mode 100644 index 00000000000..a63d8b335e5 --- /dev/null +++ b/hil-test/tests/spi_half_duplex_write_psram.rs @@ -0,0 +1,192 @@ +//! SPI Half Duplex Write Test +//% FEATURES: octal-psram +//% CHIPS: esp32s3 + +#![no_std] +#![no_main] +use esp_alloc as _; +use esp_hal::{ + dma::{Dma, DmaBufBlkSize, DmaPriority, DmaRxBuf, DmaTxBuf}, + dma_buffers, + dma_descriptors_chunk_size, + gpio::{interconnect::InputSignal, Io}, + pcnt::{channel::EdgeMode, unit::Unit, Pcnt}, + peripherals::SPI2, + prelude::*, + spi::{ + master::{Address, Command, HalfDuplexReadWrite, Spi, SpiDma}, + HalfDuplexMode, + SpiDataMode, + SpiMode, + }, + Blocking, }; +use hil_test as _; +extern crate alloc; + +cfg_if::cfg_if!
{ + if #[cfg(any( + feature = "esp32", + feature = "esp32s2", + ))] { + use esp_hal::dma::Spi2DmaChannel as DmaChannel0; + } else { + use esp_hal::dma::DmaChannel0; + } +} + +macro_rules! dma_alloc_buffer { + ($size:expr, $align:expr) => {{ + let layout = core::alloc::Layout::from_size_align($size, $align).unwrap(); + unsafe { + let ptr = alloc::alloc::alloc(layout); + if ptr.is_null() { + error!("dma_alloc_buffer: alloc failed"); + alloc::alloc::handle_alloc_error(layout); + } + core::slice::from_raw_parts_mut(ptr, $size) + } + }}; +} + +struct Context { + spi: SpiDma<'static, SPI2, DmaChannel0, HalfDuplexMode, Blocking>, + pcnt_unit: Unit<'static, 0>, + pcnt_source: InputSignal, +} + +#[cfg(test)] +#[embedded_test::tests] +mod tests { + // defmt::* is load-bearing, it ensures that the assert in dma_buffers! is not + // using defmt's non-const assert. Doing so would result in a compile error. + #[allow(unused_imports)] + use defmt::{assert_eq, *}; + + use super::*; + + #[init] + fn init() -> Context { + let peripherals = esp_hal::init(esp_hal::Config::default()); + esp_alloc::psram_allocator!(peripherals.PSRAM, esp_hal::psram); + + let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); + let sclk = io.pins.gpio0; + let (mosi, _) = hil_test::common_test_pins!(io); + + let pcnt = Pcnt::new(peripherals.PCNT); + let dma = Dma::new(peripherals.DMA); + + let dma_channel = dma.channel0; + + let mosi_loopback = mosi.peripheral_input(); + + let spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0) + .with_sck(sclk) + .with_mosi(mosi) + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + Context { + spi, + pcnt_unit: pcnt.unit0, + pcnt_source: mosi_loopback, + } + } + + #[test] + #[timeout(3)] + fn test_spi_writes_are_correctly_by_pcnt(ctx: Context) { + const DMA_BUFFER_SIZE: usize = 4; + const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size32; + const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; + + let (_, descriptors) = 
dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); + let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); + let mut dma_tx_buf = + DmaTxBuf::new_with_block_size(descriptors, buffer, Some(DMA_ALIGNMENT)).unwrap(); + + let unit = ctx.pcnt_unit; + let mut spi = ctx.spi; + + unit.channel0.set_edge_signal(ctx.pcnt_source); + unit.channel0 + .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); + + // Fill the buffer where each byte has 3 pos edges. + dma_tx_buf.fill(&[0b0110_1010; DMA_BUFFER_SIZE]); + let transfer = spi + .write( + SpiDataMode::Single, + Command::None, + Address::None, + 0, + dma_tx_buf, + ) + .map_err(|e| e.0) + .unwrap(); + (spi, dma_tx_buf) = transfer.wait(); + + assert_eq!(unit.get_value(), (3 * DMA_BUFFER_SIZE) as _); + + let transfer = spi + .write( + SpiDataMode::Single, + Command::None, + Address::None, + 0, + dma_tx_buf, + ) + .map_err(|e| e.0) + .unwrap(); + transfer.wait(); + + assert_eq!(unit.get_value(), (6 * DMA_BUFFER_SIZE) as _); + } + + #[test] + #[timeout(3)] + fn test_spidmabus_writes_are_correctly_by_pcnt(ctx: Context) { + const DMA_BUFFER_SIZE: usize = 4; + const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size32; // matches dcache line size + const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; // 64 byte aligned + + let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); + let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); + let dma_tx_buf = + DmaTxBuf::new_with_block_size(descriptors, buffer, Some(DMA_ALIGNMENT)).unwrap(); + + let (rx, rxd, _, _) = dma_buffers!(1, 0); + let dma_rx_buf = DmaRxBuf::new(rxd, rx).unwrap(); + + let unit = ctx.pcnt_unit; + let mut spi = ctx.spi.with_buffers(dma_rx_buf, dma_tx_buf); + + unit.channel0.set_edge_signal(ctx.pcnt_source); + unit.channel0 + .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); + + let buffer = [0b0110_1010; DMA_BUFFER_SIZE]; + // Write the buffer where each byte has 3 pos edges. 
+ spi.write( + SpiDataMode::Single, + Command::None, + Address::None, + 0, + &buffer, + ) + .unwrap(); + + assert_eq!(unit.get_value(), (3 * DMA_BUFFER_SIZE) as _); + + spi.write( + SpiDataMode::Single, + Command::None, + Address::None, + 0, + &buffer, + ) + .unwrap(); + + assert_eq!(unit.get_value(), (6 * DMA_BUFFER_SIZE) as _); + } +}