//! # Direct Memory Access
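//!
//! This module exposes each DMA channel as an owned singleton, plus the
//! [`Transfer`] and [`CircBuffer`] abstractions built on top of them. A
//! minimal sketch of claiming the channels (assuming `dp: pac::Peripherals`
//! and an already-constrained `rcc`, as in this crate's other peripheral
//! examples):
//!
//! ```ignore
//! // `DmaExt` and `Event` come from this module.
//! let channels = dp.DMA1.split(&mut rcc.ahb);
//! let mut c1 = channels.1; // first channel singleton, `dma1::C1`
//! c1.listen(Event::TransferComplete);
//! ```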
#![allow(dead_code)]

use core::{
    marker::PhantomData,
    sync::atomic::{compiler_fence, Ordering},
};

use embedded_dma::{StaticReadBuffer, StaticWriteBuffer};

use crate::rcc::AHB;
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
    Overrun,
}

pub enum Event {
    HalfTransfer,
    TransferComplete,
}

#[derive(Clone, Copy, PartialEq)]
pub enum Half {
    First,
    Second,
}
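
/// Double-buffered ("circular") DMA buffer.
///
/// The hardware runs in circular mode and fills the two halves of `buffer` in
/// turn; `peek` and `readable_half` (implemented per channel below) give
/// access to the half that is not currently being written.
///
/// A minimal usage sketch, assuming a serial RX driver that implements
/// [`CircReadDma`] (the `rx` binding is illustrative):
///
/// ```ignore
/// // `singleton!` comes from the `cortex-m` crate.
/// let buf = cortex_m::singleton!(: [[u8; 8]; 2] = [[0; 8]; 2]).unwrap();
/// let mut circ = rx.circ_read(buf);
/// // Run a closure over whichever half is currently safe to read.
/// let byte = circ.peek(|half, _| half[0]).unwrap();
/// ```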
pub struct CircBuffer<BUFFER, PAYLOAD>
where
    BUFFER: 'static,
{
    buffer: &'static mut [BUFFER; 2],
    payload: PAYLOAD,
    readable_half: Half,
}

impl<BUFFER, PAYLOAD> CircBuffer<BUFFER, PAYLOAD>
where
    &'static mut [BUFFER; 2]: StaticWriteBuffer,
    BUFFER: 'static,
{
    pub(crate) fn new(buf: &'static mut [BUFFER; 2], payload: PAYLOAD) -> Self {
        CircBuffer {
            buffer: buf,
            payload,
            readable_half: Half::Second,
        }
    }
}
/// Extension trait that splits a DMA peripheral into its independent channels
pub trait DmaExt {
    type Channels;

    fn split(self, ahb: &mut AHB) -> Self::Channels;
}

/// A payload that knows how to start and stop its own DMA transfer
pub trait TransferPayload {
    fn start(&mut self);
    fn stop(&mut self);
}
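
/// An on-going DMA transfer that owns its buffer and payload.
///
/// Dropping a `Transfer` stops the channel; `wait` (implemented per channel
/// below) blocks until completion and hands back the buffer and payload.
///
/// A minimal sketch, assuming a serial TX driver that implements
/// [`WriteDma`] (the `tx` binding is illustrative):
///
/// ```ignore
/// let transfer = tx.write(b"hello via dma");
/// // ... do other work while the DMA engine drains the buffer ...
/// let (buffer, tx) = transfer.wait();
/// ```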
pub struct Transfer<MODE, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    _mode: PhantomData<MODE>,
    buffer: BUFFER,
    payload: PAYLOAD,
}

impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    pub(crate) fn r(buffer: BUFFER, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            payload,
        }
    }
}

impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    pub(crate) fn w(buffer: BUFFER, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            payload,
        }
    }
}

impl<MODE, BUFFER, PAYLOAD> Drop for Transfer<MODE, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    fn drop(&mut self) {
        self.payload.stop();
        compiler_fence(Ordering::SeqCst);
    }
}

/// Read transfer
pub struct R;

/// Write transfer
pub struct W;
macro_rules! dma {
    ($($DMAX:ident: ($dmaX:ident, {
        $($CX:ident: (
            $chX:ident,
            $htifX:ident,
            $tcifX:ident,
            $chtifX:ident,
            $ctcifX:ident,
            $cgifX:ident
        ),)+
    }),)+) => {
        $(
            pub mod $dmaX {
                use core::{sync::atomic::{self, Ordering}, ptr, mem};

                use crate::pac::{$DMAX, dma1};

                use crate::dma::{CircBuffer, DmaExt, Error, Event, Half, Transfer, W, RxDma, TxDma, TransferPayload};
                use crate::rcc::{AHB, Enable};

                #[allow(clippy::manual_non_exhaustive)]
                pub struct Channels((), $(pub $CX),+);
                $(
                    /// A singleton that represents a single DMAx channel (channel X in this case)
                    ///
                    /// This singleton has exclusive access to the registers of the DMAx channel X
                    pub struct $CX { _0: () }

                    impl $CX {
                        /// Set the peripheral `address` the channel reads from or writes to
                        ///
                        /// `inc` indicates whether the address is incremented after every transfer
                        pub fn set_peripheral_address(&mut self, address: u32, inc: bool) {
                            self.ch().par.write(|w| w.pa().bits(address));
                            self.ch().cr.modify(|_, w| w.pinc().bit(inc));
                        }

                        /// Set the memory `address` data is read from or written to
                        ///
                        /// `inc` indicates whether the address is incremented after every transfer
                        pub fn set_memory_address(&mut self, address: u32, inc: bool) {
                            self.ch().mar.write(|w| w.ma().bits(address));
                            self.ch().cr.modify(|_, w| w.minc().bit(inc));
                        }

                        /// Number of data items to transfer (in units of the configured transfer size)
                        pub fn set_transfer_length(&mut self, len: usize) {
                            self.ch().ndtr.write(|w| w.ndt().bits(cast::u16(len).unwrap()));
                        }

                        /// Starts the DMA transfer
                        pub fn start(&mut self) {
                            self.ch().cr.modify(|_, w| w.en().set_bit());
                        }

                        /// Stops the DMA transfer
                        pub fn stop(&mut self) {
                            self.ifcr().write(|w| w.$cgifX().set_bit());
                            self.ch().cr.modify(|_, w| w.en().clear_bit());
                        }

                        /// Returns `true` if there's a transfer in progress
                        pub fn in_progress(&self) -> bool {
                            self.isr().$tcifX().bit_is_clear()
                        }
                    }
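
                    // A typical manual setup sequence for a peripheral-to-memory
                    // transfer on this channel, as a sketch (`chan`, `periph_dr_addr`
                    // and `buf` are illustrative, not defined in this module):
                    //
                    //     chan.set_peripheral_address(periph_dr_addr, false);
                    //     chan.set_memory_address(buf.as_ptr() as u32, true);
                    //     chan.set_transfer_length(buf.len());
                    //     chan.start();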
                    impl $CX {
                        /// Enables the interrupt for the given `event`
                        pub fn listen(&mut self, event: Event) {
                            match event {
                                Event::HalfTransfer => self.ch().cr.modify(|_, w| w.htie().set_bit()),
                                Event::TransferComplete => {
                                    self.ch().cr.modify(|_, w| w.tcie().set_bit())
                                }
                            }
                        }

                        /// Disables the interrupt for the given `event`
                        pub fn unlisten(&mut self, event: Event) {
                            match event {
                                Event::HalfTransfer => {
                                    self.ch().cr.modify(|_, w| w.htie().clear_bit())
                                },
                                Event::TransferComplete => {
                                    self.ch().cr.modify(|_, w| w.tcie().clear_bit())
                                }
                            }
                        }

                        /// Returns the channel's register block
                        pub fn ch(&mut self) -> &dma1::CH {
                            unsafe { &(*$DMAX::ptr()).$chX }
                        }

                        /// Reads the DMA interrupt status register
                        pub fn isr(&self) -> dma1::isr::R {
                            // NOTE(unsafe) atomic read with no side effects
                            unsafe { (*$DMAX::ptr()).isr.read() }
                        }

                        /// Returns the DMA interrupt flag clear register
                        pub fn ifcr(&self) -> &dma1::IFCR {
                            unsafe { &(*$DMAX::ptr()).ifcr }
                        }

                        /// Returns the number of data items left to transfer
                        pub fn get_ndtr(&self) -> u32 {
                            // NOTE(unsafe) atomic read with no side effects
                            unsafe { (*$DMAX::ptr()).$chX.ndtr.read().bits() }
                        }
                    }
                    impl<B, PAYLOAD> CircBuffer<B, RxDma<PAYLOAD, $CX>>
                    where
                        RxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        /// Peeks into the readable half of the buffer
                        pub fn peek<R, F>(&mut self, f: F) -> Result<R, Error>
                        where
                            F: FnOnce(&B, Half) -> R,
                        {
                            let half_being_read = self.readable_half()?;

                            let buf = match half_being_read {
                                Half::First => &self.buffer[0],
                                Half::Second => &self.buffer[1],
                            };

                            // XXX does this need a compiler barrier?
                            let ret = f(buf, half_being_read);

                            let isr = self.payload.channel.isr();
                            let first_half_is_done = isr.$htifX().bit_is_set();
                            let second_half_is_done = isr.$tcifX().bit_is_set();

                            if (half_being_read == Half::First && second_half_is_done)
                                || (half_being_read == Half::Second && first_half_is_done)
                            {
                                Err(Error::Overrun)
                            } else {
                                Ok(ret)
                            }
                        }

                        /// Returns the `Half` of the buffer that can be read
                        pub fn readable_half(&mut self) -> Result<Half, Error> {
                            let isr = self.payload.channel.isr();
                            let first_half_is_done = isr.$htifX().bit_is_set();
                            let second_half_is_done = isr.$tcifX().bit_is_set();

                            if first_half_is_done && second_half_is_done {
                                return Err(Error::Overrun);
                            }

                            let last_read_half = self.readable_half;

                            Ok(match last_read_half {
                                Half::First => {
                                    if second_half_is_done {
                                        self.payload.channel.ifcr().write(|w| w.$ctcifX().set_bit());

                                        self.readable_half = Half::Second;
                                        Half::Second
                                    } else {
                                        last_read_half
                                    }
                                }
                                Half::Second => {
                                    if first_half_is_done {
                                        self.payload.channel.ifcr().write(|w| w.$chtifX().set_bit());

                                        self.readable_half = Half::First;
                                        Half::First
                                    } else {
                                        last_read_half
                                    }
                                }
                            })
                        }

                        /// Stops the transfer and returns the underlying buffer and RxDma
                        pub fn stop(mut self) -> (&'static mut [B; 2], RxDma<PAYLOAD, $CX>) {
                            self.payload.stop();

                            (self.buffer, self.payload)
                        }
                    }
                    impl<BUFFER, PAYLOAD, MODE> Transfer<MODE, BUFFER, RxDma<PAYLOAD, $CX>>
                    where
                        RxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        /// Returns `true` once the transfer has completed
                        pub fn is_done(&self) -> bool {
                            !self.payload.channel.in_progress()
                        }

                        /// Blocks until the transfer is complete, then stops the channel
                        /// and returns the buffer and payload
                        pub fn wait(mut self) -> (BUFFER, RxDma<PAYLOAD, $CX>) {
                            while !self.is_done() {}

                            atomic::compiler_fence(Ordering::Acquire);

                            self.payload.stop();

                            // we need a read here to make the Acquire fence effective
                            // we do *not* need this if `dma.stop` does a RMW operation
                            unsafe {
                                ptr::read_volatile(&0);
                            }

                            // we need a fence here for the same reason we need one in `Transfer.wait`
                            atomic::compiler_fence(Ordering::Acquire);

                            // `Transfer` needs to have a `Drop` implementation, because we accept
                            // managed buffers that can free their memory on drop. Because of that
                            // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
                            // and `mem::forget`.
                            //
                            // NOTE(unsafe) There is no panic branch between getting the resources
                            // and forgetting `self`.
                            unsafe {
                                let buffer = ptr::read(&self.buffer);
                                let payload = ptr::read(&self.payload);
                                mem::forget(self);
                                (buffer, payload)
                            }
                        }
                    }

                    impl<BUFFER, PAYLOAD, MODE> Transfer<MODE, BUFFER, TxDma<PAYLOAD, $CX>>
                    where
                        TxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        /// Returns `true` once the transfer has completed
                        pub fn is_done(&self) -> bool {
                            !self.payload.channel.in_progress()
                        }

                        /// Blocks until the transfer is complete, then stops the channel
                        /// and returns the buffer and payload
                        pub fn wait(mut self) -> (BUFFER, TxDma<PAYLOAD, $CX>) {
                            while !self.is_done() {}

                            atomic::compiler_fence(Ordering::Acquire);

                            self.payload.stop();

                            // we need a read here to make the Acquire fence effective
                            // we do *not* need this if `dma.stop` does a RMW operation
                            unsafe {
                                ptr::read_volatile(&0);
                            }

                            // we need a fence here for the same reason we need one in `Transfer.wait`
                            atomic::compiler_fence(Ordering::Acquire);

                            // `Transfer` needs to have a `Drop` implementation, because we accept
                            // managed buffers that can free their memory on drop. Because of that
                            // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
                            // and `mem::forget`.
                            //
                            // NOTE(unsafe) There is no panic branch between getting the resources
                            // and forgetting `self`.
                            unsafe {
                                let buffer = ptr::read(&self.buffer);
                                let payload = ptr::read(&self.payload);
                                mem::forget(self);
                                (buffer, payload)
                            }
                        }
                    }
                    impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, RxDma<PAYLOAD, $CX>>
                    where
                        RxDma<PAYLOAD, $CX>: TransferPayload,
                    {
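                        /// Returns the part of the buffer that the DMA engine
                        /// has filled so far, without stopping the transfer.
                        ///
                        /// A usage sketch (the `transfer` binding is illustrative):
                        ///
                        /// ```ignore
                        /// let received_so_far: &[u8] = transfer.peek();
                        /// ```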
                        pub fn peek<T>(&self) -> &[T]
                        where
                            BUFFER: AsRef<[T]>,
                        {
                            let pending = self.payload.channel.get_ndtr() as usize;

                            let slice = self.buffer.as_ref();
                            let capacity = slice.len();

                            &slice[..(capacity - pending)]
                        }
                    }
                )+

                impl DmaExt for $DMAX {
                    type Channels = Channels;

                    fn split(self, ahb: &mut AHB) -> Channels {
                        $DMAX::enable(ahb);

                        // reset the DMA control registers (this stops any ongoing transfers)
                        $(
                            self.$chX.cr.reset();
                        )+

                        Channels((), $($CX { _0: () }),+)
                    }
                }
            }
        )+
    }
}
dma! {
    DMA1: (dma1, {
        C1: (
            ch1,
            htif1, tcif1,
            chtif1, ctcif1, cgif1
        ),
        C2: (
            ch2,
            htif2, tcif2,
            chtif2, ctcif2, cgif2
        ),
        C3: (
            ch3,
            htif3, tcif3,
            chtif3, ctcif3, cgif3
        ),
        C4: (
            ch4,
            htif4, tcif4,
            chtif4, ctcif4, cgif4
        ),
        C5: (
            ch5,
            htif5, tcif5,
            chtif5, ctcif5, cgif5
        ),
        C6: (
            ch6,
            htif6, tcif6,
            chtif6, ctcif6, cgif6
        ),
        C7: (
            ch7,
            htif7, tcif7,
            chtif7, ctcif7, cgif7
        ),
    }),
    DMA2: (dma2, {
        C1: (
            ch1,
            htif1, tcif1,
            chtif1, ctcif1, cgif1
        ),
        C2: (
            ch2,
            htif2, tcif2,
            chtif2, ctcif2, cgif2
        ),
        C3: (
            ch3,
            htif3, tcif3,
            chtif3, ctcif3, cgif3
        ),
        C4: (
            ch4,
            htif4, tcif4,
            chtif4, ctcif4, cgif4
        ),
        C5: (
            ch5,
            htif5, tcif5,
            chtif5, ctcif5, cgif5
        ),
    }),
}
/// DMA Receiver
pub struct RxDma<PAYLOAD, RXCH> {
    pub(crate) payload: PAYLOAD,
    pub channel: RXCH,
}

/// DMA Transmitter
pub struct TxDma<PAYLOAD, TXCH> {
    pub(crate) payload: PAYLOAD,
    pub channel: TXCH,
}

/// DMA Receiver/Transmitter
pub struct RxTxDma<PAYLOAD, RXCH, TXCH> {
    pub(crate) payload: PAYLOAD,
    pub rxchannel: RXCH,
    pub txchannel: TXCH,
}

pub trait Receive {
    type RxChannel;
    type TransmittedWord;
}

pub trait Transmit {
    type TxChannel;
    type ReceivedWord;
}
/// Trait for circular DMA reads from a peripheral into memory.
pub trait CircReadDma<B, RS>: Receive
where
    &'static mut [B; 2]: StaticWriteBuffer<Word = RS>,
    B: 'static,
    Self: core::marker::Sized,
{
    fn circ_read(self, buffer: &'static mut [B; 2]) -> CircBuffer<B, Self>;
}

/// Trait for one-shot DMA reads from a peripheral into memory.
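///
/// A usage sketch, assuming a DMA-capable receiver implementing this trait
/// (the `rx` binding is illustrative; `singleton!` comes from the `cortex-m`
/// crate):
///
/// ```ignore
/// let buf = cortex_m::singleton!(: [u8; 16] = [0; 16]).unwrap();
/// let transfer = rx.read(buf);
/// let (buf, rx) = transfer.wait();
/// ```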
pub trait ReadDma<B, RS>: Receive
where
    B: StaticWriteBuffer<Word = RS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn read(self, buffer: B) -> Transfer<W, B, Self>;
}

/// Trait for one-shot DMA writes from memory to a peripheral.
pub trait WriteDma<B, TS>: Transmit
where
    B: StaticReadBuffer<Word = TS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn write(self, buffer: B) -> Transfer<R, B, Self>;
}