// dma.rs — Direct Memory Access abstractions (extraction artifacts removed)
  1. //! # Direct Memory Access
  2. #![allow(dead_code)]
  3. use core::marker::PhantomData;
  4. use core::ops;
  5. use crate::rcc::AHB;
/// Errors that DMA operations can report.
#[derive(Debug)]
pub enum Error {
    /// The DMA wrapped around and overwrote data before it was read
    /// (e.g. both halves of a circular buffer completed between reads).
    Overrun,
    /// Hidden placeholder variant so downstream code cannot match
    /// exhaustively; this predates the `#[non_exhaustive]` attribute.
    #[doc(hidden)]
    _Extensible,
}
  12. pub enum Event {
  13. HalfTransfer,
  14. TransferComplete,
  15. }
  16. #[derive(Clone, Copy, PartialEq)]
  17. pub enum Half {
  18. First,
  19. Second,
  20. }
/// A circular (double) buffer fed by a DMA channel.
///
/// The hardware alternates between the two halves of `buffer` while the
/// CPU reads the half that is not currently being written.
pub struct CircBuffer<BUFFER, CHANNEL>
where
    BUFFER: 'static,
{
    // The two halves the DMA alternates between.
    buffer: &'static mut [BUFFER; 2],
    // The DMA channel driving this buffer.
    channel: CHANNEL,
    // The half the CPU may currently read; see `readable_half()`.
    readable_half: Half,
}
  29. impl<BUFFER, CHANNEL> CircBuffer<BUFFER, CHANNEL> {
  30. pub(crate) fn new(buf: &'static mut [BUFFER; 2], chan: CHANNEL) -> Self {
  31. CircBuffer {
  32. buffer: buf,
  33. channel: chan,
  34. readable_half: Half::Second,
  35. }
  36. }
  37. }
/// Abstraction over `&'static` and `&'static mut` references, letting
/// the DMA API accept either kind of static buffer.
pub trait Static<B> {
    /// Returns a shared view of the underlying value.
    fn borrow(&self) -> &B;
}
impl<B> Static<B> for &'static B {
    fn borrow(&self) -> &B {
        // `self` is `&&'static B`; one dereference yields the inner
        // `&'static B`, which coerces to the shorter-lived `&B`.
        *self
    }
}
impl<B> Static<B> for &'static mut B {
    fn borrow(&self) -> &B {
        // Reborrows the `&'static mut B` behind `self` as a shared `&B`.
        *self
    }
}
/// Extension trait that splits a DMA peripheral into independent channels.
pub trait DmaExt {
    /// Tuple-like struct holding one handle per hardware channel.
    type Channels;
    /// Enables the peripheral clock on AHB and returns the channel handles.
    fn split(self, ahb: &mut AHB) -> Self::Channels;
}
/// An on-going DMA transfer.
///
/// `MODE` is the type-state marker [`R`] or [`W`]. The buffer, channel
/// and payload are owned for the duration of the transfer and handed
/// back by `wait()`.
pub struct Transfer<MODE, BUFFER, CHANNEL, PAYLOAD> {
    // Zero-sized mode marker; PhantomData so MODE needs no storage.
    _mode: PhantomData<MODE>,
    buffer: BUFFER,
    channel: CHANNEL,
    payload: PAYLOAD,
}
  61. impl<BUFFER, CHANNEL, PAYLOAD> Transfer<R, BUFFER, CHANNEL, PAYLOAD> {
  62. pub(crate) fn r(buffer: BUFFER, channel: CHANNEL, payload: PAYLOAD) -> Self {
  63. Transfer {
  64. _mode: PhantomData,
  65. buffer,
  66. channel,
  67. payload,
  68. }
  69. }
  70. }
  71. impl<BUFFER, CHANNEL, PAYLOAD> Transfer<W, BUFFER, CHANNEL, PAYLOAD> {
  72. pub(crate) fn w(buffer: BUFFER, channel: CHANNEL, payload: PAYLOAD) -> Self {
  73. Transfer {
  74. _mode: PhantomData,
  75. buffer,
  76. channel,
  77. payload,
  78. }
  79. }
  80. }
// Read-mode transfers expose the buffer read-only through `Deref`
// while the transfer is still on-going.
// NOTE(review): this hands out `&BUFFER` before `wait()` completes;
// soundness presumably relies on the `R`-mode contract keeping the DMA
// from writing to memory the CPU reads — confirm against the drivers.
impl<BUFFER, CHANNEL, PAYLOAD> ops::Deref for Transfer<R, BUFFER, CHANNEL, PAYLOAD> {
    type Target = BUFFER;

    fn deref(&self) -> &BUFFER {
        &self.buffer
    }
}
/// Read transfer
///
/// Type-state marker used as the `MODE` parameter of [`Transfer`].
pub struct R;

/// Write transfer
///
/// Type-state marker used as the `MODE` parameter of [`Transfer`].
pub struct W;
  91. macro_rules! dma {
  92. ($($DMAX:ident: ($dmaX:ident, $dmaXen:ident, $dmaXrst:ident, {
  93. $($CX:ident: (
  94. $ccrX:ident,
  95. $CCRX:ident,
  96. $cndtrX:ident,
  97. $CNDTRX:ident,
  98. $cparX:ident,
  99. $CPARX:ident,
  100. $cmarX:ident,
  101. $CMARX:ident,
  102. $htifX:ident,
  103. $tcifX:ident,
  104. $chtifX:ident,
  105. $ctcifX:ident,
  106. $cgifX:ident
  107. ),)+
  108. }),)+) => {
  109. $(
  110. pub mod $dmaX {
  111. use core::sync::atomic::{self, Ordering};
  112. use crate::pac::{$DMAX, dma1};
  113. use dma::{CircBuffer, DmaExt, Error, Event, Half, Transfer, W};
  114. use rcc::AHB;
  115. pub struct Channels((), $(pub $CX),+);
  116. $(
  117. pub struct $CX { _0: () }
  118. impl $CX {
  119. pub fn listen(&mut self, event: Event) {
  120. match event {
  121. Event::HalfTransfer => self.ccr().modify(|_, w| w.htie().set_bit()),
  122. Event::TransferComplete => {
  123. self.ccr().modify(|_, w| w.tcie().set_bit())
  124. }
  125. }
  126. }
  127. pub fn unlisten(&mut self, event: Event) {
  128. match event {
  129. Event::HalfTransfer => {
  130. self.ccr().modify(|_, w| w.htie().clear_bit())
  131. },
  132. Event::TransferComplete => {
  133. self.ccr().modify(|_, w| w.tcie().clear_bit())
  134. }
  135. }
  136. }
  137. pub(crate) fn isr(&self) -> dma1::isr::R {
  138. // NOTE(unsafe) atomic read with no side effects
  139. unsafe { (*$DMAX::ptr()).isr.read() }
  140. }
  141. pub(crate) fn ifcr(&self) -> &dma1::IFCR {
  142. unsafe { &(*$DMAX::ptr()).ifcr }
  143. }
  144. pub(crate) fn ccr(&mut self) -> &dma1::$CCRX {
  145. unsafe { &(*$DMAX::ptr()).$ccrX }
  146. }
  147. pub(crate) fn cndtr(&mut self) -> &dma1::$CNDTRX {
  148. unsafe { &(*$DMAX::ptr()).$cndtrX }
  149. }
  150. pub(crate) fn cpar(&mut self) -> &dma1::$CPARX {
  151. unsafe { &(*$DMAX::ptr()).$cparX }
  152. }
  153. pub(crate) fn cmar(&mut self) -> &dma1::$CMARX {
  154. unsafe { &(*$DMAX::ptr()).$cmarX }
  155. }
  156. pub(crate) fn get_cndtr(&self) -> u32 {
  157. // NOTE(unsafe) atomic read with no side effects
  158. unsafe { (*$DMAX::ptr()).$cndtrX.read().bits() }
  159. }
  160. }
  161. impl<B> CircBuffer<B, $CX> {
  162. /// Peeks into the readable half of the buffer
  163. pub fn peek<R, F>(&mut self, f: F) -> Result<R, Error>
  164. where
  165. F: FnOnce(&B, Half) -> R,
  166. {
  167. let half_being_read = self.readable_half()?;
  168. let buf = match half_being_read {
  169. Half::First => &self.buffer[0],
  170. Half::Second => &self.buffer[1],
  171. };
  172. // XXX does this need a compiler barrier?
  173. let ret = f(buf, half_being_read);
  174. let isr = self.channel.isr();
  175. let first_half_is_done = isr.$htifX().bit_is_set();
  176. let second_half_is_done = isr.$tcifX().bit_is_set();
  177. if (half_being_read == Half::First && second_half_is_done) ||
  178. (half_being_read == Half::Second && first_half_is_done) {
  179. Err(Error::Overrun)
  180. } else {
  181. Ok(ret)
  182. }
  183. }
  184. /// Returns the `Half` of the buffer that can be read
  185. pub fn readable_half(&mut self) -> Result<Half, Error> {
  186. let isr = self.channel.isr();
  187. let first_half_is_done = isr.$htifX().bit_is_set();
  188. let second_half_is_done = isr.$tcifX().bit_is_set();
  189. if first_half_is_done && second_half_is_done {
  190. return Err(Error::Overrun);
  191. }
  192. let last_read_half = self.readable_half;
  193. Ok(match last_read_half {
  194. Half::First => {
  195. if second_half_is_done {
  196. self.channel.ifcr().write(|w| w.$ctcifX().set_bit());
  197. self.readable_half = Half::Second;
  198. Half::Second
  199. } else {
  200. last_read_half
  201. }
  202. }
  203. Half::Second => {
  204. if first_half_is_done {
  205. self.channel.ifcr().write(|w| w.$chtifX().set_bit());
  206. self.readable_half = Half::First;
  207. Half::First
  208. } else {
  209. last_read_half
  210. }
  211. }
  212. })
  213. }
  214. }
  215. impl<BUFFER, PAYLOAD, MODE> Transfer<MODE, BUFFER, $CX, PAYLOAD> {
  216. pub fn is_done(&self) -> bool {
  217. self.channel.isr().$tcifX().bit_is_set()
  218. }
  219. pub fn wait(mut self) -> (BUFFER, $CX, PAYLOAD) {
  220. // XXX should we check for transfer errors here?
  221. // The manual says "A DMA transfer error can be generated by reading
  222. // from or writing to a reserved address space". I think it's impossible
  223. // to get to that state with our type safe API and *safe* Rust.
  224. while !self.is_done() {}
  225. self.channel.ifcr().write(|w| w.$cgifX().set_bit());
  226. self.channel.ccr().modify(|_, w| w.en().clear_bit());
  227. // TODO can we weaken this compiler barrier?
  228. // NOTE(compiler_fence) operations on `buffer` should not be reordered
  229. // before the previous statement, which marks the DMA transfer as done
  230. atomic::compiler_fence(Ordering::SeqCst);
  231. (self.buffer, self.channel, self.payload)
  232. }
  233. }
  234. impl<BUFFER, PAYLOAD> Transfer<W, &'static mut BUFFER, $CX, PAYLOAD> {
  235. pub fn peek<T>(&self) -> &[T]
  236. where
  237. BUFFER: AsRef<[T]>,
  238. {
  239. let pending = self.channel.get_cndtr() as usize;
  240. let slice = self.buffer.as_ref();
  241. let capacity = slice.len();
  242. &slice[..(capacity - pending)]
  243. }
  244. }
  245. )+
  246. impl DmaExt for $DMAX {
  247. type Channels = Channels;
  248. fn split(self, ahb: &mut AHB) -> Channels {
  249. ahb.enr().modify(|_, w| w.$dmaXen().set_bit());
  250. // reset the DMA control registers (stops all on-going transfers)
  251. $(
  252. self.$ccrX.reset();
  253. )+
  254. Channels((), $($CX { _0: () }),+)
  255. }
  256. }
  257. }
  258. )+
  259. }
  260. }
// NOTE(review): the `dma!` invocation below is commented out —
// presumably pending matching PAC register definitions; confirm
// before enabling.
/*
  262. dma! {
  263. DMA1: (dma1, dma1en, dma1rst, {
  264. C1: (
  265. ccr1, CCR1,
  266. cndtr1, CNDTR1,
  267. cpar1, CPAR1,
  268. cmar1, CMAR1,
  269. htif1, tcif1,
  270. chtif1, ctcif1, cgif1
  271. ),
  272. C2: (
  273. ccr2, CCR2,
  274. cndtr2, CNDTR2,
  275. cpar2, CPAR2,
  276. cmar2, CMAR2,
  277. htif2, tcif2,
  278. chtif2, ctcif2, cgif2
  279. ),
  280. C3: (
  281. ccr3, CCR3,
  282. cndtr3, CNDTR3,
  283. cpar3, CPAR3,
  284. cmar3, CMAR3,
  285. htif3, tcif3,
  286. chtif3, ctcif3, cgif3
  287. ),
  288. C4: (
  289. ccr4, CCR4,
  290. cndtr4, CNDTR4,
  291. cpar4, CPAR4,
  292. cmar4, CMAR4,
  293. htif4, tcif4,
  294. chtif4, ctcif4, cgif4
  295. ),
  296. C5: (
  297. ccr5, CCR5,
  298. cndtr5, CNDTR5,
  299. cpar5, CPAR5,
  300. cmar5, CMAR5,
  301. htif5, tcif5,
  302. chtif5, ctcif5, cgif5
  303. ),
  304. C6: (
  305. ccr6, CCR6,
  306. cndtr6, CNDTR6,
  307. cpar6, CPAR6,
  308. cmar6, CMAR6,
  309. htif6, tcif6,
  310. chtif6, ctcif6, cgif6
  311. ),
  312. C7: (
  313. ccr7, CCR7,
  314. cndtr7, CNDTR7,
  315. cpar7, CPAR7,
  316. cmar7, CMAR7,
  317. htif7, tcif7,
  318. chtif7, ctcif7, cgif7
  319. ),
  320. }),
  321. DMA2: (dma2, dma2en, dma2rst, {
  322. C1: (
  323. ccr1, CCR1,
  324. cndtr1, CNDTR1,
  325. cpar1, CPAR1,
  326. cmar1, CMAR1,
  327. htif1, tcif1,
  328. chtif1, ctcif1, cgif1
  329. ),
  330. C2: (
  331. ccr2, CCR2,
  332. cndtr2, CNDTR2,
  333. cpar2, CPAR2,
  334. cmar2, CMAR2,
  335. htif2, tcif2,
  336. chtif2, ctcif2, cgif2
  337. ),
  338. C3: (
  339. ccr3, CCR3,
  340. cndtr3, CNDTR3,
  341. cpar3, CPAR3,
  342. cmar3, CMAR3,
  343. htif3, tcif3,
  344. chtif3, ctcif3, cgif3
  345. ),
  346. C4: (
  347. ccr4, CCR4,
  348. cndtr4, CNDTR4,
  349. cpar4, CPAR4,
  350. cmar4, CMAR4,
  351. htif4, tcif4,
  352. chtif4, ctcif4, cgif4
  353. ),
  354. C5: (
  355. ccr5, CCR5,
  356. cndtr5, CNDTR5,
  357. cpar5, CPAR5,
  358. cmar5, CMAR5,
  359. htif5, tcif5,
  360. chtif5, ctcif5, cgif5
  361. ),
  362. }),
  363. }
  364. */
/// Links a channel handle type to the DMA peripheral it belongs to.
///
/// NOTE(review): no implementations are visible in this file;
/// presumably implemented by peripheral drivers elsewhere — confirm.
pub trait DmaChannel {
    /// The owning DMA peripheral type.
    type Dma;
}