/*
 * linux/arch/arm/mach-lpc32xx/dma.c
 *
 * Copyright (C) 2008 NXP Semiconductors
 * (Based on parts of the PNX4008 DMA driver)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/dma-mapping.h>
#include <mach/dmac.h>
#define DMAIOBASE io_p2v(LPC32XX_DMA_BASE)
#define VALID_CHANNEL(c) (((c) >= 0) && ((c) < MAX_DMA_CHANNELS))

static DEFINE_SPINLOCK(dma_lock);
struct dma_linked_list {
	u32 src;
	u32 dest;
	u32 next_lli;
	u32 ctrl;
};

/* For DMA linked list operation, a linked list of DMA descriptors
   is maintained along with some data to manage the list in software. */
struct dma_list_ctrl {
	struct dma_linked_list dmall;		/* DMA list descriptor */
	struct dma_list_ctrl *next_list_addr;	/* Virtual address to next list entry */
	struct dma_list_ctrl *prev_list_addr;	/* Virtual address to previous list entry */
	u32 next_list_phy;			/* Physical address to next list entry */
	u32 prev_list_phy;			/* Physical address to previous list entry */
};

/* Each DMA channel has one of these structures */
struct dma_channel {
	char *name;
	void (*irq_handler) (int, int, void *);
	void *data;
	struct dma_config *dmacfg;
	u32 control;
	u32 config;
	u32 config_int_mask;

	int list_entries;	/* Number of list entries */
	u32 list_size;		/* Total size of allocated list in bytes */
	u32 list_vstart;	/* Allocated (virtual) address of list */
	u32 list_pstart;	/* Allocated (physical) address of list */
	int free_entries;	/* Number of free descriptors */
	struct dma_list_ctrl *list_head, *list_tail;
};

struct dma_control {
	struct clk *clk;
	int num_clks;
	struct dma_channel dma_channels[MAX_DMA_CHANNELS];
};
static struct dma_control dma_ctrl;
static inline void __dma_regs_lock(void)
{
	spin_lock_irq(&dma_lock);
}

static inline void __dma_regs_unlock(void)
{
	spin_unlock_irq(&dma_lock);
}
static inline void __dma_enable(int ch)
{
	u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	ch_cfg |= DMAC_CHAN_ENABLE;
	__raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}

static inline void __dma_disable(int ch)
{
	u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	ch_cfg &= ~DMAC_CHAN_ENABLE;
	__raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}
static void dma_clocks_up(void)
{
	/* Enable DMA clock if needed */
	if (dma_ctrl.num_clks == 0) {
		clk_enable(dma_ctrl.clk);
		__raw_writel(DMAC_CTRL_ENABLE, DMA_CONFIG(DMAIOBASE));
	}

	dma_ctrl.num_clks++;
}

static void dma_clocks_down(void)
{
	dma_ctrl.num_clks--;

	/* Disable DMA clock if needed */
	if (dma_ctrl.num_clks == 0) {
		__raw_writel(0, DMA_CONFIG(DMAIOBASE));
		clk_disable(dma_ctrl.clk);
	}
}
static int lpc32xx_ch_setup(struct dma_config *dmachcfg)
{
	u32 tmpctrl, tmpcfg, tmp;
	int ch = dmachcfg->ch;

	/* Channel control setup */
	tmpctrl = 0;
	switch (dmachcfg->src_size) {
	case 1:
		tmpctrl |= DMAC_CHAN_SRC_WIDTH_8;
		break;
	case 2:
		tmpctrl |= DMAC_CHAN_SRC_WIDTH_16;
		break;
	case 4:
		tmpctrl |= DMAC_CHAN_SRC_WIDTH_32;
		break;
	default:
		return -EINVAL;
	}
	switch (dmachcfg->dst_size) {
	case 1:
		tmpctrl |= DMAC_CHAN_DEST_WIDTH_8;
		break;
	case 2:
		tmpctrl |= DMAC_CHAN_DEST_WIDTH_16;
		break;
	case 4:
		tmpctrl |= DMAC_CHAN_DEST_WIDTH_32;
		break;
	default:
		return -EINVAL;
	}
	if (dmachcfg->src_inc != 0)
		tmpctrl |= DMAC_CHAN_SRC_AUTOINC;
	if (dmachcfg->dst_inc != 0)
		tmpctrl |= DMAC_CHAN_DEST_AUTOINC;
	if (dmachcfg->src_ahb1 != 0)
		tmpctrl |= DMAC_CHAN_SRC_AHB1;
	if (dmachcfg->dst_ahb1 != 0)
		tmpctrl |= DMAC_CHAN_DEST_AHB1;
	if (dmachcfg->tc_inten != 0)
		tmpctrl |= DMAC_CHAN_INT_TC_EN;
	tmpctrl |= dmachcfg->src_bsize | dmachcfg->dst_bsize;
	dma_ctrl.dma_channels[ch].control = tmpctrl;

	/* Channel config setup */
	tmpcfg = dmachcfg->src_prph | dmachcfg->dst_prph |
		dmachcfg->flowctrl;
	dma_ctrl.dma_channels[ch].config = tmpcfg;

	dma_ctrl.dma_channels[ch].config_int_mask = 0;
	if (dmachcfg->err_inten != 0)
		dma_ctrl.dma_channels[ch].config_int_mask |=
			DMAC_CHAN_IE;
	if (dmachcfg->tc_inten != 0)
		dma_ctrl.dma_channels[ch].config_int_mask |=
			DMAC_CHAN_ITC;

	/* Make sure the channel is disabled before reprogramming it */
	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	tmp &= ~DMAC_CHAN_ENABLE;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

	/* Clear interrupts for channel */
	__raw_writel((1 << ch), DMA_INT_TC_CLEAR(DMAIOBASE));
	__raw_writel((1 << ch), DMA_INT_ERR_CLEAR(DMAIOBASE));

	/* Write control and config words */
	__raw_writel(tmpctrl, DMACH_CONTROL(DMAIOBASE, ch));
	__raw_writel(tmpcfg, DMACH_CONFIG_CH(DMAIOBASE, ch));

	return 0;
}
int lpc32xx_dma_ch_enable(int ch)
{
	if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
		return -EINVAL;

	__dma_regs_lock();
	__dma_enable(ch);
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_enable);

int lpc32xx_dma_ch_disable(int ch)
{
	if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
		return -EINVAL;

	__dma_regs_lock();
	__dma_disable(ch);
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_disable);
int lpc32xx_dma_ch_get(struct dma_config *dmachcfg, char *name,
	void *irq_handler, void *data)
{
	int ret;

	if (!VALID_CHANNEL(dmachcfg->ch))
		return -EINVAL;

	/* If the channel is already in use, return */
	if (dma_ctrl.dma_channels[dmachcfg->ch].name != NULL)
		return -ENODEV;

	/* Save channel data */
	dma_ctrl.dma_channels[dmachcfg->ch].dmacfg = dmachcfg;
	dma_ctrl.dma_channels[dmachcfg->ch].name = name;
	dma_ctrl.dma_channels[dmachcfg->ch].irq_handler = irq_handler;
	dma_ctrl.dma_channels[dmachcfg->ch].data = data;

	/* Setup channel */
	__dma_regs_lock();
	dma_clocks_up();
	ret = lpc32xx_ch_setup(dmachcfg);
	__dma_regs_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_get);
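
/*
 * Illustrative usage sketch only (compiled out): a rough example of how a
 * client driver might claim a channel with lpc32xx_dma_ch_get(). The channel
 * number, widths and peripheral request ID below are hypothetical
 * placeholders, not values defined by this driver.
 */
#if 0
static void example_dma_cb(int ch, int cause, void *data)
{
	if (cause & DMA_ERR_INT)
		printk(KERN_ERR "DMA error on channel %d\n", ch);
}

static struct dma_config example_cfg = {
	.ch = 0,		/* hypothetical channel number */
	.tc_inten = 1,		/* interrupt on terminal count */
	.err_inten = 1,		/* interrupt on error */
	.src_size = 4,		/* 32-bit source transfers */
	.src_inc = 1,		/* incrementing source (memory) */
	.dst_size = 4,		/* 32-bit destination transfers */
	.dst_inc = 0,		/* fixed destination (peripheral register) */
	.dst_prph = 0,		/* hypothetical peripheral request ID */
};

static int example_claim_channel(void)
{
	return lpc32xx_dma_ch_get(&example_cfg, "example",
		example_dma_cb, NULL);
}
#endif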
int lpc32xx_dma_ch_put(int ch)
{
	u32 tmp;

	if (!VALID_CHANNEL(ch))
		return -EINVAL;

	/* If the channel is not in use, return */
	if (dma_ctrl.dma_channels[ch].name == NULL)
		return -EINVAL;

	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	tmp &= ~DMAC_CHAN_ENABLE;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

	lpc32xx_dma_ch_disable(ch);

	__dma_regs_lock();
	dma_clocks_down();
	__dma_regs_unlock();

	dma_ctrl.dma_channels[ch].name = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_put);
int lpc32xx_dma_ch_pause_unpause(int ch, int pause)
{
	u32 tmp;

	if (!VALID_CHANNEL(ch))
		return -EINVAL;

	/* If the channel is not in use, return */
	if (dma_ctrl.dma_channels[ch].name == NULL)
		return -EINVAL;

	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	if (pause)
		tmp |= DMAC_CHAN_HALT;
	else
		tmp &= ~DMAC_CHAN_HALT;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_pause_unpause);
int lpc32xx_dma_start_pflow_xfer(int ch,
				 void *src,
				 void *dst,
				 int enable)
{
	u32 tmp;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
		return -EINVAL;

	/* When starting a DMA transfer where the peripheral is the flow
	   controller, DMA must be previously disabled */
	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	if (tmp & DMAC_CHAN_ENABLE)
		return -EBUSY;

	__dma_regs_lock();
	__raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
	__raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
	__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
	__raw_writel(dma_ctrl.dma_channels[ch].control,
		DMACH_CONTROL(DMAIOBASE, ch));

	tmp = dma_ctrl.dma_channels[ch].config |
		dma_ctrl.dma_channels[ch].config_int_mask;
	if (enable != 0)
		tmp |= DMAC_CHAN_ENABLE;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_pflow_xfer);
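
/*
 * Illustrative usage sketch only (compiled out): starting a transfer with
 * the peripheral as flow controller. As noted above, the channel must not
 * already be enabled when this is called. The channel number and addresses
 * here are hypothetical placeholders.
 */
#if 0
static int example_start_pflow(void *perip_fifo, void *buf_bus_addr)
{
	int ret;

	/* Channel 0 is assumed to have been claimed via lpc32xx_dma_ch_get()
	   and to be idle at this point */
	ret = lpc32xx_dma_start_pflow_xfer(0, perip_fifo, buf_bus_addr, 1);
	if (ret)
		printk(KERN_ERR "DMA pflow start failed: %d\n", ret);

	return ret;
}
#endif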
u32 lpc32xx_dma_llist_v_to_p(int ch,
			     u32 vlist)
{
	u32 pptr;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	pptr = vlist - dma_ctrl.dma_channels[ch].list_vstart;
	pptr += dma_ctrl.dma_channels[ch].list_pstart;

	return pptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_v_to_p);
u32 lpc32xx_dma_llist_p_to_v(int ch,
			     u32 plist)
{
	u32 vptr;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	vptr = plist - dma_ctrl.dma_channels[ch].list_pstart;
	vptr += dma_ctrl.dma_channels[ch].list_vstart;

	return vptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_p_to_v);
u32 lpc32xx_dma_alloc_llist(int ch,
			    int entries)
{
	int i;
	dma_addr_t dma_handle;
	struct dma_list_ctrl *pdmalist, *pdmalistst;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
		return 0;

	/* Limit number of list entries to a sane range (bounds assumed) */
	if (entries < 2)
		entries = 2;
	if (entries > 2048)
		entries = 2048;

	/* Save list information */
	dma_ctrl.dma_channels[ch].list_entries = entries;
	dma_ctrl.dma_channels[ch].list_size =
		(entries * sizeof(struct dma_list_ctrl));
	dma_ctrl.dma_channels[ch].list_vstart = (u32) dma_alloc_coherent(NULL,
		dma_ctrl.dma_channels[ch].list_size, &dma_handle, GFP_KERNEL);
	if (dma_ctrl.dma_channels[ch].list_vstart == 0) {
		/* No allocated DMA space */
		return 0;
	}
	dma_ctrl.dma_channels[ch].list_pstart = (u32) dma_handle;

	/* Setup list tail and head pointers */
	pdmalist = pdmalistst =
		(struct dma_list_ctrl *) dma_ctrl.dma_channels[ch].list_vstart;
	for (i = 0; i < entries; i++) {
		pdmalistst->next_list_addr = pdmalistst + 1;
		pdmalistst->prev_list_addr = pdmalistst - 1;
		pdmalistst->next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
			(u32) pdmalistst->next_list_addr);
		pdmalistst->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch,
			(u32) pdmalistst->prev_list_addr);
		pdmalistst++;
	}

	/* Wrap the first and last entries around to make the list circular */
	pdmalist[entries - 1].next_list_addr = pdmalist;
	pdmalist[entries - 1].next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
		(u32) pdmalist[entries - 1].next_list_addr);
	pdmalist->prev_list_addr = &pdmalist[entries - 1];
	pdmalist->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch,
		(u32) pdmalist->prev_list_addr);

	/* Save current free descriptors and current head/tail */
	dma_ctrl.dma_channels[ch].free_entries = entries;
	dma_ctrl.dma_channels[ch].list_head = pdmalist;
	dma_ctrl.dma_channels[ch].list_tail = pdmalist;

	return dma_ctrl.dma_channels[ch].list_vstart;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_alloc_llist);
void lpc32xx_dma_dealloc_llist(int ch)
{
	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return;

	dma_free_coherent(NULL, dma_ctrl.dma_channels[ch].list_size,
		(void *) dma_ctrl.dma_channels[ch].list_vstart,
		(dma_addr_t) dma_ctrl.dma_channels[ch].list_pstart);
	dma_ctrl.dma_channels[ch].list_head = NULL;
	dma_ctrl.dma_channels[ch].list_tail = NULL;
	dma_ctrl.dma_channels[ch].list_entries = 0;
	dma_ctrl.dma_channels[ch].free_entries = 0;
	dma_ctrl.dma_channels[ch].list_vstart = 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_dealloc_llist);
u32 lpc32xx_dma_get_llist_head(int ch)
{
	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Return the current list pointer (virtual) for the
	   currently active DMA descriptor */
	return lpc32xx_dma_llist_p_to_v(ch,
		__raw_readl(DMACH_LLI(DMAIOBASE, ch)));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_get_llist_head);
void lpc32xx_dma_flush_llist(int ch)
{
	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return;

	/* Disable channel and clear LLI */
	__dma_regs_lock();
	__dma_disable(ch);
	__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
	__dma_regs_unlock();

	/* Reset the software list to its initial (empty) state */
	dma_ctrl.dma_channels[ch].list_head = (struct dma_list_ctrl *)
		dma_ctrl.dma_channels[ch].list_vstart;
	dma_ctrl.dma_channels[ch].list_tail = (struct dma_list_ctrl *)
		dma_ctrl.dma_channels[ch].list_vstart;
	dma_ctrl.dma_channels[ch].free_entries =
		dma_ctrl.dma_channels[ch].list_entries;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_flush_llist);
u32 lpc32xx_dma_queue_llist_entry(int ch,
				  void *src,
				  void *dst,
				  int size)
{
	struct dma_list_ctrl *plhead;
	u32 ctrl, cfg;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Exit if all the buffers are used */
	if (dma_ctrl.dma_channels[ch].free_entries == 0)
		return 0;

	/* Next available DMA link descriptor */
	plhead = dma_ctrl.dma_channels[ch].list_head;

	/* Adjust size to number of transfers (vs bytes) */
	size = size / dma_ctrl.dma_channels[ch].dmacfg->dst_size;

	/* Setup control and config words */
	ctrl = dma_ctrl.dma_channels[ch].control | size;
	cfg = dma_ctrl.dma_channels[ch].config | DMAC_CHAN_ENABLE |
		dma_ctrl.dma_channels[ch].config_int_mask;

	/* Populate DMA linked data structure */
	plhead->dmall.src = (u32) src;
	plhead->dmall.dest = (u32) dst;
	plhead->dmall.next_lli = 0;
	plhead->dmall.ctrl = ctrl;

	__dma_regs_lock();

	/* Append this link to the end of the previous link */
	plhead->prev_list_addr->dmall.next_lli =
		lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

	/* Decrement available buffers */
	dma_ctrl.dma_channels[ch].free_entries--;

	/* If the DMA channel is idle, then the buffer needs to be placed directly into
	   the channel registers and the transfer started */
	if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) & DMAC_CHAN_ENABLE) == 0) {
		/* DMA is disabled, so move the current buffer into the
		   channel registers and start transfer */
		__raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
		__raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
		__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
		__raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
		__raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
	}

	/* Process next link on next call */
	dma_ctrl.dma_channels[ch].list_head = plhead->next_list_addr;

	__dma_regs_unlock();

	return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist_entry);
u32 lpc32xx_get_free_llist_entry(int ch)
{
	struct dma_list_ctrl *pltail;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
		(dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Exit if no entries to free */
	if (dma_ctrl.dma_channels[ch].free_entries ==
		dma_ctrl.dma_channels[ch].list_entries)
		return 0;

	/* Get tail pointer and advance the tail */
	pltail = dma_ctrl.dma_channels[ch].list_tail;
	dma_ctrl.dma_channels[ch].list_tail = pltail->next_list_addr;

	/* Increment available buffers */
	dma_ctrl.dma_channels[ch].free_entries++;

	return (u32) pltail;
}
EXPORT_SYMBOL_GPL(lpc32xx_get_free_llist_entry);
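
/*
 * Illustrative usage sketch only (compiled out): typical linked-list flow.
 * A client allocates a descriptor ring once, queues buffers as they become
 * ready, and recycles one completed descriptor from its terminal-count
 * callback. The channel number, ring size and error policy are hypothetical.
 */
#if 0
#define EXAMPLE_CH	0
#define EXAMPLE_ENTRIES	16

static int example_llist_setup(void)
{
	if (lpc32xx_dma_alloc_llist(EXAMPLE_CH, EXAMPLE_ENTRIES) == 0)
		return -ENOMEM;

	return 0;
}

static int example_queue_buffer(void *src_bus, void *dst_bus, int bytes)
{
	/* lpc32xx_dma_queue_llist_entry() returns 0 when no descriptors remain */
	if (lpc32xx_dma_queue_llist_entry(EXAMPLE_CH, src_bus, dst_bus,
			bytes) == 0)
		return -EBUSY;

	return 0;
}

static void example_llist_dma_cb(int ch, int cause, void *data)
{
	if (cause & DMA_ERR_INT) {
		/* On error, drop everything still queued on this channel */
		lpc32xx_dma_flush_llist(ch);
		return;
	}

	/* A descriptor completed; return it to the free pool */
	lpc32xx_get_free_llist_entry(ch);
}
#endif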
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i;
	unsigned long i_bit;
	unsigned long dint = __raw_readl(DMA_INT_STAT(DMAIOBASE));
	unsigned long tcint = __raw_readl(DMA_INT_TC_STAT(DMAIOBASE));
	unsigned long eint = __raw_readl(DMA_INT_ERR_STAT(DMAIOBASE));

	for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
		i_bit = 1 << i;
		if (dint & i_bit) {
			struct dma_channel *channel = &dma_ctrl.dma_channels[i];

			if (channel->name && channel->irq_handler) {
				int cause = 0;

				if (eint & i_bit) {
					__raw_writel(i_bit, DMA_INT_ERR_CLEAR(DMAIOBASE));
					cause |= DMA_ERR_INT;
				}
				if (tcint & i_bit) {
					__raw_writel(i_bit, DMA_INT_TC_CLEAR(DMAIOBASE));
					cause |= DMA_TC_INT;
				}

				channel->irq_handler(i, cause, channel->data);
			} else {
				/*
				 * IRQ for an unregistered DMA channel
				 */
				__raw_writel(i_bit, DMA_INT_ERR_CLEAR(DMAIOBASE));
				__raw_writel(i_bit, DMA_INT_TC_CLEAR(DMAIOBASE));
				printk(KERN_WARNING
					"spurious IRQ for DMA channel %d\n", i);
			}
		}
	}

	return IRQ_HANDLED;
}
static int __init lpc32xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "LPC32XX DMA driver\n");

	ret = request_irq(IRQ_LPC32XX_DMA, dma_irq_handler, 0, "DMA", NULL);
	if (ret) {
		printk(KERN_CRIT "Can't register IRQ for DMA\n");
		goto out;
	}

	/* Get the DMA controller clock */
	dma_ctrl.clk = clk_get(NULL, "clk_dmac");
	if (IS_ERR(dma_ctrl.clk)) {
		ret = -ENODEV;
		goto errout;
	}
	clk_enable(dma_ctrl.clk);

	/* Clear DMA controller */
	__raw_writel(1, DMA_CONFIG(DMAIOBASE));
	__raw_writel(0xFF, DMA_INT_TC_CLEAR(DMAIOBASE));
	__raw_writel(0xFF, DMA_INT_ERR_CLEAR(DMAIOBASE));

	/* Clock is only enabled when needed to save power */
	clk_disable(dma_ctrl.clk);

	return 0;

errout:
	free_irq(IRQ_LPC32XX_DMA, NULL);

out:
	return ret;
}
arch_initcall(lpc32xx_dma_init);