/*
 * linux/arch/arm/mach-lpc32xx/dma.c
 *
 * Copyright (C) 2008 NXP Semiconductors
 * (Based on parts of the PNX4008 DMA driver)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/dma-mapping.h>
#include <mach/dmac.h>
#define DMAIOBASE io_p2v(LPC32XX_DMA_BASE)
#define VALID_CHANNEL(c) (((c) >= 0) && ((c) < MAX_DMA_CHANNELS))

static DEFINE_SPINLOCK(dma_lock);
/* Linked list descriptor laid out as the DMA controller expects it */
struct dma_linked_list {
	u32 src;
	u32 dest;
	u32 next_lli;
	u32 ctrl;
};
/* For DMA linked list operation, a linked list of DMA descriptors
   is maintained along with some data to manage the list in software. */
struct dma_list_ctrl {
	struct dma_linked_list dmall;		/* DMA list descriptor */
	struct dma_list_ctrl *next_list_addr;	/* Virtual address to next list entry */
	struct dma_list_ctrl *prev_list_addr;	/* Virtual address to previous list entry */
	u32 next_list_phy;			/* Physical address to next list entry */
	u32 prev_list_phy;			/* Physical address to previous list entry */
};
/* Each DMA channel has one of these structures */
struct dma_channel {
	char *name;
	void (*irq_handler) (int, int, void *);
	void *data;
	struct dma_config *dmacfg;
	u32 control;
	u32 config;
	u32 config_int_mask;
	int list_entries;	/* Number of list entries */
	u32 list_size;		/* Total size of allocated list in bytes */
	u32 list_vstart;	/* Allocated (virtual) address of list */
	u32 list_pstart;	/* Allocated (physical) address of list */
	int free_entries;	/* Number of free descriptors */
	struct dma_list_ctrl *list_head, *list_tail, *list_curr;
};

struct dma_control {
	struct clk *clk;
	int num_clks;
	struct dma_channel dma_channels[MAX_DMA_CHANNELS];
};

static struct dma_control dma_ctrl;
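
/*
 * Illustrative note (not part of the original driver): each channel's
 * dma_list_ctrl entries are linked into a circular ring at allocation time.
 * list_head/list_curr mark where the next queued transfer is written,
 * list_tail marks the oldest entry not yet recycled, and free_entries counts
 * the remaining ring slots, roughly:
 *
 *	queue a buffer   ->  fill *list_head, list_head = head->next, free--
 *	recycle a buffer ->  pltail = list_tail, list_tail = tail->next, free++
 */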
static inline void __dma_regs_lock(void)
{
	spin_lock_irq(&dma_lock);
}

static inline void __dma_regs_unlock(void)
{
	spin_unlock_irq(&dma_lock);
}
static inline void __dma_enable(int ch)
{
	u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	ch_cfg |= DMAC_CHAN_ENABLE;
	__raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}

static inline void __dma_disable(int ch)
{
	u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	ch_cfg &= ~DMAC_CHAN_ENABLE;
	__raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}
/* Enable the DMA clock on first use (clock use is reference counted) */
static void dma_clocks_up(void)
{
	/* Enable DMA clock if needed */
	if (dma_ctrl.num_clks == 0) {
		clk_enable(dma_ctrl.clk);
		__raw_writel(DMAC_CTRL_ENABLE, DMA_CONFIG(DMAIOBASE));
	}

	dma_ctrl.num_clks++;
}

/* Disable the DMA clock when the last user releases it */
static void dma_clocks_down(void)
{
	dma_ctrl.num_clks--;

	/* Disable DMA clock if needed */
	if (dma_ctrl.num_clks == 0) {
		__raw_writel(0, DMA_CONFIG(DMAIOBASE));
		clk_disable(dma_ctrl.clk);
	}
}
static int lpc32xx_ch_setup(struct dma_config *dmachcfg)
{
	u32 tmpctrl, tmpcfg, tmp;
	int ch = dmachcfg->ch;

	/* Channel control setup */
	tmpctrl = 0;
	switch (dmachcfg->src_size) {
	case 1:
		tmpctrl |= DMAC_CHAN_SRC_WIDTH_8;
		break;
	case 2:
		tmpctrl |= DMAC_CHAN_SRC_WIDTH_16;
		break;
	case 4:
		tmpctrl |= DMAC_CHAN_SRC_WIDTH_32;
		break;
	default:
		return -EINVAL;
	}
	switch (dmachcfg->dst_size) {
	case 1:
		tmpctrl |= DMAC_CHAN_DEST_WIDTH_8;
		break;
	case 2:
		tmpctrl |= DMAC_CHAN_DEST_WIDTH_16;
		break;
	case 4:
		tmpctrl |= DMAC_CHAN_DEST_WIDTH_32;
		break;
	default:
		return -EINVAL;
	}
	if (dmachcfg->src_inc != 0)
		tmpctrl |= DMAC_CHAN_SRC_AUTOINC;
	if (dmachcfg->dst_inc != 0)
		tmpctrl |= DMAC_CHAN_DEST_AUTOINC;
	if (dmachcfg->src_ahb1 != 0)
		tmpctrl |= DMAC_CHAN_SRC_AHB1;
	if (dmachcfg->dst_ahb1 != 0)
		tmpctrl |= DMAC_CHAN_DEST_AHB1;
	if (dmachcfg->tc_inten != 0)
		tmpctrl |= DMAC_CHAN_INT_TC_EN;
	tmpctrl |= dmachcfg->src_bsize | dmachcfg->dst_bsize;
	dma_ctrl.dma_channels[ch].control = tmpctrl;

	/* Channel config setup */
	tmpcfg = dmachcfg->src_prph | dmachcfg->dst_prph |
		dmachcfg->flowctrl;
	dma_ctrl.dma_channels[ch].config = tmpcfg;

	dma_ctrl.dma_channels[ch].config_int_mask = 0;
	if (dmachcfg->err_inten != 0)
		dma_ctrl.dma_channels[ch].config_int_mask |=
			DMAC_CHAN_IE;
	if (dmachcfg->tc_inten != 0)
		dma_ctrl.dma_channels[ch].config_int_mask |=
			DMAC_CHAN_ITC;

	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	tmp &= ~DMAC_CHAN_ENABLE;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

	/* Clear interrupts for channel */
	__raw_writel((1 << ch), DMA_INT_TC_CLEAR(DMAIOBASE));
	__raw_writel((1 << ch), DMA_INT_ERR_CLEAR(DMAIOBASE));

	/* Write control and config words */
	__raw_writel(tmpctrl, DMACH_CONTROL(DMAIOBASE, ch));
	__raw_writel(tmpcfg, DMACH_CONFIG_CH(DMAIOBASE, ch));

	return 0;
}
int lpc32xx_dma_ch_enable(int ch)
{
	if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
		return -EINVAL;

	__dma_regs_lock();
	__dma_enable(ch);
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_enable);
int lpc32xx_dma_ch_disable(int ch)
{
	if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
		return -EINVAL;

	__dma_regs_lock();
	__dma_disable(ch);
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_disable);
int lpc32xx_dma_ch_get(struct dma_config *dmachcfg, char *name,
		       void *irq_handler, void *data)
{
	int ret;

	if (!VALID_CHANNEL(dmachcfg->ch))
		return -EINVAL;

	/* If the channel is already enabled, return */
	if (dma_ctrl.dma_channels[dmachcfg->ch].name != NULL)
		return -EBUSY;

	/* Save channel data */
	dma_ctrl.dma_channels[dmachcfg->ch].dmacfg = dmachcfg;
	dma_ctrl.dma_channels[dmachcfg->ch].name = name;
	dma_ctrl.dma_channels[dmachcfg->ch].irq_handler = irq_handler;
	dma_ctrl.dma_channels[dmachcfg->ch].data = data;

	/* Setup the channel with the DMA clock enabled */
	__dma_regs_lock();
	dma_clocks_up();
	ret = lpc32xx_ch_setup(dmachcfg);
	__dma_regs_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_get);
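
/*
 * Example of claiming and enabling a channel (illustrative only; MY_DMA_CH
 * and the handler below are hypothetical placeholders supplied by the
 * calling driver, not values defined in this file):
 *
 *	static void my_dma_irq(int ch, int cause, void *data)
 *	{
 *		if (cause & DMA_ERR_INT)
 *			...;
 *	}
 *
 *	struct dma_config cfg = {
 *		.ch = MY_DMA_CH,
 *		.tc_inten = 1,	.err_inten = 1,
 *		.src_size = 4,	.dst_size = 4,
 *		.src_inc = 1,	.dst_inc = 0,
 *	};
 *	if (lpc32xx_dma_ch_get(&cfg, "my_dev", my_dma_irq, NULL) == 0)
 *		lpc32xx_dma_ch_enable(cfg.ch);
 */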
int lpc32xx_dma_ch_put(int ch)
{
	u32 tmp;

	if (!VALID_CHANNEL(ch))
		return -EINVAL;

	/* If the channel is already disabled, return */
	if (dma_ctrl.dma_channels[ch].name == NULL)
		return -EINVAL;

	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	tmp &= ~DMAC_CHAN_ENABLE;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

	lpc32xx_dma_ch_disable(ch);

	__dma_regs_lock();
	dma_clocks_down();
	__dma_regs_unlock();

	dma_ctrl.dma_channels[ch].name = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_put);
int lpc32xx_dma_ch_pause_unpause(int ch, int pause)
{
	u32 tmp;

	if (!VALID_CHANNEL(ch))
		return -EINVAL;

	/* If the channel is already disabled, return */
	if (dma_ctrl.dma_channels[ch].name == NULL)
		return -EINVAL;

	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	if (pause)
		tmp |= DMAC_CHAN_HALT;
	else
		tmp &= ~DMAC_CHAN_HALT;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_pause_unpause);
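
/*
 * Example (illustrative): lpc32xx_dma_ch_pause_unpause(ch, 1) sets the HALT
 * bit so the channel stops accepting new requests, and
 * lpc32xx_dma_ch_pause_unpause(ch, 0) clears it to resume the transfer.
 */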
int lpc32xx_dma_start_pflow_xfer(int ch,
				 void *src,
				 void *dst,
				 int enable)
{
	u32 tmp;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
		return -EINVAL;

	/* When starting a DMA transfer where the peripheral is the flow
	   controller, DMA must be previously disabled */
	tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
	if (tmp & DMAC_CHAN_ENABLE)
		return -EBUSY;

	__dma_regs_lock();
	__raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
	__raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
	__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
	__raw_writel(dma_ctrl.dma_channels[ch].control, DMACH_CONTROL(DMAIOBASE, ch));

	tmp = dma_ctrl.dma_channels[ch].config |
		dma_ctrl.dma_channels[ch].config_int_mask;
	if (enable != 0)
		tmp |= DMAC_CHAN_ENABLE;
	__raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_pflow_xfer);
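
/*
 * Example (illustrative; buf_phys and periph_fifo are hypothetical physical
 * addresses owned by the calling driver): once a channel has been claimed
 * with lpc32xx_dma_ch_get(), a single memory-to-peripheral buffer can be
 * handed over with the peripheral acting as flow controller:
 *
 *	lpc32xx_dma_start_pflow_xfer(ch, (void *) buf_phys,
 *				     (void *) periph_fifo, 1);
 */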
u32 lpc32xx_dma_llist_v_to_p(int ch,
			     u32 vlist)
{
	u32 pptr;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	pptr = vlist - dma_ctrl.dma_channels[ch].list_vstart;
	pptr += dma_ctrl.dma_channels[ch].list_pstart;

	return pptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_v_to_p);
u32 lpc32xx_dma_llist_p_to_v(int ch,
			     u32 plist)
{
	u32 vptr;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	vptr = plist - dma_ctrl.dma_channels[ch].list_pstart;
	vptr += dma_ctrl.dma_channels[ch].list_vstart;

	return vptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_p_to_v);
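
/*
 * The two helpers above are plain offset arithmetic within the coherent
 * descriptor block.  For example (hypothetical addresses), with
 * list_vstart = 0xc1000000 and list_pstart = 0x31000000, the descriptor at
 * virtual 0xc1000040 translates to physical 0x31000040 and back.
 */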
u32 lpc32xx_dma_alloc_llist(int ch,
			    int entries)
{
	int i;
	dma_addr_t dma_handle;
	struct dma_list_ctrl *pdmalist, *pdmalistst;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
		return 0;

	/*
	 * Limit number of list entries, but add 1 extra entry as a spot holder
	 * for the end of the list
	 */
	entries++;

	/* Save list information */
	dma_ctrl.dma_channels[ch].list_entries = entries;
	dma_ctrl.dma_channels[ch].list_size = (entries * sizeof(struct dma_list_ctrl));
	dma_ctrl.dma_channels[ch].list_vstart = (u32) dma_alloc_coherent(NULL,
		dma_ctrl.dma_channels[ch].list_size, &dma_handle, GFP_KERNEL);
	if (dma_ctrl.dma_channels[ch].list_vstart == 0) {
		/* No allocated DMA space */
		return 0;
	}
	dma_ctrl.dma_channels[ch].list_pstart = (u32) dma_handle;

	/* Setup list tail and head pointers */
	pdmalist = pdmalistst = (struct dma_list_ctrl *) dma_ctrl.dma_channels[ch].list_vstart;
	for (i = 0; i < entries; i++) {
		pdmalistst->next_list_addr = pdmalistst + 1;
		pdmalistst->prev_list_addr = pdmalistst - 1;
		pdmalistst->next_list_phy = lpc32xx_dma_llist_v_to_p(ch, (u32) pdmalistst->next_list_addr);
		pdmalistst->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch, (u32) pdmalistst->prev_list_addr);
		pdmalistst++;
	}

	/* Make the list circular by linking the last and first entries */
	pdmalist[entries - 1].next_list_addr = pdmalist;
	pdmalist[entries - 1].next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
		(u32) pdmalist[entries - 1].next_list_addr);
	pdmalist->prev_list_addr = &pdmalist[entries - 1];
	pdmalist->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch, (u32) pdmalist->prev_list_addr);

	/* Save current free descriptors and current head/tail */
	dma_ctrl.dma_channels[ch].free_entries = entries - 1;
	dma_ctrl.dma_channels[ch].list_head = pdmalist;
	dma_ctrl.dma_channels[ch].list_tail = pdmalist;
	dma_ctrl.dma_channels[ch].list_curr = pdmalist;

	return dma_ctrl.dma_channels[ch].list_vstart;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_alloc_llist);
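
/*
 * Example (illustrative): a driver that wants to queue up to 15 buffers at a
 * time would allocate the ring once after claiming the channel (one extra
 * spot-holder entry is added internally) and check the returned virtual base:
 *
 *	if (lpc32xx_dma_alloc_llist(ch, 15) == 0)
 *		return -ENOMEM;
 */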
void lpc32xx_dma_dealloc_llist(int ch)
{
	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return;

	dma_free_coherent(NULL, dma_ctrl.dma_channels[ch].list_size,
		(void *) dma_ctrl.dma_channels[ch].list_vstart,
		(dma_addr_t) dma_ctrl.dma_channels[ch].list_pstart);
	dma_ctrl.dma_channels[ch].list_head = 0;
	dma_ctrl.dma_channels[ch].list_tail = 0;
	dma_ctrl.dma_channels[ch].list_entries = 0;
	dma_ctrl.dma_channels[ch].free_entries = 0;
	dma_ctrl.dma_channels[ch].list_vstart = 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_dealloc_llist);
u32 lpc32xx_dma_get_llist_head(int ch)
{
	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Return the current list pointer (virtual) for the channel */
	return lpc32xx_dma_llist_p_to_v(ch,
		__raw_readl(DMACH_LLI(DMAIOBASE, ch)));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_get_llist_head);
void lpc32xx_dma_flush_llist(int ch)
{
	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return;

	/* Disable channel and clear LLI */
	__dma_regs_lock();
	__dma_disable(ch);
	__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
	__dma_regs_unlock();

	/* Reset the software list to its initial (empty) state */
	dma_ctrl.dma_channels[ch].list_head = (struct dma_list_ctrl *)
		dma_ctrl.dma_channels[ch].list_vstart;
	dma_ctrl.dma_channels[ch].list_tail = (struct dma_list_ctrl *)
		dma_ctrl.dma_channels[ch].list_vstart;
	dma_ctrl.dma_channels[ch].list_curr = (struct dma_list_ctrl *)
		dma_ctrl.dma_channels[ch].list_vstart;
	dma_ctrl.dma_channels[ch].free_entries =
		dma_ctrl.dma_channels[ch].list_entries - 1;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_flush_llist);
u32 lpc32xx_dma_queue_llist_entry(int ch,
				  void *src,
				  void *dst,
				  int size)
{
	struct dma_list_ctrl *plhead;
	u32 ctrl, cfg;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Exit if all the buffers are used */
	if (dma_ctrl.dma_channels[ch].free_entries == 0) {
		return 0;
	}

	/* Next available DMA link descriptor */
	plhead = dma_ctrl.dma_channels[ch].list_head;

	/* Adjust size to number of transfers (vs bytes) */
	size = size / dma_ctrl.dma_channels[ch].dmacfg->dst_size;

	/* Setup control and config words */
	ctrl = dma_ctrl.dma_channels[ch].control | size;
	cfg = dma_ctrl.dma_channels[ch].config | DMAC_CHAN_ENABLE |
		dma_ctrl.dma_channels[ch].config_int_mask;

	/* Populate DMA linked data structure */
	plhead->dmall.src = (u32) src;
	plhead->dmall.dest = (u32) dst;
	plhead->dmall.next_lli = 0;
	plhead->dmall.ctrl = ctrl;

	__dma_regs_lock();

	/* Append this link to the end of the previous link */
	plhead->prev_list_addr->dmall.next_lli = lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

	/* Decrement available buffers */
	dma_ctrl.dma_channels[ch].free_entries--;

	/* If the DMA channel is idle, then the buffer needs to be placed directly into
	   the hardware registers to (re)start the transfer */
	if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) & DMAC_CHAN_ENABLE) == 0) {
		/* DMA is disabled, so move the current buffer into the
		   channel registers and start transfer */
		__raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
		__raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
		__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
		__raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
		__raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
	} else if (__raw_readl(DMACH_LLI(DMAIOBASE, ch)) == 0) {
		/* Update current entry to next entry */
		__raw_writel(dma_ctrl.dma_channels[ch].list_tail->next_list_phy,
			DMACH_LLI(DMAIOBASE, ch));

		/*
		 * If the channel was stopped before the next entry made it into the
		 * hardware descriptor, the next entry didn't make it there fast enough,
		 * so load the new descriptor here.
		 */
		if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) & DMAC_CHAN_ENABLE) == 0) {
			__raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
			__raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
			__raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
			__raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
			__raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
		}
	}

	/* Process next link on next call */
	dma_ctrl.dma_channels[ch].list_head = plhead->next_list_addr;

	__dma_regs_unlock();

	return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist_entry);
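
/*
 * Example (illustrative; buf_phys and periph_fifo are hypothetical physical
 * addresses owned by the calling driver): queuing a buffer both appends a
 * descriptor and, if the channel has gone idle, restarts the transfer.  A
 * return of 0 means the ring is full and the caller should retry after a
 * terminal count interrupt frees an entry:
 *
 *	if (lpc32xx_dma_queue_llist_entry(ch, (void *) buf_phys,
 *					  (void *) periph_fifo, bytes) == 0)
 *		return -EBUSY;
 */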
u32 lpc32xx_get_free_llist_entry(int ch)
{
	struct dma_list_ctrl *pltail;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Exit if no entries to free */
	if (dma_ctrl.dma_channels[ch].free_entries ==
	    dma_ctrl.dma_channels[ch].list_entries) {
		return 0;
	}

	/* Get tail pointer */
	pltail = dma_ctrl.dma_channels[ch].list_tail;

	dma_ctrl.dma_channels[ch].list_tail = pltail->next_list_addr;

	/* Increment available buffers */
	dma_ctrl.dma_channels[ch].free_entries++;

	return (u32) pltail;
}
EXPORT_SYMBOL_GPL(lpc32xx_get_free_llist_entry);
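
/*
 * Example (illustrative): the interrupt handler registered through
 * lpc32xx_dma_ch_get() would typically recycle a completed descriptor on
 * each terminal count before queuing further buffers:
 *
 *	static void my_dma_irq(int ch, int cause, void *data)
 *	{
 *		if (!(cause & DMA_ERR_INT))
 *			lpc32xx_get_free_llist_entry(ch);
 *	}
 */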
int lpc32xx_dma_start_xfer(int ch, u32 config)
{
	struct dma_list_ctrl *plhead;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return -EINVAL;

	plhead = dma_ctrl.dma_channels[ch].list_head;

	__dma_regs_lock();
	__raw_writel(plhead->dmall.src, DMACH_SRC_ADDR(DMAIOBASE, ch));
	__raw_writel(plhead->dmall.dest, DMACH_DEST_ADDR(DMAIOBASE, ch));
	__raw_writel(plhead->dmall.next_lli, DMACH_LLI(DMAIOBASE, ch));
	__raw_writel(plhead->dmall.ctrl, DMACH_CONTROL(DMAIOBASE, ch));
	__raw_writel(config, DMACH_CONFIG_CH(DMAIOBASE, ch));
	__dma_regs_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_xfer);
u32 lpc32xx_dma_queue_llist(int ch, void *src, void *dst,
			    int size, u32 ctrl)
{
	struct dma_list_ctrl *plhead;

	if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
	    (dma_ctrl.dma_channels[ch].list_vstart == 0))
		return 0;

	/* Exit if all the buffers are used */
	if (dma_ctrl.dma_channels[ch].free_entries == 0) {
		return 0;
	}

	/* Next available DMA link descriptor */
	plhead = dma_ctrl.dma_channels[ch].list_curr;

	/* Populate DMA linked data structure */
	plhead->dmall.src = (u32) src;
	plhead->dmall.dest = (u32) dst;
	plhead->dmall.next_lli = 0;
	plhead->dmall.ctrl = ctrl;

	/* Append this link to the end of the previous link */
	plhead->prev_list_addr->dmall.next_lli = lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

	/* Decrement available buffers */
	dma_ctrl.dma_channels[ch].free_entries--;

	/* Process next link on next call */
	dma_ctrl.dma_channels[ch].list_curr = plhead->next_list_addr;

	return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist);
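
/*
 * Example (illustrative; src_phys[], dst_phys, ctrl, and cfg below are
 * hypothetical values from the calling driver): lpc32xx_dma_queue_llist()
 * only builds descriptors, so the transfer is kicked off explicitly with
 * lpc32xx_dma_start_xfer() using a config word that includes
 * DMAC_CHAN_ENABLE:
 *
 *	for (i = 0; i < nbufs; i++)
 *		lpc32xx_dma_queue_llist(ch, (void *) src_phys[i],
 *					(void *) dst_phys, bytes, ctrl);
 *	lpc32xx_dma_start_xfer(ch, cfg);
 */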
void lpc32xx_dma_force_burst(int ch, int src)
{
	__raw_writel(1 << src, DMA_SW_BURST_REQ(DMAIOBASE));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_force_burst);
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i;
	unsigned long dint = __raw_readl(DMA_INT_STAT(DMAIOBASE));
	unsigned long tcint = __raw_readl(DMA_INT_TC_STAT(DMAIOBASE));
	unsigned long eint = __raw_readl(DMA_INT_ERR_STAT(DMAIOBASE));
	unsigned long i_bit;

	for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
		i_bit = 1 << i;
		if (dint & i_bit) {
			struct dma_channel *channel = &dma_ctrl.dma_channels[i];

			if (channel->name && channel->irq_handler) {
				int cause = 0;

				if (eint & i_bit) {
					__raw_writel(i_bit, DMA_INT_ERR_CLEAR(DMAIOBASE));
					cause |= DMA_ERR_INT;
				}
				if (tcint & i_bit) {
					__raw_writel(i_bit, DMA_INT_TC_CLEAR(DMAIOBASE));
					cause |= DMA_TC_INT;
				}
				channel->irq_handler(i, cause, channel->data);
			} else {
				/*
				 * IRQ for an unregistered DMA channel
				 */
				__raw_writel(i_bit, DMA_INT_ERR_CLEAR(DMAIOBASE));
				__raw_writel(i_bit, DMA_INT_TC_CLEAR(DMAIOBASE));
				printk(KERN_WARNING
					"spurious IRQ for DMA channel %d\n", i);
			}
		}
	}

	return IRQ_HANDLED;
}
static int __init lpc32xx_dma_init(void)
{
	int ret;

	ret = request_irq(IRQ_LPC32XX_DMA, dma_irq_handler, 0, "DMA", NULL);
	if (ret) {
		printk(KERN_CRIT "Can't register IRQ for DMA\n");
		goto out;
	}

	/* Get the DMA clock */
	dma_ctrl.clk = clk_get(NULL, "clk_dmac");
	if (IS_ERR(dma_ctrl.clk)) {
		ret = -ENODEV;
		goto errout;
	}
	clk_enable(dma_ctrl.clk);

	/* Clear DMA controller */
	__raw_writel(1, DMA_CONFIG(DMAIOBASE));
	__raw_writel(0xFF, DMA_INT_TC_CLEAR(DMAIOBASE));
	__raw_writel(0xFF, DMA_INT_ERR_CLEAR(DMAIOBASE));

	/* Clock is only enabled when needed to save power */
	clk_disable(dma_ctrl.clk);

	return 0;

errout:
	free_irq(IRQ_LPC32XX_DMA, NULL);

out:
	return ret;
}

arch_initcall(lpc32xx_dma_init);