arm: lpc32xx: Add polling DMA status function
[linux-2.6.34-lpc32xx.git] arch/arm/mach-lpc32xx/dma.c
/*
 *  linux/arch/arm/mach-lpc32xx/dma.c
 *
 *  Copyright (C) 2008 NXP Semiconductors
 *  (Based on parts of the PNX4008 DMA driver)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <mach/dma.h>
#include <mach/dmac.h>

#define DMAIOBASE io_p2v(LPC32XX_DMA_BASE)
#define VALID_CHANNEL(c) (((c) >= 0) && ((c) < MAX_DMA_CHANNELS))

static DEFINE_SPINLOCK(dma_lock);

struct dma_linked_list {
        u32 src;
        u32 dest;
        u32 next_lli;
        u32 ctrl;
};

/*
 * For DMA linked list operation, a linked list of DMA descriptors
 * is maintained along with some data to manage the list in software.
 */
struct dma_list_ctrl {
        struct dma_linked_list dmall; /* DMA list descriptor */
        struct dma_list_ctrl *next_list_addr;   /* Virtual address of next list entry */
        struct dma_list_ctrl *prev_list_addr;   /* Virtual address of previous list entry */
        u32 next_list_phy;    /* Physical address of next list entry */
        u32 prev_list_phy;    /* Physical address of previous list entry */
};

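/*
 * The descriptor area is managed as a circular list: the allocator below
 * links each entry's next/prev pointers (virtual and physical) so the last
 * entry wraps back to the first. A sketch for a three-entry list:
 *
 *   entry0 -> entry1 -> entry2 -+
 *     ^                         |
 *     +-------------------------+
 *
 * Only the embedded struct dma_linked_list is ever seen by the DMA
 * hardware; the surrounding pointers are software bookkeeping only.
 */
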
/* Each DMA channel has one of these structures */
struct dma_channel {
        char *name;
        void (*irq_handler)(int, int, void *);
        void *data;
        struct dma_config *dmacfg;
        u32 control;
        u32 config;
        u32 config_int_mask;

        int list_entries; /* Number of list entries */
        u32 list_size; /* Total size of allocated list in bytes */
        u32 list_vstart; /* Allocated (virtual) address of list */
        u32 list_pstart; /* Allocated (physical) address of list */
        int free_entries; /* Number of free descriptors */
        struct dma_list_ctrl *list_head, *list_tail, *list_curr;
};

struct dma_control {
        struct clk *clk;
        int num_clks;
        struct dma_channel dma_channels[MAX_DMA_CHANNELS];
};
static struct dma_control dma_ctrl;

static inline void __dma_regs_lock(void)
{
        spin_lock_irq(&dma_lock);
}

static inline void __dma_regs_unlock(void)
{
        spin_unlock_irq(&dma_lock);
}

static inline void __dma_enable(int ch)
{
        u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        ch_cfg |= DMAC_CHAN_ENABLE;
        __raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}

static inline void __dma_disable(int ch)
{
        u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        ch_cfg &= ~DMAC_CHAN_ENABLE;
        __raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}

static void dma_clocks_up(void)
{
        /* Enable DMA clock if needed */
        if (dma_ctrl.num_clks == 0) {
                clk_enable(dma_ctrl.clk);
                __raw_writel(DMAC_CTRL_ENABLE, DMA_CONFIG(DMAIOBASE));
        }

        dma_ctrl.num_clks++;
}

static void dma_clocks_down(void)
{
        dma_ctrl.num_clks--;

        /* Disable DMA clock if needed */
        if (dma_ctrl.num_clks == 0) {
                __raw_writel(0, DMA_CONFIG(DMAIOBASE));
                clk_disable(dma_ctrl.clk);
        }
}

static int lpc32xx_ch_setup(struct dma_config *dmachcfg)
{
        u32 tmpctrl, tmpcfg, tmp;
        int ch = dmachcfg->ch;

        /* Channel control setup */
        tmpctrl = 0;
        switch (dmachcfg->src_size) {
        case 1:
                tmpctrl |= DMAC_CHAN_SRC_WIDTH_8;
                break;

        case 2:
                tmpctrl |= DMAC_CHAN_SRC_WIDTH_16;
                break;

        case 4:
                tmpctrl |= DMAC_CHAN_SRC_WIDTH_32;
                break;

        default:
                return -EINVAL;
        }
        switch (dmachcfg->dst_size) {
        case 1:
                tmpctrl |= DMAC_CHAN_DEST_WIDTH_8;
                break;

        case 2:
                tmpctrl |= DMAC_CHAN_DEST_WIDTH_16;
                break;

        case 4:
                tmpctrl |= DMAC_CHAN_DEST_WIDTH_32;
                break;

        default:
                return -EINVAL;
        }
        if (dmachcfg->src_inc != 0)
                tmpctrl |= DMAC_CHAN_SRC_AUTOINC;
        if (dmachcfg->dst_inc != 0)
                tmpctrl |= DMAC_CHAN_DEST_AUTOINC;
        if (dmachcfg->src_ahb1 != 0)
                tmpctrl |= DMAC_CHAN_SRC_AHB1;
        if (dmachcfg->dst_ahb1 != 0)
                tmpctrl |= DMAC_CHAN_DEST_AHB1;
        if (dmachcfg->tc_inten != 0)
                tmpctrl |= DMAC_CHAN_INT_TC_EN;
        tmpctrl |= dmachcfg->src_bsize | dmachcfg->dst_bsize;
        dma_ctrl.dma_channels[ch].control = tmpctrl;

        /* Channel config setup */
        tmpcfg = dmachcfg->src_prph | dmachcfg->dst_prph |
                dmachcfg->flowctrl;
        dma_ctrl.dma_channels[ch].config = tmpcfg;

        dma_ctrl.dma_channels[ch].config_int_mask = 0;
        if (dmachcfg->err_inten != 0)
                dma_ctrl.dma_channels[ch].config_int_mask |= DMAC_CHAN_IE;
        if (dmachcfg->tc_inten != 0)
                dma_ctrl.dma_channels[ch].config_int_mask |= DMAC_CHAN_ITC;

        tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        tmp &= ~DMAC_CHAN_ENABLE;
        __raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

        /* Clear interrupts for channel */
        __raw_writel((1 << ch), DMA_INT_TC_CLEAR(DMAIOBASE));
        __raw_writel((1 << ch), DMA_INT_ERR_CLEAR(DMAIOBASE));

        /* Write control and config words */
        __raw_writel(tmpctrl, DMACH_CONTROL(DMAIOBASE, ch));
        __raw_writel(tmpcfg, DMACH_CONFIG_CH(DMAIOBASE, ch));

        return 0;
}

int lpc32xx_dma_ch_enable(int ch)
{
        if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
                return -EINVAL;

        __dma_regs_lock();
        __dma_enable(ch);
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_enable);

int lpc32xx_dma_ch_disable(int ch)
{
        if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
                return -EINVAL;

        __dma_regs_lock();
        __dma_disable(ch);
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_disable);

int lpc32xx_dma_ch_get(struct dma_config *dmachcfg, char *name,
                void *irq_handler, void *data)
{
        int ret;

        if (!VALID_CHANNEL(dmachcfg->ch))
                return -EINVAL;

        /* If the channel is already claimed, return */
        if (dma_ctrl.dma_channels[dmachcfg->ch].name != NULL)
                return -ENODEV;

        /* Save channel data */
        dma_ctrl.dma_channels[dmachcfg->ch].dmacfg = dmachcfg;
        dma_ctrl.dma_channels[dmachcfg->ch].name = name;
        dma_ctrl.dma_channels[dmachcfg->ch].irq_handler = irq_handler;
        dma_ctrl.dma_channels[dmachcfg->ch].data = data;

        /* Setup channel */
        __dma_regs_lock();
        dma_clocks_up();
        ret = lpc32xx_ch_setup(dmachcfg);
        __dma_regs_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_get);

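/*
 * Example use of lpc32xx_dma_ch_get() (a sketch; the handler name, channel
 * number, and dma_config field values are hypothetical and depend on the
 * client device):
 *
 *   static void my_dma_handler(int ch, int cause, void *data)
 *   {
 *           if (cause & DMA_TC_INT)
 *                   complete(data);
 *   }
 *
 *   static struct dma_config cfg = {
 *           .ch = my_ch,
 *           .src_size = 4,
 *           .dst_size = 4,
 *           .tc_inten = 1,
 *   };
 *
 *   ret = lpc32xx_dma_ch_get(&cfg, "my-dev", my_dma_handler, &done);
 *
 * The dma_config structure is referenced, not copied, so it must remain
 * valid until lpc32xx_dma_ch_put() is called for the channel.
 */
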
int lpc32xx_dma_ch_put(int ch)
{
        if (!VALID_CHANNEL(ch))
                return -EINVAL;

        /* If the channel is not claimed, return */
        if (dma_ctrl.dma_channels[ch].name == NULL)
                return -EINVAL;

        /*
         * Use the lock-free __dma_disable() here; calling
         * lpc32xx_dma_ch_disable() would take dma_lock a second time
         */
        __dma_regs_lock();
        __dma_disable(ch);
        dma_clocks_down();
        __dma_regs_unlock();

        dma_ctrl.dma_channels[ch].name = NULL;

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_put);

int lpc32xx_dma_ch_pause_unpause(int ch, int pause)
{
        u32 tmp;

        if (!VALID_CHANNEL(ch))
                return -EINVAL;

        /* If the channel is not claimed, return */
        if (dma_ctrl.dma_channels[ch].name == NULL)
                return -EINVAL;

        __dma_regs_lock();
        tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        if (pause)
                tmp |= DMAC_CHAN_HALT;
        else
                tmp &= ~DMAC_CHAN_HALT;
        __raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_pause_unpause);

int lpc32xx_dma_start_pflow_xfer(int ch,
                                void *src,
                                void *dst,
                                int enable)
{
        u32 tmp;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
                return -EINVAL;

        /*
         * When starting a DMA transfer where the peripheral is the flow
         * controller, the channel must not already be enabled
         */
        tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        if (tmp & DMAC_CHAN_ENABLE)
                return -EBUSY;

        __dma_regs_lock();
        __raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
        __raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
        __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
        __raw_writel(dma_ctrl.dma_channels[ch].control,
                DMACH_CONTROL(DMAIOBASE, ch));

        tmp = dma_ctrl.dma_channels[ch].config |
                dma_ctrl.dma_channels[ch].config_int_mask;
        if (enable != 0)
                tmp |= DMAC_CHAN_ENABLE;
        __raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_pflow_xfer);

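/*
 * A minimal sketch of a peripheral-flow-control transfer, assuming src and
 * dst hold bus addresses reachable by the DMA controller (variable names
 * are hypothetical):
 *
 *   ret = lpc32xx_dma_start_pflow_xfer(ch, (void *) buf_phys,
 *                                      (void *) fifo_phys, 1);
 *
 * With the peripheral acting as flow controller, the peripheral, not the
 * transfer size in the control word, determines when the transfer ends,
 * which is why the LLI register is simply cleared here.
 */
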
int lpc32xx_dma_is_active(int ch)
{
        int active = 0;

        if ((VALID_CHANNEL(ch)) && (dma_ctrl.dma_channels[ch].name != NULL)) {
                if (__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) &
                        DMAC_CHAN_ENABLE)
                        active = 1;
        }

        return active;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_is_active);

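/*
 * lpc32xx_dma_is_active() lets clients poll the channel status instead of
 * (or in addition to) waiting for the terminal count interrupt; the
 * controller clears the channel enable bit when a transfer completes. A
 * minimal busy-wait sketch (a real caller would bound this with a timeout):
 *
 *   while (lpc32xx_dma_is_active(ch))
 *           cpu_relax();
 */
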
u32 lpc32xx_dma_llist_v_to_p(int ch,
                             u32 vlist)
{
        u32 pptr;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        pptr = vlist - dma_ctrl.dma_channels[ch].list_vstart;
        pptr += dma_ctrl.dma_channels[ch].list_pstart;

        return pptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_v_to_p);

u32 lpc32xx_dma_llist_p_to_v(int ch,
                             u32 plist)
{
        u32 vptr;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        vptr = plist - dma_ctrl.dma_channels[ch].list_pstart;
        vptr += dma_ctrl.dma_channels[ch].list_vstart;

        return vptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_p_to_v);

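/*
 * The two helpers above translate between the virtual and physical views
 * of the coherent list area by offsetting within the allocation, so for
 * any address v inside the list:
 *
 *   lpc32xx_dma_llist_p_to_v(ch, lpc32xx_dma_llist_v_to_p(ch, v)) == v
 *
 * Both return 0 on an invalid channel or when no list is allocated, so
 * callers must treat 0 as failure rather than a valid address.
 */
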
u32 lpc32xx_dma_alloc_llist(int ch,
                            int entries)
{
        int i;
        dma_addr_t dma_handle;
        struct dma_list_ctrl *pdmalist, *pdmalistst;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
                return 0;

        /*
         * Limit number of list entries, but add 1 extra entry as a spot
         * holder for the end of the list
         */
        if (entries < 2)
                entries = 2;
        if (entries > 64)
                entries = 64;
        entries++;

        /* Save list information */
        dma_ctrl.dma_channels[ch].list_entries = entries;
        dma_ctrl.dma_channels[ch].list_size =
                (entries * sizeof(struct dma_list_ctrl));
        dma_ctrl.dma_channels[ch].list_vstart = (u32) dma_alloc_coherent(NULL,
                dma_ctrl.dma_channels[ch].list_size, &dma_handle, GFP_KERNEL);
        if (dma_ctrl.dma_channels[ch].list_vstart == 0) {
                /* No allocated DMA space */
                return 0;
        }
        dma_ctrl.dma_channels[ch].list_pstart = (u32) dma_handle;

        /* Setup list tail and head pointers */
        pdmalist = pdmalistst =
                (struct dma_list_ctrl *) dma_ctrl.dma_channels[ch].list_vstart;
        for (i = 0; i < entries; i++) {
                pdmalistst->next_list_addr = pdmalistst + 1;
                pdmalistst->prev_list_addr = pdmalistst - 1;
                pdmalistst->next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                        (u32) pdmalistst->next_list_addr);
                pdmalistst->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                        (u32) pdmalistst->prev_list_addr);
                pdmalistst++;
        }

        /* Wrap the first and last entries to make the list circular */
        pdmalist[entries - 1].next_list_addr = pdmalist;
        pdmalist[entries - 1].next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                (u32) pdmalist[entries - 1].next_list_addr);
        pdmalist->prev_list_addr = &pdmalist[entries - 1];
        pdmalist->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                (u32) pdmalist->prev_list_addr);

        /* Save current free descriptors and current head/tail */
        dma_ctrl.dma_channels[ch].free_entries = entries - 1;
        dma_ctrl.dma_channels[ch].list_head = pdmalist;
        dma_ctrl.dma_channels[ch].list_tail = pdmalist;
        dma_ctrl.dma_channels[ch].list_curr = pdmalist;

        return dma_ctrl.dma_channels[ch].list_vstart;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_alloc_llist);

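/*
 * Typical lifetime of a channel's descriptor list (a sketch; the entry
 * count and buffer variables are hypothetical). A request for N entries
 * allows N queued buffers, with one extra entry allocated internally as
 * the end-of-list placeholder:
 *
 *   lpc32xx_dma_alloc_llist(ch, 16);
 *   ...
 *   lpc32xx_dma_queue_llist_entry(ch, src_phys, dst_phys, len);
 *   ...
 *   lpc32xx_get_free_llist_entry(ch);    (retire the oldest entry)
 *   ...
 *   lpc32xx_dma_dealloc_llist(ch);
 */
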
void lpc32xx_dma_dealloc_llist(int ch)
{
        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return;

        dma_free_coherent(NULL, dma_ctrl.dma_channels[ch].list_size,
                (void *) dma_ctrl.dma_channels[ch].list_vstart,
                (dma_addr_t) dma_ctrl.dma_channels[ch].list_pstart);
        dma_ctrl.dma_channels[ch].list_head = NULL;
        dma_ctrl.dma_channels[ch].list_tail = NULL;
        dma_ctrl.dma_channels[ch].list_curr = NULL;
        dma_ctrl.dma_channels[ch].list_entries = 0;
        dma_ctrl.dma_channels[ch].free_entries = 0;
        dma_ctrl.dma_channels[ch].list_vstart = 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_dealloc_llist);

u32 lpc32xx_dma_get_llist_head(int ch)
{
        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Return the current list pointer (virtual) for the DMA channel */
        return lpc32xx_dma_llist_p_to_v(ch,
                __raw_readl(DMACH_LLI(DMAIOBASE, ch)));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_get_llist_head);

void lpc32xx_dma_flush_llist(int ch)
{
        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return;

        /* Disable channel and clear LLI */
        __dma_regs_lock();
        __dma_disable(ch);
        __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
        __dma_regs_unlock();

        dma_ctrl.dma_channels[ch].list_head = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        dma_ctrl.dma_channels[ch].list_tail = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        dma_ctrl.dma_channels[ch].list_curr = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        dma_ctrl.dma_channels[ch].free_entries =
                dma_ctrl.dma_channels[ch].list_entries - 1;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_flush_llist);

u32 lpc32xx_dma_queue_llist_entry(int ch,
                                  void *src,
                                  void *dst,
                                  int size)
{
        struct dma_list_ctrl *plhead;
        u32 ctrl, cfg;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Exit if all the buffers are used */
        if (dma_ctrl.dma_channels[ch].free_entries == 0)
                return 0;

        /* Next available DMA link descriptor */
        plhead = dma_ctrl.dma_channels[ch].list_head;

        /* Adjust size from bytes to number of transfers */
        size = size / dma_ctrl.dma_channels[ch].dmacfg->dst_size;

        /* Setup control and config words */
        ctrl = dma_ctrl.dma_channels[ch].control | size;
        cfg = dma_ctrl.dma_channels[ch].config | DMAC_CHAN_ENABLE |
                dma_ctrl.dma_channels[ch].config_int_mask;

        /* Populate DMA linked data structure */
        plhead->dmall.src = (u32) src;
        plhead->dmall.dest = (u32) dst;
        plhead->dmall.next_lli = 0;
        plhead->dmall.ctrl = ctrl;

        __dma_regs_lock();

        /* Append this link to the end of the previous link */
        plhead->prev_list_addr->dmall.next_lli =
                lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

        /* Decrement available buffers */
        dma_ctrl.dma_channels[ch].free_entries--;

        /*
         * If the DMA channel is idle, then the buffer needs to be placed
         * directly into the DMA registers
         */
        if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) &
                DMAC_CHAN_ENABLE) == 0) {
                /*
                 * DMA is disabled, so move the current buffer into the
                 * channel registers and start the transfer
                 */
                __raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
                __raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
                __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
                __raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
                __raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
        } else if (__raw_readl(DMACH_LLI(DMAIOBASE, ch)) == 0) {
                /* Chain the new entry after the one now in flight */
                __raw_writel(dma_ctrl.dma_channels[ch].list_tail->next_list_phy,
                        DMACH_LLI(DMAIOBASE, ch));

                /*
                 * If the channel stopped before the new entry made it into
                 * the hardware LLI register, the hardware missed it, so
                 * load the new descriptor directly here.
                 */
                if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) &
                        DMAC_CHAN_ENABLE) == 0) {
                        __raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
                        __raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
                        __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
                        __raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
                        __raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
                }
        }

        /* Process next link on next call */
        dma_ctrl.dma_channels[ch].list_head = plhead->next_list_addr;

        __dma_regs_unlock();

        return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist_entry);

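/*
 * A queuing loop for scatter-gather style I/O might look like this (a
 * sketch; the buffer array is hypothetical, and sizes are in bytes):
 *
 *   for (i = 0; i < nbufs; i++) {
 *           if (!lpc32xx_dma_queue_llist_entry(ch, buf[i].src_phys,
 *                                              buf[i].dst_phys,
 *                                              buf[i].len))
 *                   break;    (list full; retire entries first)
 *   }
 *
 * The return value is the virtual address of the descriptor used, or 0 if
 * no free descriptor was available.
 */
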
u32 lpc32xx_get_free_llist_entry(int ch)
{
        struct dma_list_ctrl *pltail;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Exit if no entries to free */
        if (dma_ctrl.dma_channels[ch].free_entries ==
                dma_ctrl.dma_channels[ch].list_entries)
                return 0;

        /* Get tail pointer */
        pltail = dma_ctrl.dma_channels[ch].list_tail;

        /* Next tail */
        dma_ctrl.dma_channels[ch].list_tail = pltail->next_list_addr;

        /* Increment available buffers */
        dma_ctrl.dma_channels[ch].free_entries++;

        return (u32) pltail;
}
EXPORT_SYMBOL_GPL(lpc32xx_get_free_llist_entry);

int lpc32xx_dma_start_xfer(int ch, u32 config)
{
        struct dma_list_ctrl *plhead;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return -EINVAL;

        plhead = dma_ctrl.dma_channels[ch].list_head;
        __dma_regs_lock();
        __raw_writel(plhead->dmall.src, DMACH_SRC_ADDR(DMAIOBASE, ch));
        __raw_writel(plhead->dmall.dest, DMACH_DEST_ADDR(DMAIOBASE, ch));
        __raw_writel(plhead->dmall.next_lli, DMACH_LLI(DMAIOBASE, ch));
        __raw_writel(plhead->dmall.ctrl, DMACH_CONTROL(DMAIOBASE, ch));
        __raw_writel(config, DMACH_CONFIG_CH(DMAIOBASE, ch));
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_xfer);

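/*
 * lpc32xx_dma_start_xfer() starts a transfer from descriptors previously
 * queued with lpc32xx_dma_queue_llist(). The caller supplies the complete
 * config word; a plausible construction (a sketch, with chan_cfg being a
 * hypothetical copy of the client's channel config and interrupt enable
 * bits) is:
 *
 *   lpc32xx_dma_start_xfer(ch, chan_cfg | DMAC_CHAN_ENABLE);
 */
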
u32 lpc32xx_dma_queue_llist(int ch, void *src, void *dst,
                            int size, u32 ctrl)
{
        struct dma_list_ctrl *plhead;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Exit if all the buffers are used */
        if (dma_ctrl.dma_channels[ch].free_entries == 0)
                return 0;

        /* Next available DMA link descriptor */
        plhead = dma_ctrl.dma_channels[ch].list_curr;

        /* Populate DMA linked data structure */
        plhead->dmall.src = (u32) src;
        plhead->dmall.dest = (u32) dst;
        plhead->dmall.next_lli = 0;
        plhead->dmall.ctrl = ctrl;

        /* Append this link to the end of the previous link */
        plhead->prev_list_addr->dmall.next_lli =
                lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

        /* Decrement available buffers */
        dma_ctrl.dma_channels[ch].free_entries--;

        /* Process next link on next call */
        dma_ctrl.dma_channels[ch].list_curr = plhead->next_list_addr;

        return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist);

void lpc32xx_dma_force_burst(int ch, int src)
{
        __raw_writel(1 << src, DMA_SW_BURST_REQ(DMAIOBASE));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_force_burst);

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
        int i;
        unsigned long dint = __raw_readl(DMA_INT_STAT(DMAIOBASE));
        unsigned long tcint = __raw_readl(DMA_INT_TC_STAT(DMAIOBASE));
        unsigned long eint = __raw_readl(DMA_INT_ERR_STAT(DMAIOBASE));
        unsigned long i_bit;

        for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
                i_bit = 1 << i;
                if (dint & i_bit) {
                        struct dma_channel *channel =
                                &dma_ctrl.dma_channels[i];

                        if (channel->name && channel->irq_handler) {
                                int cause = 0;

                                if (eint & i_bit) {
                                        __raw_writel(i_bit,
                                                DMA_INT_ERR_CLEAR(DMAIOBASE));
                                        cause |= DMA_ERR_INT;
                                }
                                if (tcint & i_bit) {
                                        __raw_writel(i_bit,
                                                DMA_INT_TC_CLEAR(DMAIOBASE));
                                        cause |= DMA_TC_INT;
                                }

                                channel->irq_handler(i, cause, channel->data);
                        } else {
                                /* IRQ for an unregistered DMA channel */
                                __raw_writel(i_bit,
                                        DMA_INT_ERR_CLEAR(DMAIOBASE));
                                __raw_writel(i_bit,
                                        DMA_INT_TC_CLEAR(DMAIOBASE));
                                printk(KERN_WARNING
                                       "spurious IRQ for DMA channel %d\n", i);
                        }
                }
        }

        return IRQ_HANDLED;
}

static int __init lpc32xx_dma_init(void)
{
        int ret;

        ret = request_irq(IRQ_LPC32XX_DMA, dma_irq_handler, 0, "DMA", NULL);
        if (ret) {
                printk(KERN_CRIT "Can't request IRQ for DMA\n");
                goto out;
        }

        /* Get DMA clock */
        dma_ctrl.clk = clk_get(NULL, "clk_dmac");
        if (IS_ERR(dma_ctrl.clk)) {
                ret = -ENODEV;
                goto errout;
        }
        clk_enable(dma_ctrl.clk);

        /* Enable the controller and clear any pending interrupts */
        __raw_writel(DMAC_CTRL_ENABLE, DMA_CONFIG(DMAIOBASE));
        __raw_writel(0xFF, DMA_INT_TC_CLEAR(DMAIOBASE));
        __raw_writel(0xFF, DMA_INT_ERR_CLEAR(DMAIOBASE));

        /* The clock is only enabled when needed, to save power */
        clk_disable(dma_ctrl.clk);

        return 0;

errout:
        free_irq(IRQ_LPC32XX_DMA, NULL);

out:
        return ret;
}
arch_initcall(lpc32xx_dma_init);