/*
 *  linux/arch/arm/mach-lpc32xx/dma.c
 *
 *  Copyright (C) 2008 NXP Semiconductors
 *  (Based on parts of the PNX4008 DMA driver)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/dmac.h>

#define DMAIOBASE io_p2v(LPC32XX_DMA_BASE)
#define VALID_CHANNEL(c) (((c) >= 0) && ((c) < MAX_DMA_CHANNELS))

static DEFINE_SPINLOCK(dma_lock);

struct dma_linked_list {
        u32 src;
        u32 dest;
        u32 next_lli;
        u32 ctrl;
};

/*
 * For DMA linked-list operation, a linked list of DMA descriptors is
 * maintained along with some data to manage the list in software.
 */
struct dma_list_ctrl {
        struct dma_linked_list dmall; /* DMA list descriptor */
        struct dma_list_ctrl *next_list_addr;   /* Virtual address of next list entry */
        struct dma_list_ctrl *prev_list_addr;   /* Virtual address of previous list entry */
        u32 next_list_phy;    /* Physical address of next list entry */
        u32 prev_list_phy;    /* Physical address of previous list entry */
};
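
/*
 * The dma_linked_list part of each entry is what the controller itself
 * fetches through DMACH_LLI, so its layout (src, dest, next_lli, ctrl)
 * must stay exactly as the hardware expects. All entries live in one
 * coherent allocation, which is why the list address helpers below can
 * translate between virtual and physical views with plain offset
 * arithmetic, e.g.:
 *
 *   phys = virt - list_vstart + list_pstart;
 *
 * (see lpc32xx_dma_llist_v_to_p() and lpc32xx_dma_llist_p_to_v()).
 */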

/* Each DMA channel has one of these structures */
struct dma_channel {
        char *name;
        void (*irq_handler)(int, int, void *);
        void *data;
        struct dma_config *dmacfg;
        u32 control;
        u32 config;
        u32 config_int_mask;

        int list_entries; /* Number of list entries */
        u32 list_size; /* Total size of allocated list in bytes */
        u32 list_vstart; /* Allocated (virtual) address of list */
        u32 list_pstart; /* Allocated (physical) address of list */
        int free_entries; /* Number of free descriptors */
        struct dma_list_ctrl *list_head, *list_tail, *list_curr;
};

struct dma_control {
        struct clk *clk;
        int num_clks;
        struct dma_channel dma_channels[MAX_DMA_CHANNELS];
};
static struct dma_control dma_ctrl;

static inline void __dma_regs_lock(void)
{
        spin_lock_irq(&dma_lock);
}

static inline void __dma_regs_unlock(void)
{
        spin_unlock_irq(&dma_lock);
}

static inline void __dma_enable(int ch)
{
        u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        ch_cfg |= DMAC_CHAN_ENABLE;
        __raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}

static inline void __dma_disable(int ch)
{
        u32 ch_cfg = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        ch_cfg &= ~DMAC_CHAN_ENABLE;
        __raw_writel(ch_cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
}

static void dma_clocks_up(void)
{
        /* Enable DMA clock if needed */
        if (dma_ctrl.num_clks == 0) {
                clk_enable(dma_ctrl.clk);
                __raw_writel(DMAC_CTRL_ENABLE, DMA_CONFIG(DMAIOBASE));
        }

        dma_ctrl.num_clks++;
}

static void dma_clocks_down(void)
{
        dma_ctrl.num_clks--;

        /* Disable DMA clock if needed */
        if (dma_ctrl.num_clks == 0) {
                __raw_writel(0, DMA_CONFIG(DMAIOBASE));
                clk_disable(dma_ctrl.clk);
        }
}
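
/*
 * dma_clocks_up()/dma_clocks_down() refcount the controller clock: the
 * clock and the controller enable bit are only dropped when the last
 * user goes away, so every successful lpc32xx_dma_ch_get() must be
 * balanced by a matching lpc32xx_dma_ch_put().
 */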

static int lpc32xx_ch_setup(struct dma_config *dmachcfg)
{
        u32 tmpctrl, tmpcfg, tmp;
        int ch = dmachcfg->ch;

        /* Channel control setup */
        tmpctrl = 0;
        switch (dmachcfg->src_size) {
        case 1:
                tmpctrl |= DMAC_CHAN_SRC_WIDTH_8;
                break;
        case 2:
                tmpctrl |= DMAC_CHAN_SRC_WIDTH_16;
                break;
        case 4:
                tmpctrl |= DMAC_CHAN_SRC_WIDTH_32;
                break;
        default:
                return -EINVAL;
        }
        switch (dmachcfg->dst_size) {
        case 1:
                tmpctrl |= DMAC_CHAN_DEST_WIDTH_8;
                break;
        case 2:
                tmpctrl |= DMAC_CHAN_DEST_WIDTH_16;
                break;
        case 4:
                tmpctrl |= DMAC_CHAN_DEST_WIDTH_32;
                break;
        default:
                return -EINVAL;
        }
        if (dmachcfg->src_inc != 0)
                tmpctrl |= DMAC_CHAN_SRC_AUTOINC;
        if (dmachcfg->dst_inc != 0)
                tmpctrl |= DMAC_CHAN_DEST_AUTOINC;
        if (dmachcfg->src_ahb1 != 0)
                tmpctrl |= DMAC_CHAN_SRC_AHB1;
        if (dmachcfg->dst_ahb1 != 0)
                tmpctrl |= DMAC_CHAN_DEST_AHB1;
        if (dmachcfg->tc_inten != 0)
                tmpctrl |= DMAC_CHAN_INT_TC_EN;
        tmpctrl |= dmachcfg->src_bsize | dmachcfg->dst_bsize;
        dma_ctrl.dma_channels[ch].control = tmpctrl;

        /* Channel config setup */
        tmpcfg = dmachcfg->src_prph | dmachcfg->dst_prph |
                dmachcfg->flowctrl;
        dma_ctrl.dma_channels[ch].config = tmpcfg;

        dma_ctrl.dma_channels[ch].config_int_mask = 0;
        if (dmachcfg->err_inten != 0)
                dma_ctrl.dma_channels[ch].config_int_mask |=
                        DMAC_CHAN_IE;
        if (dmachcfg->tc_inten != 0)
                dma_ctrl.dma_channels[ch].config_int_mask |=
                        DMAC_CHAN_ITC;

        /* Make sure the channel is disabled before reprogramming it */
        tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        tmp &= ~DMAC_CHAN_ENABLE;
        __raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

        /* Clear interrupts for channel */
        __raw_writel((1 << ch), DMA_INT_TC_CLEAR(DMAIOBASE));
        __raw_writel((1 << ch), DMA_INT_ERR_CLEAR(DMAIOBASE));

        /* Write control and config words */
        __raw_writel(tmpctrl, DMACH_CONTROL(DMAIOBASE, ch));
        __raw_writel(tmpcfg, DMACH_CONFIG_CH(DMAIOBASE, ch));

        return 0;
}

int lpc32xx_dma_ch_enable(int ch)
{
        if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
                return -EINVAL;

        __dma_regs_lock();
        __dma_enable(ch);
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_enable);

int lpc32xx_dma_ch_disable(int ch)
{
        if (!VALID_CHANNEL(ch) || !dma_ctrl.dma_channels[ch].name)
                return -EINVAL;

        __dma_regs_lock();
        __dma_disable(ch);
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_disable);

int lpc32xx_dma_ch_get(struct dma_config *dmachcfg, char *name,
                void *irq_handler, void *data)
{
        int ret;

        if (!VALID_CHANNEL(dmachcfg->ch))
                return -EINVAL;

        /* If the channel is already claimed, return */
        if (dma_ctrl.dma_channels[dmachcfg->ch].name != NULL)
                return -ENODEV;

        /* Save channel data */
        dma_ctrl.dma_channels[dmachcfg->ch].dmacfg = dmachcfg;
        dma_ctrl.dma_channels[dmachcfg->ch].name = name;
        dma_ctrl.dma_channels[dmachcfg->ch].irq_handler = irq_handler;
        dma_ctrl.dma_channels[dmachcfg->ch].data = data;

        /* Setup channel */
        __dma_regs_lock();
        dma_clocks_up();
        ret = lpc32xx_ch_setup(dmachcfg);
        __dma_regs_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_get);
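
/*
 * Minimal usage sketch for claiming a channel. The handler signature
 * follows struct dma_channel's irq_handler; everything else here is
 * illustrative (client_dma_irq, handle_error, handle_complete, priv
 * and the dma_config values are not a known-good peripheral setup):
 *
 *   static void client_dma_irq(int ch, int cause, void *data)
 *   {
 *           if (cause & DMA_ERR_INT)
 *                   handle_error(data);
 *           if (cause & DMA_TC_INT)
 *                   handle_complete(data);
 *   }
 *
 *   struct dma_config cfg = {
 *           .ch = 0,
 *           .tc_inten = 1,
 *           .err_inten = 1,
 *           .src_size = 4,
 *           .dst_size = 4,
 *   };
 *
 *   if (lpc32xx_dma_ch_get(&cfg, "client", client_dma_irq, priv) != 0)
 *           return -EBUSY;
 */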

int lpc32xx_dma_ch_put(int ch)
{
        if (!VALID_CHANNEL(ch))
                return -EINVAL;

        /* If the channel is not claimed, return */
        if (dma_ctrl.dma_channels[ch].name == NULL)
                return -EINVAL;

        /*
         * Disable the channel and release the clock; calling
         * lpc32xx_dma_ch_disable() here would deadlock, since it takes
         * dma_lock itself.
         */
        __dma_regs_lock();
        __dma_disable(ch);
        dma_clocks_down();
        __dma_regs_unlock();

        dma_ctrl.dma_channels[ch].name = NULL;

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_put);

int lpc32xx_dma_ch_pause_unpause(int ch, int pause)
{
        u32 tmp;

        if (!VALID_CHANNEL(ch))
                return -EINVAL;

        /* If the channel is not claimed, return */
        if (dma_ctrl.dma_channels[ch].name == NULL)
                return -EINVAL;

        __dma_regs_lock();
        tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        if (pause)
                tmp |= DMAC_CHAN_HALT;
        else
                tmp &= ~DMAC_CHAN_HALT;
        __raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_ch_pause_unpause);

int lpc32xx_dma_start_pflow_xfer(int ch,
                                void *src,
                                void *dst,
                                int enable)
{
        u32 tmp;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
                return -EINVAL;

        /*
         * When starting a DMA transfer where the peripheral is the flow
         * controller, the channel must currently be disabled
         */
        tmp = __raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch));
        if (tmp & DMAC_CHAN_ENABLE)
                return -EBUSY;

        __dma_regs_lock();
        __raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
        __raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
        __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
        __raw_writel(dma_ctrl.dma_channels[ch].control,
                DMACH_CONTROL(DMAIOBASE, ch));

        tmp = dma_ctrl.dma_channels[ch].config |
                dma_ctrl.dma_channels[ch].config_int_mask;
        if (enable != 0)
                tmp |= DMAC_CHAN_ENABLE;
        __raw_writel(tmp, DMACH_CONFIG_CH(DMAIOBASE, ch));

        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_pflow_xfer);
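
/*
 * Start sketch for a peripheral-flow-control transfer: both addresses
 * must be bus addresses as seen by the DMA controller, e.g. a
 * dma_map_single() result for the memory side and the peripheral's
 * FIFO register address for the device side. fifo_addr and buf_phys
 * are placeholders for whatever the client mapped; which one is the
 * source depends on the transfer direction the channel was configured
 * for:
 *
 *   ret = lpc32xx_dma_start_pflow_xfer(ch, (void *) fifo_addr,
 *                                      (void *) buf_phys, 1);
 */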

u32 lpc32xx_dma_llist_v_to_p(int ch,
                             u32 vlist)
{
        u32 pptr;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        pptr = vlist - dma_ctrl.dma_channels[ch].list_vstart;
        pptr += dma_ctrl.dma_channels[ch].list_pstart;

        return pptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_v_to_p);

u32 lpc32xx_dma_llist_p_to_v(int ch,
                             u32 plist)
{
        u32 vptr;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        vptr = plist - dma_ctrl.dma_channels[ch].list_pstart;
        vptr += dma_ctrl.dma_channels[ch].list_vstart;

        return vptr;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_llist_p_to_v);

u32 lpc32xx_dma_alloc_llist(int ch,
                             int entries)
{
        int i;
        dma_addr_t dma_handle;
        struct dma_list_ctrl *pdmalist, *pdmalistst;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL))
                return 0;

        /*
         * Limit the number of list entries, but add 1 extra entry as a
         * placeholder for the end of the list
         */
        if (entries < 2)
                entries = 2;
        if (entries > 64)
                entries = 64;
        entries++;

        /* Save list information */
        dma_ctrl.dma_channels[ch].list_entries = entries;
        dma_ctrl.dma_channels[ch].list_size =
                (entries * sizeof(struct dma_list_ctrl));
        dma_ctrl.dma_channels[ch].list_vstart = (u32) dma_alloc_coherent(NULL,
                dma_ctrl.dma_channels[ch].list_size, &dma_handle, GFP_KERNEL);
        if (dma_ctrl.dma_channels[ch].list_vstart == 0) {
                /* No allocated DMA space */
                return 0;
        }
        dma_ctrl.dma_channels[ch].list_pstart = (u32) dma_handle;

        /* Link the entries together */
        pdmalist = pdmalistst = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        for (i = 0; i < entries; i++) {
                pdmalistst->next_list_addr = pdmalistst + 1;
                pdmalistst->prev_list_addr = pdmalistst - 1;
                pdmalistst->next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                        (u32) pdmalistst->next_list_addr);
                pdmalistst->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                        (u32) pdmalistst->prev_list_addr);
                pdmalistst++;
        }

        /* Wrap the last entry back to the first to close the ring */
        pdmalist[entries - 1].next_list_addr = pdmalist;
        pdmalist[entries - 1].next_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                (u32) pdmalist[entries - 1].next_list_addr);
        pdmalist->prev_list_addr = &pdmalist[entries - 1];
        pdmalist->prev_list_phy = lpc32xx_dma_llist_v_to_p(ch,
                (u32) pdmalist->prev_list_addr);

        /* Save current free descriptors and current head/tail */
        dma_ctrl.dma_channels[ch].free_entries = entries - 1;
        dma_ctrl.dma_channels[ch].list_head = pdmalist;
        dma_ctrl.dma_channels[ch].list_tail = pdmalist;
        dma_ctrl.dma_channels[ch].list_curr = pdmalist;

        return dma_ctrl.dma_channels[ch].list_vstart;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_alloc_llist);
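
/*
 * Descriptor ring lifecycle sketch (src_phys, dst_phys and bytes are
 * placeholders): allocate once after claiming the channel, queue
 * entries as buffers become ready, and free the ring on teardown.
 *
 *   if (lpc32xx_dma_alloc_llist(ch, 16) == 0)
 *           return -ENOMEM;
 *
 *   lpc32xx_dma_queue_llist_entry(ch, (void *) src_phys,
 *                                 (void *) dst_phys, bytes);
 *
 *   lpc32xx_dma_dealloc_llist(ch);
 */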

void lpc32xx_dma_dealloc_llist(int ch)
{
        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return;

        dma_free_coherent(NULL, dma_ctrl.dma_channels[ch].list_size,
                (void *) dma_ctrl.dma_channels[ch].list_vstart,
                (dma_addr_t) dma_ctrl.dma_channels[ch].list_pstart);
        dma_ctrl.dma_channels[ch].list_head = NULL;
        dma_ctrl.dma_channels[ch].list_tail = NULL;
        dma_ctrl.dma_channels[ch].list_curr = NULL;
        dma_ctrl.dma_channels[ch].list_entries = 0;
        dma_ctrl.dma_channels[ch].free_entries = 0;
        dma_ctrl.dma_channels[ch].list_vstart = 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_dealloc_llist);

u32 lpc32xx_dma_get_llist_head(int ch)
{
        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /*
         * Return the current list pointer (virtual) for the
         * DMA channel
         */
        return lpc32xx_dma_llist_p_to_v(ch,
                __raw_readl(DMACH_LLI(DMAIOBASE, ch)));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_get_llist_head);

void lpc32xx_dma_flush_llist(int ch)
{
        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return;

        /* Disable channel and clear LLI */
        __dma_regs_lock();
        __dma_disable(ch);
        __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
        __dma_regs_unlock();

        dma_ctrl.dma_channels[ch].list_head = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        dma_ctrl.dma_channels[ch].list_tail = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        dma_ctrl.dma_channels[ch].list_curr = (struct dma_list_ctrl *)
                dma_ctrl.dma_channels[ch].list_vstart;
        dma_ctrl.dma_channels[ch].free_entries =
                dma_ctrl.dma_channels[ch].list_entries - 1;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_flush_llist);

u32 lpc32xx_dma_queue_llist_entry(int ch,
                                  void *src,
                                  void *dst,
                                  int size)
{
        struct dma_list_ctrl *plhead;
        u32 ctrl, cfg;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Exit if all the buffers are used */
        if (dma_ctrl.dma_channels[ch].free_entries == 0)
                return 0;

        /* Next available DMA link descriptor */
        plhead = dma_ctrl.dma_channels[ch].list_head;

        /* Adjust size to number of transfers (vs bytes) */
        size = size / dma_ctrl.dma_channels[ch].dmacfg->dst_size;

        /* Setup control and config words */
        ctrl = dma_ctrl.dma_channels[ch].control | size;
        cfg = dma_ctrl.dma_channels[ch].config | DMAC_CHAN_ENABLE |
                dma_ctrl.dma_channels[ch].config_int_mask;

        /* Populate DMA linked data structure */
        plhead->dmall.src = (u32) src;
        plhead->dmall.dest = (u32) dst;
        plhead->dmall.next_lli = 0;
        plhead->dmall.ctrl = ctrl;

        __dma_regs_lock();

        /* Append this link to the end of the previous link */
        plhead->prev_list_addr->dmall.next_lli =
                lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

        /* Decrement available buffers */
        dma_ctrl.dma_channels[ch].free_entries--;

        /*
         * If the DMA channel is idle, the buffer needs to be placed
         * directly into the DMA registers
         */
        if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) &
                        DMAC_CHAN_ENABLE) == 0) {
                /*
                 * DMA is disabled, so move the current buffer into the
                 * channel registers and start the transfer
                 */
                __raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
                __raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
                __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
                __raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
                __raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
        } else if (__raw_readl(DMACH_LLI(DMAIOBASE, ch)) == 0) {
                /* Link the new entry in as the next hardware descriptor */
                __raw_writel(dma_ctrl.dma_channels[ch].list_tail->next_list_phy,
                        DMACH_LLI(DMAIOBASE, ch));

                /*
                 * If the channel stopped before the new entry made it into
                 * the hardware descriptor register, it was queued too late,
                 * so load the new descriptor here instead.
                 */
                if ((__raw_readl(DMACH_CONFIG_CH(DMAIOBASE, ch)) &
                                DMAC_CHAN_ENABLE) == 0) {
                        __raw_writel((u32) src, DMACH_SRC_ADDR(DMAIOBASE, ch));
                        __raw_writel((u32) dst, DMACH_DEST_ADDR(DMAIOBASE, ch));
                        __raw_writel(0, DMACH_LLI(DMAIOBASE, ch));
                        __raw_writel(ctrl, DMACH_CONTROL(DMAIOBASE, ch));
                        __raw_writel(cfg, DMACH_CONFIG_CH(DMAIOBASE, ch));
                }
        }

        /* Process next link on next call */
        dma_ctrl.dma_channels[ch].list_head = plhead->next_list_addr;

        __dma_regs_unlock();

        return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist_entry);
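
/*
 * Completed entries should be recycled with lpc32xx_get_free_llist_entry(),
 * typically from the client's handler on terminal count (sketch; one
 * completed buffer per DMA_TC_INT is an assumption about how the client
 * configured its transfers):
 *
 *   static void client_dma_irq(int ch, int cause, void *data)
 *   {
 *           if (cause & DMA_TC_INT)
 *                   lpc32xx_get_free_llist_entry(ch);
 *   }
 */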

u32 lpc32xx_get_free_llist_entry(int ch)
{
        struct dma_list_ctrl *pltail;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Exit if no entries to free */
        if (dma_ctrl.dma_channels[ch].free_entries ==
                dma_ctrl.dma_channels[ch].list_entries) {
                return 0;
        }

        /* Get tail pointer */
        pltail = dma_ctrl.dma_channels[ch].list_tail;

        /* Next tail */
        dma_ctrl.dma_channels[ch].list_tail = pltail->next_list_addr;

        /* Increment available buffers */
        dma_ctrl.dma_channels[ch].free_entries++;

        return (u32) pltail;
}
EXPORT_SYMBOL_GPL(lpc32xx_get_free_llist_entry);

int lpc32xx_dma_start_xfer(int ch, u32 config)
{
        struct dma_list_ctrl *plhead;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return -EINVAL;

        plhead = dma_ctrl.dma_channels[ch].list_head;
        __dma_regs_lock();
        __raw_writel(plhead->dmall.src, DMACH_SRC_ADDR(DMAIOBASE, ch));
        __raw_writel(plhead->dmall.dest, DMACH_DEST_ADDR(DMAIOBASE, ch));
        __raw_writel(plhead->dmall.next_lli, DMACH_LLI(DMAIOBASE, ch));
        __raw_writel(plhead->dmall.ctrl, DMACH_CONTROL(DMAIOBASE, ch));
        __raw_writel(config, DMACH_CONFIG_CH(DMAIOBASE, ch));
        __dma_regs_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_start_xfer);

u32 lpc32xx_dma_queue_llist(int ch, void *src, void *dst,
                                  int size, u32 ctrl)
{
        struct dma_list_ctrl *plhead;

        if ((!VALID_CHANNEL(ch)) || (dma_ctrl.dma_channels[ch].name == NULL) ||
                (dma_ctrl.dma_channels[ch].list_vstart == 0))
                return 0;

        /* Exit if all the buffers are used */
        if (dma_ctrl.dma_channels[ch].free_entries == 0)
                return 0;

        /* Next available DMA link descriptor */
        plhead = dma_ctrl.dma_channels[ch].list_curr;

        /* Populate DMA linked data structure */
        plhead->dmall.src = (u32) src;
        plhead->dmall.dest = (u32) dst;
        plhead->dmall.next_lli = 0;
        plhead->dmall.ctrl = ctrl;

        /* Append this link to the end of the previous link */
        plhead->prev_list_addr->dmall.next_lli =
                lpc32xx_dma_llist_v_to_p(ch, (u32) plhead);

        /* Decrement available buffers */
        dma_ctrl.dma_channels[ch].free_entries--;

        /* Process next link on next call */
        dma_ctrl.dma_channels[ch].list_curr = plhead->next_list_addr;

        return (u32) plhead;
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_queue_llist);
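
/*
 * Unlike lpc32xx_dma_queue_llist_entry(), lpc32xx_dma_queue_llist()
 * only builds the descriptor ring and never touches the channel
 * registers, so the caller kicks the hardware explicitly (sketch; cfg
 * stands for a channel config word like the one lpc32xx_ch_setup()
 * programs, with interrupt mask bits included as needed):
 *
 *   lpc32xx_dma_queue_llist(ch, src, dst, size, ctrl);
 *   lpc32xx_dma_start_xfer(ch, cfg | DMAC_CHAN_ENABLE);
 */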

void lpc32xx_dma_force_burst(int ch, int src)
{
        __raw_writel(1 << src, DMA_SW_BURST_REQ(DMAIOBASE));
}
EXPORT_SYMBOL_GPL(lpc32xx_dma_force_burst);

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
        int i;
        unsigned long dint = __raw_readl(DMA_INT_STAT(DMAIOBASE));
        unsigned long tcint = __raw_readl(DMA_INT_TC_STAT(DMAIOBASE));
        unsigned long eint = __raw_readl(DMA_INT_ERR_STAT(DMAIOBASE));
        unsigned long i_bit;

        for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
                i_bit = 1 << i;
                if (dint & i_bit) {
                        struct dma_channel *channel = &dma_ctrl.dma_channels[i];

                        if (channel->name && channel->irq_handler) {
                                int cause = 0;

                                if (eint & i_bit) {
                                        __raw_writel(i_bit, DMA_INT_ERR_CLEAR(DMAIOBASE));
                                        cause |= DMA_ERR_INT;
                                }
                                if (tcint & i_bit) {
                                        __raw_writel(i_bit, DMA_INT_TC_CLEAR(DMAIOBASE));
                                        cause |= DMA_TC_INT;
                                }

                                channel->irq_handler(i, cause, channel->data);
                        } else {
                                /*
                                 * IRQ for an unregistered DMA channel
                                 */
                                __raw_writel(i_bit, DMA_INT_ERR_CLEAR(DMAIOBASE));
                                __raw_writel(i_bit, DMA_INT_TC_CLEAR(DMAIOBASE));
                                printk(KERN_WARNING
                                       "spurious IRQ for DMA channel %d\n", i);
                        }
                }
        }

        return IRQ_HANDLED;
}

static int __init lpc32xx_dma_init(void)
{
        int ret;

        ret = request_irq(IRQ_LPC32XX_DMA, dma_irq_handler, 0, "DMA", NULL);
        if (ret) {
                printk(KERN_CRIT "Can't request IRQ for DMA\n");
                goto out;
        }

        /* Get DMA clock */
        dma_ctrl.clk = clk_get(NULL, "clk_dmac");
        if (IS_ERR(dma_ctrl.clk)) {
                ret = PTR_ERR(dma_ctrl.clk);
                goto errout;
        }
        clk_enable(dma_ctrl.clk);

        /* Briefly enable the controller and clear any pending interrupts */
        __raw_writel(1, DMA_CONFIG(DMAIOBASE));
        __raw_writel(0xFF, DMA_INT_TC_CLEAR(DMAIOBASE));
        __raw_writel(0xFF, DMA_INT_ERR_CLEAR(DMAIOBASE));

        /* The clock is only enabled when needed to save power */
        clk_disable(dma_ctrl.clk);

        return 0;

errout:
        free_irq(IRQ_LPC32XX_DMA, NULL);

out:
        return ret;
}
arch_initcall(lpc32xx_dma_init);