1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics transformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Support functions for the OMAP internal DMA channels.
14  *
15  * This program is free software; you can redistribute it and/or modify
16  * it under the terms of the GNU General Public License version 2 as
17  * published by the Free Software Foundation.
18  *
19  */
20
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/sched.h>
24 #include <linux/spinlock.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28
29 #include <asm/system.h>
30 #include <asm/hardware.h>
31 #include <asm/dma.h>
32 #include <asm/io.h>
33
34 #include <asm/arch/tc.h>
35
36 #undef DEBUG
37
38 #ifndef CONFIG_ARCH_OMAP1
39 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
40         DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
41 };
42
43 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
44 #endif
45
46 #define OMAP_DMA_ACTIVE         0x01
47 #define OMAP_DMA_CCR_EN         (1 << 7)
48 #define OMAP2_DMA_CSR_CLEAR_MASK        0xffe
49
50 #define OMAP_FUNC_MUX_ARM_BASE  (0xfffe1000 + 0xec)
51
52 static int enable_1510_mode = 0;
53
54 struct omap_dma_lch {
55         int next_lch;
56         int dev_id;
57         u16 saved_csr;
58         u16 enabled_irqs;
59         const char *dev_name;
60         void (* callback)(int lch, u16 ch_status, void *data);
61         void *data;
62
63 #ifndef CONFIG_ARCH_OMAP1
64         /* required for Dynamic chaining */
65         int prev_linked_ch;
66         int next_linked_ch;
67         int state;
68         int chain_id;
69
70         int status;
71 #endif
72         long flags;
73 };
74
75 struct dma_link_info {
76         int *linked_dmach_q;
77         int no_of_lchs_linked;
78
79         int q_count;
80         int q_tail;
81         int q_head;
82
83         int chain_state;
84         int chain_mode;
85
86 };
87
88 static struct dma_link_info *dma_linked_lch;
89
90 #ifndef CONFIG_ARCH_OMAP1
91
92 /* Chain handling macros */
93 #define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
94         do {                                                            \
95                 dma_linked_lch[chain_id].q_head =                       \
96                 dma_linked_lch[chain_id].q_tail =                       \
97                 dma_linked_lch[chain_id].q_count = 0;                   \
98         } while (0)
99 #define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
100                 (dma_linked_lch[chain_id].no_of_lchs_linked ==          \
101                 dma_linked_lch[chain_id].q_count)
102 #define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
103                 ((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==    \
104                 dma_linked_lch[chain_id].q_count)
107 #define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
108                 (0 == dma_linked_lch[chain_id].q_count)
109 #define __OMAP_DMA_CHAIN_INCQ(end)                                      \
110         ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
111 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
112         do {                                                            \
113                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
114                 dma_linked_lch[chain_id].q_count--;                     \
115         } while (0)
116
117 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
118         do {                                                            \
119                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
120                 dma_linked_lch[chain_id].q_count++; \
121         } while (0)
122 #endif
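
/*
 * Illustration (not part of the original file): for a chain with
 * no_of_lchs_linked == 3 and a freshly initialized queue, the helpers
 * above behave as follows:
 *
 *      OMAP_DMA_CHAIN_QINIT(chain_id);         q_head = q_tail = q_count = 0
 *      OMAP_DMA_CHAIN_INCQTAIL(chain_id);      q_tail = 1, q_count = 1
 *      OMAP_DMA_CHAIN_INCQTAIL(chain_id);      q_tail = 2, q_count = 2
 *      OMAP_DMA_CHAIN_INCQTAIL(chain_id);      q_tail = 0, q_count = 3
 *                                              OMAP_DMA_CHAIN_QFULL() is now true
 *      OMAP_DMA_CHAIN_INCQHEAD(chain_id);      q_head = 1, q_count = 2
 */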
123
124 static int dma_lch_count;
125 static int dma_chan_count;
126
127 static spinlock_t dma_chan_lock;
128 static struct omap_dma_lch *dma_chan;
129 void __iomem *omap_dma_base;
130
131 static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
132         INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
133         INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
134         INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
135         INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
136         INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
137 };
138
139 static inline void disable_lnk(int lch);
140 static void omap_disable_channel_irq(int lch);
141 static inline void omap_enable_channel_irq(int lch);
142
143 #define REVISIT_24XX()          printk(KERN_ERR "FIXME: no %s on 24xx\n", \
144                                                 __func__)
145
146 #define dma_read(reg)                                           \
147 ({                                                                      \
148         u32 __val;                                                      \
149         if (cpu_class_is_omap1())                                       \
150                 __val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg);   \
151         else                                                            \
152                 __val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg);   \
153         __val;                                                          \
154 })
155
156 #define dma_write(val, reg)                                             \
157 ({                                                                      \
158         if (cpu_class_is_omap1())                                       \
159                 __raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg);\
160         else                                                            \
161                 __raw_writel((val), omap_dma_base + OMAP_DMA4_##reg);   \
162 })
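
/*
 * Usage sketch (mirrors the access pattern used throughout this file):
 * the reg argument is a register macro such as CCR(lch), which the
 * accessors above expand to OMAP1_DMA_CCR(lch) or OMAP_DMA4_CCR(lch)
 * depending on the CPU class.
 *
 *      u32 l = dma_read(CCR(lch));
 *      l |= OMAP_DMA_CCR_EN;
 *      dma_write(l, CCR(lch));
 */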
163
164 #ifdef CONFIG_ARCH_OMAP15XX
165 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
166 int omap_dma_in_1510_mode(void)
167 {
168         return enable_1510_mode;
169 }
170 #else
171 #define omap_dma_in_1510_mode()         0
172 #endif
173
174 #ifdef CONFIG_ARCH_OMAP1
175 static inline int get_gdma_dev(int req)
176 {
177         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
178         int shift = ((req - 1) % 5) * 6;
179
180         return ((omap_readl(reg) >> shift) & 0x3f) + 1;
181 }
182
183 static inline void set_gdma_dev(int req, int dev)
184 {
185         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
186         int shift = ((req - 1) % 5) * 6;
187         u32 l;
188
189         l = omap_readl(reg);
190         l &= ~(0x3f << shift);
191         l |= (dev - 1) << shift;
192         omap_writel(l, reg);
193 }
194 #else
195 #define set_gdma_dev(req, dev)  do {} while (0)
196 #endif
197
198 /* Omap1 only */
199 static void clear_lch_regs(int lch)
200 {
201         int i;
202         void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
203
204         for (i = 0; i < 0x2c; i += 2)
205                 __raw_writew(0, lch_base + i);
206 }
207
208 void omap_set_dma_priority(int lch, int dst_port, int priority)
209 {
210         unsigned long reg;
211         u32 l;
212
213         if (cpu_class_is_omap1()) {
214                 switch (dst_port) {
215                 case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
216                         reg = OMAP_TC_OCPT1_PRIOR;
217                         break;
218                 case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
219                         reg = OMAP_TC_OCPT2_PRIOR;
220                         break;
221                 case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
222                         reg = OMAP_TC_EMIFF_PRIOR;
223                         break;
224                 case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
225                         reg = OMAP_TC_EMIFS_PRIOR;
226                         break;
227                 default:
228                         BUG();
229                         return;
230                 }
231                 l = omap_readl(reg);
232                 l &= ~(0xf << 8);
233                 l |= (priority & 0xf) << 8;
234                 omap_writel(l, reg);
235         }
236
237         if (cpu_class_is_omap2()) {
238                 u32 ccr;
239
240                 ccr = dma_read(CCR(lch));
241                 if (priority)
242                         ccr |= (1 << 6);
243                 else
244                         ccr &= ~(1 << 6);
245                 dma_write(ccr, CCR(lch));
246         }
247 }
248
249 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
250                                   int frame_count, int sync_mode,
251                                   int dma_trigger, int src_or_dst_synch)
252 {
253         u32 l;
254
255         l = dma_read(CSDP(lch));
256         l &= ~0x03;
257         l |= data_type;
258         dma_write(l, CSDP(lch));
259
260         if (cpu_class_is_omap1()) {
261                 u16 ccr;
262
263                 ccr = dma_read(CCR(lch));
264                 ccr &= ~(1 << 5);
265                 if (sync_mode == OMAP_DMA_SYNC_FRAME)
266                         ccr |= 1 << 5;
267                 dma_write(ccr, CCR(lch));
268
269                 ccr = dma_read(CCR2(lch));
270                 ccr &= ~(1 << 2);
271                 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
272                         ccr |= 1 << 2;
273                 dma_write(ccr, CCR2(lch));
274         }
275
276         if (cpu_class_is_omap2() && dma_trigger) {
277                 u32 val;
278
279                 val = dma_read(CCR(lch));
280                 val &= ~(3 << 19);
281                 if (dma_trigger > 63)
282                         val |= 1 << 20;
283                 if (dma_trigger > 31)
284                         val |= 1 << 19;
285
286                 val &= ~(0x1f);
287                 val |= (dma_trigger & 0x1f);
288
289                 if (sync_mode & OMAP_DMA_SYNC_FRAME)
290                         val |= 1 << 5;
291                 else
292                         val &= ~(1 << 5);
293
294                 if (sync_mode & OMAP_DMA_SYNC_BLOCK)
295                         val |= 1 << 18;
296                 else
297                         val &= ~(1 << 18);
298
299                 if (src_or_dst_synch)
300                         val |= 1 << 24;         /* source synch */
301                 else
302                         val &= ~(1 << 24);      /* dest synch */
303
304                 dma_write(val, CCR(lch));
305         }
306
307         dma_write(elem_count, CEN(lch));
308         dma_write(frame_count, CFN(lch));
309 }
310
311 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
312 {
313         u16 w;
314
315         BUG_ON(omap_dma_in_1510_mode());
316
317         if (cpu_class_is_omap2()) {
318                 REVISIT_24XX();
319                 return;
320         }
321
322         w = dma_read(CCR2(lch));
323         w &= ~0x03;
324
325         switch (mode) {
326         case OMAP_DMA_CONSTANT_FILL:
327                 w |= 0x01;
328                 break;
329         case OMAP_DMA_TRANSPARENT_COPY:
330                 w |= 0x02;
331                 break;
332         case OMAP_DMA_COLOR_DIS:
333                 break;
334         default:
335                 BUG();
336         }
337         dma_write(w, CCR2(lch));
338
339         w = dma_read(LCH_CTRL(lch));
340         w &= ~0x0f;
341         /* Default is channel type 2D */
342         if (mode) {
343                 dma_write((u16)color, COLOR_L(lch));
344                 dma_write((u16)(color >> 16), COLOR_U(lch));
345                 w |= 1;         /* Channel type G */
346         }
347         dma_write(w, LCH_CTRL(lch));
348 }
349
350 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
351 {
352         if (cpu_class_is_omap2()) {
353                 u32 csdp;
354
355                 csdp = dma_read(CSDP(lch));
356                 csdp &= ~(0x3 << 16);
357                 csdp |= (mode << 16);
358                 dma_write(csdp, CSDP(lch));
359         }
360 }
361
362 /* Note that src_port is only for omap1 */
363 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
364                              unsigned long src_start,
365                              int src_ei, int src_fi)
366 {
367         if (cpu_class_is_omap1()) {
368                 u16 w;
369
370                 w = dma_read(CSDP(lch));
371                 w &= ~(0x1f << 2);
372                 w |= src_port << 2;
373                 dma_write(w, CSDP(lch));
374
375                 w = dma_read(CCR(lch));
376                 w &= ~(0x03 << 12);
377                 w |= src_amode << 12;
378                 dma_write(w, CCR(lch));
379
380                 dma_write(src_start >> 16, CSSA_U(lch));
381                 dma_write((u16)src_start, CSSA_L(lch));
382
383                 dma_write(src_ei, CSEI(lch));
384                 dma_write(src_fi, CSFI(lch));
385         }
386
387         if (cpu_class_is_omap2()) {
388                 u32 l;
389
390                 l = dma_read(CCR(lch));
391                 l &= ~(0x03 << 12);
392                 l |= src_amode << 12;
393                 dma_write(l, CCR(lch));
394
395                 dma_write(src_start, CSSA(lch));
396                 dma_write(src_ei, CSEI(lch));
397                 dma_write(src_fi, CSFI(lch));
398         }
399 }
400
401 void omap_set_dma_params(int lch, struct omap_dma_channel_params * params)
402 {
403         omap_set_dma_transfer_params(lch, params->data_type,
404                                      params->elem_count, params->frame_count,
405                                      params->sync_mode, params->trigger,
406                                      params->src_or_dst_synch);
407         omap_set_dma_src_params(lch, params->src_port,
408                                 params->src_amode, params->src_start,
409                                 params->src_ei, params->src_fi);
410
411         omap_set_dma_dest_params(lch, params->dst_port,
412                                  params->dst_amode, params->dst_start,
413                                  params->dst_ei, params->dst_fi);
414         if (params->read_prio || params->write_prio)
415                 omap_dma_set_prio_lch(lch, params->read_prio,
416                                       params->write_prio);
417 }
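
/*
 * Example (illustrative only, not from the original file): filling in a
 * struct omap_dma_channel_params for a frame-synchronized transfer of
 * 16-bit elements to a peripheral FIFO.  The my_* values and MY_DMA_REQ
 * are placeholders for driver-specific data.
 *
 *      struct omap_dma_channel_params p;
 *
 *      memset(&p, 0, sizeof(p));
 *      p.data_type = OMAP_DMA_DATA_TYPE_S16;
 *      p.elem_count = my_buf_len / 2;
 *      p.frame_count = 1;
 *      p.sync_mode = OMAP_DMA_SYNC_FRAME;
 *      p.trigger = MY_DMA_REQ;
 *      p.src_or_dst_synch = 0;
 *      p.src_amode = OMAP_DMA_AMODE_POST_INC;
 *      p.src_start = my_src_phys;
 *      p.dst_amode = OMAP_DMA_AMODE_CONSTANT;
 *      p.dst_start = my_dev_fifo_phys;
 *      omap_set_dma_params(my_lch, &p);
 */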
418
419 void omap_set_dma_src_index(int lch, int eidx, int fidx)
420 {
421         if (cpu_class_is_omap2()) {
422                 REVISIT_24XX();
423                 return;
424         }
425         dma_write(eidx, CSEI(lch));
426         dma_write(fidx, CSFI(lch));
427 }
428
429 void omap_set_dma_src_data_pack(int lch, int enable)
430 {
431         u32 l;
432
433         l = dma_read(CSDP(lch));
434         l &= ~(1 << 6);
435         if (enable)
436                 l |= (1 << 6);
437         dma_write(l, CSDP(lch));
438 }
439
440 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
441 {
442         unsigned int burst = 0;
443         u32 l;
444
445         l = dma_read(CSDP(lch));
446         l &= ~(0x03 << 7);
447
448         switch (burst_mode) {
449         case OMAP_DMA_DATA_BURST_DIS:
450                 break;
451         case OMAP_DMA_DATA_BURST_4:
452                 if (cpu_class_is_omap2())
453                         burst = 0x1;
454                 else
455                         burst = 0x2;
456                 break;
457         case OMAP_DMA_DATA_BURST_8:
458                 if (cpu_class_is_omap2()) {
459                         burst = 0x2;
460                         break;
461                 }
462                 /* not supported by current hardware on OMAP1
463                  * l |= (0x03 << 7);
464                  * fall through
465                  */
466         case OMAP_DMA_DATA_BURST_16:
467                 if (cpu_class_is_omap2()) {
468                         burst = 0x3;
469                         break;
470                 }
471                 /* OMAP1 doesn't support burst 16
472                  * fall through
473                  */
474         default:
475                 BUG();
476         }
477
478         l |= (burst << 7);
479         dma_write(l, CSDP(lch));
480 }
481
482 /* Note that dest_port is only for OMAP1 */
483 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
484                               unsigned long dest_start,
485                               int dst_ei, int dst_fi)
486 {
487         u32 l;
488
489         if (cpu_class_is_omap1()) {
490                 l = dma_read(CSDP(lch));
491                 l &= ~(0x1f << 9);
492                 l |= dest_port << 9;
493                 dma_write(l, CSDP(lch));
494         }
495
496         l = dma_read(CCR(lch));
497         l &= ~(0x03 << 14);
498         l |= dest_amode << 14;
499         dma_write(l, CCR(lch));
500
501         if (cpu_class_is_omap1()) {
502                 dma_write(dest_start >> 16, CDSA_U(lch));
503                 dma_write(dest_start, CDSA_L(lch));
504         }
505
506         if (cpu_class_is_omap2())
507                 dma_write(dest_start, CDSA(lch));
508
509         dma_write(dst_ei, CDEI(lch));
510         dma_write(dst_fi, CDFI(lch));
511 }
512
513 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
514 {
515         if (cpu_class_is_omap2()) {
516                 REVISIT_24XX();
517                 return;
518         }
519         dma_write(eidx, CDEI(lch));
520         dma_write(fidx, CDFI(lch));
521 }
522
523 void omap_set_dma_dest_data_pack(int lch, int enable)
524 {
525         u32 l;
526
527         l = dma_read(CSDP(lch));
528         l &= ~(1 << 13);
529         if (enable)
530                 l |= 1 << 13;
531         dma_write(l, CSDP(lch));
532 }
533
534 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
535 {
536         unsigned int burst = 0;
537         u32 l;
538
539         l = dma_read(CSDP(lch));
540         l &= ~(0x03 << 14);
541
542         switch (burst_mode) {
543         case OMAP_DMA_DATA_BURST_DIS:
544                 break;
545         case OMAP_DMA_DATA_BURST_4:
546                 if (cpu_class_is_omap2())
547                         burst = 0x1;
548                 else
549                         burst = 0x2;
550                 break;
551         case OMAP_DMA_DATA_BURST_8:
552                 if (cpu_class_is_omap2())
553                         burst = 0x2;
554                 else
555                         burst = 0x3;
556                 break;
557         case OMAP_DMA_DATA_BURST_16:
558                 if (cpu_class_is_omap2()) {
559                         burst = 0x3;
560                         break;
561                 }
562                 /* OMAP1 doesn't support burst 16
563                  * fall through
564                  */
565         default:
566                 printk(KERN_ERR "Invalid DMA burst mode\n");
567                 BUG();
568                 return;
569         }
570         l |= (burst << 14);
571         dma_write(l, CSDP(lch));
572 }
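
/*
 * Example (sketch): enable data packing and 4-unit bursts on both the
 * source and destination side of channel my_lch (a placeholder for a
 * channel obtained from omap_request_dma()).  Burst 4 is accepted for
 * both OMAP1 and OMAP2 class devices by the helpers above.
 *
 *      omap_set_dma_src_data_pack(my_lch, 1);
 *      omap_set_dma_src_burst_mode(my_lch, OMAP_DMA_DATA_BURST_4);
 *      omap_set_dma_dest_data_pack(my_lch, 1);
 *      omap_set_dma_dest_burst_mode(my_lch, OMAP_DMA_DATA_BURST_4);
 */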
573
574 static inline void omap_enable_channel_irq(int lch)
575 {
576         u32 status;
577
578         /* Clear CSR */
579         if (cpu_class_is_omap1())
580                 status = dma_read(CSR(lch));
581         else if (cpu_class_is_omap2())
582                 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
583
584         /* Enable some nice interrupts. */
585         dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
586 }
587
588 static void omap_disable_channel_irq(int lch)
589 {
590         if (cpu_class_is_omap2())
591                 dma_write(0, CICR(lch));
592 }
593
594 void omap_enable_dma_irq(int lch, u16 bits)
595 {
596         dma_chan[lch].enabled_irqs |= bits;
597 }
598
599 void omap_disable_dma_irq(int lch, u16 bits)
600 {
601         dma_chan[lch].enabled_irqs &= ~bits;
602 }
603
604 static inline void enable_lnk(int lch)
605 {
606         u32 l;
607
608         l = dma_read(CLNK_CTRL(lch));
609
610         if (cpu_class_is_omap1())
611                 l &= ~(1 << 14);
612
613         /* Set the ENABLE_LNK bits */
614         if (dma_chan[lch].next_lch != -1)
615                 l = dma_chan[lch].next_lch | (1 << 15);
616
617 #ifndef CONFIG_ARCH_OMAP1
618         if (dma_chan[lch].next_linked_ch != -1)
619                 l = dma_chan[lch].next_linked_ch | (1 << 15);
620 #endif
621
622         dma_write(l, CLNK_CTRL(lch));
623 }
624
625 static inline void disable_lnk(int lch)
626 {
627         u32 l;
628
629         l = dma_read(CLNK_CTRL(lch));
630
631         /* Disable interrupts */
632         if (cpu_class_is_omap1()) {
633                 dma_write(0, CICR(lch));
634                 /* Set the STOP_LNK bit */
635                 l |= 1 << 14;
636         }
637
638         if (cpu_class_is_omap2()) {
639                 omap_disable_channel_irq(lch);
640                 /* Clear the ENABLE_LNK bit */
641                 l &= ~(1 << 15);
642         }
643
644         dma_write(l, CLNK_CTRL(lch));
645         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
646 }
647
648 static inline void omap2_enable_irq_lch(int lch)
649 {
650         u32 val;
651
652         if (!cpu_class_is_omap2())
653                 return;
654
655         val = dma_read(IRQENABLE_L0);
656         val |= 1 << lch;
657         dma_write(val, IRQENABLE_L0);
658 }
659
660 int omap_request_dma(int dev_id, const char *dev_name,
661                      void (* callback)(int lch, u16 ch_status, void *data),
662                      void *data, int *dma_ch_out)
663 {
664         int ch, free_ch = -1;
665         unsigned long flags;
666         struct omap_dma_lch *chan;
667
668         spin_lock_irqsave(&dma_chan_lock, flags);
669         for (ch = 0; ch < dma_chan_count; ch++) {
670                 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
671                         free_ch = ch;
672                         if (dev_id == 0)
673                                 break;
674                 }
675         }
676         if (free_ch == -1) {
677                 spin_unlock_irqrestore(&dma_chan_lock, flags);
678                 return -EBUSY;
679         }
680         chan = dma_chan + free_ch;
681         chan->dev_id = dev_id;
682
683         if (cpu_class_is_omap1())
684                 clear_lch_regs(free_ch);
685
686         if (cpu_class_is_omap2())
687                 omap_clear_dma(free_ch);
688
689         spin_unlock_irqrestore(&dma_chan_lock, flags);
690
691         chan->dev_name = dev_name;
692         chan->callback = callback;
693         chan->data = data;
694 #ifndef CONFIG_ARCH_OMAP1
695         chan->chain_id = -1;
696         chan->next_linked_ch = -1;
697 #endif
698         chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
699
700         if (cpu_class_is_omap1())
701                 chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
702         else if (cpu_class_is_omap2())
703                 chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
704                         OMAP2_DMA_TRANS_ERR_IRQ;
705
706         if (cpu_is_omap16xx()) {
707                 /* If the sync device is set, configure it dynamically. */
708                 if (dev_id != 0) {
709                         set_gdma_dev(free_ch + 1, dev_id);
710                         dev_id = free_ch + 1;
711                 }
712                 /* Disable the 1510 compatibility mode and set the sync device
713                  * id. */
714                 dma_write(dev_id | (1 << 10), CCR(free_ch));
715         } else if (cpu_is_omap730() || cpu_is_omap15xx()) {
716                 dma_write(dev_id, CCR(free_ch));
717         }
718
719         if (cpu_class_is_omap2()) {
720                 omap2_enable_irq_lch(free_ch);
721
722                 omap_enable_channel_irq(free_ch);
723                 /* Clear the CSR register and IRQ status register */
724                 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
725                 dma_write(1 << free_ch, IRQSTATUS_L0);
726         }
727
728         *dma_ch_out = free_ch;
729
730         return 0;
731 }
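
/*
 * Typical call sequence (a sketch, not taken from this file; MY_DMA_REQ,
 * my_dma_cb, my_dev and count are hypothetical).  Source/destination
 * programming via omap_set_dma_src_params()/omap_set_dma_dest_params()
 * is omitted for brevity.
 *
 *      static void my_dma_cb(int lch, u16 ch_status, void *data)
 *      {
 *              complete(&my_dev->dma_done);
 *      }
 *
 * and in the driver's setup path:
 *
 *      int lch;
 *
 *      if (omap_request_dma(MY_DMA_REQ, "my-dev", my_dma_cb,
 *                           my_dev, &lch) != 0)
 *              return -EBUSY;
 *      omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32, count, 1,
 *                                   OMAP_DMA_SYNC_ELEMENT, MY_DMA_REQ, 0);
 *      omap_start_dma(lch);
 *      ...
 *      omap_free_dma(lch);
 */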
732
733 void omap_free_dma(int lch)
734 {
735         unsigned long flags;
736
737         spin_lock_irqsave(&dma_chan_lock, flags);
738         if (dma_chan[lch].dev_id == -1) {
739                 printk(KERN_ERR "omap_dma: trying to free unallocated DMA channel %d\n",
740                        lch);
741                 spin_unlock_irqrestore(&dma_chan_lock, flags);
742                 return;
743         }
744         dma_chan[lch].dev_id = -1;
745         dma_chan[lch].next_lch = -1;
746         dma_chan[lch].callback = NULL;
747         spin_unlock_irqrestore(&dma_chan_lock, flags);
748
749         if (cpu_class_is_omap1()) {
750                 /* Disable all DMA interrupts for the channel. */
751                 dma_write(0, CICR(lch));
752                 /* Make sure the DMA transfer is stopped. */
753                 dma_write(0, CCR(lch));
754         }
755
756         if (cpu_class_is_omap2()) {
757                 u32 val;
758                 /* Disable interrupts */
759                 val = dma_read(IRQENABLE_L0);
760                 val &= ~(1 << lch);
761                 dma_write(val, IRQENABLE_L0);
762
763                 /* Clear the CSR register and IRQ status register */
764                 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
765                 dma_write(1 << lch, IRQSTATUS_L0);
766
767                 /* Disable all DMA interrupts for the channel. */
768                 dma_write(0, CICR(lch));
769
770                 /* Make sure the DMA transfer is stopped. */
771                 dma_write(0, CCR(lch));
772                 omap_clear_dma(lch);
773         }
774 }
775
776 /**
777  * @brief omap_dma_set_global_params : Set global priority settings for dma
778  *
779  * @param arb_rate
780  * @param max_fifo_depth
781  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
782  *                                                  DMA_THREAD_RESERVE_ONET
783  *                                                  DMA_THREAD_RESERVE_TWOT
784  *                                                  DMA_THREAD_RESERVE_THREET
785  */
786 void
787 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
788 {
789         u32 reg;
790
791         if (!cpu_class_is_omap2()) {
792                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
793                 return;
794         }
795
796         if (arb_rate == 0)
797                 arb_rate = 1;
798
799         reg = (arb_rate & 0xff) << 16;
800         reg |= (0xff & max_fifo_depth);
801
802         dma_write(reg, GCR);
803 }
804 EXPORT_SYMBOL(omap_dma_set_global_params);
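
/*
 * Example call (sketch): program an arbitration rate of 1 and the maximum
 * FIFO depth.  Note that in this implementation the tparams argument is
 * accepted but not written to GCR.
 *
 *      omap_dma_set_global_params(1, 0xff, DMA_THREAD_RESERVE_NORM);
 */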
805
806 /**
807  * @brief omap_dma_set_prio_lch : Set channel wise priority settings
808  *
809  * @param lch
810  * @param read_prio - Read priority
811  * @param write_prio - Write priority
812  * Both of the above can be set with one of the following values :
813  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
814  */
815 int
816 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
817                       unsigned char write_prio)
818 {
819         u32 l;
820
821         if (unlikely((lch < 0 || lch >= dma_lch_count))) {
822                 printk(KERN_ERR "Invalid channel id\n");
823                 return -EINVAL;
824         }
825         l = dma_read(CCR(lch));
826         l &= ~((1 << 6) | (1 << 26));
827         if (cpu_is_omap2430() || cpu_is_omap34xx())
828                 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
829         else
830                 l |= ((read_prio & 0x1) << 6);
831
832         dma_write(l, CCR(lch));
833
834         return 0;
835 }
836 EXPORT_SYMBOL(omap_dma_set_prio_lch);
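
/*
 * Example call (sketch): give channel my_lch (a placeholder) a high read
 * priority and a low write priority; the write priority only takes effect
 * on 2430/34xx as implemented above.
 *
 *      omap_dma_set_prio_lch(my_lch, DMA_CH_PRIO_HIGH, DMA_CH_PRIO_LOW);
 */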
837
838 /*
839  * Clears any DMA state so the DMA engine is ready to restart with new buffers
840  * through omap_start_dma(). Any buffers in flight are discarded.
841  */
842 void omap_clear_dma(int lch)
843 {
844         unsigned long flags;
845
846         local_irq_save(flags);
847
848         if (cpu_class_is_omap1()) {
849                 u32 l;
850
851                 l = dma_read(CCR(lch));
852                 l &= ~OMAP_DMA_CCR_EN;
853                 dma_write(l, CCR(lch));
854
855                 /* Clear pending interrupts */
856                 l = dma_read(CSR(lch));
857         }
858
859         if (cpu_class_is_omap2()) {
860                 int i;
861                 void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
862                 for (i = 0; i < 0x44; i += 4)
863                         __raw_writel(0, lch_base + i);
864         }
865
866         local_irq_restore(flags);
867 }
868
869 void omap_start_dma(int lch)
870 {
871         u32 l;
872
873         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
874                 int next_lch, cur_lch;
875                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
876
877                 /* Set the link register of the first channel */
878                 enable_lnk(lch);
879
880                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
881                 dma_chan_link_map[lch] = 1;
882                 cur_lch = dma_chan[lch].next_lch;
883                 do {
884                         next_lch = dma_chan[cur_lch].next_lch;
885
886                         /* The loop case: we've been here already */
887                         if (dma_chan_link_map[cur_lch])
888                                 break;
889                         /* Mark the current channel */
890                         dma_chan_link_map[cur_lch] = 1;
891
892                         enable_lnk(cur_lch);
893                         omap_enable_channel_irq(cur_lch);
894
895                         cur_lch = next_lch;
896                 } while (next_lch != -1);
897         } else if (cpu_class_is_omap2()) {
898                 /* Errata: Need to write lch even if not using chaining */
899                 dma_write(lch, CLNK_CTRL(lch));
900         }
901
902         omap_enable_channel_irq(lch);
903
904         l = dma_read(CCR(lch));
905
906         /* Errata: On ES2.0 BUFFERING disable must be set.
907          * This will always fail on ES1.0 */
908         if (cpu_is_omap24xx())
909                 l |= OMAP_DMA_CCR_EN;
910
911         l |= OMAP_DMA_CCR_EN;
912         dma_write(l, CCR(lch));
913
914         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
915 }
916
917 void omap_stop_dma(int lch)
918 {
919         u32 l;
920
921         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
922                 int next_lch, cur_lch = lch;
923                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
924
925                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
926                 do {
927                         /* The loop case: we've been here already */
928                         if (dma_chan_link_map[cur_lch])
929                                 break;
930                         /* Mark the current channel */
931                         dma_chan_link_map[cur_lch] = 1;
932
933                         disable_lnk(cur_lch);
934
935                         next_lch = dma_chan[cur_lch].next_lch;
936                         cur_lch = next_lch;
937                 } while (next_lch != -1);
938
939                 return;
940         }
941
942         /* Disable all interrupts on the channel */
943         if (cpu_class_is_omap1())
944                 dma_write(0, CICR(lch));
945
946         l = dma_read(CCR(lch));
947         l &= ~OMAP_DMA_CCR_EN;
948         dma_write(l, CCR(lch));
949
950         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
951 }
952
953 /*
954  * Allows changing the DMA callback function or data. This may be needed if
955  * the driver shares a single DMA channel for multiple dma triggers.
956  */
957 int omap_set_dma_callback(int lch,
958                           void (* callback)(int lch, u16 ch_status, void *data),
959                           void *data)
960 {
961         unsigned long flags;
962
963         if (lch < 0)
964                 return -ENODEV;
965
966         spin_lock_irqsave(&dma_chan_lock, flags);
967         if (dma_chan[lch].dev_id == -1) {
968                 printk(KERN_ERR "DMA callback not set for free channel\n");
969                 spin_unlock_irqrestore(&dma_chan_lock, flags);
970                 return -EINVAL;
971         }
972         dma_chan[lch].callback = callback;
973         dma_chan[lch].data = data;
974         spin_unlock_irqrestore(&dma_chan_lock, flags);
975
976         return 0;
977 }
978
979 /*
980  * Returns the current physical source address for the given DMA channel.
981  * If the channel is running, the caller must disable interrupts prior to
982  * calling this function and process the returned value before re-enabling
983  * interrupts to prevent races with the interrupt handler. Note that in
984  * continuous mode there is a chance of a CSSA_L register overflow between
985  * the two reads, resulting in an incorrect return value.
986  */
987 dma_addr_t omap_get_dma_src_pos(int lch)
988 {
989         dma_addr_t offset = 0;
990
991         if (cpu_class_is_omap1())
992                 offset = (dma_addr_t)(dma_read(CSSA_L(lch)) |
993                                         (dma_read(CSSA_U(lch)) << 16));
994
995         if (cpu_class_is_omap2()) {
996                 offset = dma_read(CSAC(lch));
997
998                 /*
999                  * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1000                  * read before the DMA controller finished disabling the channel.
1001                  */
1002                 if (offset == 0)
1003                         offset = dma_read(CSAC(lch));
1004         }
1005
1006         return offset;
1007 }
1008
1009 /*
1010  * Returns the current physical destination address for the given DMA channel.
1011  * If the channel is running, the caller must disable interrupts prior to
1012  * calling this function and process the returned value before re-enabling
1013  * interrupts to prevent races with the interrupt handler. Note that in
1014  * continuous mode there is a chance of a CDSA_L register overflow between
1015  * the two reads, resulting in an incorrect return value.
1016  */
1017 dma_addr_t omap_get_dma_dst_pos(int lch)
1018 {
1019         dma_addr_t offset = 0;
1020
1021         if (cpu_class_is_omap1())
1022                 offset = (dma_addr_t)(dma_read(CDSA_L(lch)) |
1023                                         (dma_read(CDSA_U(lch)) << 16));
1024
1025         if (cpu_class_is_omap2()) {
1026                 offset = dma_read(CDAC(lch));
1027
1028                 /*
1029                  * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1030                  * read before the DMA controller finished disabling the channel.
1031                  */
1032                 if (offset == 0)
1033                         offset = dma_read(CDAC(lch));
1034         }
1035
1036         return offset;
1037 }
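
/*
 * Example (sketch): sampling the destination position of a running
 * channel with interrupts disabled, as required by the comments above.
 * my_lch is assumed to be a channel obtained from omap_request_dma().
 *
 *      unsigned long flags;
 *      dma_addr_t pos;
 *
 *      local_irq_save(flags);
 *      pos = omap_get_dma_dst_pos(my_lch);
 *      local_irq_restore(flags);
 */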
1038
1039 /*
1040  * Returns the current source transfer count for the given DMA channel.
1041  * Can be used to monitor the progress of a transfer inside a block.
1042  * It must be called with interrupts disabled.
1043  */
1044 int omap_get_dma_src_addr_counter(int lch)
1045 {
1046         return (dma_addr_t)dma_read(CSAC(lch));
1047 }
1048
1049 int omap_get_dma_active_status(int lch)
1050 {
1051         return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0;
1052 }
1053 EXPORT_SYMBOL(omap_get_dma_active_status);
1054
1055 int omap_dma_running(void)
1056 {
1057         int lch;
1058
1059         /* Check if LCD DMA is running */
1060         if (cpu_is_omap16xx())
1061                 if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
1062                         return 1;
1063
1064         for (lch = 0; lch < dma_chan_count; lch++)
1065                 if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN)
1066                         return 1;
1067
1068         return 0;
1069 }
1070
1071 /*
1072  * The lch_queue DMA will start right after the lch_head one finishes.
1073  * For this link to start, you still need to start the first channel
1074  * (see omap_start_dma); that will fire up the entire queue.
1075  */
1076 void omap_dma_link_lch (int lch_head, int lch_queue)
1077 {
1078         if (omap_dma_in_1510_mode()) {
1079                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1080                 BUG();
1081                 return;
1082         }
1083
1084         if ((dma_chan[lch_head].dev_id == -1) ||
1085             (dma_chan[lch_queue].dev_id == -1)) {
1086                 printk(KERN_ERR "omap_dma: trying to link "
1087                        "non-requested channels\n");
1088                 dump_stack();
1089         }
1090
1091         dma_chan[lch_head].next_lch = lch_queue;
1092 }
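
/*
 * Example (sketch): chain two previously requested channels so that the
 * hardware starts lch_b when lch_a completes; only the head of the link
 * needs an explicit omap_start_dma().  lch_a and lch_b are placeholders
 * for channels already configured with omap_set_dma_params().
 *
 *      omap_dma_link_lch(lch_a, lch_b);
 *      omap_start_dma(lch_a);
 *      ...
 *      omap_stop_dma(lch_a);
 *      omap_dma_unlink_lch(lch_a, lch_b);
 */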
1093
1094 /*
1095  * Once the DMA queue is stopped, we can destroy it.
1096  */
1097 void omap_dma_unlink_lch (int lch_head, int lch_queue)
1098 {
1099         if (omap_dma_in_1510_mode()) {
1100                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1101                 BUG();
1102                 return;
1103         }
1104
1105         if (dma_chan[lch_head].next_lch != lch_queue ||
1106             dma_chan[lch_head].next_lch == -1) {
1107                 printk(KERN_ERR "omap_dma: trying to unlink "
1108                        "non-linked channels\n");
1109                 dump_stack();
1110         }
1111
1112
1113         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1114             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1115                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1116                        "before unlinking\n");
1117                 dump_stack();
1118         }
1119
1120         dma_chan[lch_head].next_lch = -1;
1121 }
1122
1123 #ifndef CONFIG_ARCH_OMAP1
1124 /* Create a chain of DMA channels */
1125 static void create_dma_lch_chain(int lch_head, int lch_queue)
1126 {
1127         u32 l;
1128
1129         /* Check if this is the first link in chain */
1130         if (dma_chan[lch_head].next_linked_ch == -1) {
1131                 dma_chan[lch_head].next_linked_ch = lch_queue;
1132                 dma_chan[lch_head].prev_linked_ch = lch_queue;
1133                 dma_chan[lch_queue].next_linked_ch = lch_head;
1134                 dma_chan[lch_queue].prev_linked_ch = lch_head;
1135         }
1136
1137         /* a link exists, link the new channel in circular chain */
1138         else {
1139                 dma_chan[lch_queue].next_linked_ch =
1140                                         dma_chan[lch_head].next_linked_ch;
1141                 dma_chan[lch_queue].prev_linked_ch = lch_head;
1142                 dma_chan[lch_head].next_linked_ch = lch_queue;
1143                 dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1144                                         lch_queue;
1145         }
1146
1147         l = dma_read(CLNK_CTRL(lch_head));
1148         l &= ~(0x1f);
1149         l |= lch_queue;
1150         dma_write(l, CLNK_CTRL(lch_head));
1151
1152         l = dma_read(CLNK_CTRL(lch_queue));
1153         l &= ~(0x1f);
1154         l |= (dma_chan[lch_queue].next_linked_ch);
1155         dma_write(l, CLNK_CTRL(lch_queue));
1156 }
1157
1158 /**
1159  * @brief omap_request_dma_chain : Request a chain of DMA channels
1160  *
1161  * @param dev_id - Device id using the dma channel
1162  * @param dev_name - Device name
1163  * @param callback - Callback function
1164  * @chain_id -
1165  * @no_of_chans - Number of channels requested
1166  * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1167  *                                            OMAP_DMA_DYNAMIC_CHAIN
1168  * @params - Channel parameters
1169  *
1170  * @return - Success : 0
1171  *           Failure: -EINVAL/-ENOMEM
1172  */
1173 int omap_request_dma_chain(int dev_id, const char *dev_name,
1174                            void (*callback) (int chain_id, u16 ch_status,
1175                                              void *data),
1176                            int *chain_id, int no_of_chans, int chain_mode,
1177                            struct omap_dma_channel_params params)
1178 {
1179         int *channels;
1180         int i, err;
1181
1182         /* Is the chain mode valid ? */
1183         if (chain_mode != OMAP_DMA_STATIC_CHAIN
1184                         && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1185                 printk(KERN_ERR "Invalid chain mode requested\n");
1186                 return -EINVAL;
1187         }
1188
1189         if (unlikely((no_of_chans < 1
1190                         || no_of_chans > dma_lch_count))) {
1191                 printk(KERN_ERR "Invalid Number of channels requested\n");
1192                 return -EINVAL;
1193         }
1194
1195         /* Allocate a queue to maintain the status of the channels
1196          * in the chain */
1197         channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1198         if (channels == NULL) {
1199                 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1200                 return -ENOMEM;
1201         }
1202
1203         /* request and reserve DMA channels for the chain */
1204         for (i = 0; i < no_of_chans; i++) {
1205                 err = omap_request_dma(dev_id, dev_name,
1206                                         callback, 0, &channels[i]);
1207                 if (err < 0) {
1208                         int j;
1209                         for (j = 0; j < i; j++)
1210                                 omap_free_dma(channels[j]);
1211                         kfree(channels);
1212                         printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1213                         return err;
1214                 }
1215                 dma_chan[channels[i]].prev_linked_ch = -1;
1216                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1217
1218                 /*
1219                  * Allowing client drivers to set common parameters now,
1220                  * so that later only relevant (src_start, dest_start
1221                  * and element count) can be set
1222                  */
1223                 omap_set_dma_params(channels[i], &params);
1224         }
1225
1226         *chain_id = channels[0];
1227         dma_linked_lch[*chain_id].linked_dmach_q = channels;
1228         dma_linked_lch[*chain_id].chain_mode = chain_mode;
1229         dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1230         dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1231
1232         for (i = 0; i < no_of_chans; i++)
1233                 dma_chan[channels[i]].chain_id = *chain_id;
1234
1235         /* Reset the Queue pointers */
1236         OMAP_DMA_CHAIN_QINIT(*chain_id);
1237
1238         /* Set up the chain */
1239         if (no_of_chans == 1)
1240                 create_dma_lch_chain(channels[0], channels[0]);
1241         else {
1242                 for (i = 0; i < (no_of_chans - 1); i++)
1243                         create_dma_lch_chain(channels[i], channels[i + 1]);
1244         }
1245         return 0;
1246 }
1247 EXPORT_SYMBOL(omap_request_dma_chain);
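
/*
 * Usage sketch for the chaining API (illustrative; my_* names and
 * MY_DMA_REQ are placeholders): request a dynamic chain of two logical
 * channels, queue one transfer and start the chain.  Per-transfer source,
 * destination and element count go to omap_dma_chain_a_transfer(); the
 * remaining parameters come from the omap_dma_channel_params passed here.
 *
 *      int chain_id;
 *
 *      if (omap_request_dma_chain(MY_DMA_REQ, "my-dev", my_chain_cb,
 *                                 &chain_id, 2, OMAP_DMA_DYNAMIC_CHAIN,
 *                                 my_params) != 0)
 *              return -EBUSY;
 *      omap_dma_chain_a_transfer(chain_id, my_src_phys, my_dst_phys,
 *                                my_elem_count, 1, my_dev);
 *      omap_start_dma_chain_transfers(chain_id);
 *      ...
 *      omap_stop_dma_chain_transfers(chain_id);
 *      omap_free_dma_chain(chain_id);
 */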
1248
1249 /**
1250  * @brief omap_modify_dma_chain_params : Modify the chain's params - Modify the
1251  * params after setting them. Don't do this while DMA is running!
1252  *
1253  * @param chain_id - Chained logical channel id.
1254  * @param params
1255  *
1256  * @return - Success : 0
1257  *           Failure : -EINVAL
1258  */
1259 int omap_modify_dma_chain_params(int chain_id,
1260                                 struct omap_dma_channel_params params)
1261 {
1262         int *channels;
1263         u32 i;
1264
1265         /* Check for input params */
1266         if (unlikely((chain_id < 0
1267                         || chain_id >= dma_lch_count))) {
1268                 printk(KERN_ERR "Invalid chain id\n");
1269                 return -EINVAL;
1270         }
1271
1272         /* Check if the chain exists */
1273         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1274                 printk(KERN_ERR "Chain doesn't exist\n");
1275                 return -EINVAL;
1276         }
1277         channels = dma_linked_lch[chain_id].linked_dmach_q;
1278
1279         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1280                 /*
1281                  * Allowing client drivers to set common parameters now,
1282                  * so that later only relevant (src_start, dest_start
1283                  * and element count) can be set
1284                  */
1285                 omap_set_dma_params(channels[i], &params);
1286         }
1287         return 0;
1288 }
1289 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1290
1291 /**
1292  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1293  *
1294  * @param chain_id
1295  *
1296  * @return - Success : 0
1297  *           Failure : -EINVAL
1298  */
1299 int omap_free_dma_chain(int chain_id)
1300 {
1301         int *channels;
1302         u32 i;
1303
1304         /* Check for input params */
1305         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1306                 printk(KERN_ERR "Invalid chain id\n");
1307                 return -EINVAL;
1308         }
1309
1310         /* Check if the chain exists */
1311         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1312                 printk(KERN_ERR "Chain doesn't exist\n");
1313                 return -EINVAL;
1314         }
1315
1316         channels = dma_linked_lch[chain_id].linked_dmach_q;
1317         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1318                 dma_chan[channels[i]].next_linked_ch = -1;
1319                 dma_chan[channels[i]].prev_linked_ch = -1;
1320                 dma_chan[channels[i]].chain_id = -1;
1321                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1322                 omap_free_dma(channels[i]);
1323         }
1324
1325         kfree(channels);
1326
1327         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1328         dma_linked_lch[chain_id].chain_mode = -1;
1329         dma_linked_lch[chain_id].chain_state = -1;
1330         return (0);
1331 }
1332 EXPORT_SYMBOL(omap_free_dma_chain);
1333
1334 /**
1335  * @brief omap_dma_chain_status - Check if the chain is in
1336  * active / inactive state.
1337  * @param chain_id
1338  *
1339  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1340  *           Failure : -EINVAL
1341  */
1342 int omap_dma_chain_status(int chain_id)
1343 {
1344         /* Check for input params */
1345         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1346                 printk(KERN_ERR "Invalid chain id\n");
1347                 return -EINVAL;
1348         }
1349
1350         /* Check if the chain exists */
1351         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1352                 printk(KERN_ERR "Chain doesn't exist\n");
1353                 return -EINVAL;
1354         }
1355         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1356                         dma_linked_lch[chain_id].q_count);
1357
1358         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1359                 return OMAP_DMA_CHAIN_INACTIVE;
1360         return OMAP_DMA_CHAIN_ACTIVE;
1361 }
1362 EXPORT_SYMBOL(omap_dma_chain_status);
1363
1364 /**
1365  * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1366  * set the params and start the transfer.
1367  *
1368  * @param chain_id
1369  * @param src_start - buffer start address
1370  * @param dest_start - Dest address
1371  * @param elem_count
1372  * @param frame_count
1373  * @param callbk_data - channel callback parameter data.
1374  *
1375  * @return  - Success : 0
1376  *            Failure: -EINVAL/-EBUSY
1377  */
1378 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1379                         int elem_count, int frame_count, void *callbk_data)
1380 {
1381         int *channels;
1382         u32 l, lch;
1383         int start_dma = 0;
1384
1385         /* if the buffer size is less than 1 there is
1386          * no point in starting the chain */
1387         if (elem_count < 1) {
1388                 printk(KERN_ERR "Invalid buffer size\n");
1389                 return -EINVAL;
1390         }
1391
1392         /* Check for input params */
1393         if (unlikely((chain_id < 0
1394                         || chain_id >= dma_lch_count))) {
1395                 printk(KERN_ERR "Invalid chain id\n");
1396                 return -EINVAL;
1397         }
1398
1399         /* Check if the chain exists */
1400         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1401                 printk(KERN_ERR "Chain doesn't exist\n");
1402                 return -EINVAL;
1403         }
1404
1405         /* Check if all the channels in chain are in use */
1406         if (OMAP_DMA_CHAIN_QFULL(chain_id))
1407                 return -EBUSY;
1408
1409         /* Frame count may be negative in case of indexed transfers */
1410         channels = dma_linked_lch[chain_id].linked_dmach_q;
1411
1412         /* Get a free channel */
1413         lch = channels[dma_linked_lch[chain_id].q_tail];
1414
1415         /* Store the callback data */
1416         dma_chan[lch].data = callbk_data;
1417
1418         /* Increment the q_tail */
1419         OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1420
1421         /* Set the params to the free channel */
1422         if (src_start != 0)
1423                 dma_write(src_start, CSSA(lch));
1424         if (dest_start != 0)
1425                 dma_write(dest_start, CDSA(lch));
1426
1427         /* Write the buffer size */
1428         dma_write(elem_count, CEN(lch));
1429         dma_write(frame_count, CFN(lch));
1430
1431         /* If the chain is dynamically linked,
1432          * then we may have to start the chain if it's not active */
1433         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1434
1435                 /* In Dynamic chain, if the chain is not started,
1436                  * queue the channel */
1437                 if (dma_linked_lch[chain_id].chain_state ==
1438                                                 DMA_CHAIN_NOTSTARTED) {
1439                         /* Enable the link in previous channel */
1440                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1441                                                                 DMA_CH_QUEUED)
1442                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1443                         dma_chan[lch].state = DMA_CH_QUEUED;
1444                 }
1445
1446                 /* Chain is already started, make sure its active,
1447                  * if not then start the chain */
1448                 else {
1449                         start_dma = 1;
1450
1451                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1452                                                         DMA_CH_STARTED) {
1453                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1454                                 dma_chan[lch].state = DMA_CH_QUEUED;
1455                                 start_dma = 0;
1456                                 if (0 == ((1 << 7) & dma_read(
1457                                         CCR(dma_chan[lch].prev_linked_ch)))) {
1458                                         disable_lnk(dma_chan[lch].
1459                                                     prev_linked_ch);
1460                                         pr_debug("\n prev ch is stopped\n");
1461                                         start_dma = 1;
1462                                 }
1463                         }
1464
1465                         else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1466                                                         == DMA_CH_QUEUED) {
1467                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1468                                 dma_chan[lch].state = DMA_CH_QUEUED;
1469                                 start_dma = 0;
1470                         }
1471                         omap_enable_channel_irq(lch);
1472
1473                         l = dma_read(CCR(lch));
1474
1475                         if ((0 == (l & (1 << 24))))
1476                                 l &= ~(1 << 25);
1477                         else
1478                                 l |= (1 << 25);
1479                         if (start_dma == 1) {
1480                                 if (0 == (l & (1 << 7))) {
1481                                         l |= (1 << 7);
1482                                         dma_chan[lch].state = DMA_CH_STARTED;
1483                                         pr_debug("starting %d\n", lch);
1484                                         dma_write(l, CCR(lch));
1485                                 } else
1486                                         start_dma = 0;
1487                         } else {
1488                                 if (0 == (l & (1 << 7)))
1489                                         dma_write(l, CCR(lch));
1490                         }
1491                         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1492                 }
1493         }
1494         return 0;
1495 }
1496 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1497
1498 /**
1499  * @brief omap_start_dma_chain_transfers - Start the chain
1500  *
1501  * @param chain_id
1502  *
1503  * @return - Success : 0
1504  *           Failure : -EINVAL/-EBUSY
1505  */
1506 int omap_start_dma_chain_transfers(int chain_id)
1507 {
1508         int *channels;
1509         u32 l, i;
1510
1511         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1512                 printk(KERN_ERR "Invalid chain id\n");
1513                 return -EINVAL;
1514         }
1515
1516         channels = dma_linked_lch[chain_id].linked_dmach_q;
1517
1518         if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1519                 printk(KERN_ERR "Chain is already started\n");
1520                 return -EBUSY;
1521         }
1522
1523         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1524                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1525                                                                         i++) {
1526                         enable_lnk(channels[i]);
1527                         omap_enable_channel_irq(channels[i]);
1528                 }
1529         } else {
1530                 omap_enable_channel_irq(channels[0]);
1531         }
1532
1533         l = dma_read(CCR(channels[0]));
1534         l |= (1 << 7);
1535         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1536         dma_chan[channels[0]].state = DMA_CH_STARTED;
1537
1538         if ((0 == (l & (1 << 24))))
1539                 l &= ~(1 << 25);
1540         else
1541                 l |= (1 << 25);
1542         dma_write(l, CCR(channels[0]));
1543
1544         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1545         return 0;
1546 }
1547 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1548
1549 /**
1550  * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1551  *
1552  * @param chain_id
1553  *
1554  * @return - Success : 0
1555  *           Failure : -EINVAL
1556  */
1557 int omap_stop_dma_chain_transfers(int chain_id)
1558 {
1559         int *channels;
1560         u32 l, i;
1561         u32 sys_cf;
1562
1563         /* Check for input params */
1564         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1565                 printk(KERN_ERR "Invalid chain id\n");
1566                 return -EINVAL;
1567         }
1568
1569         /* Check if the chain exists */
1570         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1571                 printk(KERN_ERR "Chain doesn't exist\n");
1572                 return -EINVAL;
1573         }
1574         channels = dma_linked_lch[chain_id].linked_dmach_q;
1575
1576         /* DMA Errata:
1577          * Special programming model needed to disable DMA before end of block
1578          */
1579         sys_cf = dma_read(OCP_SYSCONFIG);
1580         l = sys_cf;
1581         /* Set OCP_SYSCONFIG middle mode to no-standby */
1582         l &= ~((1 << 12)|(1 << 13));
1583         dma_write(l, OCP_SYSCONFIG);
1584
1585         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1586
1587                 /* Stop the Channel transmission */
1588                 l = dma_read(CCR(channels[i]));
1589                 l &= ~(1 << 7);
1590                 dma_write(l, CCR(channels[i]));
1591
1592                 /* Disable the link in all the channels */
1593                 disable_lnk(channels[i]);
1594                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1595
1596         }
1597         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1598
1599         /* Reset the Queue pointers */
1600         OMAP_DMA_CHAIN_QINIT(chain_id);
1601
1602         /* Errata - restore the original OCP_SYSCONFIG value */
1603         dma_write(sys_cf, OCP_SYSCONFIG);
1604         return 0;
1605 }
1606 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1607
1608 /* Get the index of the ongoing DMA in chain */
1609 /**
1610  * @brief omap_get_dma_chain_index - Get the element and frame index
1611  * of the ongoing DMA in chain
1612  *
1613  * @param chain_id
1614  * @param ei - Element index
1615  * @param fi - Frame index
1616  *
1617  * @return - Success : 0
1618  *           Failure : -EINVAL
1619  */
1620 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1621 {
1622         int lch;
1623         int *channels;
1624
1625         /* Check for input params */
1626         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1627                 printk(KERN_ERR "Invalid chain id\n");
1628                 return -EINVAL;
1629         }
1630
1631         /* Check if the chain exists */
1632         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1633                 printk(KERN_ERR "Chain doesn't exist\n");
1634                 return -EINVAL;
1635         }
1636         if ((!ei) || (!fi))
1637                 return -EINVAL;
1638
1639         channels = dma_linked_lch[chain_id].linked_dmach_q;
1640
1641         /* Get the current channel */
1642         lch = channels[dma_linked_lch[chain_id].q_head];
1643
1644         *ei = dma_read(CCEN(lch));
1645         *fi = dma_read(CCFN(lch));
1646
1647         return 0;
1648 }
1649 EXPORT_SYMBOL(omap_get_dma_chain_index);
1650
1651 /**
1652  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1653  * ongoing DMA in chain
1654  *
1655  * @param chain_id
1656  *
1657  * @return - Success : Destination position
1658  *           Failure : -EINVAL
1659  */
1660 int omap_get_dma_chain_dst_pos(int chain_id)
1661 {
1662         int lch;
1663         int *channels;
1664
1665         /* Check for input params */
1666         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1667                 printk(KERN_ERR "Invalid chain id\n");
1668                 return -EINVAL;
1669         }
1670
1671         /* Check if the chain exists */
1672         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1673                 printk(KERN_ERR "Chain doesn't exist\n");
1674                 return -EINVAL;
1675         }
1676
1677         channels = dma_linked_lch[chain_id].linked_dmach_q;
1678
1679         /* Get the current channel */
1680         lch = channels[dma_linked_lch[chain_id].q_head];
1681
1682         return dma_read(CDAC(lch));
1683 }
1684 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1685
1686 /**
1687  * @brief omap_get_dma_chain_src_pos - Get the source position
1688  * of the ongoing DMA in chain
1689  * @param chain_id
1690  *
1691  * @return - Success : Source position
1692  *           Failure : -EINVAL
1693  */
1694 int omap_get_dma_chain_src_pos(int chain_id)
1695 {
1696         int lch;
1697         int *channels;
1698
1699         /* Check for input params */
1700         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1701                 printk(KERN_ERR "Invalid chain id\n");
1702                 return -EINVAL;
1703         }
1704
1705         /* Check if the chain exists */
1706         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1707                 printk(KERN_ERR "Chain doesn't exist\n");
1708                 return -EINVAL;
1709         }
1710
1711         channels = dma_linked_lch[chain_id].linked_dmach_q;
1712
1713         /* Get the current channel */
1714         lch = channels[dma_linked_lch[chain_id].q_head];
1715
1716         return dma_read(CSAC(lch));
1717 }
1718 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
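
/*
 * Illustrative sketch (not compiled into this file): progress of a running
 * chain can be polled with the three helpers above.  A negative return from
 * the *_pos() helpers means the chain id was invalid.
 *
 *      int ei, fi;
 *
 *      if (omap_get_dma_chain_index(chain_id, &ei, &fi) == 0)
 *              pr_debug("chain %d: element %d, frame %d\n",
 *                       chain_id, ei, fi);
 *      pr_debug("src 0x%08x dst 0x%08x\n",
 *               omap_get_dma_chain_src_pos(chain_id),
 *               omap_get_dma_chain_dst_pos(chain_id));
 */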
1719 #endif
1720
1721 /*----------------------------------------------------------------------------*/
1722
1723 #ifdef CONFIG_ARCH_OMAP1
1724
1725 static int omap1_dma_handle_ch(int ch)
1726 {
1727         u32 csr;
1728
1729         if (enable_1510_mode && ch >= 6) {
1730                 csr = dma_chan[ch].saved_csr;
1731                 dma_chan[ch].saved_csr = 0;
1732         } else
1733                 csr = dma_read(CSR(ch));
1734         if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1735                 dma_chan[ch + 6].saved_csr = csr >> 7;
1736                 csr &= 0x7f;
1737         }
1738         if ((csr & 0x3f) == 0)
1739                 return 0;
1740         if (unlikely(dma_chan[ch].dev_id == -1)) {
1741                 printk(KERN_WARNING "Spurious interrupt from DMA channel "
1742                        "%d (CSR %04x)\n", ch, csr);
1743                 return 0;
1744         }
1745         if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1746                 printk(KERN_WARNING "DMA timeout with device %d\n",
1747                        dma_chan[ch].dev_id);
1748         if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1749                 printk(KERN_WARNING "DMA synchronization event drop occurred "
1750                        "with device %d\n", dma_chan[ch].dev_id);
1751         if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1752                 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1753         if (likely(dma_chan[ch].callback != NULL))
1754                 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1755         return 1;
1756 }
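
/*
 * Illustrative sketch (not compiled into this file): the csr value decoded
 * above is what a client callback registered through omap_request_dma()
 * receives as ch_status, so a driver callback might look roughly like this
 * (the function name is made up for the example):
 *
 *      static void example_dma_cb(int lch, u16 ch_status, void *data)
 *      {
 *              if (ch_status & OMAP_DMA_BLOCK_IRQ)
 *                      printk(KERN_DEBUG "lch %d: block done\n", lch);
 *              if (ch_status & OMAP_DMA_DROP_IRQ)
 *                      printk(KERN_WARNING "lch %d: sync event dropped\n",
 *                             lch);
 *      }
 */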
1757
1758 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1759 {
1760         int ch = ((int) dev_id) - 1;
1761         int handled = 0;
1762
1763         for (;;) {
1764                 int handled_now = 0;
1765
1766                 handled_now += omap1_dma_handle_ch(ch);
1767                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1768                         handled_now += omap1_dma_handle_ch(ch + 6);
1769                 if (!handled_now)
1770                         break;
1771                 handled += handled_now;
1772         }
1773
1774         return handled ? IRQ_HANDLED : IRQ_NONE;
1775 }
1776
1777 #else
1778 #define omap1_dma_irq_handler   NULL
1779 #endif
1780
1781 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1782
1783 static int omap2_dma_handle_ch(int ch)
1784 {
1785         u32 status = dma_read(CSR(ch));
1786
1787         if (!status) {
1788                 if (printk_ratelimit())
1789                         printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", ch);
1790                 dma_write(1 << ch, IRQSTATUS_L0);
1791                 return 0;
1792         }
1793         if (unlikely(dma_chan[ch].dev_id == -1)) {
1794                 if (printk_ratelimit())
1795                         printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
1796                                         " channel %d\n", status, ch);
1797                 return 0;
1798         }
1799         if (unlikely(status & OMAP_DMA_DROP_IRQ))
1800                 printk(KERN_INFO
1801                        "DMA synchronization event drop occurred with device "
1802                        "%d\n", dma_chan[ch].dev_id);
1803         if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
1804                 printk(KERN_INFO "DMA transaction error with device %d\n",
1805                        dma_chan[ch].dev_id);
1806         if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1807                 printk(KERN_INFO "DMA secure error with device %d\n",
1808                        dma_chan[ch].dev_id);
1809         if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1810                 printk(KERN_INFO "DMA misaligned error with device %d\n",
1811                        dma_chan[ch].dev_id);
1812
1813         dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
1814         dma_write(1 << ch, IRQSTATUS_L0);
1815
1816         /* If the channel is not chained, chain_id will be -1 */
1817         if (dma_chan[ch].chain_id != -1) {
1818                 int chain_id = dma_chan[ch].chain_id;
1819                 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1820                 if (dma_read(CLNK_CTRL(ch)) & (1 << 15))
1821                         dma_chan[dma_chan[ch].next_linked_ch].state =
1822                                                         DMA_CH_STARTED;
1823                 if (dma_linked_lch[chain_id].chain_mode ==
1824                                                 OMAP_DMA_DYNAMIC_CHAIN)
1825                         disable_lnk(ch);
1826
1827                 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1828                         OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1829
1830                 status = dma_read(CSR(ch));
1831         }
1832
1833         if (likely(dma_chan[ch].callback != NULL))
1834                 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1835
1836         dma_write(status, CSR(ch));
1837
1838         return 0;
1839 }
1840
1841 /* STATUS register count is from 1-32 while ours is 0-31 */
1842 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1843 {
1844         u32 val;
1845         int i;
1846
1847         val = dma_read(IRQSTATUS_L0);
1848         if (val == 0) {
1849                 if (printk_ratelimit())
1850                         printk(KERN_WARNING "Spurious DMA IRQ\n");
1851                 return IRQ_HANDLED;
1852         }
1853         for (i = 0; i < dma_lch_count && val != 0; i++) {
1854                 if (val & 1)
1855                         omap2_dma_handle_ch(i);
1856                 val >>= 1;
1857         }
1858
1859         return IRQ_HANDLED;
1860 }
1861
1862 static struct irqaction omap24xx_dma_irq = {
1863         .name = "DMA",
1864         .handler = omap2_dma_irq_handler,
1865         .flags = IRQF_DISABLED
1866 };
1867
1868 #else
1869 static struct irqaction omap24xx_dma_irq;
1870 #endif
1871
1872 /*----------------------------------------------------------------------------*/
1873
1874 static struct lcd_dma_info {
1875         spinlock_t lock;
1876         int reserved;
1877         void (* callback)(u16 status, void *data);
1878         void *cb_data;
1879
1880         int active;
1881         unsigned long addr, size;
1882         int rotate, data_type, xres, yres;
1883         int vxres;
1884         int mirror;
1885         int xscale, yscale;
1886         int ext_ctrl;
1887         int src_port;
1888         int single_transfer;
1889 } lcd_dma;
1890
1891 void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
1892                          int data_type)
1893 {
1894         lcd_dma.addr = addr;
1895         lcd_dma.data_type = data_type;
1896         lcd_dma.xres = fb_xres;
1897         lcd_dma.yres = fb_yres;
1898 }
1899
1900 void omap_set_lcd_dma_src_port(int port)
1901 {
1902         lcd_dma.src_port = port;
1903 }
1904
1905 void omap_set_lcd_dma_ext_controller(int external)
1906 {
1907         lcd_dma.ext_ctrl = external;
1908 }
1909
1910 void omap_set_lcd_dma_single_transfer(int single)
1911 {
1912         lcd_dma.single_transfer = single;
1913 }
1914
1915
1916 void omap_set_lcd_dma_b1_rotation(int rotate)
1917 {
1918         if (omap_dma_in_1510_mode()) {
1919                 printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
1920                 BUG();
1921                 return;
1922         }
1923         lcd_dma.rotate = rotate;
1924 }
1925
1926 void omap_set_lcd_dma_b1_mirror(int mirror)
1927 {
1928         if (omap_dma_in_1510_mode()) {
1929                 printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
1930                 BUG();
1931         }
1932         lcd_dma.mirror = mirror;
1933 }
1934
1935 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
1936 {
1937         if (omap_dma_in_1510_mode()) {
1938                 printk(KERN_ERR "DMA virtual resolution is not supported "
1939                                 "in 1510 mode\n");
1940                 BUG();
1941         }
1942         lcd_dma.vxres = vxres;
1943 }
1944
1945 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
1946 {
1947         if (omap_dma_in_1510_mode()) {
1948                 printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
1949                 BUG();
1950         }
1951         lcd_dma.xscale = xscale;
1952         lcd_dma.yscale = yscale;
1953 }
1954
1955 static void set_b1_regs(void)
1956 {
1957         unsigned long top, bottom;
1958         int es;
1959         u16 w;
1960         unsigned long en, fn;
1961         long ei, fi;
1962         unsigned long vxres;
1963         unsigned int xscale, yscale;
1964
1965         switch (lcd_dma.data_type) {
1966         case OMAP_DMA_DATA_TYPE_S8:
1967                 es = 1;
1968                 break;
1969         case OMAP_DMA_DATA_TYPE_S16:
1970                 es = 2;
1971                 break;
1972         case OMAP_DMA_DATA_TYPE_S32:
1973                 es = 4;
1974                 break;
1975         default:
1976                 BUG();
1977                 return;
1978         }
1979
1980         vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres;
1981         xscale = lcd_dma.xscale ? lcd_dma.xscale : 1;
1982         yscale = lcd_dma.yscale ? lcd_dma.yscale : 1;
1983         BUG_ON(vxres < lcd_dma.xres);
1984 #define PIXADDR(x,y) (lcd_dma.addr + ((y) * vxres * yscale + (x) * xscale) * es)
1985 #define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1)
1986         switch (lcd_dma.rotate) {
1987         case 0:
1988                 if (!lcd_dma.mirror) {
1989                         top = PIXADDR(0, 0);
1990                         bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1991                         /* 1510 DMA requires the bottom address to be 2 more
1992                          * than the actual last memory access location. */
1993                         if (omap_dma_in_1510_mode() &&
1994                             lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
1995                                 bottom += 2;
1996                         ei = PIXSTEP(0, 0, 1, 0);
1997                         fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1);
1998                 } else {
1999                         top = PIXADDR(lcd_dma.xres - 1, 0);
2000                         bottom = PIXADDR(0, lcd_dma.yres - 1);
2001                         ei = PIXSTEP(1, 0, 0, 0);
2002                         fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1);
2003                 }
2004                 en = lcd_dma.xres;
2005                 fn = lcd_dma.yres;
2006                 break;
2007         case 90:
2008                 if (!lcd_dma.mirror) {
2009                         top = PIXADDR(0, lcd_dma.yres - 1);
2010                         bottom = PIXADDR(lcd_dma.xres - 1, 0);
2011                         ei = PIXSTEP(0, 1, 0, 0);
2012                         fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1);
2013                 } else {
2014                         top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
2015                         bottom = PIXADDR(0, 0);
2016                         ei = PIXSTEP(0, 1, 0, 0);
2017                         fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1);
2018                 }
2019                 en = lcd_dma.yres;
2020                 fn = lcd_dma.xres;
2021                 break;
2022         case 180:
2023                 if (!lcd_dma.mirror) {
2024                         top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
2025                         bottom = PIXADDR(0, 0);
2026                         ei = PIXSTEP(1, 0, 0, 0);
2027                         fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0);
2028                 } else {
2029                         top = PIXADDR(0, lcd_dma.yres - 1);
2030                         bottom = PIXADDR(lcd_dma.xres - 1, 0);
2031                         ei = PIXSTEP(0, 0, 1, 0);
2032                         fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0);
2033                 }
2034                 en = lcd_dma.xres;
2035                 fn = lcd_dma.yres;
2036                 break;
2037         case 270:
2038                 if (!lcd_dma.mirror) {
2039                         top = PIXADDR(lcd_dma.xres - 1, 0);
2040                         bottom = PIXADDR(0, lcd_dma.yres - 1);
2041                         ei = PIXSTEP(0, 0, 0, 1);
2042                         fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0);
2043                 } else {
2044                         top = PIXADDR(0, 0);
2045                         bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
2046                         ei = PIXSTEP(0, 0, 0, 1);
2047                         fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0);
2048                 }
2049                 en = lcd_dma.yres;
2050                 fn = lcd_dma.xres;
2051                 break;
2052         default:
2053                 BUG();
2054                 return; /* Suppress warning about uninitialized vars */
2055         }
2056
2057         if (omap_dma_in_1510_mode()) {
2058                 omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
2059                 omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
2060                 omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
2061                 omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
2062
2063                 return;
2064         }
2065
2066         /* 1610 regs */
2067         omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
2068         omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
2069         omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
2070         omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
2071
2072         omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
2073         omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
2074
2075         w = omap_readw(OMAP1610_DMA_LCD_CSDP);
2076         w &= ~0x03;
2077         w |= lcd_dma.data_type;
2078         omap_writew(w, OMAP1610_DMA_LCD_CSDP);
2079
2080         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2081         /* Always set the source port as SDRAM for now */
2082         w &= ~(0x03 << 6);
2083         if (lcd_dma.callback != NULL)
2084                 w |= 1 << 1;            /* Block interrupt enable */
2085         else
2086                 w &= ~(1 << 1);
2087         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2088
2089         if (!(lcd_dma.rotate || lcd_dma.mirror ||
2090               lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale))
2091                 return;
2092
2093         w = omap_readw(OMAP1610_DMA_LCD_CCR);
2094         /* Set the double-indexed addressing mode */
2095         w |= (0x03 << 12);
2096         omap_writew(w, OMAP1610_DMA_LCD_CCR);
2097
2098         omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
2099         omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
2100         omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
2101 }
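
/*
 * Worked example of the PIXADDR()/PIXSTEP() arithmetic above (numbers are
 * illustrative only): with 16-bit pixels (es = 2), xres = vxres = 240 and
 * no scaling, the unrotated, unmirrored case gives
 *
 *      ei = PIXSTEP(0, 0, 1, 0)   = (addr + 2)   - addr         - 2 + 1 = 1
 *      fi = PIXSTEP(239, 0, 0, 1) = (addr + 480) - (addr + 478) - 2 + 1 = 1
 *
 * i.e. unit element and frame steps, a plain linear walk of the frame
 * buffer.  That matches the early return above, which skips programming the
 * EI/FI registers entirely when no rotation, mirroring or scaling is in use.
 */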
2102
2103 static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id)
2104 {
2105         u16 w;
2106
2107         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2108         if (unlikely(!(w & (1 << 3)))) {
2109                 printk(KERN_WARNING "Spurious LCD DMA IRQ\n");
2110                 return IRQ_NONE;
2111         }
2112         /* Ack the IRQ */
2113         w |= (1 << 3);
2114         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2115         lcd_dma.active = 0;
2116         if (lcd_dma.callback != NULL)
2117                 lcd_dma.callback(w, lcd_dma.cb_data);
2118
2119         return IRQ_HANDLED;
2120 }
2121
2122 int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
2123                          void *data)
2124 {
2125         spin_lock_irq(&lcd_dma.lock);
2126         if (lcd_dma.reserved) {
2127                 spin_unlock_irq(&lcd_dma.lock);
2128                 printk(KERN_ERR "LCD DMA channel already reserved\n");
2129                 BUG();
2130                 return -EBUSY;
2131         }
2132         lcd_dma.reserved = 1;
2133         spin_unlock_irq(&lcd_dma.lock);
2134         lcd_dma.callback = callback;
2135         lcd_dma.cb_data = data;
2136         lcd_dma.active = 0;
2137         lcd_dma.single_transfer = 0;
2138         lcd_dma.rotate = 0;
2139         lcd_dma.vxres = 0;
2140         lcd_dma.mirror = 0;
2141         lcd_dma.xscale = 0;
2142         lcd_dma.yscale = 0;
2143         lcd_dma.ext_ctrl = 0;
2144         lcd_dma.src_port = 0;
2145
2146         return 0;
2147 }
2148
2149 void omap_free_lcd_dma(void)
2150 {
2151         spin_lock(&lcd_dma.lock);
2152         if (!lcd_dma.reserved) {
2153                 spin_unlock(&lcd_dma.lock);
2154                 printk(KERN_ERR "LCD DMA is not reserved\n");
2155                 BUG();
2156                 return;
2157         }
2158         if (!enable_1510_mode)
2159                 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
2160                             OMAP1610_DMA_LCD_CCR);
2161         lcd_dma.reserved = 0;
2162         spin_unlock(&lcd_dma.lock);
2163 }
2164
2165 void omap_enable_lcd_dma(void)
2166 {
2167         u16 w;
2168
2169         /* Set the Enable bit only if an external controller is
2170          * connected. Otherwise the OMAP internal controller will
2171          * start the transfer when it gets enabled.
2172          */
2173         if (enable_1510_mode || !lcd_dma.ext_ctrl)
2174                 return;
2175
2176         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2177         w |= 1 << 8;
2178         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2179
2180         lcd_dma.active = 1;
2181
2182         w = omap_readw(OMAP1610_DMA_LCD_CCR);
2183         w |= 1 << 7;
2184         omap_writew(w, OMAP1610_DMA_LCD_CCR);
2185 }
2186
2187 void omap_setup_lcd_dma(void)
2188 {
2189         BUG_ON(lcd_dma.active);
2190         if (!enable_1510_mode) {
2191                 /* Set some reasonable defaults */
2192                 omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
2193                 omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
2194                 omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
2195         }
2196         set_b1_regs();
2197         if (!enable_1510_mode) {
2198                 u16 w;
2199
2200                 w = omap_readw(OMAP1610_DMA_LCD_CCR);
2201                 /* If DMA was already active, set the end_prog bit to have
2202                  * the programmed register set loaded into the active
2203                  * register set.
2204                  */
2205                 w |= 1 << 11;           /* End_prog */
2206                 if (!lcd_dma.single_transfer)
2207                         w |= (3 << 8);  /* Auto_init, repeat */
2208                 omap_writew(w, OMAP1610_DMA_LCD_CCR);
2209         }
2210 }
2211
2212 void omap_stop_lcd_dma(void)
2213 {
2214         u16 w;
2215
2216         lcd_dma.active = 0;
2217         if (enable_1510_mode || !lcd_dma.ext_ctrl)
2218                 return;
2219
2220         w = omap_readw(OMAP1610_DMA_LCD_CCR);
2221         w &= ~(1 << 7);
2222         omap_writew(w, OMAP1610_DMA_LCD_CCR);
2223
2224         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2225         w &= ~(1 << 8);
2226         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2227 }
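
/*
 * Illustrative sketch (not compiled into this file) of the LCD DMA call
 * sequence a frame buffer driver using an external LCD controller might
 * follow; the buffer address and the 240x320 geometry are made-up example
 * values.
 *
 *      if (omap_request_lcd_dma(NULL, NULL))
 *              return -EBUSY;
 *      omap_set_lcd_dma_b1(fb_paddr, 240, 320, OMAP_DMA_DATA_TYPE_S16);
 *      omap_set_lcd_dma_ext_controller(1);
 *      omap_setup_lcd_dma();
 *      omap_enable_lcd_dma();
 *      ...
 *      omap_stop_lcd_dma();
 *      omap_free_lcd_dma();
 */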
2228
2229 /*----------------------------------------------------------------------------*/
2230
2231 static int __init omap_init_dma(void)
2232 {
2233         int ch, r;
2234
2235         if (cpu_class_is_omap1()) {
2236                 omap_dma_base = (void __iomem *)IO_ADDRESS(OMAP1_DMA_BASE);
2237                 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
2238         } else if (cpu_is_omap24xx()) {
2239                 omap_dma_base = (void __iomem *)IO_ADDRESS(OMAP24XX_DMA4_BASE);
2240                 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2241         } else if (cpu_is_omap34xx()) {
2242                 omap_dma_base = (void __iomem *)IO_ADDRESS(OMAP34XX_DMA4_BASE);
2243                 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2244         } else {
2245                 pr_err("DMA init failed for unsupported omap\n");
2246                 return -ENODEV;
2247         }
2248
2249         dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
2250                                 GFP_KERNEL);
2251         if (!dma_chan)
2252                 return -ENOMEM;
2253
2254         if (cpu_class_is_omap2()) {
2255                 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2256                                                 dma_lch_count, GFP_KERNEL);
2257                 if (!dma_linked_lch) {
2258                         kfree(dma_chan);
2259                         return -ENOMEM;
2260                 }
2261         }
2262
2263         if (cpu_is_omap15xx()) {
2264                 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2265                 dma_chan_count = 9;
2266                 enable_1510_mode = 1;
2267         } else if (cpu_is_omap16xx() || cpu_is_omap730()) {
2268                 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2269                        dma_read(HW_ID));
2270                 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2271                        (dma_read(CAPS_0_U) << 16) |
2272                        dma_read(CAPS_0_L),
2273                        (dma_read(CAPS_1_U) << 16) |
2274                        dma_read(CAPS_1_L),
2275                        dma_read(CAPS_2), dma_read(CAPS_3),
2276                        dma_read(CAPS_4));
2277                 if (!enable_1510_mode) {
2278                         u16 w;
2279
2280                         /* Disable OMAP 3.0/3.1 compatibility mode. */
2281                         w = dma_read(GSCR);
2282                         w |= 1 << 3;
2283                         dma_write(w, GSCR);
2284                         dma_chan_count = 16;
2285                 } else
2286                         dma_chan_count = 9;
2287                 if (cpu_is_omap16xx()) {
2288                         u16 w;
2289
2290                         /* Leaving this bit set would prevent OMAP sleep */
2291                         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2292                         w &= ~(1 << 8);
2293                         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2294                 }
2295         } else if (cpu_class_is_omap2()) {
2296                 u8 revision = dma_read(REVISION) & 0xff;
2297                 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2298                        revision >> 4, revision & 0xf);
2299                 dma_chan_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2300         } else {
2301                 dma_chan_count = 0;
2302                 return 0;
2303         }
2304
2305         spin_lock_init(&lcd_dma.lock);
2306         spin_lock_init(&dma_chan_lock);
2307
2308         for (ch = 0; ch < dma_chan_count; ch++) {
2309                 omap_clear_dma(ch);
2310                 dma_chan[ch].dev_id = -1;
2311                 dma_chan[ch].next_lch = -1;
2312
2313                 if (ch >= 6 && enable_1510_mode)
2314                         continue;
2315
2316                 if (cpu_class_is_omap1()) {
2317                         /* request_irq() doesn't like dev_id (i.e. ch) being
2318                          * zero, so we have to kludge around this. */
2319                         r = request_irq(omap1_dma_irq[ch],
2320                                         omap1_dma_irq_handler, 0, "DMA",
2321                                         (void *) (ch + 1));
2322                         if (r != 0) {
2323                                 int i;
2324
2325                                 printk(KERN_ERR "unable to request IRQ %d "
2326                                        "for DMA (error %d)\n",
2327                                        omap1_dma_irq[ch], r);
2328                                 for (i = 0; i < ch; i++)
2329                                         free_irq(omap1_dma_irq[i],
2330                                                  (void *) (i + 1));
2331                                 return r;
2332                         }
2333                 }
2334         }
2335
2336         if (cpu_is_omap2430() || cpu_is_omap34xx())
2337                 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2338                                 DMA_DEFAULT_FIFO_DEPTH, 0);
2339
2340         if (cpu_class_is_omap2())
2341                 setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
2342
2343         /* FIXME: Update LCD DMA to work on 24xx */
2344         if (cpu_class_is_omap1()) {
2345                 r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
2346                                 "LCD DMA", NULL);
2347                 if (r != 0) {
2348                         int i;
2349
2350                         printk(KERN_ERR "unable to request IRQ for LCD DMA "
2351                                "(error %d)\n", r);
2352                         for (i = 0; i < dma_chan_count; i++)
2353                                 free_irq(omap1_dma_irq[i], (void *) (i + 1));
2354                         return r;
2355                 }
2356         }
2357
2358         return 0;
2359 }
2360
2361 arch_initcall(omap_init_dma);
2362
2363 EXPORT_SYMBOL(omap_get_dma_src_pos);
2364 EXPORT_SYMBOL(omap_get_dma_dst_pos);
2365 EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
2366 EXPORT_SYMBOL(omap_clear_dma);
2367 EXPORT_SYMBOL(omap_set_dma_priority);
2368 EXPORT_SYMBOL(omap_request_dma);
2369 EXPORT_SYMBOL(omap_free_dma);
2370 EXPORT_SYMBOL(omap_start_dma);
2371 EXPORT_SYMBOL(omap_stop_dma);
2372 EXPORT_SYMBOL(omap_set_dma_callback);
2373 EXPORT_SYMBOL(omap_enable_dma_irq);
2374 EXPORT_SYMBOL(omap_disable_dma_irq);
2375
2376 EXPORT_SYMBOL(omap_set_dma_transfer_params);
2377 EXPORT_SYMBOL(omap_set_dma_color_mode);
2378 EXPORT_SYMBOL(omap_set_dma_write_mode);
2379
2380 EXPORT_SYMBOL(omap_set_dma_src_params);
2381 EXPORT_SYMBOL(omap_set_dma_src_index);
2382 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
2383 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
2384
2385 EXPORT_SYMBOL(omap_set_dma_dest_params);
2386 EXPORT_SYMBOL(omap_set_dma_dest_index);
2387 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
2388 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
2389
2390 EXPORT_SYMBOL(omap_set_dma_params);
2391
2392 EXPORT_SYMBOL(omap_dma_link_lch);
2393 EXPORT_SYMBOL(omap_dma_unlink_lch);
2394
2395 EXPORT_SYMBOL(omap_request_lcd_dma);
2396 EXPORT_SYMBOL(omap_free_lcd_dma);
2397 EXPORT_SYMBOL(omap_enable_lcd_dma);
2398 EXPORT_SYMBOL(omap_setup_lcd_dma);
2399 EXPORT_SYMBOL(omap_stop_lcd_dma);
2400 EXPORT_SYMBOL(omap_set_lcd_dma_b1);
2401 EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
2402 EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
2403 EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
2404 EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
2405 EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale);
2406 EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
2407