1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics transformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Support functions for the OMAP internal DMA channels.
14  *
15  * This program is free software; you can redistribute it and/or modify
16  * it under the terms of the GNU General Public License version 2 as
17  * published by the Free Software Foundation.
18  *
19  */
20
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/sched.h>
24 #include <linux/spinlock.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28
29 #include <asm/system.h>
30 #include <asm/hardware.h>
31 #include <asm/dma.h>
32 #include <asm/io.h>
33
34 #include <asm/arch/tc.h>
35
36 #define DEBUG_PRINTS
37 #undef DEBUG_PRINTS
38 #ifdef DEBUG_PRINTS
39 #define debug_printk(x) printk x
40 #else
41 #define debug_printk(x)
42 #endif
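
/*
 * Note that debug_printk() expands to "printk x", so the format string
 * and its arguments must be wrapped in an extra pair of parentheses,
 * e.g. (purely illustrative):
 *
 *	debug_printk(("dma: lch %d csr %04x\n", lch, csr));
 */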
43
44 #define OMAP_DMA_ACTIVE         0x01
45 #define OMAP_DMA_CCR_EN         (1 << 7)
46 #define OMAP2_DMA_CSR_CLEAR_MASK        0xffe
47
48 #define OMAP_FUNC_MUX_ARM_BASE  (0xfffe1000 + 0xec)
49
50 static int enable_1510_mode = 0;
51
52 struct omap_dma_lch {
53         int next_lch;
54         int dev_id;
55         u16 saved_csr;
56         u16 enabled_irqs;
57         const char *dev_name;
58         void (* callback)(int lch, u16 ch_status, void *data);
59         void *data;
60         long flags;
61 };
62
63 static int dma_chan_count;
64
65 static spinlock_t dma_chan_lock;
66 static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
67
68 static const u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
69         INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
70         INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
71         INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
72         INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
73         INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
74 };
75
76 #define REVISIT_24XX()          printk(KERN_ERR "FIXME: no %s on 24xx\n", \
77                                                 __FUNCTION__)
78
79 #ifdef CONFIG_ARCH_OMAP15XX
80 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
81 int omap_dma_in_1510_mode(void)
82 {
83         return enable_1510_mode;
84 }
85 #else
86 #define omap_dma_in_1510_mode()         0
87 #endif
88
89 #ifdef CONFIG_ARCH_OMAP1
90 static inline int get_gdma_dev(int req)
91 {
92         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
93         int shift = ((req - 1) % 5) * 6;
94
95         return ((omap_readl(reg) >> shift) & 0x3f) + 1;
96 }
97
98 static inline void set_gdma_dev(int req, int dev)
99 {
100         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
101         int shift = ((req - 1) % 5) * 6;
102         u32 l;
103
104         l = omap_readl(reg);
105         l &= ~(0x3f << shift);
106         l |= (dev - 1) << shift;
107         omap_writel(l, reg);
108 }
109 #else
110 #define set_gdma_dev(req, dev)  do {} while (0)
111 #endif
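
/*
 * The mapping above packs five 6-bit device fields into each 32-bit
 * functional-mux register. As a worked example, request line 7 uses
 * reg = OMAP_FUNC_MUX_ARM_BASE + ((7 - 1) / 5) * 4 = base + 4 and
 * shift = ((7 - 1) % 5) * 6 = 6, i.e. bits 11:6 of the second register.
 */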
112
113 static void clear_lch_regs(int lch)
114 {
115         int i;
116         u32 lch_base = OMAP_DMA_BASE + lch * 0x40;
117
118         for (i = 0; i < 0x2c; i += 2)
119                 omap_writew(0, lch_base + i);
120 }
121
122 void omap_set_dma_priority(int lch, int dst_port, int priority)
123 {
124         unsigned long reg;
125         u32 l;
126
127         if (cpu_class_is_omap1()) {
128                 switch (dst_port) {
129                 case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
130                         reg = OMAP_TC_OCPT1_PRIOR;
131                         break;
132                 case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
133                         reg = OMAP_TC_OCPT2_PRIOR;
134                         break;
135                 case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
136                         reg = OMAP_TC_EMIFF_PRIOR;
137                         break;
138                 case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
139                         reg = OMAP_TC_EMIFS_PRIOR;
140                         break;
141                 default:
142                         BUG();
143                         return;
144                 }
145                 l = omap_readl(reg);
146                 l &= ~(0xf << 8);
147                 l |= (priority & 0xf) << 8;
148                 omap_writel(l, reg);
149         }
150
151         if (cpu_class_is_omap2()) {
152                 if (priority)
153                         OMAP_DMA_CCR_REG(lch) |= (1 << 6);
154                 else
155                         OMAP_DMA_CCR_REG(lch) &= ~(1 << 6);
156         }
157 }
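
/*
 * Illustrative call (OMAP1): raise the priority of the EMIFF port for
 * transfers on a channel; only the low four bits of the priority are used:
 *
 *	omap_set_dma_priority(lch, OMAP_DMA_PORT_EMIFF, 15);
 *
 * On OMAP2 the dst_port argument is ignored and any non-zero priority
 * simply sets the channel's high-priority bit in CCR.
 */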
158
159 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
160                                   int frame_count, int sync_mode,
161                                   int dma_trigger, int src_or_dst_synch)
162 {
163         OMAP_DMA_CSDP_REG(lch) &= ~0x03;
164         OMAP_DMA_CSDP_REG(lch) |= data_type;
165
166         if (cpu_class_is_omap1()) {
167                 OMAP_DMA_CCR_REG(lch) &= ~(1 << 5);
168                 if (sync_mode == OMAP_DMA_SYNC_FRAME)
169                         OMAP_DMA_CCR_REG(lch) |= 1 << 5;
170
171                 OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2);
172                 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
173                         OMAP1_DMA_CCR2_REG(lch) |= 1 << 2;
174         }
175
176         if (cpu_class_is_omap2() && dma_trigger) {
177                 u32 val = OMAP_DMA_CCR_REG(lch);
178
179                 val &= ~(3 << 19);
180                 if (dma_trigger > 63)
181                         val |= 1 << 20;
182                 if (dma_trigger > 31)
183                         val |= 1 << 19;
184
185                 val &= ~(0x1f);
186                 val |= (dma_trigger & 0x1f);
187
188                 if (sync_mode & OMAP_DMA_SYNC_FRAME)
189                         val |= 1 << 5;
190                 else
191                         val &= ~(1 << 5);
192
193                 if (sync_mode & OMAP_DMA_SYNC_BLOCK)
194                         val |= 1 << 18;
195                 else
196                         val &= ~(1 << 18);
197
198                 if (src_or_dst_synch)
199                         val |= 1 << 24;         /* source synch */
200                 else
201                         val &= ~(1 << 24);      /* dest synch */
202
203                 OMAP_DMA_CCR_REG(lch) = val;
204         }
205
206         OMAP_DMA_CEN_REG(lch) = elem_count;
207         OMAP_DMA_CFN_REG(lch) = frame_count;
208 }
209
210 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
211 {
212         u16 w;
213
214         BUG_ON(omap_dma_in_1510_mode());
215
216         if (cpu_class_is_omap2()) {
217                 REVISIT_24XX();
218                 return;
219         }
220
221         w = OMAP1_DMA_CCR2_REG(lch) & ~0x03;
222         switch (mode) {
223         case OMAP_DMA_CONSTANT_FILL:
224                 w |= 0x01;
225                 break;
226         case OMAP_DMA_TRANSPARENT_COPY:
227                 w |= 0x02;
228                 break;
229         case OMAP_DMA_COLOR_DIS:
230                 break;
231         default:
232                 BUG();
233         }
234         OMAP1_DMA_CCR2_REG(lch) = w;
235
236         w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f;
237         /* Default is channel type 2D */
238         if (mode) {
239                 OMAP1_DMA_COLOR_L_REG(lch) = (u16)color;
240                 OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16);
241                 w |= 1;         /* Channel type G */
242         }
243         OMAP1_DMA_LCH_CTRL_REG(lch) = w;
244 }
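
/*
 * Illustrative use (OMAP1 only): fill the destination with a constant
 * colour, or switch colour processing off again:
 *
 *	omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x001f);
 *	omap_set_dma_color_mode(lch, OMAP_DMA_COLOR_DIS, 0);
 */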
245
246 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
247 {
248         if (cpu_class_is_omap2()) {
249                 OMAP_DMA_CSDP_REG(lch) &= ~(0x3 << 16);
250                 OMAP_DMA_CSDP_REG(lch) |= (mode << 16);
251         }
252 }
253
254 /* Note that src_port is only for omap1 */
255 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
256                              unsigned long src_start,
257                              int src_ei, int src_fi)
258 {
259         if (cpu_class_is_omap1()) {
260                 OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2);
261                 OMAP_DMA_CSDP_REG(lch) |= src_port << 2;
262         }
263
264         OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12);
265         OMAP_DMA_CCR_REG(lch) |= src_amode << 12;
266
267         if (cpu_class_is_omap1()) {
268                 OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16;
269                 OMAP1_DMA_CSSA_L_REG(lch) = src_start;
270         }
271
272         if (cpu_class_is_omap2())
273                 OMAP2_DMA_CSSA_REG(lch) = src_start;
274
275         OMAP_DMA_CSEI_REG(lch) = src_ei;
276         OMAP_DMA_CSFI_REG(lch) = src_fi;
277 }
278
279 void omap_set_dma_params(int lch, struct omap_dma_channel_params * params)
280 {
281         omap_set_dma_transfer_params(lch, params->data_type,
282                                      params->elem_count, params->frame_count,
283                                      params->sync_mode, params->trigger,
284                                      params->src_or_dst_synch);
285         omap_set_dma_src_params(lch, params->src_port,
286                                 params->src_amode, params->src_start,
287                                 params->src_ei, params->src_fi);
288
289         omap_set_dma_dest_params(lch, params->dst_port,
290                                  params->dst_amode, params->dst_start,
291                                  params->dst_ei, params->dst_fi);
292         if (params->read_prio || params->write_prio)
293                 omap_dma_set_prio_lch(lch, params->read_prio,
294                                       params->write_prio);
295 }
296
297 void omap_set_dma_src_index(int lch, int eidx, int fidx)
298 {
299         if (cpu_class_is_omap2()) {
300                 REVISIT_24XX();
301                 return;
302         }
303         OMAP_DMA_CSEI_REG(lch) = eidx;
304         OMAP_DMA_CSFI_REG(lch) = fidx;
305 }
306
307 void omap_set_dma_src_data_pack(int lch, int enable)
308 {
309         OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6);
310         if (enable)
311                 OMAP_DMA_CSDP_REG(lch) |= (1 << 6);
312 }
313
314 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
315 {
316         unsigned int burst = 0;
317         OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
318
319         switch (burst_mode) {
320         case OMAP_DMA_DATA_BURST_DIS:
321                 break;
322         case OMAP_DMA_DATA_BURST_4:
323                 if (cpu_class_is_omap2())
324                         burst = 0x1;
325                 else
326                         burst = 0x2;
327                 break;
328         case OMAP_DMA_DATA_BURST_8:
329                 if (cpu_class_is_omap2()) {
330                         burst = 0x2;
331                         break;
332                 }
333                 /* not supported by current hardware on OMAP1
334                  * w |= (0x03 << 7);
335                  * fall through
336                  */
337         case OMAP_DMA_DATA_BURST_16:
338                 if (cpu_class_is_omap2()) {
339                         burst = 0x3;
340                         break;
341                 }
342                 /* OMAP1 doesn't support burst 16
343                  * fall through
344                  */
345         default:
346                 BUG();
347         }
348         OMAP_DMA_CSDP_REG(lch) |= (burst << 7);
349 }
350
351 /* Note that dest_port is only for OMAP1 */
352 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
353                               unsigned long dest_start,
354                               int dst_ei, int dst_fi)
355 {
356         if (cpu_class_is_omap1()) {
357                 OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9);
358                 OMAP_DMA_CSDP_REG(lch) |= dest_port << 9;
359         }
360
361         OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14);
362         OMAP_DMA_CCR_REG(lch) |= dest_amode << 14;
363
364         if (cpu_class_is_omap1()) {
365                 OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16;
366                 OMAP1_DMA_CDSA_L_REG(lch) = dest_start;
367         }
368
369         if (cpu_class_is_omap2())
370                 OMAP2_DMA_CDSA_REG(lch) = dest_start;
371
372         OMAP_DMA_CDEI_REG(lch) = dst_ei;
373         OMAP_DMA_CDFI_REG(lch) = dst_fi;
374 }
375
376 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
377 {
378         if (cpu_class_is_omap2()) {
379                 REVISIT_24XX();
380                 return;
381         }
382         OMAP_DMA_CDEI_REG(lch) = eidx;
383         OMAP_DMA_CDFI_REG(lch) = fidx;
384 }
385
386 void omap_set_dma_dest_data_pack(int lch, int enable)
387 {
388         OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13);
389         if (enable)
390                 OMAP_DMA_CSDP_REG(lch) |= 1 << 13;
391 }
392
393 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
394 {
395         unsigned int burst = 0;
396         OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
397
398         switch (burst_mode) {
399         case OMAP_DMA_DATA_BURST_DIS:
400                 break;
401         case OMAP_DMA_DATA_BURST_4:
402                 if (cpu_class_is_omap2())
403                         burst = 0x1;
404                 else
405                         burst = 0x2;
406                 break;
407         case OMAP_DMA_DATA_BURST_8:
408                 if (cpu_class_is_omap2())
409                         burst = 0x2;
410                 else
411                         burst = 0x3;
412                 break;
413         case OMAP_DMA_DATA_BURST_16:
414                 if (cpu_class_is_omap2()) {
415                         burst = 0x3;
416                         break;
417                 }
418                 /* OMAP1 doesn't support burst 16
419                  * fall through
420                  */
421         default:
422                 printk(KERN_ERR "Invalid DMA burst mode\n");
423                 BUG();
424                 return;
425         }
426         OMAP_DMA_CSDP_REG(lch) |= (burst << 14);
427 }
428
429 static inline void omap_enable_channel_irq(int lch)
430 {
431         u32 status;
432
433         /* Clear CSR */
434         if (cpu_class_is_omap1())
435                 status = OMAP_DMA_CSR_REG(lch);
436         else if (cpu_class_is_omap2())
437                 OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
438
439         /* Enable some nice interrupts. */
440         OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
441
442         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
443 }
444
445 static void omap_disable_channel_irq(int lch)
446 {
447         if (cpu_class_is_omap2())
448                 OMAP_DMA_CICR_REG(lch) = 0;
449 }
450
451 void omap_enable_dma_irq(int lch, u16 bits)
452 {
453         dma_chan[lch].enabled_irqs |= bits;
454 }
455
456 void omap_disable_dma_irq(int lch, u16 bits)
457 {
458         dma_chan[lch].enabled_irqs &= ~bits;
459 }
460
461 static inline void enable_lnk(int lch)
462 {
463         if (cpu_class_is_omap1())
464                 OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14);
465
466         /* Set the ENABLE_LNK bits */
467         if (dma_chan[lch].next_lch != -1)
468                 OMAP_DMA_CLNK_CTRL_REG(lch) =
469                         dma_chan[lch].next_lch | (1 << 15);
470 }
471
472 static inline void disable_lnk(int lch)
473 {
474         /* Disable interrupts */
475         if (cpu_class_is_omap1()) {
476                 OMAP_DMA_CICR_REG(lch) = 0;
477                 /* Set the STOP_LNK bit */
478                 OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14;
479         }
480
481         if (cpu_class_is_omap2()) {
482                 omap_disable_channel_irq(lch);
483                 /* Clear the ENABLE_LNK bit */
484                 OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15);
485         }
486
487         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
488 }
489
490 static inline void omap2_enable_irq_lch(int lch)
491 {
492         u32 val;
493
494         if (!cpu_class_is_omap2())
495                 return;
496
497         val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
498         val |= 1 << lch;
499         omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
500 }
501
502 int omap_request_dma(int dev_id, const char *dev_name,
503                      void (* callback)(int lch, u16 ch_status, void *data),
504                      void *data, int *dma_ch_out)
505 {
506         int ch, free_ch = -1;
507         unsigned long flags;
508         struct omap_dma_lch *chan;
509
510         spin_lock_irqsave(&dma_chan_lock, flags);
511         for (ch = 0; ch < dma_chan_count; ch++) {
512                 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
513                         free_ch = ch;
514                         if (dev_id == 0)
515                                 break;
516                 }
517         }
518         if (free_ch == -1) {
519                 spin_unlock_irqrestore(&dma_chan_lock, flags);
520                 return -EBUSY;
521         }
522         chan = dma_chan + free_ch;
523         chan->dev_id = dev_id;
524
525         if (cpu_class_is_omap1())
526                 clear_lch_regs(free_ch);
527
528         if (cpu_class_is_omap2())
529                 omap_clear_dma(free_ch);
530
531         spin_unlock_irqrestore(&dma_chan_lock, flags);
532
533         chan->dev_name = dev_name;
534         chan->callback = callback;
535         chan->data = data;
536         chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
537
538         if (cpu_class_is_omap1())
539                 chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
540         else if (cpu_class_is_omap2())
541                 chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
542                         OMAP2_DMA_TRANS_ERR_IRQ;
543
544         if (cpu_is_omap16xx()) {
545                 /* If the sync device is set, configure it dynamically. */
546                 if (dev_id != 0) {
547                         set_gdma_dev(free_ch + 1, dev_id);
548                         dev_id = free_ch + 1;
549                 }
550                 /* Disable the 1510 compatibility mode and set the sync device
551                  * id. */
552                 OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10);
553         } else if (cpu_is_omap730() || cpu_is_omap15xx()) {
554                 OMAP_DMA_CCR_REG(free_ch) = dev_id;
555         }
556
557         if (cpu_class_is_omap2()) {
558                 omap2_enable_irq_lch(free_ch);
559
560                 omap_enable_channel_irq(free_ch);
561                 /* Clear the CSR register and IRQ status register */
562                 OMAP_DMA_CSR_REG(free_ch) = OMAP2_DMA_CSR_CLEAR_MASK;
563                 omap_writel(1 << free_ch, OMAP_DMA4_IRQSTATUS_L0);
564         }
565
566         *dma_ch_out = free_ch;
567
568         return 0;
569 }
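
/*
 * A minimal usage sketch (illustrative only): request a channel without
 * hardware synchronization (dev_id 0), program a 32-bit memory-to-memory
 * copy of "count" elements and start it. "example_dma_callback" (with the
 * callback signature above), "src", "dst" and "count" are hypothetical;
 * error handling is abbreviated.
 *
 *	int lch;
 *
 *	if (omap_request_dma(0, "example", example_dma_callback,
 *			     NULL, &lch) != 0)
 *		return;
 *	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32, count, 1,
 *				     OMAP_DMA_SYNC_ELEMENT, 0, 0);
 *	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
 *				OMAP_DMA_AMODE_POST_INC, src, 0, 0);
 *	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_EMIFF,
 *				 OMAP_DMA_AMODE_POST_INC, dst, 0, 0);
 *	omap_start_dma(lch);
 *
 * When the transfer completes, the callback runs and the channel can be
 * released with omap_free_dma(lch).
 */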
570
571 void omap_free_dma(int lch)
572 {
573         unsigned long flags;
574
575         spin_lock_irqsave(&dma_chan_lock, flags);
576         if (dma_chan[lch].dev_id == -1) {
577                 printk(KERN_ERR "omap_dma: trying to free non-allocated DMA channel %d\n",
578                        lch);
579                 spin_unlock_irqrestore(&dma_chan_lock, flags);
580                 return;
581         }
582         dma_chan[lch].dev_id = -1;
583         dma_chan[lch].next_lch = -1;
584         dma_chan[lch].callback = NULL;
585         spin_unlock_irqrestore(&dma_chan_lock, flags);
586
587         if (cpu_class_is_omap1()) {
588                 /* Disable all DMA interrupts for the channel. */
589                 OMAP_DMA_CICR_REG(lch) = 0;
590                 /* Make sure the DMA transfer is stopped. */
591                 OMAP_DMA_CCR_REG(lch) = 0;
592         }
593
594         if (cpu_class_is_omap2()) {
595                 u32 val;
596                 /* Disable interrupts */
597                 val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
598                 val &= ~(1 << lch);
599                 omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
600
601                 /* Clear the CSR register and IRQ status register */
602                 OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
603                 omap_writel(1 << lch, OMAP_DMA4_IRQSTATUS_L0);
604
605                 /* Disable all DMA interrupts for the channel. */
606                 OMAP_DMA_CICR_REG(lch) = 0;
607
608                 /* Make sure the DMA transfer is stopped. */
609                 OMAP_DMA_CCR_REG(lch) = 0;
610                 omap_clear_dma(lch);
611         }
612 }
613
614 /**
615  * @brief omap_dma_set_global_params : Set global priority settings for dma
616  *
617  * @param arb_rate
618  * @param max_fifo_depth
619  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
620  *                                                  DMA_THREAD_RESERVE_ONET
621  *                                                  DMA_THREAD_RESERVE_TWOT
622  *                                                  DMA_THREAD_RESERVE_THREET
623  */
624 void
625 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
626 {
627         u32 reg;
628
629         if (!cpu_class_is_omap2()) {
630                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __FUNCTION__);
631                 return;
632         }
633
634         if (arb_rate == 0)
635                 arb_rate = 1;
636
637         reg = (arb_rate & 0xff) << 16;
638         reg |= (0xff & max_fifo_depth);
639
640         omap_writel(reg, OMAP_DMA4_GCR_REG);
641 }
642 EXPORT_SYMBOL(omap_dma_set_global_params);
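
/*
 * Illustrative call, mirroring the default applied in omap_init_dma()
 * below for 2430/34xx:
 *
 *	omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
 *			DMA_DEFAULT_FIFO_DEPTH, 0);
 */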
643
644 /**
645  * @brief omap_dma_set_prio_lch : Set channel-wise priority settings
646  *
647  * @param lch
648  * @param read_prio - Read priority
649  * @param write_prio - Write priority
650  * Both of the above can be set with one of the following values :
651  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
652  */
653 int
654 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
655                       unsigned char write_prio)
656 {
657         u32 w;
658
659         if (unlikely((lch < 0 || lch >= OMAP_LOGICAL_DMA_CH_COUNT))) {
660                 printk(KERN_ERR "Invalid channel id\n");
661                 return -EINVAL;
662         }
663         w = OMAP_DMA_CCR_REG(lch);
664         w &= ~((1 << 6) | (1 << 26));
665         if (cpu_is_omap2430() || cpu_is_omap34xx())
666                 w |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
667         else
668                 w |= ((read_prio & 0x1) << 6);
669
670         OMAP_DMA_CCR_REG(lch) = w;
671         return 0;
672 }
673 EXPORT_SYMBOL(omap_dma_set_prio_lch);
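
/*
 * Illustrative call: give a channel a high read priority and a low write
 * priority (the write priority only takes effect on 2430/34xx):
 *
 *	omap_dma_set_prio_lch(lch, DMA_CH_PRIO_HIGH, DMA_CH_PRIO_LOW);
 */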
674
675 /*
676  * Clears any DMA state so the DMA engine is ready to restart with new buffers
677  * through omap_start_dma(). Any buffers in flight are discarded.
678  */
679 void omap_clear_dma(int lch)
680 {
681         unsigned long flags;
682
683         local_irq_save(flags);
684
685         if (cpu_class_is_omap1()) {
686                 int status;
687                 OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
688
689                 /* Clear pending interrupts */
690                 status = OMAP_DMA_CSR_REG(lch);
691         }
692
693         if (cpu_class_is_omap2()) {
694                 int i;
695                 u32 lch_base = OMAP_DMA4_BASE + lch * 0x60 + 0x80;
696                 for (i = 0; i < 0x44; i += 4)
697                         omap_writel(0, lch_base + i);
698         }
699
700         local_irq_restore(flags);
701 }
702
703 void omap_start_dma(int lch)
704 {
705         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
706                 int next_lch, cur_lch;
707                 char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
708
709                 /* Set the link register of the first channel */
710                 enable_lnk(lch);
711
712                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
713                 dma_chan_link_map[lch] = 1;
714                 cur_lch = dma_chan[lch].next_lch;
715                 do {
716                         next_lch = dma_chan[cur_lch].next_lch;
717
718                         /* The loop case: we've been here already */
719                         if (dma_chan_link_map[cur_lch])
720                                 break;
721                         /* Mark the current channel */
722                         dma_chan_link_map[cur_lch] = 1;
723
724                         enable_lnk(cur_lch);
725                         omap_enable_channel_irq(cur_lch);
726
727                         cur_lch = next_lch;
728                 } while (next_lch != -1);
729         } else if (cpu_class_is_omap2()) {
730                 /* Errata: Need to write lch even if not using chaining */
731                 OMAP_DMA_CLNK_CTRL_REG(lch) = lch;
732         }
733
734         omap_enable_channel_irq(lch);
735
736         /* Errata: On ES2.0 BUFFERING disable must be set.
737          * This will always fail on ES1.0 */
738         if (cpu_is_omap24xx()) {
739                 OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
740         }
741
742         OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
743
744         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
745 }
746
747 void omap_stop_dma(int lch)
748 {
749         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
750                 int next_lch, cur_lch = lch;
751                 char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
752
753                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
754                 do {
755                         /* The loop case: we've been here already */
756                         if (dma_chan_link_map[cur_lch])
757                                 break;
758                         /* Mark the current channel */
759                         dma_chan_link_map[cur_lch] = 1;
760
761                         disable_lnk(cur_lch);
762
763                         next_lch = dma_chan[cur_lch].next_lch;
764                         cur_lch = next_lch;
765                 } while (next_lch != -1);
766
767                 return;
768         }
769
770         /* Disable all interrupts on the channel */
771         if (cpu_class_is_omap1())
772                 OMAP_DMA_CICR_REG(lch) = 0;
773
774         OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
775         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
776 }
777
778 /*
779  * Allows changing the DMA callback function or data. This may be needed if
780  * the driver shares a single DMA channel for multiple dma triggers.
781  */
782 int omap_set_dma_callback(int lch,
783                           void (* callback)(int lch, u16 ch_status, void *data),
784                           void *data)
785 {
786         unsigned long flags;
787
788         if (lch < 0)
789                 return -ENODEV;
790
791         spin_lock_irqsave(&dma_chan_lock, flags);
792         if (dma_chan[lch].dev_id == -1) {
793                 printk(KERN_ERR "DMA callback not set for free channel\n");
794                 spin_unlock_irqrestore(&dma_chan_lock, flags);
795                 return -EINVAL;
796         }
797         dma_chan[lch].callback = callback;
798         dma_chan[lch].data = data;
799         spin_unlock_irqrestore(&dma_chan_lock, flags);
800
801         return 0;
802 }
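
/*
 * Illustrative use: a driver multiplexing one channel between receive and
 * transmit transfers could switch handlers between them (rx_done, tx_done
 * and the data pointers are hypothetical):
 *
 *	omap_set_dma_callback(lch, rx_done, rx_data);
 *	...
 *	omap_set_dma_callback(lch, tx_done, tx_data);
 */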
803
804 /*
805  * Returns current physical source address for the given DMA channel.
806  * If the channel is running, the caller must disable interrupts prior to
807  * calling this function and process the returned value before re-enabling
808  * interrupts, to prevent races with the interrupt handler. Note that in
809  * continuous mode there is a chance of a CSSA_L register overflow between
810  * the two reads, resulting in an incorrect return value.
811  */
812 dma_addr_t omap_get_dma_src_pos(int lch)
813 {
814         dma_addr_t offset = 0;
815
816         if (cpu_class_is_omap1())
817                 offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) |
818                                        (OMAP1_DMA_CSSA_U_REG(lch) << 16));
819
820         if (cpu_class_is_omap2())
821                 offset = OMAP_DMA_CSAC_REG(lch);
822
823         return offset;
824 }
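
/*
 * Illustrative use of the rule above: sample the source position of a
 * running channel with interrupts disabled and consume the value before
 * re-enabling them:
 *
 *	unsigned long flags;
 *	dma_addr_t pos;
 *
 *	local_irq_save(flags);
 *	pos = omap_get_dma_src_pos(lch);
 *	... use "pos" here ...
 *	local_irq_restore(flags);
 */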
825
826 /*
827  * Returns current physical destination address for the given DMA channel.
828  * If the channel is running, the caller must disable interrupts prior to
829  * calling this function and process the returned value before re-enabling
830  * interrupts, to prevent races with the interrupt handler. Note that in
831  * continuous mode there is a chance of a CDSA_L register overflow between
832  * the two reads, resulting in an incorrect return value.
833  */
834 dma_addr_t omap_get_dma_dst_pos(int lch)
835 {
836         dma_addr_t offset = 0;
837
838         if (cpu_class_is_omap1())
839                 offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) |
840                                        (OMAP1_DMA_CDSA_U_REG(lch) << 16));
841
842         if (cpu_class_is_omap2())
843                 offset = OMAP_DMA_CDAC_REG(lch);
844
845         return offset;
846 }
847
848 /*
849  * Returns the current source transfer count for the given DMA channel.
850  * Can be used to monitor the progress of a transfer inside a block.
851  * It must be called with interrupts disabled.
852  */
853 int omap_get_dma_src_addr_counter(int lch)
854 {
855         return (dma_addr_t) OMAP_DMA_CSAC_REG(lch);
856 }
857
858 int omap_dma_running(void)
859 {
860         int lch;
861
862         /* Check if LCD DMA is running */
863         if (cpu_is_omap16xx())
864                 if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
865                         return 1;
866
867         for (lch = 0; lch < dma_chan_count; lch++)
868                 if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN)
869                         return 1;
870
871         return 0;
872 }
873
874 /*
875  * lch_queue DMA will start right after lch_head one is finished.
876  * For this DMA link to start, you still need to start (see omap_start_dma)
877  * the first one. That will fire up the entire queue.
878  */
879 void omap_dma_link_lch(int lch_head, int lch_queue)
880 {
881         if (omap_dma_in_1510_mode()) {
882                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
883                 BUG();
884                 return;
885         }
886
887         if ((dma_chan[lch_head].dev_id == -1) ||
888             (dma_chan[lch_queue].dev_id == -1)) {
889                 printk(KERN_ERR "omap_dma: trying to link "
890                        "non-requested channels\n");
891                 dump_stack();
892         }
893
894         dma_chan[lch_head].next_lch = lch_queue;
895 }
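
/*
 * Illustrative chaining sketch: make lch2 run after lch1 and kick off the
 * whole chain by starting the head channel (both channels are assumed to
 * have been requested and programmed already):
 *
 *	omap_dma_link_lch(lch1, lch2);
 *	omap_start_dma(lch1);
 *
 * Stop the chain with omap_stop_dma(lch1) before calling
 * omap_dma_unlink_lch(lch1, lch2).
 */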
896
897 /*
898  * Once the DMA queue is stopped, we can destroy it.
899  */
900 void omap_dma_unlink_lch(int lch_head, int lch_queue)
901 {
902         if (omap_dma_in_1510_mode()) {
903                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
904                 BUG();
905                 return;
906         }
907
908         if (dma_chan[lch_head].next_lch != lch_queue ||
909             dma_chan[lch_head].next_lch == -1) {
910                 printk(KERN_ERR "omap_dma: trying to unlink "
911                        "non-linked channels\n");
912                 dump_stack();
913         }
914
916         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
917             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
918                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
919                        "before unlinking\n");
920                 dump_stack();
921         }
922
923         dma_chan[lch_head].next_lch = -1;
924 }
925
926 /*----------------------------------------------------------------------------*/
927
928 #ifdef CONFIG_ARCH_OMAP1
929
930 static int omap1_dma_handle_ch(int ch)
931 {
932         u16 csr;
933
934         if (enable_1510_mode && ch >= 6) {
935                 csr = dma_chan[ch].saved_csr;
936                 dma_chan[ch].saved_csr = 0;
937         } else
938                 csr = OMAP_DMA_CSR_REG(ch);
939         if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
940                 dma_chan[ch + 6].saved_csr = csr >> 7;
941                 csr &= 0x7f;
942         }
943         if ((csr & 0x3f) == 0)
944                 return 0;
945         if (unlikely(dma_chan[ch].dev_id == -1)) {
946                 printk(KERN_WARNING "Spurious interrupt from DMA channel "
947                        "%d (CSR %04x)\n", ch, csr);
948                 return 0;
949         }
950         if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
951                 printk(KERN_WARNING "DMA timeout with device %d\n",
952                        dma_chan[ch].dev_id);
953         if (unlikely(csr & OMAP_DMA_DROP_IRQ))
954                 printk(KERN_WARNING "DMA synchronization event drop occurred "
955                        "with device %d\n", dma_chan[ch].dev_id);
956         if (likely(csr & OMAP_DMA_BLOCK_IRQ))
957                 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
958         if (likely(dma_chan[ch].callback != NULL))
959                 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
960         return 1;
961 }
962
963 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
964 {
965         int ch = ((int) dev_id) - 1;
966         int handled = 0;
967
968         for (;;) {
969                 int handled_now = 0;
970
971                 handled_now += omap1_dma_handle_ch(ch);
972                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
973                         handled_now += omap1_dma_handle_ch(ch + 6);
974                 if (!handled_now)
975                         break;
976                 handled += handled_now;
977         }
978
979         return handled ? IRQ_HANDLED : IRQ_NONE;
980 }
981
982 #else
983 #define omap1_dma_irq_handler   NULL
984 #endif
985
986 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
987
988 static int omap2_dma_handle_ch(int ch)
989 {
990         u32 status = OMAP_DMA_CSR_REG(ch);
991
992         if (!status) {
993                 if (printk_ratelimit())
994                         printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", ch);
995                 return 0;
996         }
997         if (unlikely(dma_chan[ch].dev_id == -1)) {
998                 if (printk_ratelimit())
999                         printk(KERN_WARNING "IRQ %04x for non-allocated DMA "
1000                                         "channel %d\n", status, ch);
1001                 return 0;
1002         }
1003         if (unlikely(status & OMAP_DMA_DROP_IRQ))
1004                 printk(KERN_INFO
1005                        "DMA synchronization event drop occurred with device "
1006                        "%d\n", dma_chan[ch].dev_id);
1007         if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
1008                 printk(KERN_INFO "DMA transaction error with device %d\n",
1009                        dma_chan[ch].dev_id);
1010         if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1011                 printk(KERN_INFO "DMA secure error with device %d\n",
1012                        dma_chan[ch].dev_id);
1013         if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1014                 printk(KERN_INFO "DMA misaligned error with device %d\n",
1015                        dma_chan[ch].dev_id);
1016
1017         OMAP_DMA_CSR_REG(ch) = OMAP2_DMA_CSR_CLEAR_MASK;
1018         omap_writel(1 << ch, OMAP_DMA4_IRQSTATUS_L0);
1019
1020         if (likely(dma_chan[ch].callback != NULL))
1021                 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1022
1023         return 0;
1024 }
1025
1026 /* STATUS register count is from 1-32 while ours is 0-31 */
1027 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1028 {
1029         u32 val;
1030         int i;
1031
1032         val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
1033         if (val == 0) {
1034                 if (printk_ratelimit())
1035                         printk(KERN_WARNING "Spurious DMA IRQ\n");
1036                 return IRQ_HANDLED;
1037         }
1038         for (i = 0; i < OMAP_LOGICAL_DMA_CH_COUNT && val != 0; i++) {
1039                 if (val & 1)
1040                         omap2_dma_handle_ch(i);
1041                 val >>= 1;
1042         }
1043
1044         return IRQ_HANDLED;
1045 }
1046
1047 static struct irqaction omap24xx_dma_irq = {
1048         .name = "DMA",
1049         .handler = omap2_dma_irq_handler,
1050         .flags = IRQF_DISABLED
1051 };
1052
1053 #else
1054 static struct irqaction omap24xx_dma_irq;
1055 #endif
1056
1057 /*----------------------------------------------------------------------------*/
1058
1059 static struct lcd_dma_info {
1060         spinlock_t lock;
1061         int reserved;
1062         void (* callback)(u16 status, void *data);
1063         void *cb_data;
1064
1065         int active;
1066         unsigned long addr, size;
1067         int rotate, data_type, xres, yres;
1068         int vxres;
1069         int mirror;
1070         int xscale, yscale;
1071         int ext_ctrl;
1072         int src_port;
1073         int single_transfer;
1074 } lcd_dma;
1075
1076 void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
1077                          int data_type)
1078 {
1079         lcd_dma.addr = addr;
1080         lcd_dma.data_type = data_type;
1081         lcd_dma.xres = fb_xres;
1082         lcd_dma.yres = fb_yres;
1083 }
1084
1085 void omap_set_lcd_dma_src_port(int port)
1086 {
1087         lcd_dma.src_port = port;
1088 }
1089
1090 void omap_set_lcd_dma_ext_controller(int external)
1091 {
1092         lcd_dma.ext_ctrl = external;
1093 }
1094
1095 void omap_set_lcd_dma_single_transfer(int single)
1096 {
1097         lcd_dma.single_transfer = single;
1098 }
1099
1100
1101 void omap_set_lcd_dma_b1_rotation(int rotate)
1102 {
1103         if (omap_dma_in_1510_mode()) {
1104                 printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
1105                 BUG();
1106                 return;
1107         }
1108         lcd_dma.rotate = rotate;
1109 }
1110
1111 void omap_set_lcd_dma_b1_mirror(int mirror)
1112 {
1113         if (omap_dma_in_1510_mode()) {
1114                 printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
1115                 BUG();
1116         }
1117         lcd_dma.mirror = mirror;
1118 }
1119
1120 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
1121 {
1122         if (omap_dma_in_1510_mode()) {
1123                 printk(KERN_ERR "DMA virtual resolution is not supported "
1124                                 "in 1510 mode\n");
1125                 BUG();
1126         }
1127         lcd_dma.vxres = vxres;
1128 }
1129
1130 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
1131 {
1132         if (omap_dma_in_1510_mode()) {
1133                 printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
1134                 BUG();
1135         }
1136         lcd_dma.xscale = xscale;
1137         lcd_dma.yscale = yscale;
1138 }
1139
1140 static void set_b1_regs(void)
1141 {
1142         unsigned long top, bottom;
1143         int es;
1144         u16 w;
1145         unsigned long en, fn;
1146         long ei, fi;
1147         unsigned long vxres;
1148         unsigned int xscale, yscale;
1149
1150         switch (lcd_dma.data_type) {
1151         case OMAP_DMA_DATA_TYPE_S8:
1152                 es = 1;
1153                 break;
1154         case OMAP_DMA_DATA_TYPE_S16:
1155                 es = 2;
1156                 break;
1157         case OMAP_DMA_DATA_TYPE_S32:
1158                 es = 4;
1159                 break;
1160         default:
1161                 BUG();
1162                 return;
1163         }
1164
1165         vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres;
1166         xscale = lcd_dma.xscale ? lcd_dma.xscale : 1;
1167         yscale = lcd_dma.yscale ? lcd_dma.yscale : 1;
1168         BUG_ON(vxres < lcd_dma.xres);
1169 #define PIXADDR(x,y) (lcd_dma.addr + ((y) * vxres * yscale + (x) * xscale) * es)
1170 #define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1)
1171         switch (lcd_dma.rotate) {
1172         case 0:
1173                 if (!lcd_dma.mirror) {
1174                         top = PIXADDR(0, 0);
1175                         bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1176                         /* 1510 DMA requires the bottom address to be 2 more
1177                          * than the actual last memory access location. */
1178                         if (omap_dma_in_1510_mode() &&
1179                             lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
1180                                 bottom += 2;
1181                         ei = PIXSTEP(0, 0, 1, 0);
1182                         fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1);
1183                 } else {
1184                         top = PIXADDR(lcd_dma.xres - 1, 0);
1185                         bottom = PIXADDR(0, lcd_dma.yres - 1);
1186                         ei = PIXSTEP(1, 0, 0, 0);
1187                         fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1);
1188                 }
1189                 en = lcd_dma.xres;
1190                 fn = lcd_dma.yres;
1191                 break;
1192         case 90:
1193                 if (!lcd_dma.mirror) {
1194                         top = PIXADDR(0, lcd_dma.yres - 1);
1195                         bottom = PIXADDR(lcd_dma.xres - 1, 0);
1196                         ei = PIXSTEP(0, 1, 0, 0);
1197                         fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1);
1198                 } else {
1199                         top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1200                         bottom = PIXADDR(0, 0);
1201                         ei = PIXSTEP(0, 1, 0, 0);
1202                         fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1);
1203                 }
1204                 en = lcd_dma.yres;
1205                 fn = lcd_dma.xres;
1206                 break;
1207         case 180:
1208                 if (!lcd_dma.mirror) {
1209                         top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1210                         bottom = PIXADDR(0, 0);
1211                         ei = PIXSTEP(1, 0, 0, 0);
1212                         fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0);
1213                 } else {
1214                         top = PIXADDR(0, lcd_dma.yres - 1);
1215                         bottom = PIXADDR(lcd_dma.xres - 1, 0);
1216                         ei = PIXSTEP(0, 0, 1, 0);
1217                         fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0);
1218                 }
1219                 en = lcd_dma.xres;
1220                 fn = lcd_dma.yres;
1221                 break;
1222         case 270:
1223                 if (!lcd_dma.mirror) {
1224                         top = PIXADDR(lcd_dma.xres - 1, 0);
1225                         bottom = PIXADDR(0, lcd_dma.yres - 1);
1226                         ei = PIXSTEP(0, 0, 0, 1);
1227                         fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0);
1228                 } else {
1229                         top = PIXADDR(0, 0);
1230                         bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1231                         ei = PIXSTEP(0, 0, 0, 1);
1232                         fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0);
1233                 }
1234                 en = lcd_dma.yres;
1235                 fn = lcd_dma.xres;
1236                 break;
1237         default:
1238                 BUG();
1239                 return; /* Suppress warning about uninitialized vars */
1240         }
1241
1242         if (omap_dma_in_1510_mode()) {
1243                 omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
1244                 omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
1245                 omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
1246                 omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
1247
1248                 return;
1249         }
1250
1251         /* 1610 regs */
1252         omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
1253         omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
1254         omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
1255         omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
1256
1257         omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
1258         omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
1259
1260         w = omap_readw(OMAP1610_DMA_LCD_CSDP);
1261         w &= ~0x03;
1262         w |= lcd_dma.data_type;
1263         omap_writew(w, OMAP1610_DMA_LCD_CSDP);
1264
1265         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1266         /* Always set the source port as SDRAM for now */
1267         w &= ~(0x03 << 6);
1268         if (lcd_dma.callback != NULL)
1269                 w |= 1 << 1;            /* Block interrupt enable */
1270         else
1271                 w &= ~(1 << 1);
1272         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1273
1274         if (!(lcd_dma.rotate || lcd_dma.mirror ||
1275               lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale))
1276                 return;
1277
1278         w = omap_readw(OMAP1610_DMA_LCD_CCR);
1279         /* Set the double-indexed addressing mode */
1280         w |= (0x03 << 12);
1281         omap_writew(w, OMAP1610_DMA_LCD_CCR);
1282
1283         omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
1284         omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
1285         omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
1286 }
1287
1288 static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id)
1289 {
1290         u16 w;
1291
1292         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1293         if (unlikely(!(w & (1 << 3)))) {
1294                 printk(KERN_WARNING "Spurious LCD DMA IRQ\n");
1295                 return IRQ_NONE;
1296         }
1297         /* Ack the IRQ */
1298         w |= (1 << 3);
1299         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1300         lcd_dma.active = 0;
1301         if (lcd_dma.callback != NULL)
1302                 lcd_dma.callback(w, lcd_dma.cb_data);
1303
1304         return IRQ_HANDLED;
1305 }
1306
1307 int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
1308                          void *data)
1309 {
1310         spin_lock_irq(&lcd_dma.lock);
1311         if (lcd_dma.reserved) {
1312                 spin_unlock_irq(&lcd_dma.lock);
1313                 printk(KERN_ERR "LCD DMA channel already reserved\n");
1314                 BUG();
1315                 return -EBUSY;
1316         }
1317         lcd_dma.reserved = 1;
1318         spin_unlock_irq(&lcd_dma.lock);
1319         lcd_dma.callback = callback;
1320         lcd_dma.cb_data = data;
1321         lcd_dma.active = 0;
1322         lcd_dma.single_transfer = 0;
1323         lcd_dma.rotate = 0;
1324         lcd_dma.vxres = 0;
1325         lcd_dma.mirror = 0;
1326         lcd_dma.xscale = 0;
1327         lcd_dma.yscale = 0;
1328         lcd_dma.ext_ctrl = 0;
1329         lcd_dma.src_port = 0;
1330
1331         return 0;
1332 }
1333
1334 void omap_free_lcd_dma(void)
1335 {
1336         spin_lock(&lcd_dma.lock);
1337         if (!lcd_dma.reserved) {
1338                 spin_unlock(&lcd_dma.lock);
1339                 printk(KERN_ERR "LCD DMA is not reserved\n");
1340                 BUG();
1341                 return;
1342         }
1343         if (!enable_1510_mode)
1344                 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
1345                             OMAP1610_DMA_LCD_CCR);
1346         lcd_dma.reserved = 0;
1347         spin_unlock(&lcd_dma.lock);
1348 }
1349
1350 void omap_enable_lcd_dma(void)
1351 {
1352         u16 w;
1353
1354         /* Set the Enable bit only if an external controller is
1355          * connected. Otherwise the OMAP internal controller will
1356          * start the transfer when it gets enabled.
1357          */
1358         if (enable_1510_mode || !lcd_dma.ext_ctrl)
1359                 return;
1360
1361         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1362         w |= 1 << 8;
1363         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1364
1365         lcd_dma.active = 1;
1366
1367         w = omap_readw(OMAP1610_DMA_LCD_CCR);
1368         w |= 1 << 7;
1369         omap_writew(w, OMAP1610_DMA_LCD_CCR);
1370 }
1371
1372 void omap_setup_lcd_dma(void)
1373 {
1374         BUG_ON(lcd_dma.active);
1375         if (!enable_1510_mode) {
1376                 /* Set some reasonable defaults */
1377                 omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
1378                 omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
1379                 omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
1380         }
1381         set_b1_regs();
1382         if (!enable_1510_mode) {
1383                 u16 w;
1384
1385                 w = omap_readw(OMAP1610_DMA_LCD_CCR);
1386                 /* If DMA was already active, set the end_prog bit to have
1387                  * the programmed register set loaded into the active
1388                  * register set.
1389                  */
1390                 w |= 1 << 11;           /* End_prog */
1391                 if (!lcd_dma.single_transfer)
1392                         w |= (3 << 8);  /* Auto_init, repeat */
1393                 omap_writew(w, OMAP1610_DMA_LCD_CCR);
1394         }
1395 }
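
/*
 * Illustrative LCD DMA sequence for an OMAP1 frame-buffer driver using an
 * external LCD controller (fb_paddr, xres, yres and lcd_dma_done are
 * hypothetical):
 *
 *	omap_request_lcd_dma(lcd_dma_done, NULL);
 *	omap_set_lcd_dma_b1(fb_paddr, xres, yres, OMAP_DMA_DATA_TYPE_S16);
 *	omap_set_lcd_dma_ext_controller(1);
 *	omap_setup_lcd_dma();
 *	omap_enable_lcd_dma();
 *
 * With the OMAP internal LCD controller, omap_enable_lcd_dma() is a no-op
 * and the transfer starts when the controller itself is enabled (see the
 * comment in omap_enable_lcd_dma() below).
 */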
1396
1397 void omap_stop_lcd_dma(void)
1398 {
1399         u16 w;
1400
1401         lcd_dma.active = 0;
1402         if (enable_1510_mode || !lcd_dma.ext_ctrl)
1403                 return;
1404
1405         w = omap_readw(OMAP1610_DMA_LCD_CCR);
1406         w &= ~(1 << 7);
1407         omap_writew(w, OMAP1610_DMA_LCD_CCR);
1408
1409         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1410         w &= ~(1 << 8);
1411         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1412 }
1413
1414 /*----------------------------------------------------------------------------*/
1415
1416 static int __init omap_init_dma(void)
1417 {
1418         int ch, r;
1419
1420         if (cpu_is_omap15xx()) {
1421                 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
1422                 dma_chan_count = 9;
1423                 enable_1510_mode = 1;
1424         } else if (cpu_is_omap16xx() || cpu_is_omap730()) {
1425                 printk(KERN_INFO "OMAP DMA hardware version %d\n",
1426                        omap_readw(OMAP_DMA_HW_ID));
1427                 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
1428                        (omap_readw(OMAP_DMA_CAPS_0_U) << 16) |
1429                        omap_readw(OMAP_DMA_CAPS_0_L),
1430                        (omap_readw(OMAP_DMA_CAPS_1_U) << 16) |
1431                        omap_readw(OMAP_DMA_CAPS_1_L),
1432                        omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3),
1433                        omap_readw(OMAP_DMA_CAPS_4));
1434                 if (!enable_1510_mode) {
1435                         u16 w;
1436
1437                         /* Disable OMAP 3.0/3.1 compatibility mode. */
1438                         w = omap_readw(OMAP_DMA_GSCR);
1439                         w |= 1 << 3;
1440                         omap_writew(w, OMAP_DMA_GSCR);
1441                         dma_chan_count = 16;
1442                 } else
1443                         dma_chan_count = 9;
1444                 if (cpu_is_omap16xx()) {
1445                         u16 w;
1446
1447                         /* clear this bit; leaving it set would prevent OMAP sleep */
1448                         w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1449                         w &= ~(1 << 8);
1450                         omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1451                 }
1452         } else if (cpu_class_is_omap2()) {
1453                 u8 revision = omap_readb(OMAP_DMA4_REVISION);
1454                 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
1455                        revision >> 4, revision & 0xf);
1456                 dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
1457         } else {
1458                 dma_chan_count = 0;
1459                 return 0;
1460         }
1461
1462         memset(&lcd_dma, 0, sizeof(lcd_dma));
1463         spin_lock_init(&lcd_dma.lock);
1464         spin_lock_init(&dma_chan_lock);
1465         memset(&dma_chan, 0, sizeof(dma_chan));
1466
1467         for (ch = 0; ch < dma_chan_count; ch++) {
1468                 omap_clear_dma(ch);
1469                 dma_chan[ch].dev_id = -1;
1470                 dma_chan[ch].next_lch = -1;
1471
1472                 if (ch >= 6 && enable_1510_mode)
1473                         continue;
1474
1475                 if (cpu_class_is_omap1()) {
1476                         /* request_irq() doesn't like dev_id (i.e. ch) being
1477                          * zero, so we have to kludge around this. */
1478                         r = request_irq(omap1_dma_irq[ch],
1479                                         omap1_dma_irq_handler, 0, "DMA",
1480                                         (void *) (ch + 1));
1481                         if (r != 0) {
1482                                 int i;
1483
1484                                 printk(KERN_ERR "unable to request IRQ %d "
1485                                        "for DMA (error %d)\n",
1486                                        omap1_dma_irq[ch], r);
1487                                 for (i = 0; i < ch; i++)
1488                                         free_irq(omap1_dma_irq[i],
1489                                                  (void *) (i + 1));
1490                                 return r;
1491                         }
1492                 }
1493         }
1494
1495         if (cpu_is_omap2430() || cpu_is_omap34xx())
1496                 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
1497                                 DMA_DEFAULT_FIFO_DEPTH, 0);
1498
1499         if (cpu_class_is_omap2())
1500                 setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
1501
1502         /* FIXME: Update LCD DMA to work on 24xx */
1503         if (cpu_class_is_omap1()) {
1504                 r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
1505                                 "LCD DMA", NULL);
1506                 if (r != 0) {
1507                         int i;
1508
1509                         printk(KERN_ERR "unable to request IRQ for LCD DMA "
1510                                "(error %d)\n", r);
1511                         for (i = 0; i < dma_chan_count; i++)
1512                                 free_irq(omap1_dma_irq[i], (void *) (i + 1));
1513                         return r;
1514                 }
1515         }
1516
1517         return 0;
1518 }
1519
1520 arch_initcall(omap_init_dma);
1521
1522 EXPORT_SYMBOL(omap_get_dma_src_pos);
1523 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1524 EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
1525 EXPORT_SYMBOL(omap_clear_dma);
1526 EXPORT_SYMBOL(omap_set_dma_priority);
1527 EXPORT_SYMBOL(omap_request_dma);
1528 EXPORT_SYMBOL(omap_free_dma);
1529 EXPORT_SYMBOL(omap_start_dma);
1530 EXPORT_SYMBOL(omap_stop_dma);
1531 EXPORT_SYMBOL(omap_set_dma_callback);
1532 EXPORT_SYMBOL(omap_enable_dma_irq);
1533 EXPORT_SYMBOL(omap_disable_dma_irq);
1534
1535 EXPORT_SYMBOL(omap_set_dma_transfer_params);
1536 EXPORT_SYMBOL(omap_set_dma_color_mode);
1537 EXPORT_SYMBOL(omap_set_dma_write_mode);
1538
1539 EXPORT_SYMBOL(omap_set_dma_src_params);
1540 EXPORT_SYMBOL(omap_set_dma_src_index);
1541 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
1542 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
1543
1544 EXPORT_SYMBOL(omap_set_dma_dest_params);
1545 EXPORT_SYMBOL(omap_set_dma_dest_index);
1546 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
1547 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
1548
1549 EXPORT_SYMBOL(omap_set_dma_params);
1550
1551 EXPORT_SYMBOL(omap_dma_link_lch);
1552 EXPORT_SYMBOL(omap_dma_unlink_lch);
1553
1554 EXPORT_SYMBOL(omap_request_lcd_dma);
1555 EXPORT_SYMBOL(omap_free_lcd_dma);
1556 EXPORT_SYMBOL(omap_enable_lcd_dma);
1557 EXPORT_SYMBOL(omap_setup_lcd_dma);
1558 EXPORT_SYMBOL(omap_stop_lcd_dma);
1559 EXPORT_SYMBOL(omap_set_lcd_dma_b1);
1560 EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
1561 EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
1562 EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
1563 EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
1564 EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale);
1565 EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
1566