2 * linux/arch/arm/plat-omap/dma.c
4 * Copyright (C) 2003 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7 * Graphics DMA and LCD DMA graphics transformations
8 * by Imre Deak <imre.deak@nokia.com>
9 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
13 * Support functions for the OMAP internal DMA channels.
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/sched.h>
24 #include <linux/spinlock.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
29 #include <asm/system.h>
30 #include <asm/hardware.h>
34 #include <asm/arch/tc.h>
/* Per-logical-channel states used by the OMAP2 chaining code (compiled out on OMAP1). */
/* NOTE(review): lines are missing from this excerpt (e.g. the enum's closing brace) — restore from the upstream file. */
38 #ifndef CONFIG_ARCH_OMAP1
39 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
40 DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
/* Whole-chain run state, kept in struct dma_link_info. */
43 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
/* Channel flag: a transfer is currently active on this logical channel. */
46 #define OMAP_DMA_ACTIVE 0x01
/* CCR channel-enable bit (bit 7 in both OMAP1 and OMAP2 layouts). */
47 #define OMAP_DMA_CCR_EN (1 << 7)
/* Write-1-to-clear mask for the OMAP2 channel status register (CSR). */
48 #define OMAP2_DMA_CSR_CLEAR_MASK 0xffe
/* OMAP1 functional-mux base used by get_gdma_dev()/set_gdma_dev(). */
50 #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
/* Non-zero when the DMA module runs in OMAP1510-compatible mode. */
52 static int enable_1510_mode = 0;
/* NOTE(review): fragment of struct omap_dma_lch — per-channel completion callback. */
60 void (* callback)(int lch, u16 ch_status, void *data);
63 #ifndef CONFIG_ARCH_OMAP1
64 /* required for Dynamic chaining */
/* Book-keeping for one DMA chain; excerpt is missing the queue members (q_head/q_tail/q_count etc.). */
75 struct dma_link_info {
77 int no_of_lchs_linked;
/* Array indexed by chain id (the first channel of the chain). */
88 static struct dma_link_info *dma_linked_lch;
90 #ifndef CONFIG_ARCH_OMAP1
92 /* Chain handling macros */
/* Reset the circular free-channel queue of a chain to empty. */
93 #define OMAP_DMA_CHAIN_QINIT(chain_id) \
95 dma_linked_lch[chain_id].q_head = \
96 dma_linked_lch[chain_id].q_tail = \
97 dma_linked_lch[chain_id].q_count = 0; \
/* True when every linked channel of the chain is queued (no free slot). */
99 #define OMAP_DMA_CHAIN_QFULL(chain_id) \
100 (dma_linked_lch[chain_id].no_of_lchs_linked == \
101 dma_linked_lch[chain_id].q_count)
/* True when exactly one free slot remains in the chain's queue. */
102 #define OMAP_DMA_CHAIN_QLAST(chain_id) \
104 ((dma_linked_lch[chain_id].no_of_lchs_linked-1) == \
105 dma_linked_lch[chain_id].q_count) \
/* True when no channel of the chain is queued. */
107 #define OMAP_DMA_CHAIN_QEMPTY(chain_id) \
108 (0 == dma_linked_lch[chain_id].q_count)
/* Advance an index modulo the chain length; 'chain_id' must be in scope at the expansion site. */
109 #define __OMAP_DMA_CHAIN_INCQ(end) \
110 ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
/* Consume one queued channel (head forward, count down).
 * NOTE(review): the do { } while (0) wrappers were dropped from this excerpt. */
111 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id) \
113 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
114 dma_linked_lch[chain_id].q_count--; \
/* Queue one more channel (tail forward, count up). */
117 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id) \
119 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
120 dma_linked_lch[chain_id].q_count++; \
/* Number of logical channels the hardware provides (set at init). */
124 static int dma_lch_count;
/* Number of logical channels actually managed by this driver. */
125 static int dma_chan_count;
/* Protects allocation/free of entries in dma_chan[]. */
127 static spinlock_t dma_chan_lock;
/* Per-logical-channel state array, allocated at init. */
128 static struct omap_dma_lch *dma_chan;
/* ioremapped base of the DMA controller registers. */
129 void __iomem *omap_dma_base;
/* OMAP1 IRQ number for each logical DMA channel.
 * NOTE(review): the closing brace of this initializer is missing from the excerpt. */
131 static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
132 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
133 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
134 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
135 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
136 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
/* Forward declarations for helpers defined later in this file. */
139 static inline void disable_lnk(int lch);
140 static void omap_disable_channel_irq(int lch);
141 static inline void omap_enable_channel_irq(int lch);
/* Complain loudly about features not yet implemented for OMAP24xx. */
143 #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
/* Register accessors: OMAP1 uses 16-bit registers at OMAP1_DMA_* offsets,
 * OMAP2+ uses 32-bit registers at OMAP_DMA4_* offsets.
 * NOTE(review): the ({ }) statement-expression wrappers were dropped from this excerpt. */
146 #define dma_read(reg) \
149 if (cpu_class_is_omap1()) \
150 __val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg); \
152 __val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg); \
156 #define dma_write(val, reg) \
158 if (cpu_class_is_omap1()) \
159 __raw_writew((u16)val, omap_dma_base + OMAP1_DMA_##reg);\
161 __raw_writel((val), omap_dma_base + OMAP_DMA4_##reg); \
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
#define omap_dma_in_1510_mode()		0
#endif
#ifdef CONFIG_ARCH_OMAP1
/* Decode which hardware sync device is muxed onto "global" DMA request @req. */
static inline int get_gdma_dev(int req)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;

	return ((omap_readl(reg) >> shift) & 0x3f) + 1;
}

/* Route hardware sync device @dev onto "global" DMA request @req (6 bits per slot, 5 slots per register). */
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}
#else
#define set_gdma_dev(req, dev)	do {} while (0)
#endif
199 static void clear_lch_regs(int lch)
202 void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
204 for (i = 0; i < 0x2c; i += 2)
205 __raw_writew(0, lch_base + i);
208 void omap_set_dma_priority(int lch, int dst_port, int priority)
213 if (cpu_class_is_omap1()) {
215 case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */
216 reg = OMAP_TC_OCPT1_PRIOR;
218 case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */
219 reg = OMAP_TC_OCPT2_PRIOR;
221 case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */
222 reg = OMAP_TC_EMIFF_PRIOR;
224 case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */
225 reg = OMAP_TC_EMIFS_PRIOR;
233 l |= (priority & 0xf) << 8;
237 if (cpu_class_is_omap2()) {
240 ccr = dma_read(CCR(lch));
245 dma_write(ccr, CCR(lch));
249 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
250 int frame_count, int sync_mode,
251 int dma_trigger, int src_or_dst_synch)
255 l = dma_read(CSDP(lch));
258 dma_write(l, CSDP(lch));
260 if (cpu_class_is_omap1()) {
263 ccr = dma_read(CCR(lch));
265 if (sync_mode == OMAP_DMA_SYNC_FRAME)
267 dma_write(ccr, CCR(lch));
269 ccr = dma_read(CCR2(lch));
271 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
273 dma_write(ccr, CCR2(lch));
276 if (cpu_class_is_omap2() && dma_trigger) {
279 val = dma_read(CCR(lch));
281 if (dma_trigger > 63)
283 if (dma_trigger > 31)
287 val |= (dma_trigger & 0x1f);
289 if (sync_mode & OMAP_DMA_SYNC_FRAME)
294 if (sync_mode & OMAP_DMA_SYNC_BLOCK)
299 if (src_or_dst_synch)
300 val |= 1 << 24; /* source synch */
302 val &= ~(1 << 24); /* dest synch */
304 dma_write(val, CCR(lch));
307 dma_write(elem_count, CEN(lch));
308 dma_write(frame_count, CFN(lch));
311 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
315 BUG_ON(omap_dma_in_1510_mode());
317 if (cpu_class_is_omap2()) {
322 w = dma_read(CCR2(lch));
326 case OMAP_DMA_CONSTANT_FILL:
329 case OMAP_DMA_TRANSPARENT_COPY:
332 case OMAP_DMA_COLOR_DIS:
337 dma_write(w, CCR2(lch));
339 w = dma_read(LCH_CTRL(lch));
341 /* Default is channel type 2D */
343 dma_write((u16)color, COLOR_L(lch));
344 dma_write((u16)(color >> 16), COLOR_U(lch));
345 w |= 1; /* Channel type G */
347 dma_write(w, LCH_CTRL(lch));
350 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
352 if (cpu_class_is_omap2()) {
355 csdp = dma_read(CSDP(lch));
356 csdp &= ~(0x3 << 16);
357 csdp |= (mode << 16);
358 dma_write(csdp, CSDP(lch));
362 /* Note that src_port is only for omap1 */
363 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
364 unsigned long src_start,
365 int src_ei, int src_fi)
367 if (cpu_class_is_omap1()) {
370 w = dma_read(CSDP(lch));
373 dma_write(w, CSDP(lch));
375 w = dma_read(CCR(lch));
377 w |= src_amode << 12;
378 dma_write(w, CCR(lch));
380 dma_write(src_start >> 16, CSSA_U(lch));
381 dma_write((u16)src_start, CSSA_L(lch));
383 dma_write(src_ei, CSEI(lch));
384 dma_write(src_fi, CSFI(lch));
387 if (cpu_class_is_omap2()) {
390 l = dma_read(CCR(lch));
392 l |= src_amode << 12;
393 dma_write(l, CCR(lch));
395 dma_write(src_start, CSSA(lch));
396 dma_write(src_ei, CSEI(lch));
397 dma_write(src_fi, CSFI(lch));
401 void omap_set_dma_params(int lch, struct omap_dma_channel_params * params)
403 omap_set_dma_transfer_params(lch, params->data_type,
404 params->elem_count, params->frame_count,
405 params->sync_mode, params->trigger,
406 params->src_or_dst_synch);
407 omap_set_dma_src_params(lch, params->src_port,
408 params->src_amode, params->src_start,
409 params->src_ei, params->src_fi);
411 omap_set_dma_dest_params(lch, params->dst_port,
412 params->dst_amode, params->dst_start,
413 params->dst_ei, params->dst_fi);
414 if (params->read_prio || params->write_prio)
415 omap_dma_set_prio_lch(lch, params->read_prio,
/* Program source element/frame indexes (OMAP1 only; OMAP2 path unimplemented). */
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
	if (cpu_class_is_omap2()) {
		REVISIT_24XX();
		return;
	}

	dma_write(eidx, CSEI(lch));
	dma_write(fidx, CSFI(lch));
}
429 void omap_set_dma_src_data_pack(int lch, int enable)
433 l = dma_read(CSDP(lch));
437 dma_write(l, CSDP(lch));
440 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
442 unsigned int burst = 0;
445 l = dma_read(CSDP(lch));
448 switch (burst_mode) {
449 case OMAP_DMA_DATA_BURST_DIS:
451 case OMAP_DMA_DATA_BURST_4:
452 if (cpu_class_is_omap2())
457 case OMAP_DMA_DATA_BURST_8:
458 if (cpu_class_is_omap2()) {
462 /* not supported by current hardware on OMAP1
466 case OMAP_DMA_DATA_BURST_16:
467 if (cpu_class_is_omap2()) {
471 /* OMAP1 don't support burst 16
479 dma_write(l, CSDP(lch));
482 /* Note that dest_port is only for OMAP1 */
483 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
484 unsigned long dest_start,
485 int dst_ei, int dst_fi)
489 if (cpu_class_is_omap1()) {
490 l = dma_read(CSDP(lch));
493 dma_write(l, CSDP(lch));
496 l = dma_read(CCR(lch));
498 l |= dest_amode << 14;
499 dma_write(l, CCR(lch));
501 if (cpu_class_is_omap1()) {
502 dma_write(dest_start >> 16, CDSA_U(lch));
503 dma_write(dest_start, CDSA_L(lch));
506 if (cpu_class_is_omap2())
507 dma_write(dest_start, CDSA(lch));
509 dma_write(dst_ei, CDEI(lch));
510 dma_write(dst_fi, CDFI(lch));
/* Program destination element/frame indexes (OMAP1 only; OMAP2 path unimplemented). */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	if (cpu_class_is_omap2()) {
		REVISIT_24XX();
		return;
	}

	dma_write(eidx, CDEI(lch));
	dma_write(fidx, CDFI(lch));
}
523 void omap_set_dma_dest_data_pack(int lch, int enable)
527 l = dma_read(CSDP(lch));
531 dma_write(l, CSDP(lch));
534 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
536 unsigned int burst = 0;
539 l = dma_read(CSDP(lch));
542 switch (burst_mode) {
543 case OMAP_DMA_DATA_BURST_DIS:
545 case OMAP_DMA_DATA_BURST_4:
546 if (cpu_class_is_omap2())
551 case OMAP_DMA_DATA_BURST_8:
552 if (cpu_class_is_omap2())
557 case OMAP_DMA_DATA_BURST_16:
558 if (cpu_class_is_omap2()) {
562 /* OMAP1 don't support burst 16
566 printk(KERN_ERR "Invalid DMA burst mode\n");
571 dma_write(l, CSDP(lch));
574 static inline void omap_enable_channel_irq(int lch)
579 if (cpu_class_is_omap1())
580 status = dma_read(CSR(lch));
581 else if (cpu_class_is_omap2())
582 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
584 /* Enable some nice interrupts. */
585 dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
/* Mask all interrupts of @lch (no-op on OMAP1, whose CSR is clear-on-read). */
static void omap_disable_channel_irq(int lch)
{
	if (cpu_class_is_omap2())
		dma_write(0, CICR(lch));
}
594 void omap_enable_dma_irq(int lch, u16 bits)
596 dma_chan[lch].enabled_irqs |= bits;
599 void omap_disable_dma_irq(int lch, u16 bits)
601 dma_chan[lch].enabled_irqs &= ~bits;
604 static inline void enable_lnk(int lch)
608 l = dma_read(CLNK_CTRL(lch));
610 if (cpu_class_is_omap1())
613 /* Set the ENABLE_LNK bits */
614 if (dma_chan[lch].next_lch != -1)
615 l = dma_chan[lch].next_lch | (1 << 15);
617 #ifndef CONFIG_ARCH_OMAP1
618 if (dma_chan[lch].next_linked_ch != -1)
619 l = dma_chan[lch].next_linked_ch | (1 << 15);
622 dma_write(l, CLNK_CTRL(lch));
625 static inline void disable_lnk(int lch)
629 l = dma_read(CLNK_CTRL(lch));
631 /* Disable interrupts */
632 if (cpu_class_is_omap1()) {
633 dma_write(0, CICR(lch));
634 /* Set the STOP_LNK bit */
638 if (cpu_class_is_omap2()) {
639 omap_disable_channel_irq(lch);
640 /* Clear the ENABLE_LNK bit */
644 dma_write(l, CLNK_CTRL(lch));
645 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
648 static inline void omap2_enable_irq_lch(int lch)
652 if (!cpu_class_is_omap2())
655 val = dma_read(IRQENABLE_L0);
657 dma_write(val, IRQENABLE_L0);
660 int omap_request_dma(int dev_id, const char *dev_name,
661 void (* callback)(int lch, u16 ch_status, void *data),
662 void *data, int *dma_ch_out)
664 int ch, free_ch = -1;
666 struct omap_dma_lch *chan;
668 spin_lock_irqsave(&dma_chan_lock, flags);
669 for (ch = 0; ch < dma_chan_count; ch++) {
670 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
677 spin_unlock_irqrestore(&dma_chan_lock, flags);
680 chan = dma_chan + free_ch;
681 chan->dev_id = dev_id;
683 if (cpu_class_is_omap1())
684 clear_lch_regs(free_ch);
686 if (cpu_class_is_omap2())
687 omap_clear_dma(free_ch);
689 spin_unlock_irqrestore(&dma_chan_lock, flags);
691 chan->dev_name = dev_name;
692 chan->callback = callback;
694 #ifndef CONFIG_ARCH_OMAP1
696 chan->next_linked_ch = -1;
698 chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
700 if (cpu_class_is_omap1())
701 chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
702 else if (cpu_class_is_omap2())
703 chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
704 OMAP2_DMA_TRANS_ERR_IRQ;
706 if (cpu_is_omap16xx()) {
707 /* If the sync device is set, configure it dynamically. */
709 set_gdma_dev(free_ch + 1, dev_id);
710 dev_id = free_ch + 1;
712 /* Disable the 1510 compatibility mode and set the sync device
714 dma_write(dev_id | (1 << 10), CCR(free_ch));
715 } else if (cpu_is_omap730() || cpu_is_omap15xx()) {
716 dma_write(dev_id, CCR(free_ch));
719 if (cpu_class_is_omap2()) {
720 omap2_enable_irq_lch(free_ch);
722 omap_enable_channel_irq(free_ch);
723 /* Clear the CSR register and IRQ status register */
724 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
725 dma_write(1 << free_ch, IRQSTATUS_L0);
728 *dma_ch_out = free_ch;
733 void omap_free_dma(int lch)
737 spin_lock_irqsave(&dma_chan_lock, flags);
738 if (dma_chan[lch].dev_id == -1) {
739 printk("omap_dma: trying to free nonallocated DMA channel %d\n",
741 spin_unlock_irqrestore(&dma_chan_lock, flags);
744 dma_chan[lch].dev_id = -1;
745 dma_chan[lch].next_lch = -1;
746 dma_chan[lch].callback = NULL;
747 spin_unlock_irqrestore(&dma_chan_lock, flags);
749 if (cpu_class_is_omap1()) {
750 /* Disable all DMA interrupts for the channel. */
751 dma_write(0, CICR(lch));
752 /* Make sure the DMA transfer is stopped. */
753 dma_write(0, CCR(lch));
756 if (cpu_class_is_omap2()) {
758 /* Disable interrupts */
759 val = dma_read(IRQENABLE_L0);
761 dma_write(val, IRQENABLE_L0);
763 /* Clear the CSR register and IRQ status register */
764 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
765 dma_write(1 << lch, IRQSTATUS_L0);
767 /* Disable all DMA interrupts for the channel. */
768 dma_write(0, CICR(lch));
770 /* Make sure the DMA transfer is stopped. */
771 dma_write(0, CCR(lch));
777 * @brief omap_dma_set_global_params : Set global priority settings for dma
780 * @param max_fifo_depth
* @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
782 * DMA_THREAD_RESERVE_ONET
783 * DMA_THREAD_RESERVE_TWOT
784 * DMA_THREAD_RESERVE_THREET
/* OMAP2-only: writes arbitration rate and FIFO depth into the global GCR register. */
787 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
791 if (!cpu_class_is_omap2()) {
792 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
/* arb_rate in bits 16..23, max_fifo_depth in bits 0..7 of GCR. */
799 reg = (arb_rate & 0xff) << 16;
800 reg |= (0xff & max_fifo_depth);
804 EXPORT_SYMBOL(omap_dma_set_global_params);
807 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
810 * @param read_prio - Read priority
811 * @param write_prio - Write priority
812 * Both of the above can be set with one of the following values :
813 * DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
816 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
817 unsigned char write_prio)
821 if (unlikely((lch < 0 || lch >= dma_lch_count))) {
822 printk(KERN_ERR "Invalid channel id\n");
825 l = dma_read(CCR(lch));
826 l &= ~((1 << 6) | (1 << 26));
827 if (cpu_is_omap2430() || cpu_is_omap34xx())
828 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
830 l |= ((read_prio & 0x1) << 6);
832 dma_write(l, CCR(lch));
836 EXPORT_SYMBOL(omap_dma_set_prio_lch);
839 * Clears any DMA state so the DMA engine is ready to restart with new buffers
840 * through omap_start_dma(). Any buffers in flight are discarded.
842 void omap_clear_dma(int lch)
846 local_irq_save(flags);
848 if (cpu_class_is_omap1()) {
851 l = dma_read(CCR(lch));
852 l &= ~OMAP_DMA_CCR_EN;
853 dma_write(l, CCR(lch));
855 /* Clear pending interrupts */
856 l = dma_read(CSR(lch));
859 if (cpu_class_is_omap2()) {
861 void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
862 for (i = 0; i < 0x44; i += 4)
863 __raw_writel(0, lch_base + i);
866 local_irq_restore(flags);
869 void omap_start_dma(int lch)
873 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
874 int next_lch, cur_lch;
875 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
877 dma_chan_link_map[lch] = 1;
878 /* Set the link register of the first channel */
881 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
882 cur_lch = dma_chan[lch].next_lch;
884 next_lch = dma_chan[cur_lch].next_lch;
886 /* The loop case: we've been here already */
887 if (dma_chan_link_map[cur_lch])
889 /* Mark the current channel */
890 dma_chan_link_map[cur_lch] = 1;
893 omap_enable_channel_irq(cur_lch);
896 } while (next_lch != -1);
897 } else if (cpu_class_is_omap2()) {
898 /* Errata: Need to write lch even if not using chaining */
899 dma_write(lch, CLNK_CTRL(lch));
902 omap_enable_channel_irq(lch);
904 l = dma_read(CCR(lch));
906 /* Errata: On ES2.0 BUFFERING disable must be set.
907 * This will always fail on ES1.0 */
908 if (cpu_is_omap24xx())
909 l |= OMAP_DMA_CCR_EN;
911 l |= OMAP_DMA_CCR_EN;
912 dma_write(l, CCR(lch));
914 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
917 void omap_stop_dma(int lch)
921 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
922 int next_lch, cur_lch = lch;
923 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
925 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
927 /* The loop case: we've been here already */
928 if (dma_chan_link_map[cur_lch])
930 /* Mark the current channel */
931 dma_chan_link_map[cur_lch] = 1;
933 disable_lnk(cur_lch);
935 next_lch = dma_chan[cur_lch].next_lch;
937 } while (next_lch != -1);
942 /* Disable all interrupts on the channel */
943 if (cpu_class_is_omap1())
944 dma_write(0, CICR(lch));
946 l = dma_read(CCR(lch));
947 l &= ~OMAP_DMA_CCR_EN;
948 dma_write(l, CCR(lch));
950 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
954 * Allows changing the DMA callback function or data. This may be needed if
955 * the driver shares a single DMA channel for multiple dma triggers.
957 int omap_set_dma_callback(int lch,
958 void (* callback)(int lch, u16 ch_status, void *data),
966 spin_lock_irqsave(&dma_chan_lock, flags);
967 if (dma_chan[lch].dev_id == -1) {
968 printk(KERN_ERR "DMA callback for not set for free channel\n");
969 spin_unlock_irqrestore(&dma_chan_lock, flags);
972 dma_chan[lch].callback = callback;
973 dma_chan[lch].data = data;
974 spin_unlock_irqrestore(&dma_chan_lock, flags);
980 * Returns current physical source address for the given DMA channel.
981 * If the channel is running the caller must disable interrupts prior calling
982 * this function and process the returned value before re-enabling interrupt to
983 * prevent races with the interrupt handler. Note that in continuous mode there
984 * is a chance for CSSA_L register overflow inbetween the two reads resulting
985 * in incorrect return value.
987 dma_addr_t omap_get_dma_src_pos(int lch)
989 dma_addr_t offset = 0;
991 if (cpu_class_is_omap1())
992 offset = (dma_addr_t)(dma_read(CSSA_L(lch)) |
993 (dma_read(CSSA_U(lch)) << 16));
995 if (cpu_class_is_omap2()) {
996 offset = dma_read(CSAC(lch));
999 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1000 * read before the DMA controller finished disabling the channel.
1003 offset = dma_read(CSAC(lch));
1010 * Returns current physical destination address for the given DMA channel.
1011 * If the channel is running the caller must disable interrupts prior calling
1012 * this function and process the returned value before re-enabling interrupt to
1013 * prevent races with the interrupt handler. Note that in continuous mode there
1014 * is a chance for CDSA_L register overflow inbetween the two reads resulting
1015 * in incorrect return value.
1017 dma_addr_t omap_get_dma_dst_pos(int lch)
1019 dma_addr_t offset = 0;
1021 if (cpu_class_is_omap1())
1022 offset = (dma_addr_t)(dma_read(CDSA_L(lch)) |
1023 (dma_read(CDSA_U(lch)) << 16));
1025 if (cpu_class_is_omap2()) {
1026 offset = dma_read(CDAC(lch));
1029 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1030 * read before the DMA controller finished disabling the channel.
1033 offset = dma_read(CDAC(lch));
1040 * Returns current source transfer counting for the given DMA channel.
1041 * Can be used to monitor the progress of a transfer inside a block.
1042 * It must be called with disabled interrupts.
1044 int omap_get_dma_src_addr_counter(int lch)
1046 return (dma_addr_t)dma_read(CSAC(lch));
1049 int omap_get_dma_active_status(int lch)
1051 return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0;
1053 EXPORT_SYMBOL(omap_get_dma_active_status);
1055 int omap_dma_running(void)
1059 /* Check if LCD DMA is running */
1060 if (cpu_is_omap16xx())
1061 if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
1064 for (lch = 0; lch < dma_chan_count; lch++)
1065 if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN)
1072 * lch_queue DMA will start right after lch_head one is finished.
1073 * For this DMA link to start, you still need to start (see omap_start_dma)
1074 * the first one. That will fire up the entire queue.
1076 void omap_dma_link_lch (int lch_head, int lch_queue)
1078 if (omap_dma_in_1510_mode()) {
1079 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1084 if ((dma_chan[lch_head].dev_id == -1) ||
1085 (dma_chan[lch_queue].dev_id == -1)) {
1086 printk(KERN_ERR "omap_dma: trying to link "
1087 "non requested channels\n");
1091 dma_chan[lch_head].next_lch = lch_queue;
1095 * Once the DMA queue is stopped, we can destroy it.
1097 void omap_dma_unlink_lch (int lch_head, int lch_queue)
1099 if (omap_dma_in_1510_mode()) {
1100 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1105 if (dma_chan[lch_head].next_lch != lch_queue ||
1106 dma_chan[lch_head].next_lch == -1) {
1107 printk(KERN_ERR "omap_dma: trying to unlink "
1108 "non linked channels\n");
1113 if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1114 (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
1115 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1116 "before unlinking\n");
1120 dma_chan[lch_head].next_lch = -1;
1123 #ifndef CONFIG_ARCH_OMAP1
1124 /* Create chain of DMA channels */
1125 static void create_dma_lch_chain(int lch_head, int lch_queue)
1129 /* Check if this is the first link in chain */
1130 if (dma_chan[lch_head].next_linked_ch == -1) {
1131 dma_chan[lch_head].next_linked_ch = lch_queue;
1132 dma_chan[lch_head].prev_linked_ch = lch_queue;
1133 dma_chan[lch_queue].next_linked_ch = lch_head;
1134 dma_chan[lch_queue].prev_linked_ch = lch_head;
1137 /* a link exists, link the new channel in circular chain */
1139 dma_chan[lch_queue].next_linked_ch =
1140 dma_chan[lch_head].next_linked_ch;
1141 dma_chan[lch_queue].prev_linked_ch = lch_head;
1142 dma_chan[lch_head].next_linked_ch = lch_queue;
1143 dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1147 l = dma_read(CLNK_CTRL(lch_head));
1150 dma_write(l, CLNK_CTRL(lch_head));
1152 l = dma_read(CLNK_CTRL(lch_queue));
1154 l |= (dma_chan[lch_queue].next_linked_ch);
1155 dma_write(l, CLNK_CTRL(lch_queue));
1159 * @brief omap_request_dma_chain : Request a chain of DMA channels
1161 * @param dev_id - Device id using the dma channel
1162 * @param dev_name - Device name
1163 * @param callback - Call back function
1165 * @no_of_chans - Number of channels requested
1166 * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1167 * OMAP_DMA_DYNAMIC_CHAIN
1168 * @params - Channel parameters
1170 * @return - Succes : 0
1171 * Failure: -EINVAL/-ENOMEM
1173 int omap_request_dma_chain(int dev_id, const char *dev_name,
1174 void (*callback) (int chain_id, u16 ch_status,
1176 int *chain_id, int no_of_chans, int chain_mode,
1177 struct omap_dma_channel_params params)
1182 /* Is the chain mode valid ? */
1183 if (chain_mode != OMAP_DMA_STATIC_CHAIN
1184 && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1185 printk(KERN_ERR "Invalid chain mode requested\n");
1189 if (unlikely((no_of_chans < 1
1190 || no_of_chans > dma_lch_count))) {
1191 printk(KERN_ERR "Invalid Number of channels requested\n");
1195 /* Allocate a queue to maintain the status of the channels
1197 channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1198 if (channels == NULL) {
1199 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1203 /* request and reserve DMA channels for the chain */
1204 for (i = 0; i < no_of_chans; i++) {
1205 err = omap_request_dma(dev_id, dev_name,
1206 callback, 0, &channels[i]);
1209 for (j = 0; j < i; j++)
1210 omap_free_dma(channels[j]);
1212 printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1215 dma_chan[channels[i]].prev_linked_ch = -1;
1216 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1219 * Allowing client drivers to set common parameters now,
1220 * so that later only relevant (src_start, dest_start
1221 * and element count) can be set
1223 omap_set_dma_params(channels[i], ¶ms);
1226 *chain_id = channels[0];
1227 dma_linked_lch[*chain_id].linked_dmach_q = channels;
1228 dma_linked_lch[*chain_id].chain_mode = chain_mode;
1229 dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1230 dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1232 for (i = 0; i < no_of_chans; i++)
1233 dma_chan[channels[i]].chain_id = *chain_id;
1235 /* Reset the Queue pointers */
1236 OMAP_DMA_CHAIN_QINIT(*chain_id);
1238 /* Set up the chain */
1239 if (no_of_chans == 1)
1240 create_dma_lch_chain(channels[0], channels[0]);
1242 for (i = 0; i < (no_of_chans - 1); i++)
1243 create_dma_lch_chain(channels[i], channels[i + 1]);
1247 EXPORT_SYMBOL(omap_request_dma_chain);
1250 * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1251 * params after setting it. Dont do this while dma is running!!
1253 * @param chain_id - Chained logical channel id.
1256 * @return - Success : 0
1259 int omap_modify_dma_chain_params(int chain_id,
1260 struct omap_dma_channel_params params)
1265 /* Check for input params */
1266 if (unlikely((chain_id < 0
1267 || chain_id >= dma_lch_count))) {
1268 printk(KERN_ERR "Invalid chain id\n");
1272 /* Check if the chain exists */
1273 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1274 printk(KERN_ERR "Chain doesn't exists\n");
1277 channels = dma_linked_lch[chain_id].linked_dmach_q;
1279 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1281 * Allowing client drivers to set common parameters now,
1282 * so that later only relevant (src_start, dest_start
1283 * and element count) can be set
1285 omap_set_dma_params(channels[i], ¶ms);
1289 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1292 * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1296 * @return - Success : 0
1299 int omap_free_dma_chain(int chain_id)
1304 /* Check for input params */
1305 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1306 printk(KERN_ERR "Invalid chain id\n");
1310 /* Check if the chain exists */
1311 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1312 printk(KERN_ERR "Chain doesn't exists\n");
1316 channels = dma_linked_lch[chain_id].linked_dmach_q;
1317 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1318 dma_chan[channels[i]].next_linked_ch = -1;
1319 dma_chan[channels[i]].prev_linked_ch = -1;
1320 dma_chan[channels[i]].chain_id = -1;
1321 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1322 omap_free_dma(channels[i]);
1327 dma_linked_lch[chain_id].linked_dmach_q = NULL;
1328 dma_linked_lch[chain_id].chain_mode = -1;
1329 dma_linked_lch[chain_id].chain_state = -1;
1332 EXPORT_SYMBOL(omap_free_dma_chain);
1335 * @brief omap_dma_chain_status - Check if the chain is in
1336 * active / inactive state.
1339 * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1342 int omap_dma_chain_status(int chain_id)
1344 /* Check for input params */
1345 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1346 printk(KERN_ERR "Invalid chain id\n");
1350 /* Check if the chain exists */
1351 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1352 printk(KERN_ERR "Chain doesn't exists\n");
1355 pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1356 dma_linked_lch[chain_id].q_count);
1358 if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1359 return OMAP_DMA_CHAIN_INACTIVE;
1360 return OMAP_DMA_CHAIN_ACTIVE;
1362 EXPORT_SYMBOL(omap_dma_chain_status);
/* NOTE(review): many lines of this function are missing from the excerpt
 * (closing braces, error returns, bit updates); restore from upstream before
 * building.  Comments below annotate only what is visible. */
1365 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1366 * set the params and start the transfer.
1369 * @param src_start - buffer start address
1370 * @param dest_start - Dest address
1372 * @param frame_count
1373 * @param callbk_data - channel callback parameter data.
1375 * @return - Success : 0
1376 * Failure: -EINVAL/-EBUSY
1378 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1379 int elem_count, int frame_count, void *callbk_data)
1385 /* if buffer size is less than 1 then there is
1386 * no use of starting the chain */
1387 if (elem_count < 1) {
1388 printk(KERN_ERR "Invalid buffer size\n");
1392 /* Check for input params */
1393 if (unlikely((chain_id < 0
1394 || chain_id >= dma_lch_count))) {
1395 printk(KERN_ERR "Invalid chain id\n");
1399 /* Check if the chain exists */
1400 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1401 printk(KERN_ERR "Chain doesn't exist\n");
1405 /* Check if all the channels in chain are in use */
1406 if (OMAP_DMA_CHAIN_QFULL(chain_id))
1409 /* Frame count may be negative in case of indexed transfers */
1410 channels = dma_linked_lch[chain_id].linked_dmach_q;
1412 /* Get a free channel */
1413 lch = channels[dma_linked_lch[chain_id].q_tail];
1415 /* Store the callback data */
1416 dma_chan[lch].data = callbk_data;
1418 /* Increment the q_tail */
1419 OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1421 /* Set the params to the free channel */
1423 dma_write(src_start, CSSA(lch));
1424 if (dest_start != 0)
1425 dma_write(dest_start, CDSA(lch));
1427 /* Write the buffer size */
1428 dma_write(elem_count, CEN(lch));
1429 dma_write(frame_count, CFN(lch));
1431 /* If the chain is dynamically linked,
1432 * then we may have to start the chain if its not active */
1433 if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1435 /* In Dynamic chain, if the chain is not started,
1436 * queue the channel */
1437 if (dma_linked_lch[chain_id].chain_state ==
1438 DMA_CHAIN_NOTSTARTED) {
1439 /* Enable the link in previous channel */
1440 if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1442 enable_lnk(dma_chan[lch].prev_linked_ch);
1443 dma_chan[lch].state = DMA_CH_QUEUED;
1446 /* Chain is already started, make sure its active,
1447 * if not then start the chain */
1451 if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1453 enable_lnk(dma_chan[lch].prev_linked_ch);
1454 dma_chan[lch].state = DMA_CH_QUEUED;
/* (1 << 7) is the CCR enable bit: if the previous channel already stopped, drop its link. */
1456 if (0 == ((1 << 7) & dma_read(
1457 CCR(dma_chan[lch].prev_linked_ch)))) {
1458 disable_lnk(dma_chan[lch].
1460 pr_debug("\n prev ch is stopped\n");
1465 else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1467 enable_lnk(dma_chan[lch].prev_linked_ch);
1468 dma_chan[lch].state = DMA_CH_QUEUED;
1471 omap_enable_channel_irq(lch);
1473 l = dma_read(CCR(lch));
/* Bit 24 selects source vs destination synchronization. */
1475 if ((0 == (l & (1 << 24))))
1479 if (start_dma == 1) {
1480 if (0 == (l & (1 << 7))) {
1482 dma_chan[lch].state = DMA_CH_STARTED;
1483 pr_debug("starting %d\n", lch);
1484 dma_write(l, CCR(lch));
1488 if (0 == (l & (1 << 7)))
1489 dma_write(l, CCR(lch));
1491 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1496 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1499 * @brief omap_start_dma_chain_transfers - Start the chain
1503 * @return - Success : 0
1504 * Failure : -EINVAL/-EBUSY
1506 int omap_start_dma_chain_transfers(int chain_id)
1511 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1512 printk(KERN_ERR "Invalid chain id\n");
1516 channels = dma_linked_lch[chain_id].linked_dmach_q;
1518 if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1519 printk(KERN_ERR "Chain is already started\n");
1523 if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1524 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1526 enable_lnk(channels[i]);
1527 omap_enable_channel_irq(channels[i]);
1530 omap_enable_channel_irq(channels[0]);
1533 l = dma_read(CCR(channels[0]));
1535 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1536 dma_chan[channels[0]].state = DMA_CH_STARTED;
1538 if ((0 == (l & (1 << 24))))
1542 dma_write(l, CCR(channels[0]));
1544 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1547 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1550 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1554 * @return - Success : 0
1557 int omap_stop_dma_chain_transfers(int chain_id)
1563 /* Check for input params */
1564 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1565 printk(KERN_ERR "Invalid chain id\n");
1569 /* Check if the chain exists */
1570 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1571 printk(KERN_ERR "Chain doesn't exists\n");
1574 channels = dma_linked_lch[chain_id].linked_dmach_q;
1577 * Special programming model needed to disable DMA before end of block
1579 sys_cf = dma_read(OCP_SYSCONFIG);
1581 /* Middle mode reg set no Standby */
1582 l &= ~((1 << 12)|(1 << 13));
1583 dma_write(l, OCP_SYSCONFIG);
1585 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1587 /* Stop the Channel transmission */
1588 l = dma_read(CCR(channels[i]));
1590 dma_write(l, CCR(channels[i]));
1592 /* Disable the link in all the channels */
1593 disable_lnk(channels[i]);
1594 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1597 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1599 /* Reset the Queue pointers */
1600 OMAP_DMA_CHAIN_QINIT(chain_id);
1602 /* Errata - put in the old value */
1603 dma_write(sys_cf, OCP_SYSCONFIG);
1606 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1608 /* Get the index of the ongoing DMA in chain */
1610 * @brief omap_get_dma_chain_index - Get the element and frame index
1611 * of the ongoing DMA in chain
1614 * @param ei - Element index
1615 * @param fi - Frame index
1617 * @return - Success : 0
1620 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1625 /* Check for input params */
1626 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1627 printk(KERN_ERR "Invalid chain id\n");
1631 /* Check if the chain exists */
1632 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1633 printk(KERN_ERR "Chain doesn't exists\n");
1639 channels = dma_linked_lch[chain_id].linked_dmach_q;
1641 /* Get the current channel */
1642 lch = channels[dma_linked_lch[chain_id].q_head];
1644 *ei = dma_read(CCEN(lch));
1645 *fi = dma_read(CCFN(lch));
1649 EXPORT_SYMBOL(omap_get_dma_chain_index);
1652 * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1653 * ongoing DMA in chain
1657 * @return - Success : Destination position
1660 int omap_get_dma_chain_dst_pos(int chain_id)
1665 /* Check for input params */
1666 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1667 printk(KERN_ERR "Invalid chain id\n");
1671 /* Check if the chain exists */
1672 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1673 printk(KERN_ERR "Chain doesn't exists\n");
1677 channels = dma_linked_lch[chain_id].linked_dmach_q;
1679 /* Get the current channel */
1680 lch = channels[dma_linked_lch[chain_id].q_head];
1682 return dma_read(CDAC(lch));
1684 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1687 * @brief omap_get_dma_chain_src_pos - Get the source position
1688 * of the ongoing DMA in chain
1691 * @return - Success : Destination position
1694 int omap_get_dma_chain_src_pos(int chain_id)
1699 /* Check for input params */
1700 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1701 printk(KERN_ERR "Invalid chain id\n");
1705 /* Check if the chain exists */
1706 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1707 printk(KERN_ERR "Chain doesn't exists\n");
1711 channels = dma_linked_lch[chain_id].linked_dmach_q;
1713 /* Get the current channel */
1714 lch = channels[dma_linked_lch[chain_id].q_head];
1716 return dma_read(CSAC(lch));
1718 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1721 /*----------------------------------------------------------------------------*/
1723 #ifdef CONFIG_ARCH_OMAP1
1725 static int omap1_dma_handle_ch(int ch)
1729 if (enable_1510_mode && ch >= 6) {
1730 csr = dma_chan[ch].saved_csr;
1731 dma_chan[ch].saved_csr = 0;
1733 csr = dma_read(CSR(ch));
1734 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1735 dma_chan[ch + 6].saved_csr = csr >> 7;
1738 if ((csr & 0x3f) == 0)
1740 if (unlikely(dma_chan[ch].dev_id == -1)) {
1741 printk(KERN_WARNING "Spurious interrupt from DMA channel "
1742 "%d (CSR %04x)\n", ch, csr);
1745 if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1746 printk(KERN_WARNING "DMA timeout with device %d\n",
1747 dma_chan[ch].dev_id);
1748 if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1749 printk(KERN_WARNING "DMA synchronization event drop occurred "
1750 "with device %d\n", dma_chan[ch].dev_id);
1751 if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1752 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1753 if (likely(dma_chan[ch].callback != NULL))
1754 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1758 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1760 int ch = ((int) dev_id) - 1;
1764 int handled_now = 0;
1766 handled_now += omap1_dma_handle_ch(ch);
1767 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1768 handled_now += omap1_dma_handle_ch(ch + 6);
1771 handled += handled_now;
1774 return handled ? IRQ_HANDLED : IRQ_NONE;
1778 #define omap1_dma_irq_handler NULL
1781 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1783 static int omap2_dma_handle_ch(int ch)
1785 u32 status = dma_read(CSR(ch));
1788 if (printk_ratelimit())
1789 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", ch);
1790 dma_write(1 << ch, IRQSTATUS_L0);
1793 if (unlikely(dma_chan[ch].dev_id == -1)) {
1794 if (printk_ratelimit())
1795 printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
1796 "channel %d\n", status, ch);
1799 if (unlikely(status & OMAP_DMA_DROP_IRQ))
1801 "DMA synchronization event drop occurred with device "
1802 "%d\n", dma_chan[ch].dev_id);
1803 if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
1804 printk(KERN_INFO "DMA transaction error with device %d\n",
1805 dma_chan[ch].dev_id);
1806 if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1807 printk(KERN_INFO "DMA secure error with device %d\n",
1808 dma_chan[ch].dev_id);
1809 if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1810 printk(KERN_INFO "DMA misaligned error with device %d\n",
1811 dma_chan[ch].dev_id);
1813 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
1814 dma_write(1 << ch, IRQSTATUS_L0);
1816 /* If the ch is not chained then chain_id will be -1 */
1817 if (dma_chan[ch].chain_id != -1) {
1818 int chain_id = dma_chan[ch].chain_id;
1819 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1820 if (dma_read(CLNK_CTRL(ch)) & (1 << 15))
1821 dma_chan[dma_chan[ch].next_linked_ch].state =
1823 if (dma_linked_lch[chain_id].chain_mode ==
1824 OMAP_DMA_DYNAMIC_CHAIN)
1827 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1828 OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1830 status = dma_read(CSR(ch));
1833 if (likely(dma_chan[ch].callback != NULL))
1834 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1836 dma_write(status, CSR(ch));
1841 /* STATUS register count is from 1-32 while our is 0-31 */
1842 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1847 val = dma_read(IRQSTATUS_L0);
1849 if (printk_ratelimit())
1850 printk(KERN_WARNING "Spurious DMA IRQ\n");
1853 for (i = 0; i < dma_lch_count && val != 0; i++) {
1855 omap2_dma_handle_ch(i);
1862 static struct irqaction omap24xx_dma_irq = {
1864 .handler = omap2_dma_irq_handler,
1865 .flags = IRQF_DISABLED
1869 static struct irqaction omap24xx_dma_irq;
1872 /*----------------------------------------------------------------------------*/
1874 static struct lcd_dma_info {
1877 void (* callback)(u16 status, void *data);
1881 unsigned long addr, size;
1882 int rotate, data_type, xres, yres;
1888 int single_transfer;
1891 void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
1894 lcd_dma.addr = addr;
1895 lcd_dma.data_type = data_type;
1896 lcd_dma.xres = fb_xres;
1897 lcd_dma.yres = fb_yres;
1900 void omap_set_lcd_dma_src_port(int port)
1902 lcd_dma.src_port = port;
1905 void omap_set_lcd_dma_ext_controller(int external)
1907 lcd_dma.ext_ctrl = external;
1910 void omap_set_lcd_dma_single_transfer(int single)
1912 lcd_dma.single_transfer = single;
1916 void omap_set_lcd_dma_b1_rotation(int rotate)
1918 if (omap_dma_in_1510_mode()) {
1919 printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
1923 lcd_dma.rotate = rotate;
1926 void omap_set_lcd_dma_b1_mirror(int mirror)
1928 if (omap_dma_in_1510_mode()) {
1929 printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
1932 lcd_dma.mirror = mirror;
1935 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
1937 if (omap_dma_in_1510_mode()) {
1938 printk(KERN_ERR "DMA virtual resulotion is not supported "
1942 lcd_dma.vxres = vxres;
1945 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
1947 if (omap_dma_in_1510_mode()) {
1948 printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
1951 lcd_dma.xscale = xscale;
1952 lcd_dma.yscale = yscale;
/*
 * set_b1_regs() - program the buffer-1 LCD DMA registers (top/bottom
 * frame addresses, element/frame counts and index steps) from the
 * lcd_dma state, honouring rotation, mirroring, virtual resolution and
 * scaling.
 *
 * NOTE(review): this chunk appears truncated in the extract — the 'es'
 * element-size assignments, some case labels/else arms and several
 * register-mask lines are missing — so the code is left byte-identical
 * and only annotated.
 */
1955 static void set_b1_regs(void)
1957 unsigned long top, bottom;
1960 unsigned long en, fn;
1962 unsigned long vxres;
1963 unsigned int xscale, yscale;
/* Element size follows the configured pixel data type */
1965 switch (lcd_dma.data_type) {
1966 case OMAP_DMA_DATA_TYPE_S8:
1969 case OMAP_DMA_DATA_TYPE_S16:
1972 case OMAP_DMA_DATA_TYPE_S32:
/* Default to the real x resolution / unit scale when unset (0) */
1980 vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres;
1981 xscale = lcd_dma.xscale ? lcd_dma.xscale : 1;
1982 yscale = lcd_dma.yscale ? lcd_dma.yscale : 1;
1983 BUG_ON(vxres < lcd_dma.xres);
/* PIXADDR: byte address of framebuffer pixel (x, y);
 * PIXSTEP: DMA index step between two pixel positions (+1 bias per the
 * OMAP index-register convention). */
1984 #define PIXADDR(x,y) (lcd_dma.addr + ((y) * vxres * yscale + (x) * xscale) * es)
1985 #define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1)
/* For each rotation (0/90/180/270) and mirror setting pick the first
 * (top) and last (bottom) addresses plus element/frame index steps. */
1986 switch (lcd_dma.rotate) {
1988 if (!lcd_dma.mirror) {
1989 top = PIXADDR(0, 0);
1990 bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1991 /* 1510 DMA requires the bottom address to be 2 more
1992 * than the actual last memory access location. */
1993 if (omap_dma_in_1510_mode() &&
1994 lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
1996 ei = PIXSTEP(0, 0, 1, 0);
1997 fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1);
1999 top = PIXADDR(lcd_dma.xres - 1, 0);
2000 bottom = PIXADDR(0, lcd_dma.yres - 1);
2001 ei = PIXSTEP(1, 0, 0, 0);
2002 fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1);
2008 if (!lcd_dma.mirror) {
2009 top = PIXADDR(0, lcd_dma.yres - 1);
2010 bottom = PIXADDR(lcd_dma.xres - 1, 0);
2011 ei = PIXSTEP(0, 1, 0, 0);
2012 fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1);
2014 top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
2015 bottom = PIXADDR(0, 0);
2016 ei = PIXSTEP(0, 1, 0, 0);
2017 fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1);
2023 if (!lcd_dma.mirror) {
2024 top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
2025 bottom = PIXADDR(0, 0);
2026 ei = PIXSTEP(1, 0, 0, 0);
2027 fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0);
2029 top = PIXADDR(0, lcd_dma.yres - 1);
2030 bottom = PIXADDR(lcd_dma.xres - 1, 0);
2031 ei = PIXSTEP(0, 0, 1, 0);
2032 fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0);
2038 if (!lcd_dma.mirror) {
2039 top = PIXADDR(lcd_dma.xres - 1, 0);
2040 bottom = PIXADDR(0, lcd_dma.yres - 1);
2041 ei = PIXSTEP(0, 0, 0, 1);
2042 fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0);
2044 top = PIXADDR(0, 0);
2045 bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
2046 ei = PIXSTEP(0, 0, 0, 1);
2047 fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0);
2054 return; /* Suppress warning about uninitialized vars */
/* 1510: only the frame address registers exist */
2057 if (omap_dma_in_1510_mode()) {
2058 omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
2059 omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
2060 omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
2061 omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
/* 1610+: program addresses, counts and control registers */
2067 omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
2068 omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
2069 omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
2070 omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
2072 omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
2073 omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
2075 w = omap_readw(OMAP1610_DMA_LCD_CSDP);
2077 w |= lcd_dma.data_type;
2078 omap_writew(w, OMAP1610_DMA_LCD_CSDP);
2080 w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2081 /* Always set the source port as SDRAM for now*/
2083 if (lcd_dma.callback != NULL)
2084 w |= 1 << 1; /* Block interrupt enable */
2087 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
/* Plain un-rotated, un-scaled transfers need no index registers */
2089 if (!(lcd_dma.rotate || lcd_dma.mirror ||
2090 lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale))
2093 w = omap_readw(OMAP1610_DMA_LCD_CCR);
2094 /* Set the double-indexed addressing mode */
2096 omap_writew(w, OMAP1610_DMA_LCD_CCR);
2098 omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
2099 omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
2100 omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
2103 static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id)
2107 w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2108 if (unlikely(!(w & (1 << 3)))) {
2109 printk(KERN_WARNING "Spurious LCD DMA IRQ\n");
2114 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2116 if (lcd_dma.callback != NULL)
2117 lcd_dma.callback(w, lcd_dma.cb_data);
2122 int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
2125 spin_lock_irq(&lcd_dma.lock);
2126 if (lcd_dma.reserved) {
2127 spin_unlock_irq(&lcd_dma.lock);
2128 printk(KERN_ERR "LCD DMA channel already reserved\n");
2132 lcd_dma.reserved = 1;
2133 spin_unlock_irq(&lcd_dma.lock);
2134 lcd_dma.callback = callback;
2135 lcd_dma.cb_data = data;
2137 lcd_dma.single_transfer = 0;
2143 lcd_dma.ext_ctrl = 0;
2144 lcd_dma.src_port = 0;
2149 void omap_free_lcd_dma(void)
2151 spin_lock(&lcd_dma.lock);
2152 if (!lcd_dma.reserved) {
2153 spin_unlock(&lcd_dma.lock);
2154 printk(KERN_ERR "LCD DMA is not reserved\n");
2158 if (!enable_1510_mode)
2159 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
2160 OMAP1610_DMA_LCD_CCR);
2161 lcd_dma.reserved = 0;
2162 spin_unlock(&lcd_dma.lock);
2165 void omap_enable_lcd_dma(void)
2169 /* Set the Enable bit only if an external controller is
2170 * connected. Otherwise the OMAP internal controller will
2171 * start the transfer when it gets enabled.
2173 if (enable_1510_mode || !lcd_dma.ext_ctrl)
2176 w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2178 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2182 w = omap_readw(OMAP1610_DMA_LCD_CCR);
2184 omap_writew(w, OMAP1610_DMA_LCD_CCR);
2187 void omap_setup_lcd_dma(void)
2189 BUG_ON(lcd_dma.active);
2190 if (!enable_1510_mode) {
2191 /* Set some reasonable defaults */
2192 omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
2193 omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
2194 omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
2197 if (!enable_1510_mode) {
2200 w = omap_readw(OMAP1610_DMA_LCD_CCR);
2201 /* If DMA was already active set the end_prog bit to have
2202 * the programmed register set loaded into the active
2205 w |= 1 << 11; /* End_prog */
2206 if (!lcd_dma.single_transfer)
2207 w |= (3 << 8); /* Auto_init, repeat */
2208 omap_writew(w, OMAP1610_DMA_LCD_CCR);
2212 void omap_stop_lcd_dma(void)
2217 if (enable_1510_mode || !lcd_dma.ext_ctrl)
2220 w = omap_readw(OMAP1610_DMA_LCD_CCR);
2222 omap_writew(w, OMAP1610_DMA_LCD_CCR);
2224 w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2226 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2229 /*----------------------------------------------------------------------------*/
/*
 * omap_init_dma() - arch_initcall: map the DMA controller, size the
 * logical-channel tables, allocate per-channel (and, on OMAP2+, chain)
 * state, print hardware capabilities, and register the interrupt
 * handlers (one IRQ per channel on OMAP1, a single shared line on
 * OMAP2/3, plus the LCD DMA IRQ on OMAP1).
 *
 * NOTE(review): this chunk appears truncated in the extract — error
 * unwinding, several reads and the final return are missing — so the
 * code is left byte-identical and only annotated.
 */
2231 static int __init omap_init_dma(void)
/* Pick register base and logical channel count per SoC family */
2235 if (cpu_class_is_omap1()) {
2236 omap_dma_base = (void __iomem *)IO_ADDRESS(OMAP1_DMA_BASE);
2237 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
2238 } else if (cpu_is_omap24xx()) {
2239 omap_dma_base = (void __iomem *)IO_ADDRESS(OMAP24XX_DMA4_BASE);
2240 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2241 } else if (cpu_is_omap34xx()) {
2242 omap_dma_base = (void __iomem *)IO_ADDRESS(OMAP34XX_DMA4_BASE);
2243 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2245 pr_err("DMA init failed for unsupported omap\n");
/* Per-channel state table */
2249 dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
/* Chain bookkeeping only exists on OMAP2+ class hardware */
2254 if (cpu_class_is_omap2()) {
2255 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2256 dma_lch_count, GFP_KERNEL);
2257 if (!dma_linked_lch) {
2263 if (cpu_is_omap15xx()) {
2264 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2266 enable_1510_mode = 1;
2267 } else if (cpu_is_omap16xx() || cpu_is_omap730()) {
2268 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2270 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2271 (dma_read(CAPS_0_U) << 16) |
2273 (dma_read(CAPS_1_U) << 16) |
2275 dma_read(CAPS_2), dma_read(CAPS_3),
2277 if (!enable_1510_mode) {
2280 /* Disable OMAP 3.0/3.1 compatibility mode. */
2284 dma_chan_count = 16;
2287 if (cpu_is_omap16xx()) {
2290 /* this would prevent OMAP sleep */
2291 w = omap_readw(OMAP1610_DMA_LCD_CTRL);
2293 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
2295 } else if (cpu_class_is_omap2()) {
2296 u8 revision = dma_read(REVISION) & 0xff;
2297 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2298 revision >> 4, revision & 0xf);
2299 dma_chan_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2305 spin_lock_init(&lcd_dma.lock);
2306 spin_lock_init(&dma_chan_lock);
/* Mark every channel free; 1510 mode only has per-IRQ channels 0..5 */
2308 for (ch = 0; ch < dma_chan_count; ch++) {
2310 dma_chan[ch].dev_id = -1;
2311 dma_chan[ch].next_lch = -1;
2313 if (ch >= 6 && enable_1510_mode)
2316 if (cpu_class_is_omap1()) {
2317 /* request_irq() doesn't like dev_id (ie. ch) being
2318 * zero, so we have to kludge around this. */
2319 r = request_irq(omap1_dma_irq[ch],
2320 omap1_dma_irq_handler, 0, "DMA",
/* On failure, free every IRQ requested so far before bailing */
2325 printk(KERN_ERR "unable to request IRQ %d "
2326 "for DMA (error %d)\n",
2327 omap1_dma_irq[ch], r);
2328 for (i = 0; i < ch; i++)
2329 free_irq(omap1_dma_irq[i],
2336 if (cpu_is_omap2430() || cpu_is_omap34xx())
2337 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2338 DMA_DEFAULT_FIFO_DEPTH, 0);
/* OMAP2/3 share a single sDMA interrupt line */
2340 if (cpu_class_is_omap2())
2341 setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
2343 /* FIXME: Update LCD DMA to work on 24xx */
2344 if (cpu_class_is_omap1()) {
2345 r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
2350 printk(KERN_ERR "unable to request IRQ for LCD DMA "
2352 for (i = 0; i < dma_chan_count; i++)
2353 free_irq(omap1_dma_irq[i], (void *) (i + 1));
2361 arch_initcall(omap_init_dma);
2363 EXPORT_SYMBOL(omap_get_dma_src_pos);
2364 EXPORT_SYMBOL(omap_get_dma_dst_pos);
2365 EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
2366 EXPORT_SYMBOL(omap_clear_dma);
2367 EXPORT_SYMBOL(omap_set_dma_priority);
2368 EXPORT_SYMBOL(omap_request_dma);
2369 EXPORT_SYMBOL(omap_free_dma);
2370 EXPORT_SYMBOL(omap_start_dma);
2371 EXPORT_SYMBOL(omap_stop_dma);
2372 EXPORT_SYMBOL(omap_set_dma_callback);
2373 EXPORT_SYMBOL(omap_enable_dma_irq);
2374 EXPORT_SYMBOL(omap_disable_dma_irq);
2376 EXPORT_SYMBOL(omap_set_dma_transfer_params);
2377 EXPORT_SYMBOL(omap_set_dma_color_mode);
2378 EXPORT_SYMBOL(omap_set_dma_write_mode);
2380 EXPORT_SYMBOL(omap_set_dma_src_params);
2381 EXPORT_SYMBOL(omap_set_dma_src_index);
2382 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
2383 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
2385 EXPORT_SYMBOL(omap_set_dma_dest_params);
2386 EXPORT_SYMBOL(omap_set_dma_dest_index);
2387 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
2388 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
2390 EXPORT_SYMBOL(omap_set_dma_params);
2392 EXPORT_SYMBOL(omap_dma_link_lch);
2393 EXPORT_SYMBOL(omap_dma_unlink_lch);
2395 EXPORT_SYMBOL(omap_request_lcd_dma);
2396 EXPORT_SYMBOL(omap_free_lcd_dma);
2397 EXPORT_SYMBOL(omap_enable_lcd_dma);
2398 EXPORT_SYMBOL(omap_setup_lcd_dma);
2399 EXPORT_SYMBOL(omap_stop_lcd_dma);
2400 EXPORT_SYMBOL(omap_set_lcd_dma_b1);
2401 EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
2402 EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
2403 EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
2404 EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
2405 EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale);
2406 EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);