2 * linux/drivers/media/mmc/omap.c
4 * Copyright (C) 2004 Nokia Corporation
5 * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #include <linux/config.h>
16 // #define CONFIG_MMC_DEBUG
17 #ifdef CONFIG_MMC_DEBUG
18 #define DEBUG /* for dev_dbg(), pr_debug(), etc */
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/init.h>
24 #include <linux/ioport.h>
25 #include <linux/platform_device.h>
26 #include <linux/interrupt.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/delay.h>
29 #include <linux/spinlock.h>
30 #include <linux/timer.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/protocol.h>
33 #include <linux/mmc/card.h>
34 #include <linux/clk.h>
38 #include <asm/scatterlist.h>
39 #include <asm/mach-types.h>
41 #include <asm/arch/board.h>
42 #include <asm/arch/gpio.h>
43 #include <asm/arch/dma.h>
44 #include <asm/arch/mux.h>
45 #include <asm/arch/fpga.h>
46 #include <asm/arch/tps65010.h>
47 #include <asm/arch/menelaus.h>
/* NOTE(review): every line in this file carries a stale line-number prefix
 * from a corrupted extraction, and many lines are missing (the embedded
 * numbering jumps).  Restore from the pristine driver source before building.
 * In this block the #else/#endif of the DBG conditional are among the
 * missing lines. */
51 #define DRIVER_NAME "mmci-omap"
53 #ifdef CONFIG_MMC_DEBUG
54 #define DBG(x...) pr_debug(x)
55 //#define DBG(x...) printk(x)
/* Non-debug builds compile DBG() away to nothing. */
57 #define DBG(x...) do { } while (0)
60 /* Specifies how often in millisecs to poll for card status changes
61 * when the cover switch is open */
62 #define OMAP_MMC_SWITCH_POLL_DELAY 500
/* Cover-switch polling enabled by default; user-togglable through the
 * enable_poll sysfs attribute defined further below. */
64 static int mmc_omap_enable_poll = 1;
/* Per-controller driver state.
 * NOTE(review): corrupted extraction — many members referenced elsewhere in
 * this file (base, irq, fclk, iclk, dma_ch, dma_lock, dma_len, sg_idx,
 * sg_len, buffer, total_bytes_left, use_dma, power_pin, switch_pin, wp_pin,
 * dev, suspended, ...) are on lines that were dropped here. */
66 struct mmc_omap_host {
/* Current MMC request and its command/data phases being processed. */
69 struct mmc_request * mrq;
70 struct mmc_command * cmd;
71 struct mmc_data * data;
72 struct mmc_host * mmc;
74 unsigned char id; /* 16xx chips have 2 MMC blocks */
/* bus_mode is what the MMC layer asked for; hw_bus_mode is what the
 * Menelaus transceiver is actually set to (see mmc_omap_start_command). */
79 unsigned char bus_mode;
80 unsigned char hw_bus_mode;
85 u32 buffer_bytes_left;
/* Watchdog for stuck data transfers (see mmc_omap_xfer_timeout). */
87 struct timer_list xfer_timer;
/* Transfer-completion bookkeeping: both BRS and DMA completion must be
 * seen before the request is finished. */
90 unsigned brs_received:1, dma_done:1;
91 unsigned dma_is_read:1;
92 unsigned dma_in_use:1;
/* Timer used to release the DMA channel lazily after a transfer. */
95 struct timer_list dma_timer;
/* Cover-switch (card detect) handling: deferred work, debounce/poll
 * timer, and last observed cover state. */
102 struct work_struct switch_work;
103 struct timer_list switch_timer;
104 int switch_last_state;
/* Returns nonzero when the card-cover switch GPIO reads "open"; a negative
 * switch_pin means no cover switch is wired (early-return line missing in
 * this corrupted extraction). */
108 mmc_omap_cover_is_open(struct mmc_omap_host *host)
110 if (host->switch_pin < 0)
112 return omap_get_gpio_datain(host->switch_pin);
/* sysfs show: reports "open"/"closed" for the cover switch. */
116 mmc_omap_show_cover_switch(struct device *dev,
117 struct device_attribute *attr, char *buf)
119 struct mmc_omap_host *host = dev_get_drvdata(dev);
121 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" : "closed");
124 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
/* sysfs show: current value of the global enable_poll flag. */
127 mmc_omap_show_enable_poll(struct device *dev,
128 struct device_attribute *attr, char *buf)
130 return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
/* sysfs store: parses an integer; when polling is (re)enabled and a cover
 * switch exists, kicks the switch work to start polling immediately. */
134 mmc_omap_store_enable_poll(struct device *dev,
135 struct device_attribute *attr, const char *buf,
140 if (sscanf(buf, "%10d", &enable_poll) != 1)
143 if (enable_poll != mmc_omap_enable_poll) {
144 struct mmc_omap_host *host = dev_get_drvdata(dev);
146 mmc_omap_enable_poll = enable_poll;
147 if (enable_poll && host->switch_pin >= 0)
148 schedule_work(&host->switch_work);
/* 0664: writable by owner/group, readable by all. */
153 static DEVICE_ATTR(enable_poll, 0664,
154 mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
/* Programs the controller registers for one MMC command: derives the
 * hardware response type and command type from cmd->flags, toggles the
 * Menelaus transceiver between open-drain/push-pull when needed, then
 * writes ARG/IE/CMD to start the command.
 * NOTE(review): corrupted extraction — local declarations (resptype,
 * cmdtype, cmdreg), several else-branches and the cmdreg bit-OR lines for
 * opendrain/busy/read are missing here. */
157 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
163 pr_debug("MMC%d: CMD%d, argument 0x%08x%s%s%s%s\n",
164 host->id, cmd->opcode, cmd->arg,
165 (cmd->flags & MMC_RSP_SHORT) ? ", 32-bit response" : "",
166 (cmd->flags & MMC_RSP_LONG) ? ", 128-bit response" : "",
167 (cmd->flags & MMC_RSP_CRC) ? ", CRC" : "",
168 (cmd->flags & MMC_RSP_BUSY) ? ", busy notification" : "");
176 * On 24xx we may have external MMC transceiver on Menelaus.
177 * In that case we need to manually toggle between open-drain
178 * and push-pull states.
/* Only reprogram the transceiver when the requested mode differs from
 * the mode currently set in hardware. */
180 if (omap_has_menelaus() && (host->bus_mode != host->hw_bus_mode)) {
181 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
182 menelaus_mmc_opendrain(1);
184 menelaus_mmc_opendrain(0);
185 host->hw_bus_mode = host->bus_mode;
/* Map MMC layer response flags onto the controller's 2-bit resptype. */
188 if (!(cmd->flags & MMC_RSP_PRESENT))
189 resptype = 0; /* Resp 0 */
191 if (cmd->flags & MMC_RSP_136)
192 resptype = 2; /* Resp 2 */
194 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
195 resptype = 3; /* Resp 3 */
197 resptype = 1; /* Resp 1, Resp 1b */
200 /* Protocol layer does not provide command type, but our hardware
202 * any data transfer means adtc type (but that information is not
203 * in command structure, so we flagged it into host struct.)
204 * However, telling bc, bcr and ac apart based on response is
206 * CMD0 = bc = resp0 CMD15 = ac = resp0
207 * CMD2 = bcr = resp2 CMD10 = ac = resp2
209 * Resolve to best guess with some exception testing:
210 * resp0 -> bc, except CMD15 = ac
211 * rest are ac, except if opendrain
214 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
215 } else if (resptype == 0 && cmd->opcode != 15) {
216 cmdtype = OMAP_MMC_CMDTYPE_BC;
217 } else if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) {
218 cmdtype = OMAP_MMC_CMDTYPE_BCR;
220 cmdtype = OMAP_MMC_CMDTYPE_AC;
/* Assemble the CMD register: opcode | resptype<<8 | cmdtype<<12; the
 * additional flag bits OR'd in below are on lines lost to corruption. */
223 cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
225 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
228 if (cmd->flags & MMC_RSP_BUSY)
231 if (host->data && !(host->data->flags & MMC_DATA_WRITE))
/* Functional clock stays on for the duration of the command; the
 * matching clk_disable() is in the completion paths. */
234 clk_enable(host->fclk);
236 OMAP_MMC_WRITE(host->base, CTO, 200);
237 OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
238 OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
/* Unmask every status source the IRQ handler cares about. */
239 OMAP_MMC_WRITE(host->base, IE,
240 OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
241 OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
242 OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
243 OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
244 OMAP_MMC_STAT_END_OF_DATA);
/* Writing CMD starts the transaction. */
245 OMAP_MMC_WRITE(host->base, CMD, cmdreg);
/* Completes the data phase of a request: stops/unmaps DMA, disables the
 * functional clock, and either finishes the request or issues the STOP
 * command.  NOTE(review): corrupted extraction — several lines (locals,
 * braces, the dma_unmap_sg direction argument, the stop-command branch
 * condition) are missing throughout this region. */
249 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
251 del_timer_sync(&host->xfer_timer);
253 if (host->dma_in_use) {
254 enum dma_data_direction dma_data_dir;
256 BUG_ON(host->dma_ch < 0);
/* On error, abort any in-flight DMA before unmapping. */
257 if (data->error != MMC_ERR_NONE)
258 omap_stop_dma(host->dma_ch);
259 /* Release DMA channel lazily */
260 mod_timer(&host->dma_timer, jiffies + HZ);
261 if (data->flags & MMC_DATA_WRITE)
262 dma_data_dir = DMA_TO_DEVICE;
264 dma_data_dir = DMA_FROM_DEVICE;
265 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
/* Balances the clk_enable() done in mmc_omap_start_command(). */
270 clk_disable(host->fclk);
272 /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
273 * dozens of requests until the card finishes writing data.
274 * It'd be cheaper to just wait till an EOFB interrupt arrives...
279 mmc_request_done(host->mmc, data->mrq);
/* Otherwise (stop command present) send it now. */
283 mmc_omap_start_command(host, data->stop);
/* End-of-data (BRS) seen: without DMA we can finish immediately; with
 * DMA we record brs_received under dma_lock and only finish once the DMA
 * callback has also completed (dma_done). */
287 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
292 if (!host->dma_in_use) {
293 mmc_omap_xfer_done(host, data);
297 spin_lock_irqsave(&host->dma_lock, flags);
301 host->brs_received = 1;
302 spin_unlock_irqrestore(&host->dma_lock, flags);
304 mmc_omap_xfer_done(host, data);
/* Lazy DMA-channel release, fired one second after the last transfer. */
308 mmc_omap_dma_timer(unsigned long data)
310 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
312 DBG("MMC%d: Freeing DMA channel %d\n", host->id, host->dma_ch);
313 BUG_ON(host->dma_ch < 0);
314 omap_free_dma(host->dma_ch);
/* DMA completion: mirror of mmc_omap_end_of_data() — finish the transfer
 * only when BRS has already arrived, otherwise flag dma_done. */
319 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
325 spin_lock_irqsave(&host->dma_lock, flags);
326 if (host->brs_received)
330 spin_unlock_irqrestore(&host->dma_lock, flags);
332 mmc_omap_xfer_done(host, data);
/* Reads the command response out of the RSP0..RSP7 register pairs (each
 * pair yields one 32-bit word) and, for commands without a data phase or
 * with an error, finishes the request.  card_ready fakes R1_READY_FOR_DATA
 * for cards that never raise it after EOFB (see the IRQ handler).
 * NOTE(review): corrupted extraction — the cmd->resp[] assignment targets
 * and several braces/conditions are on missing lines. */
336 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd, int card_ready)
340 if (cmd->flags & MMC_RSP_136) {
341 /* Response type 2 */
343 OMAP_MMC_READ(host->base, RSP0) |
344 (OMAP_MMC_READ(host->base, RSP1) << 16);
346 OMAP_MMC_READ(host->base, RSP2) |
347 (OMAP_MMC_READ(host->base, RSP3) << 16);
349 OMAP_MMC_READ(host->base, RSP4) |
350 (OMAP_MMC_READ(host->base, RSP5) << 16);
352 OMAP_MMC_READ(host->base, RSP6) |
353 (OMAP_MMC_READ(host->base, RSP7) << 16);
354 DBG("MMC%d: Response %08x %08x %08x %08x\n", host->id,
355 cmd->resp[0], cmd->resp[1],
356 cmd->resp[2], cmd->resp[3]);
358 /* Response types 1, 1b, 3, 4, 5, 6 */
/* Short responses live in the top register pair only. */
360 OMAP_MMC_READ(host->base, RSP6) |
361 (OMAP_MMC_READ(host->base, RSP7) << 16);
362 DBG("MMC%d: Response %08x\n", host->id, cmd->resp[0]);
364 pr_debug("MMC%d: Faking card ready based on EOFB\n", host->id);
365 cmd->resp[0] |= R1_READY_FOR_DATA;
/* No data phase, or command failed: the request ends here. */
369 if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
370 DBG("MMC%d: End request, err %x\n", host->id, cmd->error);
371 if (host->data != NULL)
372 del_timer_sync(&host->xfer_timer);
374 clk_disable(host->fclk);
375 mmc_request_done(host->mmc, cmd->mrq);
/* Data-transfer watchdog: marks the transfer timed out and pseudo-resets
 * the MMC core logic by toggling CON bit 11, since the controller is
 * observed to get stuck otherwise. */
380 mmc_omap_xfer_timeout(unsigned long data)
382 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
384 printk(KERN_ERR "MMC%d: Data xfer timeout\n", host->id);
385 if (host->data != NULL) {
386 host->data->error |= MMC_ERR_TIMEOUT;
387 /* Perform a pseudo-reset of the MMC core logic, since
388 * the controller seems to get really stuck */
389 OMAP_MMC_WRITE(host->base, CON, OMAP_MMC_READ(host->base, CON) & ~(1 << 11));
390 OMAP_MMC_WRITE(host->base, CON, OMAP_MMC_READ(host->base, CON) | (1 << 11));
391 mmc_omap_xfer_done(host, host->data);
/* Loads the current scatterlist segment (host->sg_idx) into the PIO
 * buffer pointers, clamping to the bytes actually remaining. */
397 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
399 struct scatterlist *sg;
401 sg = host->data->sg + host->sg_idx;
402 host->buffer_bytes_left = sg->length;
/* page_address() implies the segment must be in lowmem — kmap is not
 * used here. */
403 host->buffer = page_address(sg->page) + sg->offset;
404 if (host->buffer_bytes_left > host->total_bytes_left)
405 host->buffer_bytes_left = host->total_bytes_left;
/* PIO transfer of up to one FIFO's worth of 16-bit words in the given
 * direction (write != 0 means host-to-card).
 * NOTE(review): corrupted extraction — the declarations of n/reg/p, the
 * sg_idx increment and the read/write loop bodies are partly missing. */
410 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
416 if (host->buffer_bytes_left == 0) {
418 BUG_ON(host->sg_idx == host->sg_len);
419 mmc_omap_sg_to_buf(host);
422 if (n > host->buffer_bytes_left)
423 n = host->buffer_bytes_left;
424 host->buffer_bytes_left -= n;
425 host->total_bytes_left -= n;
426 host->data->bytes_xfered += n;
428 /* Optimize the loop a bit by calculating the register only
430 reg = host->base + OMAP_MMC_REG_DATA;
435 __raw_writew(*p++, reg);
438 *p++ = __raw_readw(reg);
/* Debug helper: prints the mnemonic of every set STAT bit. */
443 static inline void mmc_omap_report_irq(u16 status)
445 static const char *mmc_omap_status_bits[] = {
446 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
447 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
451 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
452 if (status & (1 << i)) {
455 printk("%s", mmc_omap_status_bits[i]);
/* Main controller interrupt handler.  Drains STAT in a loop, acking each
 * batch of bits, moving PIO data, collecting command/data errors, and
 * finally dispatching to cmd_done / xfer_done / end_of_data.
 * NOTE(review): corrupted extraction — local declarations (status,
 * end_command, end_transfer, transfer_error, card_ready, ...), many
 * braces/else lines and the final return are missing; do not restructure
 * without the pristine source. */
460 static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs)
462 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
/* No command or data outstanding: ack and mask everything, then bail. */
469 if (host->cmd == NULL && host->data == NULL) {
470 status = OMAP_MMC_READ(host->base, STAT);
471 printk(KERN_INFO "MMC%d: Spurious interrupt 0x%04x\n", host->id, status);
473 OMAP_MMC_WRITE(host->base, STAT, status);
474 OMAP_MMC_WRITE(host->base, IE, 0);
/* Loop until STAT reads back empty; writing status back clears bits. */
484 while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
485 OMAP_MMC_WRITE(host->base, STAT, status); // Reset status bits
486 #ifdef CONFIG_MMC_DEBUG
487 printk(KERN_DEBUG "\tMMC IRQ %04x (CMD %d): ", status,
488 host->cmd != NULL ? host->cmd->opcode : -1);
489 mmc_omap_report_irq(status);
/* PIO path: service FIFO-full/empty while bytes remain. */
492 if (host->total_bytes_left) {
493 if ((status & OMAP_MMC_STAT_A_FULL) ||
494 (status & OMAP_MMC_STAT_END_OF_DATA))
495 mmc_omap_xfer_data(host, 0);
496 if (status & OMAP_MMC_STAT_A_EMPTY)
497 mmc_omap_xfer_data(host, 1);
500 if (status & OMAP_MMC_STAT_END_OF_DATA) {
501 // Block sent/received
505 if (status & OMAP_MMC_STAT_DATA_TOUT) {
507 printk(KERN_DEBUG "MMC%d: Data timeout\n", host->id);
509 host->data->error |= MMC_ERR_TIMEOUT;
514 if (status & OMAP_MMC_STAT_DATA_CRC) {
517 host->data->error |= MMC_ERR_BADCRC;
518 printk(KERN_DEBUG "MMC%d: Data CRC error, bytes left %d\n",
519 host->id, host->total_bytes_left);
522 printk(KERN_DEBUG "MMC%d: Data CRC error\n",
/* Command timeout is routine for probing commands (CID/OP_COND/APP)
 * and while the cover is open, so only log the unexpected cases. */
527 if (status & OMAP_MMC_STAT_CMD_TOUT) {
528 /* Timeouts are routine with some commands */
530 if (host->cmd->opcode != MMC_ALL_SEND_CID &&
531 host->cmd->opcode != MMC_SEND_OP_COND &&
532 host->cmd->opcode != MMC_APP_CMD &&
533 !mmc_omap_cover_is_open(host))
534 printk(KERN_ERR "MMC%d: Command timeout, CMD%d\n",
535 host->id, host->cmd->opcode);
536 host->cmd->error |= MMC_ERR_TIMEOUT;
541 if (status & OMAP_MMC_STAT_CMD_CRC) {
544 printk(KERN_ERR "MMC%d: Command CRC error (CMD%d, arg 0x%08x)\n",
545 host->id, host->cmd->opcode,
547 host->cmd->error |= MMC_ERR_BADCRC;
550 printk(KERN_ERR "MMC%d: Command CRC error without cmd?\n", host->id);
553 if (status & OMAP_MMC_STAT_OCR_BUSY) {
554 /* OCR Busy ... happens a lot */
555 if (host->cmd && host->cmd->opcode != MMC_SEND_OP_COND
556 && host->cmd->opcode != MMC_SET_RELATIVE_ADDR) {
557 DBG("MMC%d: OCR busy error, CMD%d\n",
558 host->id, host->cmd->opcode);
562 if (status & OMAP_MMC_STAT_CARD_ERR) {
/* STOP_TRANSMISSION may set must-ignore status bits; filter those
 * out before treating it as a real card error. */
563 if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
564 u32 response = OMAP_MMC_READ(host->base, RSP6)
565 | (OMAP_MMC_READ(host->base, RSP7) << 16);
566 /* STOP sometimes sets must-ignore bits */
567 if (!(response & (R1_CC_ERROR
569 | R1_COM_CRC_ERROR))) {
576 printk(KERN_DEBUG "MMC%d: Card status error (CMD%d)\n",
577 host->id, host->cmd->opcode);
579 host->cmd->error |= MMC_ERR_FAILED;
583 host->data->error |= MMC_ERR_FAILED;
589 * NOTE: On 1610 the END_OF_CMD may come too early when
592 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
593 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
594 // End of command phase
/* Workaround: fake R1_READY_FOR_DATA later, because some cards
 * raise EOFB and then never set the ready bit, which would make
 * the MMC layer poll status forever. */
598 * Some cards produce EOFB interrupt and never
599 * raise R1_READY_FOR_DATA bit after that.
600 * To avoid infinite card status polling loop,
601 * we must fake that bit to MMC layer.
603 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
604 (status & OMAP_MMC_STAT_END_BUSY)) {
/* Dispatch accumulated completion events outside the STAT loop. */
610 mmc_omap_cmd_done(host, host->cmd, card_ready);
613 mmc_omap_xfer_done(host, host->data);
614 else if (end_transfer)
615 mmc_omap_end_of_data(host, host->data);
/* Cover-switch GPIO interrupt: disables further edge triggering, then
 * either schedules the switch work immediately (cover opened with a card
 * present, so user-space learns ASAP) or defers it via switch_timer to
 * debounce the GPIO.  NOTE(review): corrupted extraction — the
 * detect_now logic and several braces are on missing lines. */
620 static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs)
622 struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;
623 int cover_open, detect_now;
625 cover_open = mmc_omap_cover_is_open(host);
626 DBG("MMC%d cover is now %s\n", host->id,
627 cover_open ? "open" : "closed");
/* Type 0 disables the edge trigger; re-armed in the work handler. */
628 set_irq_type(OMAP_GPIO_IRQ(host->switch_pin), 0);
630 if (host->switch_last_state != cover_open) {
631 /* If the cover was just opened and a card is inserted,
632 * we want to inform user-space about the event as soon as
635 struct mmc_card *card;
637 list_for_each_entry(card, &host->mmc->cards, node)
638 if (mmc_card_present(card))
643 schedule_work(&host->switch_work);
645 /* Delay the switch work a little bit to get rid of the GPIO
647 mod_timer(&host->switch_timer,
648 jiffies + msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY) / 2);
/* Poll/debounce timer expiry just defers to the work handler. */
654 static void mmc_omap_switch_timer(unsigned long arg)
656 struct mmc_omap_host *host = (struct mmc_omap_host *) arg;
658 schedule_work(&host->switch_work);
661 /* FIXME: Handle card insertion and removal properly. Maybe use a mask
/* Menelaus card-detect callback (24xx); currently only logs events. */
663 static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
665 if (machine_is_omap_h4()) {
667 printk("XXX card in slot 1\n");
669 printk("XXX card in slot 2\n");
671 /* Assume card detect connected to cover switch */
673 printk("XXX cover open\n");
675 printk("XXX cover closed\n");
/* Workqueue handler: re-arms the GPIO edge trigger, emits a uevent on
 * cover-state change, rescans cards, and re-arms the poll timer while
 * the cover stays open. */
679 static void mmc_omap_switch_handler(void *data)
681 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
682 struct mmc_card *card;
683 static int complained = 0;
684 int cards = 0, cover_open;
686 if (host->switch_pin == -1)
688 set_irq_type(OMAP_GPIO_IRQ(host->switch_pin), IRQT_RISING | IRQT_FALLING);
689 cover_open = mmc_omap_cover_is_open(host);
690 if (cover_open != host->switch_last_state) {
691 kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
692 host->switch_last_state = cover_open;
694 DBG("MMC cover switch handler started\n");
695 mmc_detect_change(host->mmc, 0);
696 list_for_each_entry(card, &host->mmc->cards, node) {
697 if (mmc_card_present(card))
700 DBG("MMC%d: %d card(s) present\n", host->id, cards);
703 printk(KERN_INFO "MMC%d: cover is open\n", host->id);
/* Keep polling while the cover is open and there is either a card
 * present or polling is globally enabled. */
706 if (cover_open && (cards || mmc_omap_enable_poll))
707 mod_timer(&host->switch_timer, jiffies +
708 msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
714 /* prepare to transfer the next segment of a scatterlist */
/* Programs one DMA segment: computes the frame size from the block size
 * (clamped to the FIFO depth), sets src/dst endpoints and burst/packing
 * per direction, writes the BUF threshold register, then sets the
 * frame/count transfer parameters.
 * NOTE(review): corrupted extraction — declarations of frame/count/buf/
 * src_port/dst_port/sync_dev, the sg_idx increment and several closing
 * braces are missing. */
716 mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
718 int dma_ch = host->dma_ch;
719 unsigned long data_addr;
722 struct scatterlist *sg = &data->sg[host->sg_idx];
/* Physical address of the data FIFO register, for the DMA port. */
727 data_addr = (unsigned long)io_v2p((void __force *) host->base) + OMAP_MMC_REG_DATA;
728 frame = 1 << data->blksz_bits;
729 count = (u32)sg_dma_len(sg);
731 /* the MMC layer is confused about single block writes... */
732 if ((data->blocks == 1) && (count > (1 << data->blksz_bits))) {
733 pr_debug("patch bogus single block length! %d > %d\n",
737 host->dma_len = count;
739 /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
740 * Use 16 or 32 word frames when the blocksize is at least that large.
741 * Blocksize is usually 512 bytes; but not for some SD reads.
743 if (cpu_is_omap15xx() && frame > 32)
/* Read: FIFO is the constant-address source, memory post-increments. */
750 if (!(data->flags & MMC_DATA_WRITE)) {
751 buf = 0x800f | ((frame - 1) << 8);
753 if (cpu_class_is_omap1()) {
754 src_port = OMAP_DMA_PORT_TIPB;
755 dst_port = OMAP_DMA_PORT_EMIFF;
757 if (cpu_is_omap24xx())
758 sync_dev = OMAP24XX_DMA_MMC1_RX;
760 omap_set_dma_src_params(dma_ch, src_port,
761 OMAP_DMA_AMODE_CONSTANT,
763 omap_set_dma_dest_params(dma_ch, dst_port,
764 OMAP_DMA_AMODE_POST_INC,
765 sg_dma_address(sg), 0, 0);
766 omap_set_dma_dest_data_pack(dma_ch, 1);
767 omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
/* Write: mirror image — memory is the post-increment source. */
769 buf = 0x0f80 | ((frame - 1) << 0);
771 if (cpu_class_is_omap1()) {
772 src_port = OMAP_DMA_PORT_EMIFF;
773 dst_port = OMAP_DMA_PORT_TIPB;
775 if (cpu_is_omap24xx())
776 sync_dev = OMAP24XX_DMA_MMC1_TX;
778 omap_set_dma_dest_params(dma_ch, dst_port,
779 OMAP_DMA_AMODE_CONSTANT,
781 omap_set_dma_src_params(dma_ch, src_port,
782 OMAP_DMA_AMODE_POST_INC,
783 sg_dma_address(sg), 0, 0);
784 omap_set_dma_src_data_pack(dma_ch, 1);
785 omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
788 /* Max limit for DMA frame count is 0xffff */
789 if (unlikely(count > 0xffff))
792 OMAP_MMC_WRITE(host->base, BUF, buf);
793 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
794 frame, count, OMAP_DMA_SYNC_FRAME,
798 /* a scatterlist segment completed */
/* DMA channel callback: logs error status, accounts the transferred
 * bytes, then either queues the next segment or signals DMA completion. */
799 static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
801 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
802 struct mmc_data *mmcdat = host->data;
804 if (unlikely(host->dma_ch < 0)) {
805 printk(KERN_ERR "MMC%d: DMA callback while DMA not enabled\n",
809 /* FIXME: We really should do something to _handle_ the errors */
810 if (ch_status & OMAP_DMA_TOUT_IRQ) {
811 printk(KERN_ERR "MMC%d: DMA timeout\n", host->id);
814 if (ch_status & OMAP_DMA_DROP_IRQ) {
815 printk(KERN_ERR "MMC%d: DMA sync error\n", host->id);
818 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
819 /* REVISIT we should be able to avoid getting IRQs with
820 * just SYNC status ...
822 if ((ch_status & ~OMAP1_DMA_SYNC_IRQ))
823 pr_debug("MMC%d: DMA channel status: %04x\n",
824 host->id, ch_status);
827 mmcdat->bytes_xfered += host->dma_len;
829 pr_debug("\tMMC DMA %d bytes CB %04x (%d segments to go), %p\n",
830 host->dma_len, ch_status,
831 host->sg_len - host->sg_idx - 1, host->data);
/* More segments pending: prepare and start the next one; otherwise
 * flag completion to the xfer_done path. */
834 if (host->sg_idx < host->sg_len) {
835 mmc_omap_prepare_dma(host, host->data);
836 omap_start_dma(host->dma_ch);
838 mmc_omap_dma_done(host, host->data);
/* Acquires (or reuses) a DMA channel for the transfer direction.  A
 * channel already held for the same direction is reused; a channel held
 * for the opposite direction is freed first.  Returns 0 on success.
 * NOTE(review): corrupted extraction — the host->id branching between the
 * MMC1/MMC2 sync devices and the error-return paths are partly missing. */
841 static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
843 const char *dev_name;
844 int sync_dev, dma_ch, is_read, r;
846 is_read = !(data->flags & MMC_DATA_WRITE);
/* Cancel the lazy-release timer so the channel is not freed under us. */
847 del_timer_sync(&host->dma_timer);
848 if (host->dma_ch >= 0) {
849 if (is_read == host->dma_is_read)
851 omap_free_dma(host->dma_ch);
857 sync_dev = OMAP_DMA_MMC_RX;
858 dev_name = "MMC1 read";
860 sync_dev = OMAP_DMA_MMC2_RX;
861 dev_name = "MMC2 read";
865 sync_dev = OMAP_DMA_MMC_TX;
866 dev_name = "MMC1 write";
868 sync_dev = OMAP_DMA_MMC2_TX;
869 dev_name = "MMC2 write";
872 r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
875 printk("MMC%d: omap_request_dma() failed with %d\n",
879 host->dma_ch = dma_ch;
880 host->dma_is_read = is_read;
/* Command-only request: clear the SDIO timeout bits and program the
 * maximum command timeout. */
885 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
889 reg = OMAP_MMC_READ(host->base, SDIO);
891 OMAP_MMC_WRITE(host->base, SDIO, reg);
892 /* Set maximum timeout */
893 OMAP_MMC_WRITE(host->base, CTO, 0xff);
/* Data request: converts the MMC layer's ns/clk timeout into controller
 * cycles (assuming a 20 MHz reference, 500 ns per cycle), pads it x16 for
 * slow first reads, and enables the timeout multiplier when the value
 * exceeds the 16-bit DTO register. */
896 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
901 /* Convert ns to clock cycles by assuming 20MHz frequency
902 * 1 cycle at 20MHz = 500 ns
904 timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
906 /* Some cards require more time to do at least the first read operation */
907 timeout = timeout << 4;
909 /* Check if we need to use timeout multiplier register */
910 reg = OMAP_MMC_READ(host->base, SDIO);
911 if (timeout > 0xffff) {
916 OMAP_MMC_WRITE(host->base, SDIO, reg);
917 OMAP_MMC_WRITE(host->base, DTO, timeout);
/* Sets up the data phase of a request: block count/length registers,
 * timeouts, and either a DMA mapping (preferred, when every segment is
 * block-aligned) or PIO buffer state.  Also arms the 500 ms transfer
 * watchdog.  NOTE(review): corrupted extraction — the no-data early path,
 * sg_len declaration, the use_dma disqualification branch and several
 * braces are on missing lines. */
921 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
923 struct mmc_data *data = req->data;
924 int i, use_dma, block_size;
/* No data phase: clear the block registers and fall back to the
 * command-only timeout. */
929 OMAP_MMC_WRITE(host->base, BLEN, 0);
930 OMAP_MMC_WRITE(host->base, NBLK, 0);
931 OMAP_MMC_WRITE(host->base, BUF, 0);
932 host->dma_in_use = 0;
933 set_cmd_timeout(host, req);
938 block_size = 1 << data->blksz_bits;
/* Hardware registers hold (count - 1) / (length - 1). */
940 OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
941 OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
942 set_data_timeout(host, req);
944 /* cope with calling layer confusion; it issues "single
945 * block" writes using multi-block scatterlists.
947 sg_len = (data->blocks == 1) ? 1 : data->sg_len;
949 /* Only do DMA for entire blocks */
950 use_dma = host->use_dma;
952 for (i = 0; i < sg_len; i++) {
953 if ((data->sg[i].length % block_size) != 0) {
/* DMA path: map the scatterlist, program the first segment, and reset
 * the BRS/DMA completion flags. */
962 if (mmc_omap_get_dma_channel(host, data) == 0) {
963 enum dma_data_direction dma_data_dir;
965 if (data->flags & MMC_DATA_WRITE)
966 dma_data_dir = DMA_TO_DEVICE;
968 dma_data_dir = DMA_FROM_DEVICE;
970 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
971 sg_len, dma_data_dir);
972 host->total_bytes_left = 0;
973 mmc_omap_prepare_dma(host, req->data);
974 host->brs_received = 0;
976 host->dma_in_use = 1;
/* PIO fallback: FIFO thresholds and CPU-copied buffer state. */
983 OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
984 host->total_bytes_left = data->blocks * block_size;
985 host->sg_len = sg_len;
986 mmc_omap_sg_to_buf(host);
987 host->dma_in_use = 0;
989 mod_timer(&host->xfer_timer, jiffies + msecs_to_jiffies(500));
991 pr_debug("MMC%d: %s %s %s, DTO %d cycles + %d ns, "
992 "%d blocks of %d bytes, %d segments\n",
993 host->id, use_dma ? "DMA" : "PIO",
994 (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
995 (data->flags & MMC_DATA_WRITE) ? "write" : "read",
996 data->timeout_clks, data->timeout_ns, data->blocks,
997 block_size, host->sg_len);
/* Matches a card's CID (manufacturer, product name, hw/fw revision)
 * against a quirk table of cards that need the power-up delay applied in
 * mmc_omap_request().  Returns nonzero for a known-broken card. */
1000 static inline int is_broken_card(struct mmc_card *card)
1003 struct mmc_cid *c = &card->cid;
1004 static const struct broken_card_cid {
1005 unsigned int manfid;
1007 unsigned char hwrev;
1008 unsigned char fwrev;
1009 } broken_cards[] = {
1010 { 0x00150000, "\x30\x30\x30\x30\x30\x30\x15\x00", 0x06, 0x03 },
1013 for (i = 0; i < sizeof(broken_cards)/sizeof(broken_cards[0]); i++) {
1014 const struct broken_card_cid *b = broken_cards + i;
1016 if (b->manfid != c->manfid)
1018 if (memcmp(b->prod_name, c->prod_name, sizeof(b->prod_name)) != 0)
1020 if (b->hwrev != c->hwrev || b->fwrev != c->fwrev)
/* MMC host-ops .request entry point: applies the broken-card SEND_CSD
 * delay workaround when needed, prepares the data phase, starts the
 * command, then kicks DMA if it is in use.
 * NOTE(review): corrupted extraction — host->mrq assignment, braces and
 * the complained-flag update are on missing lines; the schedule_timeout
 * sleep can only be valid because the request path runs in process
 * context (guarded by the in_interrupt() check). */
1027 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
1029 struct mmc_omap_host *host = mmc_priv(mmc);
1031 WARN_ON(host->mrq != NULL);
1035 /* Some cards (vendor left unnamed to protect the guilty) seem to
1036 * require this delay after power-up. Otherwise we'll get mysterious
1038 if (req->cmd->opcode == MMC_SEND_CSD) {
1039 struct mmc_card *card;
1040 int broken_present = 0;
1042 list_for_each_entry(card, &mmc->cards, node) {
1043 if (is_broken_card(card)) {
1048 if (broken_present) {
1049 static int complained = 0;
1052 printk(KERN_WARNING "MMC%d: Broken card workaround enabled\n",
1056 if (in_interrupt()) {
1058 printk(KERN_ERR "Sleeping in IRQ handler, FIXME please!\n");
/* ~100 ms uninterruptible delay before SEND_CSD for quirky cards. */
1062 set_current_state(TASK_UNINTERRUPTIBLE);
1063 schedule_timeout(100 * HZ / 1000);
1068 /* only touch fifo AFTER the controller readies it */
1069 mmc_omap_prepare_data(host, req);
1070 mmc_omap_start_command(host, req->cmd);
1071 if (host->dma_in_use)
1072 omap_start_dma(host->dma_ch);
/* Innovator board: slot power is behind an FPGA register bit (bit 3 of
 * OMAP1510_FPGA_POWER); compiled out on other configs. */
1075 static void innovator_fpga_socket_power(int on)
1077 #if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
1080 fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
1081 OMAP1510_FPGA_POWER);
1083 fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
1084 OMAP1510_FPGA_POWER);
1090 * Turn the socket power on/off. Innovator uses FPGA, most boards
1091 * probably use GPIO.
/* Board-dependent slot power switching: FPGA (Innovator), TPS65010 GPIO
 * (H2/H3), CON bit 11 (24xx), or a plain GPIO via power_pin. */
1093 static void mmc_omap_power(struct mmc_omap_host *host, int on)
1096 if (machine_is_omap_innovator())
1097 innovator_fpga_socket_power(1);
1098 else if (machine_is_omap_h2())
1099 tps65010_set_gpio_out_value(GPIO3, HIGH);
1100 else if (machine_is_omap_h3())
1101 /* GPIO 4 of TPS65010 sends SD_EN signal */
1102 tps65010_set_gpio_out_value(GPIO4, HIGH);
1103 else if (cpu_is_omap24xx()) {
1104 u16 reg = OMAP_MMC_READ(host->base, CON);
1105 OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
1107 if (host->power_pin >= 0)
1108 omap_set_gpio_dataout(host->power_pin, 1);
/* Power-off mirrors each of the power-on cases above. */
1110 if (machine_is_omap_innovator())
1111 innovator_fpga_socket_power(0);
1112 else if (machine_is_omap_h2())
1113 tps65010_set_gpio_out_value(GPIO3, LOW);
1114 else if (machine_is_omap_h3())
1115 tps65010_set_gpio_out_value(GPIO4, LOW);
1116 else if (cpu_is_omap24xx()) {
1117 u16 reg = OMAP_MMC_READ(host->base, CON);
1118 OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
1120 if (host->power_pin >= 0)
1121 omap_set_gpio_dataout(host->power_pin, 0);
/* MMC host-ops .set_ios: computes the clock divisor from fclk, applies
 * bus width / power mode / bus mode, writes CON (twice, as a workaround
 * for a sync issue at high arm_per rates), and on power-up clocks the
 * card with a dummy initialization command, busy-waiting for completion.
 * NOTE(review): corrupted extraction — declarations of dsor/realclock/i,
 * the dsor clamping and bus-width bit, and several braces are missing. */
1125 static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1127 struct mmc_omap_host *host = mmc_priv(mmc);
1131 DBG("MMC%d: set_ios: clock %dHz busmode %d powermode %d Vdd %d.%02d\n",
1132 host->id, ios->clock, ios->bus_mode, ios->power_mode,
1133 ios->vdd / 100, ios->vdd % 100);
/* Some MMC stacks request too low a clock during power-up; clamp to
 * the 400 kHz identification frequency. */
1135 if (ios->power_mode == MMC_POWER_UP && ios->clock < 400000)
1136 realclock = 400000; /* Fix for broken stack */
1138 realclock = ios->clock;
1140 if (ios->clock == 0)
1143 int func_clk_rate = clk_get_rate(host->fclk);
1145 dsor = func_clk_rate / realclock;
/* Round the divisor up so the resulting rate never exceeds the
 * requested clock. */
1149 if (func_clk_rate / dsor > realclock)
1156 if (ios->bus_width == MMC_BUS_WIDTH_4)
1160 switch (ios->power_mode) {
1162 mmc_omap_power(host, 0);
1166 mmc_omap_power(host, 1);
1171 host->bus_mode = ios->bus_mode;
/* Keep the Menelaus transceiver in sync with the requested bus mode. */
1172 if (omap_has_menelaus()) {
1173 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
1174 menelaus_mmc_opendrain(1);
1176 menelaus_mmc_opendrain(0);
1178 host->hw_bus_mode = host->bus_mode;
1180 clk_enable(host->fclk);
1182 /* On insanely high arm_per frequencies something sometimes
1183 * goes somehow out of sync, and the POW bit is not being set,
1184 * which results in the while loop below getting stuck.
1185 * Writing to the CON register twice seems to do the trick. */
1186 for (i = 0; i < 2; i++)
1187 OMAP_MMC_WRITE(host->base, CON, dsor);
1188 if (ios->power_mode == MMC_POWER_UP) {
1189 /* Wait a little while for the power regulator to
1192 /* Send clock cycles, poll completion */
1193 OMAP_MMC_WRITE(host->base, IE, 0);
1194 OMAP_MMC_WRITE(host->base, STAT, 0xffff);
/* CMD register bit 7 = initialization stream (80 clock cycles). */
1195 OMAP_MMC_WRITE(host->base, CMD, 1<<7);
1196 while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
1197 OMAP_MMC_WRITE(host->base, STAT, 1);
1199 clk_disable(host->fclk);
/* MMC host-ops .get_ro: reads the write-protect GPIO.
 * NOTE(review): the `host->wp_pin &&` guard treats pin 0 as "absent" and
 * negative pins as present — other pin fields in this driver use
 * `>= 0` for validity; looks like it should be `host->wp_pin >= 0 &&`,
 * but cannot be fixed safely from this corrupted excerpt — verify
 * against the pristine source. */
1202 static int mmc_omap_get_ro(struct mmc_host *mmc)
1204 struct mmc_omap_host *host = mmc_priv(mmc);
1206 return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
/* Host operations table registered with the MMC core. */
1209 static struct mmc_host_ops mmc_omap_ops = {
1210 .request = mmc_omap_request,
1211 .set_ios = mmc_omap_set_ios,
1212 .get_ro = mmc_omap_get_ro,
/* Platform-driver probe: validates the MEM/IRQ resources, allocates the
 * mmc_host, initializes timers and clocks, claims GPIOs for power and
 * cover switch, registers IRQs and sysfs attributes, and hooks up the
 * Menelaus callback on boards that have one.
 * NOTE(review): corrupted extraction — error labels, several assignments
 * (host->mmc, use_dma, dma_ch, mmc_add_host call), braces and return
 * statements are on missing lines; the error path is explicitly marked
 * FIXME-incomplete even in the original. */
1215 static int __init mmc_omap_probe(struct platform_device *pdev)
1217 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
1218 struct mmc_host *mmc;
1219 struct mmc_omap_host *host = NULL;
1222 if (pdev->resource[0].flags != IORESOURCE_MEM
1223 || pdev->resource[1].flags != IORESOURCE_IRQ) {
1224 printk(KERN_ERR "mmc_omap_probe: invalid resource type\n");
1228 if (!request_mem_region(pdev->resource[0].start,
1229 pdev->resource[0].end - pdev->resource[0].start + 1,
1231 dev_dbg(&pdev->dev, "request_mem_region failed\n");
1235 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
1241 host = mmc_priv(mmc);
1244 spin_lock_init(&host->dma_lock);
/* Old-style timer setup: function + data cast of the host pointer. */
1245 init_timer(&host->dma_timer);
1246 host->dma_timer.function = mmc_omap_dma_timer;
1247 host->dma_timer.data = (unsigned long) host;
1249 init_timer(&host->xfer_timer);
1250 host->xfer_timer.function = mmc_omap_xfer_timeout;
1251 host->xfer_timer.data = (unsigned long) host;
1253 host->id = pdev->id;
/* 24xx needs the interface clock enabled in addition to fclk. */
1255 if (cpu_is_omap24xx()) {
1256 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1257 if (IS_ERR(host->iclk))
1259 clk_enable(host->iclk);
/* Functional clock name differs between OMAP1 and OMAP2. */
1262 if (!cpu_is_omap24xx())
1263 host->fclk = clk_get(&pdev->dev, "mmc_ck");
1265 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1267 if (IS_ERR(host->fclk)) {
1268 ret = PTR_ERR(host->fclk);
1273 * Also, use minfo->cover to decide how to manage
1274 * the card detect sensing.
1276 host->power_pin = minfo->power_pin;
1277 host->switch_pin = minfo->switch_pin;
1278 host->wp_pin = minfo->wp_pin;
1282 host->irq = pdev->resource[1].start;
1283 host->base = (void __iomem *)pdev->resource[0].start;
1286 mmc->caps |= MMC_CAP_4_BIT_DATA;
1288 mmc->ops = &mmc_omap_ops;
1289 mmc->f_min = 400000;
1290 mmc->f_max = 24000000;
1291 mmc->ocr_avail = MMC_VDD_33_34;
1293 /* Use scatterlist DMA to reduce per-transfer costs.
1294 * NOTE max_seg_size assumption that small blocks aren't
1295 * normally used (except e.g. for reading SD registers).
1297 mmc->max_phys_segs = 32;
1298 mmc->max_hw_segs = 32;
1299 mmc->max_sectors = 120; /* NBLK max 11-bits, OMAP also limited by DMA */
1300 mmc->max_seg_size = mmc->max_sectors * 512;
1302 if (host->power_pin >= 0) {
1303 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1304 printk(KERN_ERR "MMC%d: Unable to get GPIO pin for MMC power\n",
1308 omap_set_gpio_direction(host->power_pin, 0);
1311 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1315 host->dev = &pdev->dev;
1316 platform_set_drvdata(pdev, host);
/* Cover-switch machinery is optional; on any failure the driver
 * degrades gracefully by setting switch_pin to -1. */
1320 if (host->switch_pin >= 0) {
1321 INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
1322 init_timer(&host->switch_timer);
1323 host->switch_timer.function = mmc_omap_switch_timer;
1324 host->switch_timer.data = (unsigned long) host;
1325 if (omap_request_gpio(host->switch_pin) != 0) {
1326 printk(KERN_WARNING "MMC%d: Unable to get GPIO pin for MMC cover switch\n",
1328 host->switch_pin = -1;
1332 omap_set_gpio_direction(host->switch_pin, 1);
1333 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1334 mmc_omap_switch_irq,
1335 SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
1338 printk(KERN_WARNING "MMC%d: Unable to get IRQ for MMC cover switch\n",
1340 omap_free_gpio(host->switch_pin);
1341 host->switch_pin = -1;
1344 ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
1346 ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
1348 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1351 printk(KERN_WARNING "MMC%d: Unable to create sysfs attributes\n",
1353 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1354 omap_free_gpio(host->switch_pin);
1355 host->switch_pin = -1;
1358 host->switch_last_state = mmc_omap_cover_is_open(host);
1359 if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
1360 schedule_work(&host->switch_work);
1363 if (omap_has_menelaus())
1364 menelaus_mmc_register(mmc_omap_switch_callback,
1365 (unsigned long)&host);
1371 /* FIXME: Free other resources too. */
1373 if (host->iclk && !IS_ERR(host->iclk))
1374 clk_put(host->iclk);
1375 if (host->fclk && !IS_ERR(host->fclk))
1376 clk_put(host->fclk);
1377 mmc_free_host(host->mmc);
/* Platform-driver remove: unwinds probe in reverse — host, IRQs, power,
 * GPIOs, sysfs files, timers, pending work, clocks, Menelaus hook, and
 * finally the memory region. */
1382 static int mmc_omap_remove(struct platform_device *pdev)
1384 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1386 platform_set_drvdata(pdev, NULL);
1389 mmc_remove_host(host->mmc);
1390 free_irq(host->irq, host);
1391 mmc_omap_power(host, 0);
1393 if (host->power_pin >= 0)
1394 omap_free_gpio(host->power_pin);
1395 if (host->switch_pin >= 0) {
1396 device_remove_file(&pdev->dev, &dev_attr_enable_poll);
1397 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1398 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1399 omap_free_gpio(host->switch_pin);
1400 host->switch_pin = -1;
1401 del_timer_sync(&host->switch_timer);
1402 flush_scheduled_work();
1404 if (host->iclk && !IS_ERR(host->iclk))
1405 clk_put(host->iclk);
1406 if (host->fclk && !IS_ERR(host->fclk))
1407 clk_put(host->fclk);
1408 mmc_free_host(host->mmc);
1411 if (omap_has_menelaus())
1412 menelaus_mmc_remove();
1414 release_mem_region(pdev->resource[0].start,
1415 pdev->resource[0].end - pdev->resource[0].start + 1);
/* Power-management suspend: no-op if already suspended; defers to the
 * MMC core and records the suspended flag on success.
 * NOTE(review): corrupted extraction — the enclosing #ifdef CONFIG_PM,
 * early returns and braces are on missing lines. */
1421 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1424 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1426 if (host && host->suspended)
1429 if (!irqs_disabled())
1433 ret = mmc_suspend_host(host->mmc, mesg);
1435 host->suspended = 1;
/* Resume mirrors suspend: no-op if not suspended, then let the MMC
 * core bring the host back and clear the flag. */
1440 static int mmc_omap_resume(struct platform_device *pdev)
1443 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1445 if (host && !host->suspended)
1449 ret = mmc_resume_host(host->mmc);
1451 host->suspended = 0;
/* Without CONFIG_PM the PM hooks compile out to NULL. */
1457 #define mmc_omap_suspend NULL
1458 #define mmc_omap_resume NULL
1461 static struct platform_driver mmc_omap_driver = {
1462 .probe = mmc_omap_probe,
1463 .remove = mmc_omap_remove,
1464 .suspend = mmc_omap_suspend,
1465 .resume = mmc_omap_resume,
1467 .name = DRIVER_NAME,
/* Module entry/exit: plain platform-driver registration. */
1471 static int __init mmc_omap_init(void)
1473 return platform_driver_register(&mmc_omap_driver);
1476 static void __exit mmc_omap_exit(void)
1478 platform_driver_unregister(&mmc_omap_driver);
1481 module_init(mmc_omap_init);
1482 module_exit(mmc_omap_exit);
1484 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1485 MODULE_LICENSE("GPL");
1486 MODULE_ALIAS(DRIVER_NAME);
1487 MODULE_AUTHOR("Juha Yrjölä");