2 * linux/drivers/media/mmc/omap.c
4 * Copyright (C) 2004 Nokia Corporation
5 * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #include <linux/config.h>
16 // #define CONFIG_MMC_DEBUG
17 #ifdef CONFIG_MMC_DEBUG
18 #define DEBUG /* for dev_dbg(), pr_debug(), etc */
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/init.h>
24 #include <linux/ioport.h>
25 #include <linux/platform_device.h>
26 #include <linux/interrupt.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/delay.h>
29 #include <linux/spinlock.h>
30 #include <linux/timer.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/protocol.h>
33 #include <linux/mmc/card.h>
34 #include <linux/clk.h>
38 #include <asm/scatterlist.h>
39 #include <asm/mach-types.h>
41 #include <asm/arch/board.h>
42 #include <asm/arch/gpio.h>
43 #include <asm/arch/dma.h>
44 #include <asm/arch/mux.h>
45 #include <asm/arch/fpga.h>
46 #include <asm/arch/tps65010.h>
47 #include <asm/arch/menelaus.h>
51 #define DRIVER_NAME "mmci-omap"
53 #ifdef CONFIG_MMC_DEBUG
54 #define DBG(x...) pr_debug(x)
55 //#define DBG(x...) printk(x)
57 #define DBG(x...) do { } while (0)
60 /* Specifies how often in millisecs to poll for card status changes
61 * when the cover switch is open */
62 #define OMAP_MMC_SWITCH_POLL_DELAY 500
64 static int mmc_omap_enable_poll = 1;
/*
 * NOTE(review): this listing is a lossy extraction -- the embedded
 * original line numbers are non-contiguous, and braces/blank lines
 * (and some fields/statements) were dropped.  Comments throughout this
 * file describe only what is visible; verify against pristine source.
 */
/*
 * Per-controller driver state, allocated as mmc_priv() of the
 * struct mmc_host created in mmc_omap_probe().  Several fields
 * (base, irq, fclk, iclk, dma_ch, sg_idx, sg_len, switch_pin,
 * power_pin, wp_pin, ...) are referenced elsewhere in the file but
 * not visible in this partial declaration.
 */
66 struct mmc_omap_host {
/* Current MMC-layer request and its command/data phases. */
69 struct mmc_request * mrq;
70 struct mmc_command * cmd;
71 struct mmc_data * data;
72 struct mmc_host * mmc;
74 unsigned char id; /* 16xx chips have 2 MMC blocks */
/* bus_mode is what the MMC core asked for; hw_bus_mode is what the
 * hardware (or Menelaus transceiver) is currently configured to. */
79 unsigned char bus_mode;
80 unsigned char hw_bus_mode;
85 u32 buffer_bytes_left;
/* Flags coordinating the "block received" IRQ with DMA completion
 * (see mmc_omap_end_of_data()/mmc_omap_dma_done()). */
89 unsigned brs_received:1, dma_done:1;
90 unsigned dma_is_read:1;
91 unsigned dma_in_use:1;
/* Lazy DMA-channel release timer (armed in mmc_omap_xfer_done()). */
94 struct timer_list dma_timer;
/* Cover-switch polling machinery. */
101 struct work_struct switch_work;
102 struct timer_list switch_timer;
103 int switch_last_state;
/*
 * Return nonzero when the card-slot cover is open, as read from the
 * cover-switch GPIO.  The early-return value for hosts without a
 * switch pin (switch_pin < 0) is not visible in this listing --
 * TODO confirm against pristine source.
 */
107 mmc_omap_cover_is_open(struct mmc_omap_host *host)
109 if (host->switch_pin < 0)
111 return omap_get_gpio_datain(host->switch_pin);
/*
 * sysfs "cover_switch" show handler: prints "open" or "closed"
 * depending on the cover-switch GPIO state.  Read-only (S_IRUGO).
 */
115 mmc_omap_show_cover_switch(struct device *dev,
116 struct device_attribute *attr, char *buf)
118 struct mmc_omap_host *host = dev_get_drvdata(dev);
120 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" : "closed");
123 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
/*
 * sysfs "enable_poll" show handler: reports the module-wide
 * cover-switch polling flag as a decimal integer.
 */
126 mmc_omap_show_enable_poll(struct device *dev,
127 struct device_attribute *attr, char *buf)
129 return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
/*
 * sysfs "enable_poll" store handler: parses an integer and, when the
 * value changes and the host has a cover-switch GPIO, kicks the
 * switch worker so polling starts immediately.  The declaration of
 * enable_poll and the error-return path are not visible here.
 */
133 mmc_omap_store_enable_poll(struct device *dev,
134 struct device_attribute *attr, const char *buf,
139 if (sscanf(buf, "%10d", &enable_poll) != 1)
142 if (enable_poll != mmc_omap_enable_poll) {
143 struct mmc_omap_host *host = dev_get_drvdata(dev);
145 mmc_omap_enable_poll = enable_poll;
146 if (enable_poll && host->switch_pin >= 0)
147 schedule_work(&host->switch_work);
/* Mode 0664: writable by owner/group, readable by all. */
152 static DEVICE_ATTR(enable_poll, 0664,
153 mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
/*
 * Program the controller to issue one MMC command: pick the response
 * type and command type from cmd->flags, build the CMD register
 * value, unmask the interrupt sources we care about, and write the
 * argument + command registers.  Writing CMD starts the transaction;
 * completion is handled in mmc_omap_irq().
 */
156 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
162 pr_debug("MMC%d: CMD%d, argument 0x%08x%s%s%s%s\n",
163 host->id, cmd->opcode, cmd->arg,
164 (cmd->flags & MMC_RSP_SHORT) ? ", 32-bit response" : "",
165 (cmd->flags & MMC_RSP_LONG) ? ", 128-bit response" : "",
166 (cmd->flags & MMC_RSP_CRC) ? ", CRC" : "",
167 (cmd->flags & MMC_RSP_BUSY) ? ", busy notification" : "");
/*
175 * On 24xx we may have external MMC transceiver on Menelaus.
176 * In that case we need to manually toggle between open-drain
177 * and push-pull states.
*/
179 if (omap_has_menelaus() && (host->bus_mode != host->hw_bus_mode)) {
180 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
181 menelaus_mmc_opendrain(1);
183 menelaus_mmc_opendrain(0);
184 host->hw_bus_mode = host->bus_mode;
/* Select hardware response type from the MMC core's response flags. */
187 if (!(cmd->flags & MMC_RSP_PRESENT))
188 resptype = 0; /* Resp 0 */
190 if (cmd->flags & MMC_RSP_136)
191 resptype = 2; /* Resp 2 */
193 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
194 resptype = 3; /* Resp 3 */
196 resptype = 1; /* Resp 1, Resp 1b */
/* Protocol layer does not provide command type, but our hardware
201 * any data transfer means adtc type (but that information is not
202 * in command structure, so we flagged it into host struct.)
203 * However, telling bc, bcr and ac apart based on response is
205 * CMD0 = bc = resp0 CMD15 = ac = resp0
206 * CMD2 = bcr = resp2 CMD10 = ac = resp2
208 * Resolve to best guess with some exception testing:
209 * resp0 -> bc, except CMD15 = ac
210 * rest are ac, except if opendrain
*/
213 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
214 } else if (resptype == 0 && cmd->opcode != 15) {
215 cmdtype = OMAP_MMC_CMDTYPE_BC;
216 } else if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) {
217 cmdtype = OMAP_MMC_CMDTYPE_BCR;
219 cmdtype = OMAP_MMC_CMDTYPE_AC;
222 cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
/* Extra cmdreg bits for open-drain / busy / read transfers; the
 * OR-in statements are not visible in this listing. */
224 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
227 if (cmd->flags & MMC_RSP_BUSY)
230 if (host->data && !(host->data->flags & MMC_DATA_WRITE))
/* Clock gated on per-command; disabled again on request completion. */
233 clk_enable(host->fclk);
235 OMAP_MMC_WRITE(host->base, CTO, 200);
236 OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
237 OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
238 OMAP_MMC_WRITE(host->base, IE,
239 OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
240 OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
241 OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
242 OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
243 OMAP_MMC_STAT_END_OF_DATA);
244 OMAP_MMC_WRITE(host->base, CMD, cmdreg);
/*
 * Data phase finished: tear down DMA mappings (releasing the channel
 * lazily via dma_timer), gate the functional clock, and either finish
 * the request or issue the STOP command if one was supplied.
 */
248 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
250 if (host->dma_in_use) {
251 enum dma_data_direction dma_data_dir;
253 BUG_ON(host->dma_ch < 0);
254 if (data->error != MMC_ERR_NONE)
255 omap_stop_dma(host->dma_ch);
/* Release DMA channel lazily */
257 mod_timer(&host->dma_timer, jiffies + HZ);
258 if (data->flags & MMC_DATA_WRITE)
259 dma_data_dir = DMA_TO_DEVICE;
261 dma_data_dir = DMA_FROM_DEVICE;
262 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
267 clk_disable(host->fclk);
/* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
270 * dozens of requests until the card finishes writing data.
271 * It'd be cheaper to just wait till an EOFB interrupt arrives...
*/
276 mmc_request_done(host->mmc, data->mrq);
/* Only reached when data->stop is set (guard not visible here). */
280 mmc_omap_start_command(host, data->stop);
/*
 * END_OF_DATA (block-received) interrupt handler.  Without DMA we can
 * complete the transfer immediately; with DMA we record brs_received
 * under dma_lock and only complete once the DMA callback has also
 * fired (the dma_done check is among the lines missing here).
 */
284 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
289 if (!host->dma_in_use) {
290 mmc_omap_xfer_done(host, data);
294 spin_lock_irqsave(&host->dma_lock, flags);
298 host->brs_received = 1;
299 spin_unlock_irqrestore(&host->dma_lock, flags);
301 mmc_omap_xfer_done(host, data);
/*
 * Lazy DMA-channel release, armed one second after the last transfer
 * in mmc_omap_xfer_done().  Resetting dma_ch to -1 afterwards is
 * presumably among the missing lines -- TODO confirm.
 */
305 mmc_omap_dma_timer(unsigned long data)
307 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
309 DBG("MMC%d: Freeing DMA channel %d\n", host->id, host->dma_ch);
310 BUG_ON(host->dma_ch < 0);
311 omap_free_dma(host->dma_ch);
/*
 * DMA side of transfer completion: under dma_lock, check whether the
 * block-received interrupt already arrived (brs_received); only then
 * finish the transfer, otherwise the setting of dma_done (not visible
 * here) lets mmc_omap_end_of_data() finish it later.
 */
316 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
322 spin_lock_irqsave(&host->dma_lock, flags);
323 if (host->brs_received)
327 spin_unlock_irqrestore(&host->dma_lock, flags);
329 mmc_omap_xfer_done(host, data);
/*
 * Read the command response registers into cmd->resp[].  The RSP
 * registers are 16 bits wide, so each 32-bit response word is built
 * from two reads.  If there is no data phase, or the command failed,
 * the request is completed here.
 */
333 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
337 if (cmd->flags & MMC_RSP_136) {
/* Response type 2 */
/* cmd->resp[0..3] assignments; lvalues are among the missing lines. */
340 OMAP_MMC_READ(host->base, RSP0) |
341 (OMAP_MMC_READ(host->base, RSP1) << 16);
343 OMAP_MMC_READ(host->base, RSP2) |
344 (OMAP_MMC_READ(host->base, RSP3) << 16);
346 OMAP_MMC_READ(host->base, RSP4) |
347 (OMAP_MMC_READ(host->base, RSP5) << 16);
349 OMAP_MMC_READ(host->base, RSP6) |
350 (OMAP_MMC_READ(host->base, RSP7) << 16);
351 DBG("MMC%d: Response %08x %08x %08x %08x\n", host->id,
352 cmd->resp[0], cmd->resp[1],
353 cmd->resp[2], cmd->resp[3]);
/* Response types 1, 1b, 3, 4, 5, 6 */
357 OMAP_MMC_READ(host->base, RSP6) |
358 (OMAP_MMC_READ(host->base, RSP7) << 16);
359 DBG("MMC%d: Response %08x\n", host->id, cmd->resp[0]);
362 if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
363 DBG("MMC%d: End request, err %x\n", host->id, cmd->error);
365 clk_disable(host->fclk);
366 mmc_request_done(host->mmc, cmd->mrq);
/*
 * PIO helper: point host->buffer at the current scatterlist segment
 * (host->sg_idx) and clamp buffer_bytes_left to the bytes remaining
 * in the whole transfer.
 */
372 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
374 struct scatterlist *sg;
376 sg = host->data->sg + host->sg_idx;
377 host->buffer_bytes_left = sg->length;
378 host->buffer = page_address(sg->page) + sg->offset;
379 if (host->buffer_bytes_left > host->total_bytes_left)
380 host->buffer_bytes_left = host->total_bytes_left;
/*
 * PIO data mover, called from the IRQ handler on FIFO full/empty.
 * Advances to the next scatterlist segment when the current one is
 * exhausted, then copies 16-bit words between the buffer and the
 * DATA register.  The computation of 'n' (FIFO chunk size) and the
 * loop around the __raw_* accesses are among the missing lines.
 */
385 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
391 if (host->buffer_bytes_left == 0) {
393 BUG_ON(host->sg_idx == host->sg_len);
394 mmc_omap_sg_to_buf(host);
397 if (n > host->buffer_bytes_left)
398 n = host->buffer_bytes_left;
399 host->buffer_bytes_left -= n;
400 host->total_bytes_left -= n;
401 host->data->bytes_xfered += n;
/* Optimize the loop a bit by calculating the register only
 * once (comment continuation missing in this listing). */
405 reg = host->base + OMAP_MMC_REG_DATA;
410 __raw_writew(*p++, reg);
413 *p++ = __raw_readw(reg);
/*
 * Debug helper: print the mnemonic of every set bit in the STAT
 * register (bit i maps to mmc_omap_status_bits[i]).
 */
418 static inline void mmc_omap_report_irq(u16 status)
420 static const char *mmc_omap_status_bits[] = {
421 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
422 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
426 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
427 if (status & (1 << i)) {
430 printk("%s", mmc_omap_status_bits[i]);
/*
 * Main controller interrupt handler.  Loops reading and acknowledging
 * STAT until it is clear, doing PIO transfers on FIFO events and
 * translating the various error bits into cmd->error / data->error.
 * Local flag variables (end_command, end_transfer, transfer_error,
 * ...) are set in lines missing from this listing; the tail of the
 * function dispatches to cmd_done / xfer_done / end_of_data.
 */
435 static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs)
437 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
/* Nothing in flight: ack and mask everything, report as spurious. */
443 if (host->cmd == NULL && host->data == NULL) {
444 status = OMAP_MMC_READ(host->base, STAT);
445 printk(KERN_INFO "MMC%d: Spurious interrupt 0x%04x\n", host->id, status);
447 OMAP_MMC_WRITE(host->base, STAT, status);
448 OMAP_MMC_WRITE(host->base, IE, 0);
457 while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
458 OMAP_MMC_WRITE(host->base, STAT, status); // Reset status bits
459 #ifdef CONFIG_MMC_DEBUG
460 printk(KERN_DEBUG "\tMMC IRQ %04x (CMD %d): ", status,
461 host->cmd != NULL ? host->cmd->opcode : -1);
462 mmc_omap_report_irq(status);
/* PIO: service FIFO almost-full / almost-empty events. */
465 if (host->total_bytes_left) {
466 if ((status & OMAP_MMC_STAT_A_FULL) ||
467 (status & OMAP_MMC_STAT_END_OF_DATA))
468 mmc_omap_xfer_data(host, 0);
469 if (status & OMAP_MMC_STAT_A_EMPTY)
470 mmc_omap_xfer_data(host, 1);
473 if (status & OMAP_MMC_STAT_END_OF_DATA) {
// Block sent/received
478 if (status & OMAP_MMC_STAT_DATA_TOUT) {
480 printk(KERN_DEBUG "MMC%d: Data timeout\n", host->id);
482 host->data->error |= MMC_ERR_TIMEOUT;
487 if (status & OMAP_MMC_STAT_DATA_CRC) {
490 host->data->error |= MMC_ERR_BADCRC;
491 printk(KERN_DEBUG "MMC%d: Data CRC error, bytes left %d\n",
492 host->id, host->total_bytes_left);
495 printk(KERN_DEBUG "MMC%d: Data CRC error\n",
500 if (status & OMAP_MMC_STAT_CMD_TOUT) {
/* Timeouts are routine with some commands */
503 if (host->cmd->opcode != MMC_ALL_SEND_CID &&
504 host->cmd->opcode != MMC_SEND_OP_COND &&
505 host->cmd->opcode != MMC_APP_CMD &&
506 !mmc_omap_cover_is_open(host))
507 printk(KERN_ERR "MMC%d: Command timeout, CMD%d\n",
508 host->id, host->cmd->opcode);
509 host->cmd->error |= MMC_ERR_TIMEOUT;
514 if (status & OMAP_MMC_STAT_CMD_CRC) {
517 printk(KERN_ERR "MMC%d: Command CRC error (CMD%d, arg 0x%08x)\n",
518 host->id, host->cmd->opcode,
520 host->cmd->error |= MMC_ERR_BADCRC;
523 printk(KERN_ERR "MMC%d: Command CRC error without cmd?\n", host->id);
526 if (status & OMAP_MMC_STAT_OCR_BUSY) {
/* OCR Busy ... happens a lot */
528 if (host->cmd && host->cmd->opcode != MMC_SEND_OP_COND
529 && host->cmd->opcode != MMC_SET_RELATIVE_ADDR) {
530 DBG("MMC%d: OCR busy error, CMD%d\n",
531 host->id, host->cmd->opcode);
535 if (status & OMAP_MMC_STAT_CARD_ERR) {
536 if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
537 u32 response = OMAP_MMC_READ(host->base, RSP6)
538 | (OMAP_MMC_READ(host->base, RSP7) << 16);
/* STOP sometimes sets must-ignore bits */
540 if (!(response & (R1_CC_ERROR
542 | R1_COM_CRC_ERROR))) {
549 printk(KERN_DEBUG "MMC%d: Card status error (CMD%d)\n",
550 host->id, host->cmd->opcode);
552 host->cmd->error |= MMC_ERR_FAILED;
556 host->data->error |= MMC_ERR_FAILED;
/*
562 * NOTE: On 1610 the END_OF_CMD may come too early when
 * (rest of this note is missing from the listing)
*/
565 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
566 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
// End of command phase
573 mmc_omap_cmd_done(host, host->cmd);
576 mmc_omap_xfer_done(host, host->data);
577 else if (end_transfer)
578 mmc_omap_end_of_data(host, host->data);
/*
 * Cover-switch GPIO interrupt: defer all work (debounce, card
 * detection) to the switch_work workqueue item.
 */
583 static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs)
585 struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;
587 DBG("MMC%d cover is now %s\n", host->id,
588 omap_get_gpio_datain(host->switch_pin) ? "open" : "closed");
589 schedule_work(&host->switch_work);
/*
 * Poll timer for the cover switch (armed by the switch handler every
 * OMAP_MMC_SWITCH_POLL_DELAY ms while the cover is open): re-runs the
 * switch worker.
 */
594 static void mmc_omap_switch_timer(unsigned long arg)
596 struct mmc_omap_host *host = (struct mmc_omap_host *) arg;
598 schedule_work(&host->switch_work);
/* FIXME: Handle card insertion and removal properly. Maybe use a mask
 * (rest of this FIXME is missing from the listing)
 */
/*
 * Menelaus card-detect callback, registered in mmc_omap_probe().
 * Currently a debug stub: only prints what changed.  The mmc_mask
 * tests guarding each printk are among the missing lines.
 */
603 static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
605 if (machine_is_omap_h4()) {
607 printk("XXX card in slot 1\n");
609 printk("XXX card in slot 2\n");
/* Assume card detect connected to cover switch */
613 printk("XXX cover open\n");
615 printk("XXX cover closed\n");
/*
 * Workqueue handler for cover-switch changes: emits a uevent when the
 * cover state changed, asks the MMC core to rescan, counts present
 * cards, and re-arms the poll timer while the cover remains open and
 * polling is enabled.  (The 'complained' warning path and several
 * closing branches are missing from this listing.)
 */
619 static void mmc_omap_switch_handler(void *data)
621 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
622 struct mmc_card *card;
623 static int complained = 0;
624 int cards = 0, cover_open;
626 if (host->switch_pin == -1)
628 cover_open = mmc_omap_cover_is_open(host);
629 if (cover_open != host->switch_last_state) {
630 kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
631 host->switch_last_state = cover_open;
633 DBG("MMC cover switch handler started\n");
634 mmc_detect_change(host->mmc, 0);
635 list_for_each_entry(card, &host->mmc->cards, node) {
636 if (mmc_card_present(card))
639 DBG("MMC%d: %d card(s) present\n", host->id, cards);
640 if (mmc_omap_cover_is_open(host)) {
642 printk(KERN_INFO "MMC%d: cover is open\n", host->id);
645 if (mmc_omap_enable_poll)
646 mod_timer(&host->switch_timer, jiffies +
647 msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
/* prepare to transfer the next segment of a scatterlist */
/*
 * Configure the system-DMA channel for one scatterlist segment:
 * constant-address side is the controller DATA register, post-
 * increment side is the segment's DMA address.  Also programs the
 * controller BUF register (FIFO thresholds) and the frame/count
 * transfer parameters.  Declarations of frame/count/buf/src_port/
 * dst_port/sync_dev are among the missing lines.
 */
655 mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
657 int dma_ch = host->dma_ch;
658 unsigned long data_addr;
661 struct scatterlist *sg = &data->sg[host->sg_idx];
666 data_addr = (unsigned long)io_v2p((void __force *) host->base) + OMAP_MMC_REG_DATA;
667 frame = 1 << data->blksz_bits;
668 count = (u32)sg_dma_len(sg);
/* the MMC layer is confused about single block writes... */
671 if ((data->blocks == 1) && (count > (1 << data->blksz_bits))) {
672 pr_debug("patch bogus single block length! %d > %d\n",
676 host->dma_len = count;
/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
679 * Use 16 or 32 word frames when the blocksize is at least that large.
680 * Blocksize is usually 512 bytes; but not for some SD reads.
*/
682 if (cpu_is_omap15xx() && frame > 32)
/* Read: device -> memory; BUF value sets RX FIFO threshold. */
689 if (!(data->flags & MMC_DATA_WRITE)) {
690 buf = 0x800f | ((frame - 1) << 8);
692 if (cpu_class_is_omap1()) {
693 src_port = OMAP_DMA_PORT_TIPB;
694 dst_port = OMAP_DMA_PORT_EMIFF;
696 if (cpu_is_omap24xx())
697 sync_dev = OMAP24XX_DMA_MMC1_RX;
699 omap_set_dma_src_params(dma_ch, src_port,
700 OMAP_DMA_AMODE_CONSTANT,
702 omap_set_dma_dest_params(dma_ch, dst_port,
703 OMAP_DMA_AMODE_POST_INC,
704 sg_dma_address(sg), 0, 0);
705 omap_set_dma_dest_data_pack(dma_ch, 1);
706 omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
/* Write: memory -> device; mirror image of the read setup. */
708 buf = 0x0f80 | ((frame - 1) << 0);
710 if (cpu_class_is_omap1()) {
711 src_port = OMAP_DMA_PORT_EMIFF;
712 dst_port = OMAP_DMA_PORT_TIPB;
714 if (cpu_is_omap24xx())
715 sync_dev = OMAP24XX_DMA_MMC1_TX;
717 omap_set_dma_dest_params(dma_ch, dst_port,
718 OMAP_DMA_AMODE_CONSTANT,
720 omap_set_dma_src_params(dma_ch, src_port,
721 OMAP_DMA_AMODE_POST_INC,
722 sg_dma_address(sg), 0, 0);
723 omap_set_dma_src_data_pack(dma_ch, 1);
724 omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
/* Max limit for DMA frame count is 0xffff */
728 if (unlikely(count > 0xffff))
731 OMAP_MMC_WRITE(host->base, BUF, buf);
732 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
733 frame, count, OMAP_DMA_SYNC_FRAME,
/* a scatterlist segment completed */
/*
 * System-DMA completion callback: logs error statuses, accounts the
 * transferred bytes, then either programs the next scatterlist
 * segment or signals overall DMA completion.  The sg_idx increment
 * is among the missing lines.
 */
738 static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
740 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
741 struct mmc_data *mmcdat = host->data;
743 if (unlikely(host->dma_ch < 0)) {
744 printk(KERN_ERR "MMC%d: DMA callback while DMA not enabled\n",
/* FIXME: We really should do something to _handle_ the errors */
749 if (ch_status & OMAP_DMA_TOUT_IRQ) {
750 printk(KERN_ERR "MMC%d: DMA timeout\n", host->id);
753 if (ch_status & OMAP_DMA_DROP_IRQ) {
754 printk(KERN_ERR "MMC%d: DMA sync error\n", host->id);
757 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
/* REVISIT we should be able to avoid getting IRQs with
759 * just SYNC status ...
*/
761 if ((ch_status & ~OMAP1_DMA_SYNC_IRQ))
762 pr_debug("MMC%d: DMA channel status: %04x\n",
763 host->id, ch_status);
766 mmcdat->bytes_xfered += host->dma_len;
768 pr_debug("\tMMC DMA %d bytes CB %04x (%d segments to go), %p\n",
769 host->dma_len, ch_status,
770 host->sg_len - host->sg_idx - 1, host->data);
773 if (host->sg_idx < host->sg_len) {
774 mmc_omap_prepare_dma(host, host->data);
775 omap_start_dma(host->dma_ch);
777 mmc_omap_dma_done(host, host->data);
/*
 * Obtain (or reuse) a system-DMA channel matching the transfer
 * direction.  A previously-held channel of the same direction is kept
 * (its lazy-release timer is cancelled); otherwise the old channel is
 * freed and a new one requested for the right sync device based on
 * host->id and direction.  Returns 0 on success (error path partially
 * missing from this listing).
 */
780 static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
782 const char *dev_name;
783 int sync_dev, dma_ch, is_read, r;
785 is_read = !(data->flags & MMC_DATA_WRITE);
786 del_timer_sync(&host->dma_timer);
787 if (host->dma_ch >= 0) {
788 if (is_read == host->dma_is_read)
790 omap_free_dma(host->dma_ch);
/* Read direction: host->id selects MMC1 vs MMC2 sync device. */
796 sync_dev = OMAP_DMA_MMC_RX;
797 dev_name = "MMC1 read";
799 sync_dev = OMAP_DMA_MMC2_RX;
800 dev_name = "MMC2 read";
/* Write direction. */
804 sync_dev = OMAP_DMA_MMC_TX;
805 dev_name = "MMC1 write";
807 sync_dev = OMAP_DMA_MMC2_TX;
808 dev_name = "MMC2 write";
811 r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
814 printk("MMC%d: omap_request_dma() failed with %d\n",
818 host->dma_ch = dma_ch;
819 host->dma_is_read = is_read;
/*
 * Command-only request: program the maximum command timeout.  The
 * SDIO register modification between the read and the write-back is
 * among the missing lines.
 */
824 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
828 reg = OMAP_MMC_READ(host->base, SDIO);
830 OMAP_MMC_WRITE(host->base, SDIO, reg);
/* Set maximum timeout */
832 OMAP_MMC_WRITE(host->base, CTO, 0xff);
/*
 * Program the data timeout (DTO) register from the request's timeout,
 * converted to clock cycles at an assumed 20MHz, padded 16x for slow
 * first reads.  When the value exceeds 16 bits, a timeout-multiplier
 * bit in SDIO is used (the exact reg/timeout adjustment lines are
 * missing from this listing).
 */
835 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
/* Convert ns to clock cycles by assuming 20MHz frequency
841 * 1 cycle at 20MHz = 500 ns
*/
843 timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
/* Some cards require more time to do at least the first read operation */
846 timeout = timeout << 4;
/* Check if we need to use timeout multiplier register */
849 reg = OMAP_MMC_READ(host->base, SDIO);
850 if (timeout > 0xffff) {
855 OMAP_MMC_WRITE(host->base, SDIO, reg);
856 OMAP_MMC_WRITE(host->base, DTO, timeout);
/*
 * Set up the data phase of a request: program block count/length and
 * timeouts, then choose DMA (whole-block segments only) or PIO and
 * initialize the corresponding transfer state.  The no-data early
 * path clears BLEN/NBLK/BUF and returns after set_cmd_timeout().
 */
860 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
862 struct mmc_data *data = req->data;
863 int i, use_dma, block_size;
/* No data phase: quiesce the data registers and bail out. */
868 OMAP_MMC_WRITE(host->base, BLEN, 0);
869 OMAP_MMC_WRITE(host->base, NBLK, 0);
870 OMAP_MMC_WRITE(host->base, BUF, 0);
871 host->dma_in_use = 0;
872 set_cmd_timeout(host, req);
877 block_size = 1 << data->blksz_bits;
879 OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
880 OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
881 set_data_timeout(host, req);
/* cope with calling layer confusion; it issues "single
884 * block" writes using multi-block scatterlists.
*/
886 sg_len = (data->blocks == 1) ? 1 : data->sg_len;
/* Only do DMA for entire blocks */
889 use_dma = host->use_dma;
891 for (i = 0; i < sg_len; i++) {
892 if ((data->sg[i].length % block_size) != 0) {
/* (use_dma = 0 / break presumably here -- lines missing) */
901 if (mmc_omap_get_dma_channel(host, data) == 0) {
902 enum dma_data_direction dma_data_dir;
904 if (data->flags & MMC_DATA_WRITE)
905 dma_data_dir = DMA_TO_DEVICE;
907 dma_data_dir = DMA_FROM_DEVICE;
909 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
910 sg_len, dma_data_dir);
911 host->total_bytes_left = 0;
912 mmc_omap_prepare_dma(host, req->data);
913 host->brs_received = 0;
915 host->dma_in_use = 1;
/* PIO fallback: program FIFO thresholds and stage first segment. */
922 OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
923 host->total_bytes_left = data->blocks * block_size;
924 host->sg_len = sg_len;
925 mmc_omap_sg_to_buf(host);
926 host->dma_in_use = 0;
929 pr_debug("MMC%d: %s %s %s, DTO %d cycles + %d ns, "
930 "%d blocks of %d bytes, %d segments\n",
931 host->id, use_dma ? "DMA" : "PIO",
932 (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
933 (data->flags & MMC_DATA_WRITE) ? "write" : "read",
934 data->timeout_clks, data->timeout_ns, data->blocks,
935 block_size, host->sg_len);
/*
 * Check the card's CID against a table of known-broken cards that
 * need the CSD-read delay workaround in mmc_omap_request().  Returns
 * nonzero on a full manfid/prod_name/hwrev/fwrev match (the struct
 * field declarations and return statements are missing here).
 */
938 static inline int is_broken_card(struct mmc_card *card)
941 struct mmc_cid *c = &card->cid;
942 static const struct broken_card_cid {
948 { 0x00150000, "\x30\x30\x30\x30\x30\x30\x15\x00", 0x06, 0x03 },
951 for (i = 0; i < sizeof(broken_cards)/sizeof(broken_cards[0]); i++) {
952 const struct broken_card_cid *b = broken_cards + i;
954 if (b->manfid != c->manfid)
956 if (memcmp(b->prod_name, c->prod_name, sizeof(b->prod_name)) != 0)
958 if (b->hwrev != c->hwrev || b->fwrev != c->fwrev)
/*
 * mmc_host_ops.request entry point.  Applies a 100ms sleep before
 * SEND_CSD for known-broken cards (skipped with a warning when in
 * interrupt context), then programs the data phase, issues the
 * command, and kicks DMA if it was set up.
 */
965 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
967 struct mmc_omap_host *host = mmc_priv(mmc);
969 WARN_ON(host->mrq != NULL);
/* Some cards (vendor left unnamed to protect the guilty) seem to
974 * require this delay after power-up. Otherwise we'll get mysterious
 * failures (rest of comment missing from this listing).
*/
976 if (req->cmd->opcode == MMC_SEND_CSD) {
977 struct mmc_card *card;
978 int broken_present = 0;
980 list_for_each_entry(card, &mmc->cards, node) {
981 if (is_broken_card(card)) {
986 if (broken_present) {
987 static int complained = 0;
990 printk(KERN_WARNING "MMC%d: Broken card workaround enabled\n",
994 if (in_interrupt()) {
996 printk(KERN_ERR "Sleeping in IRQ handler, FIXME please!\n");
/* 100ms delay for the broken-card workaround. */
1000 set_current_state(TASK_UNINTERRUPTIBLE);
1001 schedule_timeout(100 * HZ / 1000);
/* only touch fifo AFTER the controller readies it */
1007 mmc_omap_prepare_data(host, req);
1008 mmc_omap_start_command(host, req->cmd);
1009 if (host->dma_in_use)
1010 omap_start_dma(host->dma_ch);
/*
 * Innovator board: toggle MMC socket power via FPGA power-register
 * bit 3.  Compiled out on other configurations.
 */
1013 static void innovator_fpga_socket_power(int on)
1015 #if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
1018 fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
1019 OMAP1510_FPGA_POWER);
1021 fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
1022 OMAP1510_FPGA_POWER);
/*
1028 * Turn the socket power on/off. Innovator uses FPGA, most boards
1029 * probably use GPIO.
*/
/* Board-specific dispatch: FPGA (Innovator), TPS65010 GPIO (H2/H3),
 * CON register bit 11 (24xx), or a plain GPIO (power_pin). */
1031 static void mmc_omap_power(struct mmc_omap_host *host, int on)
1034 if (machine_is_omap_innovator())
1035 innovator_fpga_socket_power(1);
1036 else if (machine_is_omap_h2())
1037 tps65010_set_gpio_out_value(GPIO3, HIGH);
1038 else if (machine_is_omap_h3())
/* GPIO 4 of TPS65010 sends SD_EN signal */
1040 tps65010_set_gpio_out_value(GPIO4, HIGH);
1041 else if (cpu_is_omap24xx()) {
1042 u16 reg = OMAP_MMC_READ(host->base, CON);
1043 OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
1045 if (host->power_pin >= 0)
1046 omap_set_gpio_dataout(host->power_pin, 1);
/* Power-off: mirror image of the power-on branch above. */
1048 if (machine_is_omap_innovator())
1049 innovator_fpga_socket_power(0);
1050 else if (machine_is_omap_h2())
1051 tps65010_set_gpio_out_value(GPIO3, LOW);
1052 else if (machine_is_omap_h3())
1053 tps65010_set_gpio_out_value(GPIO4, LOW);
1054 else if (cpu_is_omap24xx()) {
1055 u16 reg = OMAP_MMC_READ(host->base, CON);
1056 OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
1058 if (host->power_pin >= 0)
1059 omap_set_gpio_dataout(host->power_pin, 0);
/*
 * mmc_host_ops.set_ios: apply clock divisor (with a 400kHz floor
 * during power-up), bus width, power mode, and open-drain/push-pull
 * bus mode (via Menelaus when present).  Ends with the power-up
 * clock-cycle sequence when requested.  Declarations of dsor/i/
 * realclock and some dsor-adjustment lines are missing here.
 */
1063 static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1065 struct mmc_omap_host *host = mmc_priv(mmc);
1069 DBG("MMC%d: set_ios: clock %dHz busmode %d powermode %d Vdd %d.%02d\n",
1070 host->id, ios->clock, ios->bus_mode, ios->power_mode,
1071 ios->vdd / 100, ios->vdd % 100);
1073 if (ios->power_mode == MMC_POWER_UP && ios->clock < 400000)
1074 realclock = 400000; /* Fix for broken stack */
1076 realclock = ios->clock;
1078 if (ios->clock == 0)
/* Derive CON divisor from the functional clock rate. */
1081 int func_clk_rate = clk_get_rate(host->fclk);
1083 dsor = func_clk_rate / realclock;
1087 if (func_clk_rate / dsor > realclock)
1094 if (ios->bus_width == MMC_BUS_WIDTH_4)
1098 switch (ios->power_mode) {
1100 mmc_omap_power(host, 0);
1104 mmc_omap_power(host, 1);
1109 host->bus_mode = ios->bus_mode;
1110 if (omap_has_menelaus()) {
1111 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
1112 menelaus_mmc_opendrain(1);
1114 menelaus_mmc_opendrain(0);
1116 host->hw_bus_mode = host->bus_mode;
1118 clk_enable(host->fclk);
/* On insanely high arm_per frequencies something sometimes
1121 * goes somehow out of sync, and the POW bit is not being set,
1122 * which results in the while loop below getting stuck.
1123 * Writing to the CON register twice seems to do the trick. */
1124 for (i = 0; i < 2; i++)
1125 OMAP_MMC_WRITE(host->base, CON, dsor);
1126 if (ios->power_mode == MMC_POWER_UP) {
/* Send clock cycles, poll completion */
1128 OMAP_MMC_WRITE(host->base, IE, 0);
1129 OMAP_MMC_WRITE(host->base, STAT, 0xffff);
1130 OMAP_MMC_WRITE(host->base, CMD, 1<<7);
1131 while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
1132 OMAP_MMC_WRITE(host->base, STAT, 1);
1134 clk_disable(host->fclk);
/*
 * mmc_host_ops.get_ro: report write-protect from the WP GPIO.
 * NOTE(review): the guard tests `wp_pin` truthiness rather than
 * `wp_pin >= 0` as the other pin fields do -- confirm whether pin 0
 * is valid for WP in the pristine source.
 */
1137 static int mmc_omap_get_ro(struct mmc_host *mmc)
1139 struct mmc_omap_host *host = mmc_priv(mmc);
1141 return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
/* Host-operations table handed to the MMC core via mmc->ops. */
1144 static struct mmc_host_ops mmc_omap_ops = {
1145 .request = mmc_omap_request,
1146 .set_ios = mmc_omap_set_ios,
1147 .get_ro = mmc_omap_get_ro,
/*
 * Platform-driver probe: validate resources, claim the register
 * region, allocate the mmc_host, acquire clocks, read board pin
 * configuration, set host-capability limits, request IRQs/GPIOs,
 * create sysfs attributes, and register with the MMC core and (on
 * Menelaus systems) the card-detect callback.  Several error-path
 * labels and the mmc_add_host() call are missing from this listing.
 */
1150 static int __init mmc_omap_probe(struct platform_device *pdev)
1152 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
1153 struct mmc_host *mmc;
1154 struct mmc_omap_host *host = NULL;
1157 if (pdev->resource[0].flags != IORESOURCE_MEM
1158 || pdev->resource[1].flags != IORESOURCE_IRQ) {
1159 printk(KERN_ERR "mmc_omap_probe: invalid resource type\n");
1163 if (!request_mem_region(pdev->resource[0].start,
1164 pdev->resource[0].end - pdev->resource[0].start + 1,
1166 dev_dbg(&pdev->dev, "request_mem_region failed\n");
1170 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
1176 host = mmc_priv(mmc);
1179 spin_lock_init(&host->dma_lock);
1180 init_timer(&host->dma_timer);
1181 host->dma_timer.function = mmc_omap_dma_timer;
1182 host->dma_timer.data = (unsigned long) host;
1184 host->id = pdev->id;
/* 24xx needs the interface clock enabled in addition to fclk. */
1186 if (cpu_is_omap24xx()) {
1187 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1188 if (IS_ERR(host->iclk))
1190 clk_enable(host->iclk);
1193 if (!cpu_is_omap24xx())
1194 host->fclk = clk_get(&pdev->dev,
1195 (host->id == 1) ? "mmc1_ck" : "mmc2_ck");
1197 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1199 if (IS_ERR(host->fclk)) {
1200 ret = PTR_ERR(host->fclk);
/* Board-supplied pin configuration.
1205 * Also, use minfo->cover to decide how to manage
1206 * the card detect sensing.
*/
1208 host->power_pin = minfo->power_pin;
1209 host->switch_pin = minfo->switch_pin;
1210 host->wp_pin = minfo->wp_pin;
1214 host->irq = pdev->resource[1].start;
1215 host->base = (void __iomem *)pdev->resource[0].start;
1218 mmc->caps |= MMC_CAP_4_BIT_DATA;
1220 mmc->ops = &mmc_omap_ops;
1221 mmc->f_min = 400000;
1222 mmc->f_max = 24000000;
1223 mmc->ocr_avail = MMC_VDD_33_34;
/* Use scatterlist DMA to reduce per-transfer costs.
1226 * NOTE max_seg_size assumption that small blocks aren't
1227 * normally used (except e.g. for reading SD registers).
*/
1229 mmc->max_phys_segs = 32;
1230 mmc->max_hw_segs = 32;
1231 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */
1232 mmc->max_seg_size = mmc->max_sectors * 512;
1234 if (host->power_pin >= 0) {
1235 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1236 printk(KERN_ERR "MMC%d: Unable to get GPIO pin for MMC power\n",
1240 omap_set_gpio_direction(host->power_pin, 0);
1243 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1247 host->dev = &pdev->dev;
1248 platform_set_drvdata(pdev, host);
/* Cover-switch setup: GPIO + rising-edge IRQ + poll timer + sysfs.
 * Failures here degrade gracefully to switch_pin = -1. */
1252 if (host->switch_pin >= 0) {
1253 INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
1254 init_timer(&host->switch_timer);
1255 host->switch_timer.function = mmc_omap_switch_timer;
1256 host->switch_timer.data = (unsigned long) host;
1257 if (omap_request_gpio(host->switch_pin) != 0) {
1258 printk(KERN_WARNING "MMC%d: Unable to get GPIO pin for MMC cover switch\n",
1260 host->switch_pin = -1;
1264 omap_set_gpio_direction(host->switch_pin, 1);
1265 set_irq_type(OMAP_GPIO_IRQ(host->switch_pin), IRQT_RISING);
1266 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1267 mmc_omap_switch_irq, 0, DRIVER_NAME, host);
1269 printk(KERN_WARNING "MMC%d: Unable to get IRQ for MMC cover switch\n",
1271 omap_free_gpio(host->switch_pin);
1272 host->switch_pin = -1;
1275 ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
1277 ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
1279 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1282 printk(KERN_WARNING "MMC%d: Unable to create sysfs attributes\n",
1284 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1285 omap_free_gpio(host->switch_pin);
1286 host->switch_pin = -1;
1289 if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
1290 schedule_work(&host->switch_work);
1293 if (omap_has_menelaus())
1294 menelaus_mmc_register(mmc_omap_switch_callback,
1295 (unsigned long)&host);
/* Error path. */
/* FIXME: Free other resources too. */
1303 if (host->iclk && !IS_ERR(host->iclk))
1304 clk_put(host->iclk);
1305 if (host->fclk && !IS_ERR(host->fclk))
1306 clk_put(host->fclk);
1307 mmc_free_host(host->mmc);
/*
 * Platform-driver remove: unregister from the MMC core, power the
 * socket off, release IRQs/GPIOs/sysfs/timers/clocks, unhook the
 * Menelaus callback, and release the register region.
 */
1312 static int mmc_omap_remove(struct platform_device *pdev)
1314 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1316 platform_set_drvdata(pdev, NULL);
1319 mmc_remove_host(host->mmc);
1320 free_irq(host->irq, host);
1321 mmc_omap_power(host, 0);
1323 if (host->power_pin >= 0)
1324 omap_free_gpio(host->power_pin);
1325 if (host->switch_pin >= 0) {
1326 device_remove_file(&pdev->dev, &dev_attr_enable_poll);
1327 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1328 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1329 omap_free_gpio(host->switch_pin);
1330 host->switch_pin = -1;
1331 del_timer_sync(&host->switch_timer);
1332 flush_scheduled_work();
1334 if (host->iclk && !IS_ERR(host->iclk))
1335 clk_put(host->iclk);
1336 if (host->fclk && !IS_ERR(host->fclk))
1337 clk_put(host->fclk);
1338 mmc_free_host(host->mmc);
1341 if (omap_has_menelaus())
1342 menelaus_mmc_remove();
1344 release_mem_region(pdev->resource[0].start,
1345 pdev->resource[0].end - pdev->resource[0].start + 1);
/*
 * PM suspend: delegate to mmc_suspend_host() and mark the host
 * suspended on success.  Early-return when already suspended; the
 * irqs_disabled() branch body is missing from this listing.
 */
1351 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1354 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1356 if (host && host->suspended)
1359 if (!irqs_disabled())
1363 ret = mmc_suspend_host(host->mmc, mesg);
1365 host->suspended = 1;
/*
 * PM resume: delegate to mmc_resume_host() and clear the suspended
 * flag on success.  When CONFIG_PM is off, both hooks are defined
 * away to NULL below.
 */
1370 static int mmc_omap_resume(struct platform_device *pdev)
1373 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1375 if (host && !host->suspended)
1379 ret = mmc_resume_host(host->mmc);
1381 host->suspended = 0;
1387 #define mmc_omap_suspend NULL
1388 #define mmc_omap_resume NULL
/* Platform driver binding by name ("mmci-omap"). */
1391 static struct platform_driver mmc_omap_driver = {
1392 .probe = mmc_omap_probe,
1393 .remove = mmc_omap_remove,
1394 .suspend = mmc_omap_suspend,
1395 .resume = mmc_omap_resume,
1397 .name = DRIVER_NAME,
/* Module entry/exit: register/unregister the platform driver. */
1401 static int __init mmc_omap_init(void)
1403 return platform_driver_register(&mmc_omap_driver);
1406 static void __exit mmc_omap_exit(void)
1408 platform_driver_unregister(&mmc_omap_driver);
1411 module_init(mmc_omap_init);
1412 module_exit(mmc_omap_exit);
1414 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1415 MODULE_LICENSE("GPL");
1416 MODULE_ALIAS(DRIVER_NAME);
1417 MODULE_AUTHOR("Juha Yrjölä");