/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
18 #include <linux/pci.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/scatterlist.h>
22 #include <linux/mmc/host.h>
#define DRIVER_NAME "sdhci"

/* Debug logging helper: tags each message with the driver and function name. */
#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

/* Extra quirk bits forced at module load time (debugging aid). */
static unsigned int debug_quirks = 0;

/*
 * Different quirks to handle when the hardware deviates from a strict
 * interpretation of the SDHCI specification.
 */

/* Controller doesn't honor resets unless we touch the clock register */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET			(1<<0)
/* Controller has bad caps bits, but really supports DMA */
#define SDHCI_QUIRK_FORCE_DMA				(1<<1)
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET			(1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE			(1<<3)
/* Controller has flaky internal state so reset it on each ios change */
#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS		(1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA				(1<<5)
/* Controller can only DMA from 32-bit aligned addresses */
#define SDHCI_QUIRK_32BIT_DMA_ADDR			(1<<6)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
#define SDHCI_QUIRK_32BIT_DMA_SIZE			(1<<7)
/* Controller needs to be reset after each request to stay stable */
#define SDHCI_QUIRK_RESET_AFTER_REQUEST			(1<<8)
57 static const struct pci_device_id pci_ids[] __devinitdata = {
59 .vendor = PCI_VENDOR_ID_RICOH,
60 .device = PCI_DEVICE_ID_RICOH_R5C822,
61 .subvendor = PCI_VENDOR_ID_IBM,
62 .subdevice = PCI_ANY_ID,
63 .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET |
64 SDHCI_QUIRK_FORCE_DMA,
68 .vendor = PCI_VENDOR_ID_RICOH,
69 .device = PCI_DEVICE_ID_RICOH_R5C822,
70 .subvendor = PCI_ANY_ID,
71 .subdevice = PCI_ANY_ID,
72 .driver_data = SDHCI_QUIRK_FORCE_DMA |
73 SDHCI_QUIRK_NO_CARD_NO_RESET,
77 .vendor = PCI_VENDOR_ID_TI,
78 .device = PCI_DEVICE_ID_TI_XX21_XX11_SD,
79 .subvendor = PCI_ANY_ID,
80 .subdevice = PCI_ANY_ID,
81 .driver_data = SDHCI_QUIRK_FORCE_DMA,
85 .vendor = PCI_VENDOR_ID_ENE,
86 .device = PCI_DEVICE_ID_ENE_CB712_SD,
87 .subvendor = PCI_ANY_ID,
88 .subdevice = PCI_ANY_ID,
89 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
90 SDHCI_QUIRK_BROKEN_DMA,
94 .vendor = PCI_VENDOR_ID_ENE,
95 .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
96 .subvendor = PCI_ANY_ID,
97 .subdevice = PCI_ANY_ID,
98 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
99 SDHCI_QUIRK_BROKEN_DMA,
103 .vendor = PCI_VENDOR_ID_ENE,
104 .device = PCI_DEVICE_ID_ENE_CB714_SD,
105 .subvendor = PCI_ANY_ID,
106 .subdevice = PCI_ANY_ID,
107 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
108 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
112 .vendor = PCI_VENDOR_ID_ENE,
113 .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
114 .subvendor = PCI_ANY_ID,
115 .subdevice = PCI_ANY_ID,
116 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
117 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
121 .vendor = PCI_VENDOR_ID_JMICRON,
122 .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
123 .subvendor = PCI_ANY_ID,
124 .subdevice = PCI_ANY_ID,
125 .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR |
126 SDHCI_QUIRK_32BIT_DMA_SIZE |
127 SDHCI_QUIRK_RESET_AFTER_REQUEST,
130 { /* Generic SD host controller */
131 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
134 { /* end: all zeroes */ },
137 MODULE_DEVICE_TABLE(pci, pci_ids);
/* Forward declarations: the command and data paths call into each other. */
static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
145 static void sdhci_dumpregs(struct sdhci_host *host)
147 printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
149 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
150 readl(host->ioaddr + SDHCI_DMA_ADDRESS),
151 readw(host->ioaddr + SDHCI_HOST_VERSION));
152 printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
153 readw(host->ioaddr + SDHCI_BLOCK_SIZE),
154 readw(host->ioaddr + SDHCI_BLOCK_COUNT));
155 printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
156 readl(host->ioaddr + SDHCI_ARGUMENT),
157 readw(host->ioaddr + SDHCI_TRANSFER_MODE));
158 printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
159 readl(host->ioaddr + SDHCI_PRESENT_STATE),
160 readb(host->ioaddr + SDHCI_HOST_CONTROL));
161 printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
162 readb(host->ioaddr + SDHCI_POWER_CONTROL),
163 readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
164 printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
165 readb(host->ioaddr + SDHCI_WAKE_UP_CONTROL),
166 readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
167 printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
168 readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
169 readl(host->ioaddr + SDHCI_INT_STATUS));
170 printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
171 readl(host->ioaddr + SDHCI_INT_ENABLE),
172 readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
173 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
174 readw(host->ioaddr + SDHCI_ACMD12_ERR),
175 readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
176 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
177 readl(host->ioaddr + SDHCI_CAPABILITIES),
178 readl(host->ioaddr + SDHCI_MAX_CURRENT));
180 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
183 /*****************************************************************************\
185 * Low level functions *
187 \*****************************************************************************/
/*
 * Soft-reset the controller. @mask selects which state machines to reset
 * (SDHCI_RESET_CMD / SDHCI_RESET_DATA / SDHCI_RESET_ALL); the hardware
 * clears the bit in SDHCI_SOFTWARE_RESET once the reset has completed.
 * NOTE(review): this extract is missing interior lines (card-present early
 * return, timeout countdown, closing braces); tokens kept as-is.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
	unsigned long timeout;

	/* Some controllers hang when reset with no card inserted. */
	if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &

	writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);

	/* A full reset invalidates the cached clock setting. */
	if (mask & SDHCI_RESET_ALL)

	/* Wait max 100 ms */

	/* hw clears the bit when it's done */
	while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
220 static void sdhci_init(struct sdhci_host *host)
224 sdhci_reset(host, SDHCI_RESET_ALL);
226 intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
227 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
228 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
229 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
230 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
231 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
233 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
234 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
237 static void sdhci_activate_led(struct sdhci_host *host)
241 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
242 ctrl |= SDHCI_CTRL_LED;
243 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
246 static void sdhci_deactivate_led(struct sdhci_host *host)
250 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
251 ctrl &= ~SDHCI_CTRL_LED;
252 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
255 /*****************************************************************************\
259 \*****************************************************************************/
261 static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
263 return sg_virt(host->cur_sg);
266 static inline int sdhci_next_sg(struct sdhci_host* host)
269 * Skip to next SG entry.
277 if (host->num_sg > 0) {
279 host->remain = host->cur_sg->length;
/*
 * Read one data block from the controller's buffer port, a 32-bit word at
 * a time, scattering the bytes into the current scatterlist entry.
 * NOTE(review): this extract is missing the loop framing and byte-unpack
 * lines; visible tokens kept as-is.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
	int blksize, chunk_remain;

	DBG("PIO reading\n");

	blksize = host->data->blksz;

	buffer = sdhci_sg_to_buffer(host) + host->offset;

		/* Refill the 32-bit word from the buffer port when drained. */
		if (chunk_remain == 0) {
			data = readl(host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);

		/* Copy at most what both the chunk and the SG entry allow. */
		size = min(host->remain, chunk_remain);

		chunk_remain -= size;

		host->offset += size;
		host->remain -= size;

			*buffer = data & 0xFF;

		/* Current SG entry exhausted: move to the next one. */
		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);

			buffer = sdhci_sg_to_buffer(host);
/*
 * Write one data block to the controller's buffer port, packing bytes from
 * the current scatterlist entry into 32-bit words.
 * NOTE(review): this extract is missing the loop framing and byte-pack
 * lines; visible tokens kept as-is.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
	int blksize, chunk_remain;

	DBG("PIO writing\n");

	blksize = host->data->blksz;

	buffer = sdhci_sg_to_buffer(host) + host->offset;

		/* Copy at most what both the chunk and the SG entry allow. */
		size = min(host->remain, chunk_remain);

		chunk_remain -= size;

		host->offset += size;
		host->remain -= size;

			data |= (u32)*buffer << 24;

		/* Word assembled: push it out and start the next one. */
		if (chunk_remain == 0) {
			writel(data, host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);

		/* Current SG entry exhausted: move to the next one. */
		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);

			buffer = sdhci_sg_to_buffer(host);
/*
 * Pump blocks in or out via PIO for as long as the controller reports
 * buffer space/data available in the present-state register.
 * NOTE(review): extract is missing some framing lines (early return,
 * else, loop exit); visible tokens kept as-is.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)

	if (host->num_sg == 0)

	/* Pick the present-state bit that matches the transfer direction. */
	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
		mask = SDHCI_SPACE_AVAILABLE;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
			sdhci_write_block_pio(host);

		if (host->num_sg == 0)

	DBG("PIO transfer complete.\n");
/*
 * Program the controller for an upcoming data transfer: data timeout,
 * DMA vs PIO selection (honouring alignment/size quirks), DMA address or
 * PIO scatterlist cursor, block size and block count registers.
 * NOTE(review): extract is missing some lines (early return for NULL data,
 * count init/increment, closing braces); visible tokens kept as-is.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
	unsigned target_timeout, current_timeout;

	/* Sanity limits: 512 KiB request cap, hardware block size/count caps. */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data_early = 0;

	/* Requested timeout in microseconds. */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 */
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		current_timeout <<= 1;

			printk(KERN_WARNING "%s: Too large timeout requested!\n",
				mmc_hostname(host->mmc));

	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	/* Quirk: controller can only DMA sizes that are 32-bit multiples. */
	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
		((data->blksz * data->blocks) & 0x3))) {
		DBG("Reverting to PIO because of transfer size (%d)\n",
			data->blksz * data->blocks);
		host->flags &= ~SDHCI_REQ_USE_DMA;

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		(data->sg->offset & 0x3))) {
		DBG("Reverting to PIO because of bad alignment\n");
		host->flags &= ~SDHCI_REQ_USE_DMA;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		/* Map the scatterlist for device access (single entry used). */
		count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);

		writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);

		/* PIO path: point the cursor at the first SG entry. */
		host->cur_sg = data->sg;
		host->num_sg = data->sg_len;

		host->remain = host->cur_sg->length;

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
		host->ioaddr + SDHCI_BLOCK_SIZE);
	writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
496 static void sdhci_set_transfer_mode(struct sdhci_host *host,
497 struct mmc_data *data)
504 WARN_ON(!host->data);
506 mode = SDHCI_TRNS_BLK_CNT_EN;
507 if (data->blocks > 1)
508 mode |= SDHCI_TRNS_MULTI;
509 if (data->flags & MMC_DATA_READ)
510 mode |= SDHCI_TRNS_READ;
511 if (host->flags & SDHCI_REQ_USE_DMA)
512 mode |= SDHCI_TRNS_DMA;
514 writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
/*
 * Tear down after the data phase: unmap DMA, account the bytes actually
 * transferred from the remaining block count, reset the state machines on
 * error, then either issue the stop command or finish the request.
 * NOTE(review): extract is missing some lines (data/host->data handoff,
 * else branches, stop-command guard); visible tokens kept as-is.
 */
static void sdhci_finish_data(struct sdhci_host *host)
	struct mmc_data *data;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);

	/*
	 * Controller doesn't count down when in single block mode.
	 */
	if (data->blocks == 1)
		blocks = (data->error == 0) ? 0 : 1;
		blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
	data->bytes_xfered = data->blksz * (data->blocks - blocks);

	/* Completion with blocks outstanding means the hardware lied. */
	if (!data->error && blocks) {
		printk(KERN_ERR "%s: Controller signalled completion even "
			"though there were blocks left.\n",
			mmc_hostname(host->mmc));

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

		sdhci_send_command(host, data->stop);
		tasklet_schedule(&host->finish_tasklet);
/*
 * Issue @cmd to the controller: wait for the CMD (and, when needed, DAT)
 * inhibit bits to clear, arm a 10 s software watchdog, program the data
 * phase, argument and transfer mode, then write the command register.
 * NOTE(review): extract is missing some lines (mask/flags declarations,
 * inhibit timeout countdown, returns, else, closing braces); visible
 * tokens kept as-is.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
	unsigned long timeout;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);

			tasklet_schedule(&host->finish_tasklet);

	/* Software watchdog: the hardware interrupt may never arrive. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	sdhci_prepare_data(host, cmd->data);

	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* The spec has no encoding for a long response with busy. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register starts execution. */
	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
		host->ioaddr + SDHCI_COMMAND);
/*
 * Read back the command response from the response registers and mark the
 * command complete; finish the data phase if it already ended early, and
 * schedule request completion for data-less commands.
 * NOTE(review): extract is missing some lines (loop interior for the
 * 136-bit response reassembly, else, host->cmd clear); visible tokens
 * kept as-is.
 */
static void sdhci_finish_command(struct sdhci_host *host)

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = readl(host->ioaddr +
					SDHCI_RESPONSE + (3-i)*4) << 8;
					host->cmd->resp[i] |=
						SDHCI_RESPONSE + (3-i)*4-1);
			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);

	host->cmd->error = 0;

	/* Data phase may have completed before the command did. */
	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);
/*
 * Set the SD clock to the highest power-of-two divisor of the base clock
 * that does not exceed @clock: disable the clock, program the divider,
 * wait for the internal clock to stabilise, then gate it to the card.
 * NOTE(review): extract is missing some lines (div/clk declarations,
 * clock==0 shortcut, timeout countdown, out label, host->clock update);
 * visible tokens kept as-is.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
	unsigned long timeout;

	if (clock == host->clock)

	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Smallest power-of-two divisor that brings us at or below @clock. */
	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Hardware sets INT_STABLE once the internal clock has settled. */
	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);

	clk |= SDHCI_CLOCK_CARD_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
/*
 * Set the bus voltage. @power is an MMC_VDD_* bit number, or
 * (unsigned short)-1 to power the bus off entirely.
 * NOTE(review): extract is missing some lines (pwr declaration, goto out
 * and out label, remaining case labels/breaks, host->power update);
 * visible tokens kept as-is.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)

	if (host->power == power)

	/* -1 means power off. */
	if (power == (unsigned short)-1) {
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	/* Map the MMC_VDD_* bit to the controller's voltage select field. */
	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		pwr |= SDHCI_POWER_300;
		pwr |= SDHCI_POWER_330;

	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);
755 /*****************************************************************************\
759 \*****************************************************************************/
/*
 * mmc_host_ops.request: entry point for a new MMC request. Lights the
 * activity LED and either fails the request immediately when no card is
 * present or starts its first command.
 * NOTE(review): extract is missing some lines (flags declaration,
 * host->mrq assignment, else); visible tokens kept as-is.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct sdhci_host *host;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Only one request may be in flight at a time. */
	WARN_ON(host->mrq != NULL);

	sdhci_activate_led(host);

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
		sdhci_send_command(host, mrq->cmd);

	spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.set_ios: apply clock, power, bus width and timing settings
 * requested by the MMC core, honouring controller quirks.
 * NOTE(review): extract is missing some lines (flags/ctrl declarations,
 * sdhci_init call on power off, else branches, mmiowb); visible tokens
 * kept as-is.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct sdhci_host *host;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
		sdhci_set_power(host, ios->vdd);

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
		ctrl &= ~SDHCI_CTRL_HISPD;

	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	spin_unlock_irqrestore(&host->lock, flags);
838 static int sdhci_get_ro(struct mmc_host *mmc)
840 struct sdhci_host *host;
844 host = mmc_priv(mmc);
846 spin_lock_irqsave(&host->lock, flags);
848 present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
850 spin_unlock_irqrestore(&host->lock, flags);
852 return !(present & SDHCI_WRITE_PROTECT);
855 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
857 struct sdhci_host *host;
861 host = mmc_priv(mmc);
863 spin_lock_irqsave(&host->lock, flags);
865 ier = readl(host->ioaddr + SDHCI_INT_ENABLE);
867 ier &= ~SDHCI_INT_CARD_INT;
869 ier |= SDHCI_INT_CARD_INT;
871 writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
872 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
876 spin_unlock_irqrestore(&host->lock, flags);
879 static const struct mmc_host_ops sdhci_ops = {
880 .request = sdhci_request,
881 .set_ios = sdhci_set_ios,
882 .get_ro = sdhci_get_ro,
883 .enable_sdio_irq = sdhci_enable_sdio_irq,
886 /*****************************************************************************\
890 \*****************************************************************************/
/*
 * Card insert/remove tasklet: if the card vanished mid-request, reset the
 * CMD/DATA state machines and fail the request with -ENOMEDIUM, then let
 * the MMC core rescan after a debounce delay.
 * NOTE(review): extract is missing some lines (flags declaration,
 * host->mrq guard, closing braces); visible tokens kept as-is.
 */
static void sdhci_tasklet_card(unsigned long param)
	struct sdhci_host *host;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);

	spin_unlock_irqrestore(&host->lock, flags);

	/* Debounced rescan; must run outside the spinlock. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
/*
 * Request-completion tasklet: cancel the watchdog timer, reset the state
 * machines if anything in the request failed (or the controller's quirk
 * demands it), clear the in-flight pointers and hand the request back to
 * the MMC core.
 * NOTE(review): extract is missing some lines (flags declaration,
 * mrq = host->mrq handoff, clock save/restore around the kick,
 * host->mrq/cmd/data clears); visible tokens kept as-is.
 */
static void sdhci_tasklet_finish(unsigned long param)
	struct sdhci_host *host;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (mrq->cmd->error ||
		(mrq->data && (mrq->data->error ||
		(mrq->data->stop && mrq->data->stop->error))) ||
		(host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {

		/* Some controllers need this kick or reset won't work here */
		if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {

			/* This is to force an update */
			sdhci_set_clock(host, clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);

	sdhci_deactivate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);

	/* Callback into the MMC core; must run outside the spinlock. */
	mmc_request_done(host->mmc, mrq);
/*
 * Software watchdog (armed in sdhci_send_command): fires when the hardware
 * never raised a completion interrupt. Fails the in-flight data, command
 * or request with -ETIMEDOUT and schedules completion.
 * NOTE(review): extract is missing some lines (flags declaration,
 * host->mrq/data/cmd guards, else branches); visible tokens kept as-is.
 */
static void sdhci_timeout_timer(unsigned long data)
	struct sdhci_host *host;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);

			host->cmd->error = -ETIMEDOUT;
			host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);

	spin_unlock_irqrestore(&host->lock, flags);
1003 /*****************************************************************************\
1005 * Interrupt handling *
1007 \*****************************************************************************/
/*
 * Handle command-related interrupt bits: translate error bits into an
 * error code on the in-flight command, or complete it on RESPONSE.
 * NOTE(review): extract is missing some lines (host->cmd NULL guard,
 * SDHCI_INT_INDEX in the CRC group, returns); visible tokens kept as-is.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
	BUG_ON(intmask == 0);

		/* Spurious: no command was in flight. */
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
		host->cmd->error = -EILSEQ;

	if (host->cmd->error)
		tasklet_schedule(&host->finish_tasklet);
	else if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
/*
 * Handle data-related interrupt bits: error translation, PIO pumping,
 * DMA-boundary restart, and data-phase completion (possibly before the
 * command itself has completed).
 * NOTE(review): extract is missing some lines (host->data guard, returns,
 * else framing, host->cmd check before the early/finish split); visible
 * tokens kept as-is.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
	BUG_ON(intmask == 0);

		/*
		 * A data end interrupt is sent together with the response
		 * for the stop command.
		 */
		if (intmask & SDHCI_INT_DATA_END)

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;

	if (host->data->error)
		sdhci_finish_data(host);

		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
				host->ioaddr + SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			/*
			 * Data managed to finish before the
			 * command completed. Make sure we do
			 * things in the proper order.
			 */
				host->data_early = 1;
				sdhci_finish_data(host);
/*
 * Top-level interrupt handler: acknowledge and dispatch each interrupt
 * group (card detect, command, data, bus power, card interrupt), then
 * complain about and clear anything left over.
 * NOTE(review): extract is missing some lines (result/intmask/cardint
 * declarations, goto out and out label, unexpected-interrupt guard,
 * return result); visible tokens kept as-is.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
	struct sdhci_host* host = dev_id;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHCI_INT_STATUS);

	/* 0 = not ours; all-ones = controller gone (e.g. hot unplug). */
	if (!intmask || intmask == 0xffffffff) {

	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
			host->ioaddr + SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		writel(intmask & SDHCI_INT_CMD_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

	if (intmask & SDHCI_INT_DATA_MASK) {
		writel(intmask & SDHCI_INT_DATA_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)

	intmask &= ~SDHCI_INT_CARD_INT;

		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		writel(intmask, host->ioaddr + SDHCI_INT_STATUS);

	result = IRQ_HANDLED;

	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
		mmc_signal_sdio_irq(host->mmc);
1167 /*****************************************************************************\
1171 \*****************************************************************************/
/*
 * PCI suspend: suspend every slot's MMC host (rolling back the ones
 * already suspended on failure), save PCI state, release the IRQs and
 * power the device down.
 * NOTE(review): extract is missing some lines (i/ret declarations,
 * continue statements, error return, return 0); visible tokens kept as-is.
 */
static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
	struct sdhci_chip *chip;

	chip = pci_get_drvdata(pdev);

	DBG("Suspending...\n");

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
		ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
			/* Roll back slots already suspended. */
			for (i--;i >= 0;i--)
				mmc_resume_host(chip->hosts[i]->mmc);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
		free_irq(chip->hosts[i]->irq, chip->hosts[i]);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
/*
 * PCI resume: restore PCI state, re-enable the device, then per slot
 * re-enable bus mastering (DMA users), re-request the IRQ, re-initialise
 * the controller and resume the MMC host.
 * NOTE(review): extract is missing some lines (i/ret declarations,
 * continue statements, error returns, return 0); visible tokens kept
 * as-is.
 */
static int sdhci_resume (struct pci_dev *pdev)
	struct sdhci_chip *chip;

	chip = pci_get_drvdata(pdev);

	DBG("Resuming...\n");

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
		if (chip->hosts[i]->flags & SDHCI_USE_DMA)
			pci_set_master(pdev);
		ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
			IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc),
		sdhci_init(chip->hosts[i]);
		ret = mmc_resume_host(chip->hosts[i]->mmc);
#else /* CONFIG_PM */

/* No power management configured: PCI driver gets NULL PM callbacks. */
#define sdhci_suspend NULL
#define sdhci_resume NULL

#endif /* CONFIG_PM */
1256 /*****************************************************************************\
1258 * Device probing/removal *
1260 \*****************************************************************************/
/*
 * Probe one SDHCI slot of a PCI function: validate the BAR and interface
 * class, map the registers, read capabilities, decide DMA vs PIO, fill in
 * the mmc_host limits, wire up tasklets/timer/IRQ and register the host.
 * NOTE(review): this extract is missing many lines (ret/caps/first_bar
 * declarations, error returns and goto-cleanup labels, host->mmc/chip
 * assignments, else branches, mmc_add_host); visible tokens kept as-is.
 */
static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
	unsigned int version;
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	chip = pci_get_drvdata(pdev);

	/* The slot's register BAR is encoded in the PCI slot-info byte. */
	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");

	if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
		printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");

	/* The spec defines a 256-byte register block per slot. */
	if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
		printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
			"You may experience problems.\n");

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");

	mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);

	host = mmc_priv(mmc);

	chip->hosts[slot] = host;

	host->bar = first_bar + slot;

	host->addr = pci_resource_start(pdev, host->bar);
	host->irq = pdev->irq;

	DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);

	ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc));

	host->ioaddr = ioremap_nocache(host->addr,
		pci_resource_len(pdev, host->bar));
	if (!host->ioaddr) {

	sdhci_reset(host, SDHCI_RESET_ALL);

	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),

	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);

	/* DMA decision: quirk override first, then the capability bit. */
	if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
		host->flags |= SDHCI_USE_DMA;

	if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_DMA;

	if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		printk(KERN_WARNING "%s: Will use DMA "
			"mode even though HW doesn't fully "
			"claim to support it.\n", mmc_hostname(mmc));

	if (host->flags & SDHCI_USE_DMA) {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "%s: No suitable DMA available. "
				"Falling back to PIO.\n", mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_DMA;

	if (host->flags & SDHCI_USE_DMA)
		pci_set_master(pdev);
	else /* XXX: Hack to get MMC layer to avoid highmem */

		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", mmc_hostname(mmc));

	host->max_clk *= 1000000;

		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", mmc_hostname(mmc));

	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	/* Advertise only the voltages the capabilities register reports. */
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Hardware cannot do scatter lists.
	 */
	if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
		mmc->max_hw_segs = 16;
	mmc->max_phys_segs = 16;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_WARNING "%s: Invalid maximum block size, "
			"assuming 512 bytes\n", mmc_hostname(mmc));
		mmc->max_blk_size = 512;
		mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);

	printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n",
		mmc_hostname(mmc), host->addr, host->irq,
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	/* Error unwind path: undo the steps above in reverse order. */
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	iounmap(host->ioaddr);

	pci_release_region(pdev, host->bar);
1511 static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
1513 struct sdhci_chip *chip;
1514 struct mmc_host *mmc;
1515 struct sdhci_host *host;
1517 chip = pci_get_drvdata(pdev);
1518 host = chip->hosts[slot];
1521 chip->hosts[slot] = NULL;
1523 mmc_remove_host(mmc);
1525 sdhci_reset(host, SDHCI_RESET_ALL);
1527 free_irq(host->irq, host);
1529 del_timer_sync(&host->timer);
1531 tasklet_kill(&host->card_tasklet);
1532 tasklet_kill(&host->finish_tasklet);
1534 iounmap(host->ioaddr);
1536 pci_release_region(pdev, host->bar);
1541 static int __devinit sdhci_probe(struct pci_dev *pdev,
1542 const struct pci_device_id *ent)
1546 struct sdhci_chip *chip;
1548 BUG_ON(pdev == NULL);
1549 BUG_ON(ent == NULL);
1551 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
1553 printk(KERN_INFO DRIVER_NAME
1554 ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
1555 pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
1558 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
1562 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
1563 DBG("found %d slot(s)\n", slots);
1567 ret = pci_enable_device(pdev);
1571 chip = kzalloc(sizeof(struct sdhci_chip) +
1572 sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
1579 chip->quirks = ent->driver_data;
1582 chip->quirks = debug_quirks;
1584 chip->num_slots = slots;
1585 pci_set_drvdata(pdev, chip);
1587 for (i = 0;i < slots;i++) {
1588 ret = sdhci_probe_slot(pdev, i);
1590 for (i--;i >= 0;i--)
1591 sdhci_remove_slot(pdev, i);
1599 pci_set_drvdata(pdev, NULL);
1603 pci_disable_device(pdev);
1607 static void __devexit sdhci_remove(struct pci_dev *pdev)
1610 struct sdhci_chip *chip;
1612 chip = pci_get_drvdata(pdev);
1615 for (i = 0;i < chip->num_slots;i++)
1616 sdhci_remove_slot(pdev, i);
1618 pci_set_drvdata(pdev, NULL);
1623 pci_disable_device(pdev);
1626 static struct pci_driver sdhci_driver = {
1627 .name = DRIVER_NAME,
1628 .id_table = pci_ids,
1629 .probe = sdhci_probe,
1630 .remove = __devexit_p(sdhci_remove),
1631 .suspend = sdhci_suspend,
1632 .resume = sdhci_resume,
1635 /*****************************************************************************\
1637 * Driver init/exit *
1639 \*****************************************************************************/
1641 static int __init sdhci_drv_init(void)
1643 printk(KERN_INFO DRIVER_NAME
1644 ": Secure Digital Host Controller Interface driver\n");
1645 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1647 return pci_register_driver(&sdhci_driver);
1650 static void __exit sdhci_drv_exit(void)
1654 pci_unregister_driver(&sdhci_driver);
1657 module_init(sdhci_drv_init);
1658 module_exit(sdhci_drv_exit);
1660 module_param(debug_quirks, uint, 0444);
1662 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
1663 MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
1664 MODULE_LICENSE("GPL");
1666 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");