/*
 *  linux/drivers/mmc/host/omap.c
 *
 *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 *  Other hacks (DMA, SD, etc) by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach-types.h>

#include <asm/arch/board.h>
#include <asm/arch/mmc.h>
#include <asm/arch/gpio.h>
#include <asm/arch/dma.h>
#include <asm/arch/mux.h>
#include <asm/arch/fpga.h>
#include <asm/arch/tps65010.h>
#include <asm/arch/board-sx1.h>

#define OMAP_MMC_REG_CMD        0x00
#define OMAP_MMC_REG_ARGL       0x04
#define OMAP_MMC_REG_ARGH       0x08
#define OMAP_MMC_REG_CON        0x0c
#define OMAP_MMC_REG_STAT       0x10
#define OMAP_MMC_REG_IE         0x14
#define OMAP_MMC_REG_CTO        0x18
#define OMAP_MMC_REG_DTO        0x1c
#define OMAP_MMC_REG_DATA       0x20
#define OMAP_MMC_REG_BLEN       0x24
#define OMAP_MMC_REG_NBLK       0x28
#define OMAP_MMC_REG_BUF        0x2c
#define OMAP_MMC_REG_SDIO       0x34
#define OMAP_MMC_REG_REV        0x3c
#define OMAP_MMC_REG_RSP0       0x40
#define OMAP_MMC_REG_RSP1       0x44
#define OMAP_MMC_REG_RSP2       0x48
#define OMAP_MMC_REG_RSP3       0x4c
#define OMAP_MMC_REG_RSP4       0x50
#define OMAP_MMC_REG_RSP5       0x54
#define OMAP_MMC_REG_RSP6       0x58
#define OMAP_MMC_REG_RSP7       0x5c
#define OMAP_MMC_REG_IOSR       0x60
#define OMAP_MMC_REG_SYSC       0x64
#define OMAP_MMC_REG_SYSS       0x68

#define OMAP_MMC_STAT_CARD_ERR          (1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ          (1 << 13)
#define OMAP_MMC_STAT_OCR_BUSY          (1 << 12)
#define OMAP_MMC_STAT_A_EMPTY           (1 << 11)
#define OMAP_MMC_STAT_A_FULL            (1 << 10)
#define OMAP_MMC_STAT_CMD_CRC           (1 <<  8)
#define OMAP_MMC_STAT_CMD_TOUT          (1 <<  7)
#define OMAP_MMC_STAT_DATA_CRC          (1 <<  6)
#define OMAP_MMC_STAT_DATA_TOUT         (1 <<  5)
#define OMAP_MMC_STAT_END_BUSY          (1 <<  4)
#define OMAP_MMC_STAT_END_OF_DATA       (1 <<  3)
#define OMAP_MMC_STAT_CARD_BUSY         (1 <<  2)
#define OMAP_MMC_STAT_END_OF_CMD        (1 <<  0)

#define OMAP_MMC_READ(host, reg)        __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
#define OMAP_MMC_WRITE(host, reg, val)  __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC     0
#define OMAP_MMC_CMDTYPE_BCR    1
#define OMAP_MMC_CMDTYPE_AC     2
#define OMAP_MMC_CMDTYPE_ADTC   3


#define DRIVER_NAME "mmci-omap"

/* Specifies how often in millisecs to poll for card status changes
 * when the cover switch is open */
#define OMAP_MMC_SWITCH_POLL_DELAY      500

struct mmc_omap_host;

struct mmc_omap_slot {
        int                     id;
        unsigned int            vdd;
        u16                     saved_con;
        u16                     bus_mode;
        unsigned int            fclk_freq;
        unsigned                powered:1;

        struct work_struct      switch_work;
        struct timer_list       switch_timer;
        unsigned                cover_open;

        struct mmc_request      *mrq;
        struct mmc_omap_host    *host;
        struct mmc_host         *mmc;
        struct omap_mmc_slot_data *pdata;
};

struct mmc_omap_host {
        int                     initialized;
        int                     suspended;
        struct mmc_request *    mrq;
        struct mmc_command *    cmd;
        struct mmc_data *       data;
        struct mmc_host *       mmc;
        struct device *         dev;
        unsigned char           id; /* 16xx chips have 2 MMC blocks */
        struct clk *            iclk;
        struct clk *            fclk;
        struct resource         *mem_res;
        void __iomem            *virt_base;
        unsigned int            phys_base;
        int                     irq;
        unsigned char           bus_mode;
        unsigned char           hw_bus_mode;

        struct work_struct      cmd_abort;
        struct timer_list       cmd_timer;

        unsigned int            sg_len;
        int                     sg_idx;
        u16 *                   buffer;
        u32                     buffer_bytes_left;
        u32                     total_bytes_left;

        unsigned                use_dma:1;
        unsigned                brs_received:1, dma_done:1;
        unsigned                dma_is_read:1;
        unsigned                dma_in_use:1;
        int                     dma_ch;
        spinlock_t              dma_lock;
        struct timer_list       dma_timer;
        unsigned                dma_len;

        short                   power_pin;

        struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
        struct mmc_omap_slot    *current_slot;
        spinlock_t              slot_lock;
        wait_queue_head_t       slot_wq;
        int                     nr_slots;

        struct omap_mmc_platform_data *pdata;
};

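/*
 * Slot handling: one controller can serve several card slots
 * (OMAP_MMC_MAX_SLOTS).  mmc_omap_select_slot() claims the controller
 * for a slot, sleeping on slot_wq if another slot currently owns it,
 * then enables the functional clock and restores that slot's saved CON
 * value.  mmc_omap_release_slot() either hands the controller straight
 * to another slot with a queued request or wakes up any waiters.
 */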
static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
{
        struct mmc_omap_host *host = slot->host;
        unsigned long flags;

        if (claimed)
                goto no_claim;
        spin_lock_irqsave(&host->slot_lock, flags);
        while (host->mmc != NULL) {
                spin_unlock_irqrestore(&host->slot_lock, flags);
                wait_event(host->slot_wq, host->mmc == NULL);
                spin_lock_irqsave(&host->slot_lock, flags);
        }
        host->mmc = slot->mmc;
        spin_unlock_irqrestore(&host->slot_lock, flags);
no_claim:
        clk_enable(host->fclk);
        if (host->current_slot != slot) {
                if (host->pdata->switch_slot != NULL)
                        host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
                host->current_slot = slot;
        }

        /* Doing the dummy read here seems to work around some bug
         * at least in OMAP24xx silicon where the command would not
         * start after writing the CMD register. Sigh. */
        OMAP_MMC_READ(host, CON);

        OMAP_MMC_WRITE(host, CON, slot->saved_con);
}

static void mmc_omap_start_request(struct mmc_omap_host *host,
                                   struct mmc_request *req);

static void mmc_omap_release_slot(struct mmc_omap_slot *slot)
{
        struct mmc_omap_host *host = slot->host;
        unsigned long flags;
        int i;

        BUG_ON(slot == NULL || host->mmc == NULL);
        clk_disable(host->fclk);

        spin_lock_irqsave(&host->slot_lock, flags);
        /* Check for any pending requests */
        for (i = 0; i < host->nr_slots; i++) {
                struct mmc_omap_slot *new_slot;
                struct mmc_request *rq;

                if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
                        continue;

                new_slot = host->slots[i];
                /* The current slot should not have a request in queue */
                BUG_ON(new_slot == host->current_slot);

                host->mmc = new_slot->mmc;
                spin_unlock_irqrestore(&host->slot_lock, flags);
                mmc_omap_select_slot(new_slot, 1);
                rq = new_slot->mrq;
                new_slot->mrq = NULL;
                mmc_omap_start_request(host, rq);
                return;
        }

        host->mmc = NULL;
        wake_up(&host->slot_wq);
        spin_unlock_irqrestore(&host->slot_lock, flags);
}

static inline
int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
{
        if (slot->pdata->get_cover_state)
                return slot->pdata->get_cover_state(mmc_dev(slot->mmc), slot->id);
        return 0;
}

static ssize_t
mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
        struct mmc_omap_slot *slot = mmc_priv(mmc);

        return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
                       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);

/* Access to the R/O switch is required for production testing
 * purposes. */
static ssize_t
mmc_omap_show_ro(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
        struct mmc_omap_slot *slot = mmc_priv(mmc);

        return sprintf(buf, "%d\n", slot->pdata->get_ro(mmc_dev(mmc),
                                                        slot->id));
}

static DEVICE_ATTR(ro, S_IRUGO, mmc_omap_show_ro, NULL);

static ssize_t
mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
        struct mmc_omap_slot *slot = mmc_priv(mmc);

        return sprintf(buf, "%s\n", slot->pdata->name);
}

static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);

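/*
 * Build and issue a command.  The CMD register is packed from the
 * fields used below: the opcode in the low bits, the response type in
 * bits 8..10 and the command type in bits 12..13, with bit 6 selecting
 * open-drain bus mode, bit 11 marking a busy (R1b) response and bit 15
 * making the data direction a read.
 */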
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
        u32 cmdreg;
        u32 resptype;
        u32 cmdtype;

        host->cmd = cmd;

        resptype = 0;
        cmdtype = 0;

        /* Our hardware needs to know exact type */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
                /* resp 1, 1b, 6, 7 */
                resptype = 1;
                break;
        case MMC_RSP_R2:
                resptype = 2;
                break;
        case MMC_RSP_R3:
                resptype = 3;
                break;
        default:
                dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
                break;
        }

        if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
                cmdtype = OMAP_MMC_CMDTYPE_ADTC;
        } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
                cmdtype = OMAP_MMC_CMDTYPE_BC;
        } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
                cmdtype = OMAP_MMC_CMDTYPE_BCR;
        } else {
                cmdtype = OMAP_MMC_CMDTYPE_AC;
        }

        cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

        if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
                cmdreg |= 1 << 6;

        if (cmd->flags & MMC_RSP_BUSY)
                cmdreg |= 1 << 11;

        if (host->data && !(host->data->flags & MMC_DATA_WRITE))
                cmdreg |= 1 << 15;

        mod_timer(&host->cmd_timer, jiffies + HZ/2);

        OMAP_MMC_WRITE(host, CTO, 200);
        OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
        OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
        OMAP_MMC_WRITE(host, IE,
                       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
                       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
                       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
                       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
                       OMAP_MMC_STAT_END_OF_DATA);
        OMAP_MMC_WRITE(host, CMD, cmdreg);
}

static void
mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
                     int abort)
{
        enum dma_data_direction dma_data_dir;

        BUG_ON(host->dma_ch < 0);
        if (data->error)
                omap_stop_dma(host->dma_ch);
        /* Release DMA channel lazily */
        mod_timer(&host->dma_timer, jiffies + HZ);
        if (data->flags & MMC_DATA_WRITE)
                dma_data_dir = DMA_TO_DEVICE;
        else
                dma_data_dir = DMA_FROM_DEVICE;
        dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
                     dma_data_dir);
}

static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
        if (host->dma_in_use)
                mmc_omap_release_dma(host, data, data->error);

        host->data = NULL;
        host->sg_len = 0;

        /* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
         * dozens of requests until the card finishes writing data.
         * It'd be cheaper to just wait till an EOFB interrupt arrives...
         */

        if (!data->stop) {
                struct mmc_host *mmc;

                host->mrq = NULL;
                mmc = host->mmc;
                mmc_omap_release_slot(host->current_slot);
                mmc_request_done(mmc, data->mrq);
                return;
        }

        mmc_omap_start_command(host, data->stop);
}

static void
mmc_omap_send_abort(struct mmc_omap_host *host)
{
        struct mmc_omap_slot *slot = host->current_slot;
        unsigned int restarts, passes, timeout;
        u16 stat = 0;

        /* Sending abort takes 80 clocks. Have some extra and round up */
        timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
        restarts = 0;
        while (restarts < 10000) {
                OMAP_MMC_WRITE(host, STAT, 0xFFFF);
                OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));

                passes = 0;
                while (passes < timeout) {
                        stat = OMAP_MMC_READ(host, STAT);
                        if (stat & OMAP_MMC_STAT_END_OF_CMD)
                                goto out;
                        udelay(1);
                        passes++;
                }

                restarts++;
        }
out:
        OMAP_MMC_WRITE(host, STAT, stat);
}

static void
mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
{
        u16 ie;

        if (host->dma_in_use)
                mmc_omap_release_dma(host, data, 1);

        host->data = NULL;
        host->sg_len = 0;

        ie = OMAP_MMC_READ(host, IE);
        OMAP_MMC_WRITE(host, IE, 0);
        OMAP_MMC_WRITE(host, IE, ie);
        mmc_omap_send_abort(host);
}

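/*
 * For DMA transfers the block-received status from the controller and
 * the DMA completion callback can arrive in either order.  Each path
 * records its completion under dma_lock, and only the one that finishes
 * last calls mmc_omap_xfer_done().
 */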
static void
mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
{
        unsigned long flags;
        int done;

        if (!host->dma_in_use) {
                mmc_omap_xfer_done(host, data);
                return;
        }
        done = 0;
        spin_lock_irqsave(&host->dma_lock, flags);
        if (host->dma_done)
                done = 1;
        else
                host->brs_received = 1;
        spin_unlock_irqrestore(&host->dma_lock, flags);
        if (done)
                mmc_omap_xfer_done(host, data);
}

static void
mmc_omap_dma_timer(unsigned long data)
{
        struct mmc_omap_host *host = (struct mmc_omap_host *) data;

        BUG_ON(host->dma_ch < 0);
        omap_free_dma(host->dma_ch);
        host->dma_ch = -1;
}

static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
        unsigned long flags;
        int done;

        done = 0;
        spin_lock_irqsave(&host->dma_lock, flags);
        if (host->brs_received)
                done = 1;
        else
                host->dma_done = 1;
        spin_unlock_irqrestore(&host->dma_lock, flags);
        if (done)
                mmc_omap_xfer_done(host, data);
}

static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
        host->cmd = NULL;

        del_timer(&host->cmd_timer);

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        /* response type 2 */
                        cmd->resp[3] =
                                OMAP_MMC_READ(host, RSP0) |
                                (OMAP_MMC_READ(host, RSP1) << 16);
                        cmd->resp[2] =
                                OMAP_MMC_READ(host, RSP2) |
                                (OMAP_MMC_READ(host, RSP3) << 16);
                        cmd->resp[1] =
                                OMAP_MMC_READ(host, RSP4) |
                                (OMAP_MMC_READ(host, RSP5) << 16);
                        cmd->resp[0] =
                                OMAP_MMC_READ(host, RSP6) |
                                (OMAP_MMC_READ(host, RSP7) << 16);
                } else {
                        /* response types 1, 1b, 3, 4, 5, 6 */
                        cmd->resp[0] =
                                OMAP_MMC_READ(host, RSP6) |
                                (OMAP_MMC_READ(host, RSP7) << 16);
                }
        }

        if (host->data == NULL || cmd->error) {
                struct mmc_host *mmc;

                if (host->data != NULL)
                        mmc_omap_abort_xfer(host, host->data);
                host->mrq = NULL;
                mmc = host->mmc;
                mmc_omap_release_slot(host->current_slot);
                mmc_request_done(mmc, cmd->mrq);
        }
}

/*
 * Abort stuck command. Can occur when card is removed while it is being
 * read.
 */
static void mmc_omap_abort_command(struct work_struct *work)
{
        struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
                                                  cmd_abort);
        u16 ie;

        ie = OMAP_MMC_READ(host, IE);
        OMAP_MMC_WRITE(host, IE, 0);

        if (!host->cmd) {
                OMAP_MMC_WRITE(host, IE, ie);
                return;
        }

        dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
                host->cmd->opcode);

        if (host->data && host->dma_in_use)
                mmc_omap_release_dma(host, host->data, 1);

        host->data = NULL;
        host->sg_len = 0;

        mmc_omap_send_abort(host);
        host->cmd->error = -ETIMEDOUT;
        mmc_omap_cmd_done(host, host->cmd);
        OMAP_MMC_WRITE(host, IE, ie);
}

static void
mmc_omap_cmd_timer(unsigned long data)
{
        struct mmc_omap_host *host = (struct mmc_omap_host *) data;

        schedule_work(&host->cmd_abort);
}

/* PIO only */
static void
mmc_omap_sg_to_buf(struct mmc_omap_host *host)
{
        struct scatterlist *sg;

        sg = host->data->sg + host->sg_idx;
        host->buffer_bytes_left = sg->length;
        host->buffer = sg_virt(sg);
        if (host->buffer_bytes_left > host->total_bytes_left)
                host->buffer_bytes_left = host->total_bytes_left;
}

/* PIO only */
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
        int n;

        if (host->buffer_bytes_left == 0) {
                host->sg_idx++;
                BUG_ON(host->sg_idx == host->sg_len);
                mmc_omap_sg_to_buf(host);
        }
        n = 64;
        if (n > host->buffer_bytes_left)
                n = host->buffer_bytes_left;
        host->buffer_bytes_left -= n;
        host->total_bytes_left -= n;
        host->data->bytes_xfered += n;

        if (write) {
                __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
        } else {
                __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
        }
}

static inline void mmc_omap_report_irq(u16 status)
{
        static const char *mmc_omap_status_bits[] = {
                "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
                "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
        };
        int i, c = 0;

        for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
                if (status & (1 << i)) {
                        if (c)
                                printk(" ");
                        printk("%s", mmc_omap_status_bits[i]);
                        c++;
                }
}

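/*
 * Interrupt handler: read and acknowledge STAT bits until none remain,
 * moving PIO data on the almost-full/almost-empty events, recording any
 * command or data errors, and then completing the command and/or data
 * phase once the status loop has drained.
 */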
static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
        struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
        u16 status;
        int end_command;
        int end_transfer;
        int transfer_error, cmd_error;

        if (host->cmd == NULL && host->data == NULL) {
                status = OMAP_MMC_READ(host, STAT);
                dev_info(mmc_dev(host->slots[0]->mmc),
                         "Spurious IRQ 0x%04x\n", status);
                if (status != 0) {
                        OMAP_MMC_WRITE(host, STAT, status);
                        OMAP_MMC_WRITE(host, IE, 0);
                }
                return IRQ_HANDLED;
        }

        end_command = 0;
        end_transfer = 0;
        transfer_error = 0;
        cmd_error = 0;

        while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
                int cmd;

                OMAP_MMC_WRITE(host, STAT, status);
                if (host->cmd != NULL)
                        cmd = host->cmd->opcode;
                else
                        cmd = -1;
#ifdef CONFIG_MMC_DEBUG
                dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
                        status, cmd);
                mmc_omap_report_irq(status);
                printk("\n");
#endif
                if (host->total_bytes_left) {
                        if ((status & OMAP_MMC_STAT_A_FULL) ||
                            (status & OMAP_MMC_STAT_END_OF_DATA))
                                mmc_omap_xfer_data(host, 0);
                        if (status & OMAP_MMC_STAT_A_EMPTY)
                                mmc_omap_xfer_data(host, 1);
                }

                if (status & OMAP_MMC_STAT_END_OF_DATA)
                        end_transfer = 1;

                if (status & OMAP_MMC_STAT_DATA_TOUT) {
                        dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
                                cmd);
                        if (host->data) {
                                host->data->error = -ETIMEDOUT;
                                transfer_error = 1;
                        }
                }

                if (status & OMAP_MMC_STAT_DATA_CRC) {
                        if (host->data) {
                                host->data->error = -EILSEQ;
                                dev_dbg(mmc_dev(host->mmc),
                                         "data CRC error, bytes left %d\n",
                                        host->total_bytes_left);
                                transfer_error = 1;
                        } else {
                                dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
                        }
                }

                if (status & OMAP_MMC_STAT_CMD_TOUT) {
                        /* Timeouts are routine with some commands */
                        if (host->cmd) {
                                struct mmc_omap_slot *slot =
                                        host->current_slot;
                                if (host->cmd->opcode != MMC_ALL_SEND_CID &&
                                    host->cmd->opcode != MMC_SEND_OP_COND &&
                                    host->cmd->opcode != MMC_APP_CMD &&
                                    (slot == NULL ||
                                     !mmc_omap_cover_is_open(slot)))
                                        dev_err(mmc_dev(host->mmc),
                                                "command timeout (CMD%d)\n",
                                                cmd);
                                host->cmd->error = -ETIMEDOUT;
                                end_command = 1;
                                cmd_error = 1;
                        }
                }

                if (status & OMAP_MMC_STAT_CMD_CRC) {
                        if (host->cmd) {
                                dev_err(mmc_dev(host->mmc),
                                        "command CRC error (CMD%d, arg 0x%08x)\n",
                                        cmd, host->cmd->arg);
                                host->cmd->error = -EILSEQ;
                                end_command = 1;
                                cmd_error = 1;
                        } else
                                dev_err(mmc_dev(host->mmc),
                                        "command CRC error without cmd?\n");
                }

                if (status & OMAP_MMC_STAT_CARD_ERR) {
                        dev_dbg(mmc_dev(host->mmc),
                                "ignoring card status error (CMD%d)\n",
                                cmd);
                        end_command = 1;
                }

                /*
                 * NOTE: On 1610 the END_OF_CMD may come too early when
                 * starting a write
                 */
                if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
                    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
                        end_command = 1;
                }
        }

        if (end_command)
                mmc_omap_cmd_done(host, host->cmd);
        if (host->data != NULL) {
                if (transfer_error)
                        mmc_omap_xfer_done(host, host->data);
                else if (end_transfer)
                        mmc_omap_end_of_data(host, host->data);
        }

        return IRQ_HANDLED;
}

void omap_mmc_notify_cover_event(struct device *dev, int slot, int is_closed)
{
        struct mmc_omap_host *host = dev_get_drvdata(dev);

        BUG_ON(slot >= host->nr_slots);

        /* Other subsystems can call in here before we're initialised. */
        if (host->nr_slots == 0 || !host->slots[slot])
                return;

        schedule_work(&host->slots[slot]->switch_work);
}

static void mmc_omap_switch_timer(unsigned long arg)
{
        struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;

        schedule_work(&slot->switch_work);
}

static void mmc_omap_cover_handler(struct work_struct *work)
{
        struct mmc_omap_slot *slot = container_of(work, struct mmc_omap_slot,
                                                  switch_work);
        int cover_open;

        cover_open = mmc_omap_cover_is_open(slot);
        if (cover_open != slot->cover_open) {
                sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
                slot->cover_open = cover_open;
                dev_info(mmc_dev(slot->mmc), "cover is now %s\n",
                         cover_open ? "open" : "closed");
        }
        mmc_detect_change(slot->mmc, slot->id);
}

/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
        int dma_ch = host->dma_ch;
        unsigned long data_addr;
        u16 buf, frame;
        u32 count;
        struct scatterlist *sg = &data->sg[host->sg_idx];
        int src_port = 0;
        int dst_port = 0;
        int sync_dev = 0;

        data_addr = host->phys_base + OMAP_MMC_REG_DATA;
        frame = data->blksz;
        count = sg_dma_len(sg);

        if ((data->blocks == 1) && (count > data->blksz))
                count = frame;

        host->dma_len = count;

        /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
         * Use 16 or 32 word frames when the blocksize is at least that large.
         * Blocksize is usually 512 bytes; but not for some SD reads.
         */
        if (cpu_is_omap15xx() && frame > 32)
                frame = 32;
        else if (frame > 64)
                frame = 64;
        count /= frame;
        frame >>= 1;

        if (!(data->flags & MMC_DATA_WRITE)) {
                buf = 0x800f | ((frame - 1) << 8);

                if (cpu_class_is_omap1()) {
                        src_port = OMAP_DMA_PORT_TIPB;
                        dst_port = OMAP_DMA_PORT_EMIFF;
                }
                if (cpu_is_omap24xx())
                        sync_dev = OMAP24XX_DMA_MMC1_RX;

                omap_set_dma_src_params(dma_ch, src_port,
                                        OMAP_DMA_AMODE_CONSTANT,
                                        data_addr, 0, 0);
                omap_set_dma_dest_params(dma_ch, dst_port,
                                         OMAP_DMA_AMODE_POST_INC,
                                         sg_dma_address(sg), 0, 0);
                omap_set_dma_dest_data_pack(dma_ch, 1);
                omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
        } else {
                buf = 0x0f80 | ((frame - 1) << 0);

                if (cpu_class_is_omap1()) {
                        src_port = OMAP_DMA_PORT_EMIFF;
                        dst_port = OMAP_DMA_PORT_TIPB;
                }
                if (cpu_is_omap24xx())
                        sync_dev = OMAP24XX_DMA_MMC1_TX;

                omap_set_dma_dest_params(dma_ch, dst_port,
                                         OMAP_DMA_AMODE_CONSTANT,
                                         data_addr, 0, 0);
                omap_set_dma_src_params(dma_ch, src_port,
                                        OMAP_DMA_AMODE_POST_INC,
                                        sg_dma_address(sg), 0, 0);
                omap_set_dma_src_data_pack(dma_ch, 1);
                omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
        }

        /* Max limit for DMA frame count is 0xffff */
        BUG_ON(count > 0xffff);

        OMAP_MMC_WRITE(host, BUF, buf);
        omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
                                     frame, count, OMAP_DMA_SYNC_FRAME,
                                     sync_dev, 0);
}

/* A scatterlist segment completed */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
        struct mmc_omap_host *host = (struct mmc_omap_host *) data;
        struct mmc_data *mmcdat = host->data;

        if (unlikely(host->dma_ch < 0)) {
                dev_err(mmc_dev(host->mmc),
                        "DMA callback while DMA not enabled\n");
                return;
        }
        /* FIXME: We really should do something to _handle_ the errors */
        if (ch_status & OMAP1_DMA_TOUT_IRQ) {
                dev_err(mmc_dev(host->mmc),"DMA timeout\n");
                return;
        }
        if (ch_status & OMAP_DMA_DROP_IRQ) {
                dev_err(mmc_dev(host->mmc), "DMA sync error\n");
                return;
        }
        if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
                return;
        }
        mmcdat->bytes_xfered += host->dma_len;
        host->sg_idx++;
        if (host->sg_idx < host->sg_len) {
                mmc_omap_prepare_dma(host, host->data);
                omap_start_dma(host->dma_ch);
        } else
                mmc_omap_dma_done(host, host->data);
}

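/*
 * DMA channels are reused lazily: if a channel is already held and the
 * transfer direction matches, keep it; otherwise free it and request
 * the RX or TX sync device for this controller instance.  The channel
 * is eventually released by the dma_timer armed in
 * mmc_omap_release_dma().
 */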
static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
{
        const char *dev_name;
        int sync_dev, dma_ch, is_read, r;

        is_read = !(data->flags & MMC_DATA_WRITE);
        del_timer_sync(&host->dma_timer);
        if (host->dma_ch >= 0) {
                if (is_read == host->dma_is_read)
                        return 0;
                omap_free_dma(host->dma_ch);
                host->dma_ch = -1;
        }

        if (is_read) {
                if (host->id == 1) {
                        sync_dev = OMAP_DMA_MMC_RX;
                        dev_name = "MMC1 read";
                } else {
                        sync_dev = OMAP_DMA_MMC2_RX;
                        dev_name = "MMC2 read";
                }
        } else {
                if (host->id == 1) {
                        sync_dev = OMAP_DMA_MMC_TX;
                        dev_name = "MMC1 write";
                } else {
                        sync_dev = OMAP_DMA_MMC2_TX;
                        dev_name = "MMC2 write";
                }
        }
        r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
                             host, &dma_ch);
        if (r != 0) {
                dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
                return r;
        }
        host->dma_ch = dma_ch;
        host->dma_is_read = is_read;

        return 0;
}

static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
        u16 reg;

        reg = OMAP_MMC_READ(host, SDIO);
        reg &= ~(1 << 5);
        OMAP_MMC_WRITE(host, SDIO, reg);
        /* Set maximum timeout */
        OMAP_MMC_WRITE(host, CTO, 0xff);
}

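/*
 * The data timeout is expressed in functional-clock cycles.  Convert
 * the request's timeout_ns (plus timeout_clks) at the current slot
 * frequency, and switch in the SDIO-register multiplier bit when the
 * cycle count does not fit into the 16-bit DTO register.
 */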
static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
        unsigned int timeout, cycle_ns;
        u16 reg;

        cycle_ns = 1000000000 / host->current_slot->fclk_freq;
        timeout = req->data->timeout_ns / cycle_ns;
        timeout += req->data->timeout_clks;

        /* Check if we need to use timeout multiplier register */
        reg = OMAP_MMC_READ(host, SDIO);
        if (timeout > 0xffff) {
                reg |= (1 << 5);
                timeout /= 1024;
        } else
                reg &= ~(1 << 5);
        OMAP_MMC_WRITE(host, SDIO, reg);
        OMAP_MMC_WRITE(host, DTO, timeout);
}

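/*
 * Program the block count, block length and data timeout for a request,
 * then decide between DMA and PIO.  DMA is used only when every
 * scatterlist segment is a whole number of blocks; otherwise the
 * transfer falls back to PIO with the FIFO thresholds set in BUF.
 */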
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
        struct mmc_data *data = req->data;
        int i, use_dma, block_size;
        unsigned sg_len;

        host->data = data;
        if (data == NULL) {
                OMAP_MMC_WRITE(host, BLEN, 0);
                OMAP_MMC_WRITE(host, NBLK, 0);
                OMAP_MMC_WRITE(host, BUF, 0);
                host->dma_in_use = 0;
                set_cmd_timeout(host, req);
                return;
        }

        block_size = data->blksz;

        OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
        OMAP_MMC_WRITE(host, BLEN, block_size - 1);
        set_data_timeout(host, req);

        /* cope with calling layer confusion; it issues "single
         * block" writes using multi-block scatterlists.
         */
        sg_len = (data->blocks == 1) ? 1 : data->sg_len;

        /* Only do DMA for entire blocks */
        use_dma = host->use_dma;
        if (use_dma) {
                for (i = 0; i < sg_len; i++) {
                        if ((data->sg[i].length % block_size) != 0) {
                                use_dma = 0;
                                break;
                        }
                }
        }

        host->sg_idx = 0;
        if (use_dma) {
                if (mmc_omap_get_dma_channel(host, data) == 0) {
                        enum dma_data_direction dma_data_dir;

                        if (data->flags & MMC_DATA_WRITE)
                                dma_data_dir = DMA_TO_DEVICE;
                        else
                                dma_data_dir = DMA_FROM_DEVICE;

                        host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
                                                sg_len, dma_data_dir);
                        host->total_bytes_left = 0;
                        mmc_omap_prepare_dma(host, req->data);
                        host->brs_received = 0;
                        host->dma_done = 0;
                        host->dma_in_use = 1;
                } else
                        use_dma = 0;
        }

        /* Revert to PIO? */
        if (!use_dma) {
                OMAP_MMC_WRITE(host, BUF, 0x1f1f);
                host->total_bytes_left = data->blocks * block_size;
                host->sg_len = sg_len;
                mmc_omap_sg_to_buf(host);
                host->dma_in_use = 0;
        }
}

static void mmc_omap_start_request(struct mmc_omap_host *host,
                                   struct mmc_request *req)
{
        BUG_ON(host->mrq != NULL);

        host->mrq = req;

        /* only touch fifo AFTER the controller readies it */
        mmc_omap_prepare_data(host, req);
        mmc_omap_start_command(host, req->cmd);
        if (host->dma_in_use)
                omap_start_dma(host->dma_ch);
        BUG_ON(irqs_disabled());
}

static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct mmc_omap_slot *slot = mmc_priv(mmc);
        struct mmc_omap_host *host = slot->host;
        unsigned long flags;

        spin_lock_irqsave(&host->slot_lock, flags);
        if (host->mmc != NULL) {
                BUG_ON(slot->mrq != NULL);
                slot->mrq = req;
                spin_unlock_irqrestore(&host->slot_lock, flags);
                return;
        } else
                host->mmc = mmc;
        spin_unlock_irqrestore(&host->slot_lock, flags);
        mmc_omap_select_slot(slot, 1);
        mmc_omap_start_request(host, req);
}

static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
                                int vdd)
{
        struct mmc_omap_host *host;

        host = slot->host;

        if (slot->pdata->set_power != NULL)
                slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
                                        vdd);

        if (cpu_is_omap24xx()) {
                u16 w;

                if (power_on) {
                        w = OMAP_MMC_READ(host, CON);
                        OMAP_MMC_WRITE(host, CON, w | (1 << 11));
                } else {
                        w = OMAP_MMC_READ(host, CON);
                        OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
                }
        }
}

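/*
 * Pick a clock divisor so that fclk/dsor does not exceed the requested
 * ios->clock (rounding the divisor up, clamped to 250), record the
 * resulting slot frequency for timeout calculations, and set bit 15 of
 * the returned value when a 4-bit bus is requested.
 */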
static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmc_omap_slot *slot = mmc_priv(mmc);
        struct mmc_omap_host *host = slot->host;
        int func_clk_rate = clk_get_rate(host->fclk);
        int dsor;

        if (ios->clock == 0)
                return 0;

        dsor = func_clk_rate / ios->clock;
        if (dsor < 1)
                dsor = 1;

        if (func_clk_rate / dsor > ios->clock)
                dsor++;

        if (dsor > 250)
                dsor = 250;

        slot->fclk_freq = func_clk_rate / dsor;

        if (ios->bus_width == MMC_BUS_WIDTH_4)
                dsor |= 1 << 15;

        return dsor;
}

static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmc_omap_slot *slot = mmc_priv(mmc);
        struct mmc_omap_host *host = slot->host;
        int i, dsor;

        dsor = mmc_omap_calc_divisor(mmc, ios);

        mmc_omap_select_slot(slot, 0);

        if (ios->vdd != slot->vdd)
                slot->vdd = ios->vdd;

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                mmc_omap_set_power(slot, 0, ios->vdd);
                break;
        case MMC_POWER_UP:
                /* Cannot touch dsor yet, just power up MMC */
                mmc_omap_set_power(slot, 1, ios->vdd);
                goto exit;
        case MMC_POWER_ON:
                dsor |= 1 << 11;
                break;
        }

        if (slot->bus_mode != ios->bus_mode) {
                if (slot->pdata->set_bus_mode != NULL)
                        slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
                                                  ios->bus_mode);
                slot->bus_mode = ios->bus_mode;
        }

        /* On insanely high arm_per frequencies something sometimes
         * goes somehow out of sync, and the POW bit is not being set,
         * which results in the while loop below getting stuck.
         * Writing to the CON register twice seems to do the trick. */
        for (i = 0; i < 2; i++)
                OMAP_MMC_WRITE(host, CON, dsor);
        slot->saved_con = dsor;
        if (ios->power_mode == MMC_POWER_ON) {
                /* Send clock cycles, poll completion */
                OMAP_MMC_WRITE(host, IE, 0);
                OMAP_MMC_WRITE(host, STAT, 0xffff);
                OMAP_MMC_WRITE(host, CMD, 1 << 7);
                while ((OMAP_MMC_READ(host, STAT) & 1) == 0);
                OMAP_MMC_WRITE(host, STAT, 1);
        }

exit:
        mmc_omap_release_slot(slot);
}

static int mmc_omap_get_ro(struct mmc_host *mmc)
{
        struct mmc_omap_slot *slot = mmc_priv(mmc);

        if (slot->pdata->get_ro != NULL)
                return slot->pdata->get_ro(mmc_dev(mmc), slot->id);
        return 0;
}

static const struct mmc_host_ops mmc_omap_ops = {
        .request        = mmc_omap_request,
        .set_ios        = mmc_omap_set_ios,
        .get_ro         = mmc_omap_get_ro,
};

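/*
 * Create one mmc_host per slot.  The slot structure lives in the
 * mmc_host private area; capabilities and clock limits come from the
 * platform data, and the optional sysfs attributes (slot_name,
 * cover_switch, ro) are created only when the board provides the
 * matching callbacks.
 */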
static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
        struct mmc_omap_slot *slot = NULL;
        struct mmc_host *mmc;
        int r;

        mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
        if (mmc == NULL)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->host = host;
        slot->mmc = mmc;
        slot->id = id;
        slot->pdata = &host->pdata->slots[id];

        host->slots[id] = slot;

        mmc->caps = MMC_CAP_MULTIWRITE | MMC_CAP_MMC_HIGHSPEED |
                    MMC_CAP_SD_HIGHSPEED;
        if (host->pdata->conf.wire4)
                mmc->caps |= MMC_CAP_4_BIT_DATA;

        mmc->ops = &mmc_omap_ops;
        mmc->f_min = 400000;

        if (cpu_class_is_omap2())
                mmc->f_max = 48000000;
        else
                mmc->f_max = 24000000;
        if (host->pdata->max_freq)
                mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
        mmc->ocr_avail = slot->pdata->ocr_mask;

        /* Use scatterlist DMA to reduce per-transfer costs.
         * NOTE max_seg_size assumption that small blocks aren't
         * normally used (except e.g. for reading SD registers).
         */
        mmc->max_phys_segs = 32;
        mmc->max_hw_segs = 32;
        mmc->max_blk_size = 2048;       /* BLEN is 11 bits (+1) */
        mmc->max_blk_count = 2048;      /* NBLK is 11 bits (+1) */
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;

        r = mmc_add_host(mmc);
        if (r < 0)
                return r;

        if (slot->pdata->name != NULL) {
                r = device_create_file(&mmc->class_dev,
                                        &dev_attr_slot_name);
                if (r < 0)
                        goto err_remove_host;
        }

        if (slot->pdata->get_cover_state != NULL) {
                r = device_create_file(&mmc->class_dev,
                                        &dev_attr_cover_switch);
                if (r < 0)
                        goto err_remove_slot_name;

                INIT_WORK(&slot->switch_work, mmc_omap_cover_handler);
                setup_timer(&slot->switch_timer, mmc_omap_switch_timer,
                            (unsigned long) slot);
                schedule_work(&slot->switch_work);
        }

        if (slot->pdata->get_ro != NULL) {
                r = device_create_file(&mmc->class_dev,
                                        &dev_attr_ro);
                if (r < 0)
                        goto err_remove_cover_attr;
        }

        return 0;

err_remove_cover_attr:
        if (slot->pdata->get_cover_state != NULL)
                device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
err_remove_slot_name:
        if (slot->pdata->name != NULL)
                device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
        mmc_remove_host(mmc);
        return r;
}

static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
{
        struct mmc_host *mmc = slot->mmc;

        if (slot->pdata->name != NULL)
                device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
        if (slot->pdata->get_cover_state != NULL)
                device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
        if (slot->pdata->get_ro != NULL)
                device_remove_file(&mmc->class_dev, &dev_attr_ro);

        del_timer_sync(&slot->switch_timer);
        flush_scheduled_work();

        mmc_remove_host(mmc);
        mmc_free_host(mmc);
}

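/*
 * Probe: map the register window, set up the abort/DMA timers and slot
 * locking, grab the interface and functional clocks (the interface
 * clock is only taken on 24xx), install the interrupt handler, run the
 * board init hook and finally register one slot per entry in the
 * platform data.
 */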
static int __init mmc_omap_probe(struct platform_device *pdev)
{
        struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
        struct mmc_omap_host *host = NULL;
        struct resource *res;
        int i, ret = 0;
        int irq;

        if (pdata == NULL) {
                dev_err(&pdev->dev, "platform data missing\n");
                return -ENXIO;
        }
        if (pdata->nr_slots == 0) {
                dev_err(&pdev->dev, "no slots\n");
                return -ENXIO;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (res == NULL || irq < 0)
                return -ENXIO;

        res = request_mem_region(res->start, res->end - res->start + 1,
                                 pdev->name);
        if (res == NULL)
                return -EBUSY;

        host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
        if (host == NULL) {
                ret = -ENOMEM;
                goto err_free_mem_region;
        }

        INIT_WORK(&host->cmd_abort, mmc_omap_abort_command);
        setup_timer(&host->cmd_timer, mmc_omap_cmd_timer, (unsigned long) host);

        spin_lock_init(&host->dma_lock);
        setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
        spin_lock_init(&host->slot_lock);
        init_waitqueue_head(&host->slot_wq);

        host->pdata = pdata;
        host->dev = &pdev->dev;
        platform_set_drvdata(pdev, host);

        host->id = pdev->id;
        host->mem_res = res;
        host->irq = irq;

        host->use_dma = 1;
        host->dma_ch = -1;

        host->irq = irq;
        host->phys_base = host->mem_res->start;
        host->virt_base = (void __iomem *) IO_ADDRESS(host->phys_base);

        if (cpu_is_omap24xx()) {
                host->iclk = clk_get(&pdev->dev, "mmc_ick");
                if (IS_ERR(host->iclk))
                        goto err_free_mmc_host;
                clk_enable(host->iclk);
        }

        if (!cpu_is_omap24xx())
                host->fclk = clk_get(&pdev->dev, "mmc_ck");
        else
                host->fclk = clk_get(&pdev->dev, "mmc_fck");

        if (IS_ERR(host->fclk)) {
                ret = PTR_ERR(host->fclk);
                goto err_free_iclk;
        }

        ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
        if (ret)
                goto err_free_fclk;

        if (pdata->init != NULL) {
                ret = pdata->init(&pdev->dev);
                if (ret < 0)
                        goto err_free_irq;
        }

        host->nr_slots = pdata->nr_slots;
        for (i = 0; i < pdata->nr_slots; i++) {
                ret = mmc_omap_new_slot(host, i);
                if (ret < 0) {
                        while (--i >= 0)
                                mmc_omap_remove_slot(host->slots[i]);

                        goto err_plat_cleanup;
                }
        }

        return 0;

err_plat_cleanup:
        if (pdata->cleanup)
                pdata->cleanup(&pdev->dev);
err_free_irq:
        free_irq(host->irq, host);
err_free_fclk:
        clk_put(host->fclk);
err_free_iclk:
        if (host->iclk != NULL) {
                clk_disable(host->iclk);
                clk_put(host->iclk);
        }
err_free_mmc_host:
        kfree(host);
err_free_mem_region:
        release_mem_region(res->start, res->end - res->start + 1);
        return ret;
}

static int mmc_omap_remove(struct platform_device *pdev)
{
        struct mmc_omap_host *host = platform_get_drvdata(pdev);
        int i;

        platform_set_drvdata(pdev, NULL);

        BUG_ON(host == NULL);

        for (i = 0; i < host->nr_slots; i++)
                mmc_omap_remove_slot(host->slots[i]);

        if (host->pdata->cleanup)
                host->pdata->cleanup(&pdev->dev);

        if (host->iclk && !IS_ERR(host->iclk))
                clk_put(host->iclk);
        if (host->fclk && !IS_ERR(host->fclk))
                clk_put(host->fclk);

        release_mem_region(pdev->resource[0].start,
                           pdev->resource[0].end - pdev->resource[0].start + 1);

        kfree(host);

        return 0;
}

#ifdef CONFIG_PM
static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
{
        int i, ret = 0;
        struct mmc_omap_host *host = platform_get_drvdata(pdev);

        if (host == NULL || host->suspended)
                return 0;

        for (i = 0; i < host->nr_slots; i++) {
                struct mmc_omap_slot *slot;

                slot = host->slots[i];
                ret = mmc_suspend_host(slot->mmc, mesg);
                if (ret < 0) {
                        while (--i >= 0) {
                                slot = host->slots[i];
                                mmc_resume_host(slot->mmc);
                        }
                        return ret;
                }
        }
        host->suspended = 1;
        return 0;
}

static int mmc_omap_resume(struct platform_device *pdev)
{
        int i, ret = 0;
        struct mmc_omap_host *host = platform_get_drvdata(pdev);

        if (host == NULL || !host->suspended)
                return 0;

        for (i = 0; i < host->nr_slots; i++) {
                struct mmc_omap_slot *slot;
                slot = host->slots[i];
                ret = mmc_resume_host(slot->mmc);
                if (ret < 0)
                        return ret;

                host->suspended = 0;
        }
        return 0;
}
#else
#define mmc_omap_suspend        NULL
#define mmc_omap_resume         NULL
#endif

static struct platform_driver mmc_omap_driver = {
        .probe          = mmc_omap_probe,
        .remove         = mmc_omap_remove,
        .suspend        = mmc_omap_suspend,
        .resume         = mmc_omap_resume,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};

static int __init mmc_omap_init(void)
{
        return platform_driver_register(&mmc_omap_driver);
}

static void __exit mmc_omap_exit(void)
{
        platform_driver_unregister(&mmc_omap_driver);
}

module_init(mmc_omap_init);
module_exit(mmc_omap_exit);

MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS(DRIVER_NAME);
MODULE_AUTHOR("Juha Yrjölä");