drivers/mmc/host/atmel-mci.c (karo-tx-linux.git)
mmc: atmel-mci: not busy flag has also to be used for read operations
1 /*
2  * Atmel MultiMedia Card Interface driver
3  *
4  * Copyright (C) 2004-2008 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/blkdev.h>
11 #include <linux/clk.h>
12 #include <linux/debugfs.h>
13 #include <linux/device.h>
14 #include <linux/dmaengine.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
17 #include <linux/gpio.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/scatterlist.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/stat.h>
27 #include <linux/types.h>
28
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/sdio.h>
31
32 #include <mach/atmel-mci.h>
33 #include <linux/atmel-mci.h>
34 #include <linux/atmel_pdc.h>
35
36 #include <asm/io.h>
37 #include <asm/unaligned.h>
38
39 #include <mach/cpu.h>
40 #include <mach/board.h>
41
42 #include "atmel-mci-regs.h"
43
44 #define ATMCI_DATA_ERROR_FLAGS  (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
45 #define ATMCI_DMA_THRESHOLD     16
46
47 enum {
48         EVENT_CMD_RDY = 0,
49         EVENT_XFER_COMPLETE,
50         EVENT_NOTBUSY,
51         EVENT_DATA_ERROR,
52 };
53
54 enum atmel_mci_state {
55         STATE_IDLE = 0,
56         STATE_SENDING_CMD,
57         STATE_DATA_XFER,
58         STATE_WAITING_NOTBUSY,
59         STATE_SENDING_STOP,
60         STATE_END_REQUEST,
61 };
62
63 enum atmci_xfer_dir {
64         XFER_RECEIVE = 0,
65         XFER_TRANSMIT,
66 };
67
68 enum atmci_pdc_buf {
69         PDC_FIRST_BUF = 0,
70         PDC_SECOND_BUF,
71 };
72
73 struct atmel_mci_caps {
74         bool    has_dma;
75         bool    has_pdc;
76         bool    has_cfg_reg;
77         bool    has_cstor_reg;
78         bool    has_highspeed;
79         bool    has_rwproof;
80         bool    has_odd_clk_div;
81         bool    has_bad_data_ordering;
82         bool    need_reset_after_xfer;
83         bool    need_blksz_mul_4;
84         bool    need_notbusy_for_read_ops;
85 };
86
87 struct atmel_mci_dma {
88         struct dma_chan                 *chan;
89         struct dma_async_tx_descriptor  *data_desc;
90 };
91
92 /**
93  * struct atmel_mci - MMC controller state shared between all slots
94  * @lock: Spinlock protecting the queue and associated data.
95  * @regs: Pointer to MMIO registers.
96  * @sg: Scatterlist entry currently being processed by PIO or PDC code.
97  * @pio_offset: Offset into the current scatterlist entry.
98  * @buffer: Bounce buffer used if we don't have the r/w proof capability.
99  *      We don't have the time to switch PDC buffers, so we use a single
100  *      buffer for the whole transaction.
101  * @buf_size: size of the buffer, in bytes.
102  * @buf_phys_addr: physical address of the buffer, needed for PDC.
103  * @cur_slot: The slot which is currently using the controller.
104  * @mrq: The request currently being processed on @cur_slot,
105  *      or NULL if the controller is idle.
106  * @cmd: The command currently being sent to the card, or NULL.
107  * @data: The data currently being transferred, or NULL if no data
108  *      transfer is in progress.
109  * @data_size: just data->blocks * data->blksz.
110  * @dma: DMA client state.
111  * @data_chan: DMA channel being used for the current data transfer.
112  * @cmd_status: Snapshot of SR taken upon completion of the current
113  *      command. Only valid when EVENT_CMD_RDY is pending.
114  * @data_status: Snapshot of SR taken upon completion of the current
115  *      data transfer. Only valid when EVENT_NOTBUSY or
116  *      EVENT_DATA_ERROR is pending.
117  * @stop_cmdr: Value to be loaded into CMDR when the stop command is
118  *      to be sent.
119  * @tasklet: Tasklet running the request state machine.
120  * @pending_events: Bitmask of events flagged by the interrupt handler
121  *      to be processed by the tasklet.
122  * @completed_events: Bitmask of events which the state machine has
123  *      processed.
124  * @state: Tasklet state.
125  * @queue: List of slots waiting for access to the controller.
126  * @need_clock_update: Update the clock rate before the next request.
127  * @need_reset: Reset controller before next request.
128  * @timer: Software timeout timer, used as a fallback when the data timeout error flag fails to rise.
129  * @mode_reg: Value of the MR register.
130  * @cfg_reg: Value of the CFG register.
131  * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
132  *      rate and timeout calculations.
133  * @mapbase: Physical address of the MMIO registers.
134  * @mck: The peripheral bus clock hooked up to the MMC controller.
135  * @pdev: Platform device associated with the MMC controller.
136  * @slot: Slots sharing this MMC controller.
137  * @caps: MCI capabilities depending on MCI version.
138  * @prepare_data: function used to set up the MCI before a data transfer;
139  *      depends on the MCI capabilities.
140  * @submit_data: function used to start a data transfer; depends on the
141  *      MCI capabilities.
142  * @stop_transfer: function used to stop a data transfer; depends on the
143  *      MCI capabilities.
144  *
145  * Locking
146  * =======
147  *
148  * @lock is a softirq-safe spinlock protecting @queue as well as
149  * @cur_slot, @mrq and @state. These must always be updated
150  * at the same time while holding @lock.
151  *
152  * @lock also protects mode_reg and need_clock_update since these are
153  * used to synchronize mode register updates with the queue
154  * processing.
155  *
156  * The @mrq field of struct atmel_mci_slot is also protected by @lock,
157  * and must always be written at the same time as the slot is added to
158  * @queue.
159  *
160  * @pending_events and @completed_events are accessed using atomic bit
161  * operations, so they don't need any locking.
162  *
163  * None of the fields touched by the interrupt handler need any
164  * locking. However, ordering is important: before EVENT_DATA_ERROR or
165  * EVENT_NOTBUSY is set in @pending_events, all data-related
166  * interrupts must be disabled and @data_status updated with a
167  * snapshot of SR. Similarly, before EVENT_CMD_RDY is set, the
168  * CMDRDY interrupt must be disabled and @cmd_status updated with a
169  * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
170  * bytes_xfered field of @data must be written. This is ensured by
171  * using barriers; a sketch of this pattern follows the struct below.
172  */
173 struct atmel_mci {
174         spinlock_t              lock;
175         void __iomem            *regs;
176
177         struct scatterlist      *sg;
178         unsigned int            pio_offset;
179         unsigned int            *buffer;
180         unsigned int            buf_size;
181         dma_addr_t              buf_phys_addr;
182
183         struct atmel_mci_slot   *cur_slot;
184         struct mmc_request      *mrq;
185         struct mmc_command      *cmd;
186         struct mmc_data         *data;
187         unsigned int            data_size;
188
189         struct atmel_mci_dma    dma;
190         struct dma_chan         *data_chan;
191         struct dma_slave_config dma_conf;
192
193         u32                     cmd_status;
194         u32                     data_status;
195         u32                     stop_cmdr;
196
197         struct tasklet_struct   tasklet;
198         unsigned long           pending_events;
199         unsigned long           completed_events;
200         enum atmel_mci_state    state;
201         struct list_head        queue;
202
203         bool                    need_clock_update;
204         bool                    need_reset;
205         struct timer_list       timer;
206         u32                     mode_reg;
207         u32                     cfg_reg;
208         unsigned long           bus_hz;
209         unsigned long           mapbase;
210         struct clk              *mck;
211         struct platform_device  *pdev;
212
213         struct atmel_mci_slot   *slot[ATMCI_MAX_NR_SLOTS];
214
215         struct atmel_mci_caps   caps;
216
217         u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
218         void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
219         void (*stop_transfer)(struct atmel_mci *host);
220 };
221
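/*
 * Illustrative sketch, not part of the original file: the ordering rule
 * described in the struct atmel_mci kernel-doc above amounts to the
 * interrupt handler doing, for the data-error case,
 *
 *	host->data_status = status;	(snapshot of SR)
 *	smp_wmb();
 *	atmci_set_pending(host, EVENT_DATA_ERROR);
 *	tasklet_schedule(&host->tasklet);
 *
 * so that once the tasklet observes the pending bit it is guaranteed to
 * see the matching @data_status snapshot.
 */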
222 /**
223  * struct atmel_mci_slot - MMC slot state
224  * @mmc: The mmc_host representing this slot.
225  * @host: The MMC controller this slot is using.
226  * @sdc_reg: Value of SDCR to be written before using this slot.
227  * @sdio_irq: SDIO irq mask for this slot.
228  * @mrq: mmc_request currently being processed or waiting to be
229  *      processed, or NULL when the slot is idle.
230  * @queue_node: List node for placing this node in the @queue list of
231  *      &struct atmel_mci.
232  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
233  * @flags: Random state bits associated with the slot.
234  * @detect_pin: GPIO pin used for card detection, or negative if not
235  *      available.
236  * @wp_pin: GPIO pin used for card write protect sensing, or negative
237  *      if not available.
238  * @detect_is_active_high: The state of the detect pin when it is active.
239  * @detect_timer: Timer used for debouncing @detect_pin interrupts.
240  */
241 struct atmel_mci_slot {
242         struct mmc_host         *mmc;
243         struct atmel_mci        *host;
244
245         u32                     sdc_reg;
246         u32                     sdio_irq;
247
248         struct mmc_request      *mrq;
249         struct list_head        queue_node;
250
251         unsigned int            clock;
252         unsigned long           flags;
253 #define ATMCI_CARD_PRESENT      0
254 #define ATMCI_CARD_NEED_INIT    1
255 #define ATMCI_SHUTDOWN          2
256 #define ATMCI_SUSPENDED         3
257
258         int                     detect_pin;
259         int                     wp_pin;
260         bool                    detect_is_active_high;
261
262         struct timer_list       detect_timer;
263 };
264
265 #define atmci_test_and_clear_pending(host, event)               \
266         test_and_clear_bit(event, &host->pending_events)
267 #define atmci_set_completed(host, event)                        \
268         set_bit(event, &host->completed_events)
269 #define atmci_set_pending(host, event)                          \
270         set_bit(event, &host->pending_events)
271
272 /*
273  * The debugfs stuff below is mostly optimized away when
274  * CONFIG_DEBUG_FS is not set.
275  */
276 static int atmci_req_show(struct seq_file *s, void *v)
277 {
278         struct atmel_mci_slot   *slot = s->private;
279         struct mmc_request      *mrq;
280         struct mmc_command      *cmd;
281         struct mmc_command      *stop;
282         struct mmc_data         *data;
283
284         /* Make sure we get a consistent snapshot */
285         spin_lock_bh(&slot->host->lock);
286         mrq = slot->mrq;
287
288         if (mrq) {
289                 cmd = mrq->cmd;
290                 data = mrq->data;
291                 stop = mrq->stop;
292
293                 if (cmd)
294                         seq_printf(s,
295                                 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
296                                 cmd->opcode, cmd->arg, cmd->flags,
297                                 cmd->resp[0], cmd->resp[1], cmd->resp[2],
298                                 cmd->resp[3], cmd->error);
299                 if (data)
300                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
301                                 data->bytes_xfered, data->blocks,
302                                 data->blksz, data->flags, data->error);
303                 if (stop)
304                         seq_printf(s,
305                                 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
306                                 stop->opcode, stop->arg, stop->flags,
307                                 stop->resp[0], stop->resp[1], stop->resp[2],
308                                 stop->resp[3], stop->error);
309         }
310
311         spin_unlock_bh(&slot->host->lock);
312
313         return 0;
314 }
315
316 static int atmci_req_open(struct inode *inode, struct file *file)
317 {
318         return single_open(file, atmci_req_show, inode->i_private);
319 }
320
321 static const struct file_operations atmci_req_fops = {
322         .owner          = THIS_MODULE,
323         .open           = atmci_req_open,
324         .read           = seq_read,
325         .llseek         = seq_lseek,
326         .release        = single_release,
327 };
328
329 static void atmci_show_status_reg(struct seq_file *s,
330                 const char *regname, u32 value)
331 {
332         static const char       *sr_bit[] = {
333                 [0]     = "CMDRDY",
334                 [1]     = "RXRDY",
335                 [2]     = "TXRDY",
336                 [3]     = "BLKE",
337                 [4]     = "DTIP",
338                 [5]     = "NOTBUSY",
339                 [6]     = "ENDRX",
340                 [7]     = "ENDTX",
341                 [8]     = "SDIOIRQA",
342                 [9]     = "SDIOIRQB",
343                 [12]    = "SDIOWAIT",
344                 [14]    = "RXBUFF",
345                 [15]    = "TXBUFE",
346                 [16]    = "RINDE",
347                 [17]    = "RDIRE",
348                 [18]    = "RCRCE",
349                 [19]    = "RENDE",
350                 [20]    = "RTOE",
351                 [21]    = "DCRCE",
352                 [22]    = "DTOE",
353                 [23]    = "CSTOE",
354                 [24]    = "BLKOVRE",
355                 [25]    = "DMADONE",
356                 [26]    = "FIFOEMPTY",
357                 [27]    = "XFRDONE",
358                 [30]    = "OVRE",
359                 [31]    = "UNRE",
360         };
361         unsigned int            i;
362
363         seq_printf(s, "%s:\t0x%08x", regname, value);
364         for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
365                 if (value & (1 << i)) {
366                         if (sr_bit[i])
367                                 seq_printf(s, " %s", sr_bit[i]);
368                         else
369                                 seq_puts(s, " UNKNOWN");
370                 }
371         }
372         seq_putc(s, '\n');
373 }
374
375 static int atmci_regs_show(struct seq_file *s, void *v)
376 {
377         struct atmel_mci        *host = s->private;
378         u32                     *buf;
379
380         buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
381         if (!buf)
382                 return -ENOMEM;
383
384         /*
385          * Grab a more or less consistent snapshot. Note that we're
386          * not disabling interrupts, so IMR and SR may not be
387          * consistent.
388          */
389         spin_lock_bh(&host->lock);
390         clk_enable(host->mck);
391         memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
392         clk_disable(host->mck);
393         spin_unlock_bh(&host->lock);
394
395         seq_printf(s, "MR:\t0x%08x%s%s ",
396                         buf[ATMCI_MR / 4],
397                         buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
398                         buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
399         if (host->caps.has_odd_clk_div)
400                 seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
401                                 ((buf[ATMCI_MR / 4] & 0xff) << 1)
402                                 | ((buf[ATMCI_MR / 4] >> 16) & 1));
403         else
404                 seq_printf(s, "CLKDIV=%u\n",
405                                 (buf[ATMCI_MR / 4] & 0xff));
406         seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
407         seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
408         seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
409         seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
410                         buf[ATMCI_BLKR / 4],
411                         buf[ATMCI_BLKR / 4] & 0xffff,
412                         (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
413         if (host->caps.has_cstor_reg)
414                 seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
415
416         /* Don't read RSPR and RDR; it will consume the data there */
417
418         atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
419         atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
420
421         if (host->caps.has_dma) {
422                 u32 val;
423
424                 val = buf[ATMCI_DMA / 4];
425                 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
426                                 val, val & 3,
427                                 ((val >> 4) & 3) ?
428                                         1 << (((val >> 4) & 3) + 1) : 1,
429                                 val & ATMCI_DMAEN ? " DMAEN" : "");
430         }
431         if (host->caps.has_cfg_reg) {
432                 u32 val;
433
434                 val = buf[ATMCI_CFG / 4];
435                 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
436                                 val,
437                                 val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
438                                 val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
439                                 val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
440                                 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
441         }
442
443         kfree(buf);
444
445         return 0;
446 }
447
448 static int atmci_regs_open(struct inode *inode, struct file *file)
449 {
450         return single_open(file, atmci_regs_show, inode->i_private);
451 }
452
453 static const struct file_operations atmci_regs_fops = {
454         .owner          = THIS_MODULE,
455         .open           = atmci_regs_open,
456         .read           = seq_read,
457         .llseek         = seq_lseek,
458         .release        = single_release,
459 };
460
461 static void atmci_init_debugfs(struct atmel_mci_slot *slot)
462 {
463         struct mmc_host         *mmc = slot->mmc;
464         struct atmel_mci        *host = slot->host;
465         struct dentry           *root;
466         struct dentry           *node;
467
468         root = mmc->debugfs_root;
469         if (!root)
470                 return;
471
472         node = debugfs_create_file("regs", S_IRUSR, root, host,
473                         &atmci_regs_fops);
474         if (IS_ERR(node))
475                 return;
476         if (!node)
477                 goto err;
478
479         node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
480         if (!node)
481                 goto err;
482
483         node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
484         if (!node)
485                 goto err;
486
487         node = debugfs_create_x32("pending_events", S_IRUSR, root,
488                                      (u32 *)&host->pending_events);
489         if (!node)
490                 goto err;
491
492         node = debugfs_create_x32("completed_events", S_IRUSR, root,
493                                      (u32 *)&host->completed_events);
494         if (!node)
495                 goto err;
496
497         return;
498
499 err:
500         dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
501 }
502
503 static inline unsigned int atmci_get_version(struct atmel_mci *host)
504 {
505         return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
506 }
507
508 static void atmci_timeout_timer(unsigned long data)
509 {
510         struct atmel_mci *host;
511
512         host = (struct atmel_mci *)data;
513
514         dev_dbg(&host->pdev->dev, "software timeout\n");
515
516         if (host->mrq->cmd->data) {
517                 host->mrq->cmd->data->error = -ETIMEDOUT;
518                 host->data = NULL;
519         } else {
520                 host->mrq->cmd->error = -ETIMEDOUT;
521                 host->cmd = NULL;
522         }
523         host->need_reset = 1;
524         host->state = STATE_END_REQUEST;
525         smp_wmb();
526         tasklet_schedule(&host->tasklet);
527 }
528
529 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
530                                         unsigned int ns)
531 {
532         /*
533          * It is easier to use us instead of ns for the timeout here,
534          * as it prevents overflows during the calculation.
535          */
536         unsigned int us = DIV_ROUND_UP(ns, 1000);
537
538         /* Maximum clock frequency is host->bus_hz/2 */
539         return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
540 }
541
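/*
 * Worked example, illustrative only and with assumed numbers: for a
 * hypothetical bus_hz of 100 MHz and a 100 ms card timeout,
 * ns = 100000000 gives us = DIV_ROUND_UP(100000000, 1000) = 100000 us,
 * each us is worth DIV_ROUND_UP(100000000, 2000000) = 50 MMC clock
 * cycles, and atmci_ns_to_clocks() returns 100000 * 50 = 5000000 cycles.
 */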
542 static void atmci_set_timeout(struct atmel_mci *host,
543                 struct atmel_mci_slot *slot, struct mmc_data *data)
544 {
545         static unsigned dtomul_to_shift[] = {
546                 0, 4, 7, 8, 10, 12, 16, 20
547         };
548         unsigned        timeout;
549         unsigned        dtocyc;
550         unsigned        dtomul;
551
552         timeout = atmci_ns_to_clocks(host, data->timeout_ns)
553                 + data->timeout_clks;
554
555         for (dtomul = 0; dtomul < 8; dtomul++) {
556                 unsigned shift = dtomul_to_shift[dtomul];
557                 dtocyc = (timeout + (1 << shift) - 1) >> shift;
558                 if (dtocyc < 15)
559                         break;
560         }
561
562         if (dtomul >= 8) {
563                 dtomul = 7;
564                 dtocyc = 15;
565         }
566
567         dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
568                         dtocyc << dtomul_to_shift[dtomul]);
569         atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
570 }
571
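/*
 * Continuing the illustrative example above (assumed numbers): for a
 * timeout of about 5000000 clock cycles the loop walks the shift table
 * until dtocyc drops below 15: 5000000 >> 16 rounds up to 77, which is
 * still too large, while 5000000 >> 20 rounds up to 5, so DTOMUL = 7 and
 * DTOCYC = 5 are programmed, an effective timeout of 5 << 20 = 5242880
 * cycles.
 */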
572 /*
573  * Return mask with command flags to be enabled for this command.
574  */
575 static u32 atmci_prepare_command(struct mmc_host *mmc,
576                                  struct mmc_command *cmd)
577 {
578         struct mmc_data *data;
579         u32             cmdr;
580
581         cmd->error = -EINPROGRESS;
582
583         cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
584
585         if (cmd->flags & MMC_RSP_PRESENT) {
586                 if (cmd->flags & MMC_RSP_136)
587                         cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
588                 else
589                         cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
590         }
591
592         /*
593          * This should really be MAXLAT_5 for CMD2 and ACMD41, but
594          * it's too difficult to determine whether this is an ACMD or
595          * not. Better make it 64.
596          */
597         cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
598
599         if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
600                 cmdr |= ATMCI_CMDR_OPDCMD;
601
602         data = cmd->data;
603         if (data) {
604                 cmdr |= ATMCI_CMDR_START_XFER;
605
606                 if (cmd->opcode == SD_IO_RW_EXTENDED) {
607                         cmdr |= ATMCI_CMDR_SDIO_BLOCK;
608                 } else {
609                         if (data->flags & MMC_DATA_STREAM)
610                                 cmdr |= ATMCI_CMDR_STREAM;
611                         else if (data->blocks > 1)
612                                 cmdr |= ATMCI_CMDR_MULTI_BLOCK;
613                         else
614                                 cmdr |= ATMCI_CMDR_BLOCK;
615                 }
616
617                 if (data->flags & MMC_DATA_READ)
618                         cmdr |= ATMCI_CMDR_TRDIR_READ;
619         }
620
621         return cmdr;
622 }
623
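/*
 * As an illustration, not taken from the original source: for a
 * single-block read with a short response, e.g. CMD17 with an R1 reply on
 * a push-pull bus, the mask built above is ATMCI_CMDR_CMDNB(17) |
 * ATMCI_CMDR_RSPTYP_48BIT | ATMCI_CMDR_MAXLAT_64CYC |
 * ATMCI_CMDR_START_XFER | ATMCI_CMDR_BLOCK | ATMCI_CMDR_TRDIR_READ.
 */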
624 static void atmci_send_command(struct atmel_mci *host,
625                 struct mmc_command *cmd, u32 cmd_flags)
626 {
627         WARN_ON(host->cmd);
628         host->cmd = cmd;
629
630         dev_vdbg(&host->pdev->dev,
631                         "start command: ARGR=0x%08x CMDR=0x%08x\n",
632                         cmd->arg, cmd_flags);
633
634         atmci_writel(host, ATMCI_ARGR, cmd->arg);
635         atmci_writel(host, ATMCI_CMDR, cmd_flags);
636 }
637
638 static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
639 {
640         dev_dbg(&host->pdev->dev, "send stop command\n");
641         atmci_send_command(host, data->stop, host->stop_cmdr);
642         atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
643 }
644
645 /*
646  * Configure the given PDC buffer, taking care of alignment issues.
647  * Update host->data_size and host->sg.
648  */
649 static void atmci_pdc_set_single_buf(struct atmel_mci *host,
650         enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
651 {
652         u32 pointer_reg, counter_reg;
653         unsigned int buf_size;
654
655         if (dir == XFER_RECEIVE) {
656                 pointer_reg = ATMEL_PDC_RPR;
657                 counter_reg = ATMEL_PDC_RCR;
658         } else {
659                 pointer_reg = ATMEL_PDC_TPR;
660                 counter_reg = ATMEL_PDC_TCR;
661         }
662
663         if (buf_nb == PDC_SECOND_BUF) {
664                 pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
665                 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
666         }
667
668         if (!host->caps.has_rwproof) {
669                 buf_size = host->buf_size;
670                 atmci_writel(host, pointer_reg, host->buf_phys_addr);
671         } else {
672                 buf_size = sg_dma_len(host->sg);
673                 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
674         }
675
676         if (host->data_size <= buf_size) {
677                 if (host->data_size & 0x3) {
678                         /* If the size is not a multiple of 4, transfer in byte mode */
679                         atmci_writel(host, counter_reg, host->data_size);
680                         atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
681                 } else {
682                         /* Otherwise transfer 32-bit words */
683                         atmci_writel(host, counter_reg, host->data_size / 4);
684                 }
685                 host->data_size = 0;
686         } else {
687                 /* We assume the scatterlist entry length is 32-bit aligned */
688                 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
689                 host->data_size -= sg_dma_len(host->sg);
690                 if (host->data_size)
691                         host->sg = sg_next(host->sg);
692         }
693 }
694
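/*
 * Worked example, illustrative only and with assumed sizes: on a
 * controller with the r/w proof capability, a 9 KiB read built from
 * 4 KiB scatterlist entries is programmed as follows: the first call
 * loads the pointer/counter registers with the first entry (1024 words)
 * and leaves data_size at 5 KiB pointing at the second entry; a second
 * call for PDC_SECOND_BUF loads the next-buffer registers with another
 * 1024 words, leaving 1 KiB (the third entry) to be programmed later.
 */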
695 /*
696  * Configure the PDC buffers according to the data size, i.e. configure one or
697  * two buffers. Don't use this function if you only want to configure the
698  * second buffer; in that case, use atmci_pdc_set_single_buf.
699  */
700 static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
701 {
702         atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
703         if (host->data_size)
704                 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
705 }
706
707 /*
708  * Unmap sg lists, called when transfer is finished.
709  */
710 static void atmci_pdc_cleanup(struct atmel_mci *host)
711 {
712         struct mmc_data         *data = host->data;
713
714         if (data)
715                 dma_unmap_sg(&host->pdev->dev,
716                                 data->sg, data->sg_len,
717                                 ((data->flags & MMC_DATA_WRITE)
718                                  ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
719 }
720
721 /*
722  * Disable PDC transfers. Set the pending EVENT_XFER_COMPLETE flag after the
723  * ATMCI_TXBUFE or ATMCI_RXBUFF interrupt has been received. The ATMCI_NOTBUSY
724  * interrupt is needed for both transfer directions.
725  */
726 static void atmci_pdc_complete(struct atmel_mci *host)
727 {
728         int transfer_size = host->data->blocks * host->data->blksz;
729         int i;
730
731         atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
732
733         if ((!host->caps.has_rwproof)
734             && (host->data->flags & MMC_DATA_READ)) {
735                 if (host->caps.has_bad_data_ordering)
736                         for (i = 0; i < transfer_size / 4; i++) /* words, not bytes */
737                                 host->buffer[i] = swab32(host->buffer[i]);
738                 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
739                                     host->buffer, transfer_size);
740         }
741
742         atmci_pdc_cleanup(host);
743
744         /*
745          * If the card was removed, data will be NULL. No point trying
746          * to send the stop command or waiting for NBUSY in this case.
747          */
748         if (host->data) {
749                 dev_dbg(&host->pdev->dev,
750                         "(%s) set pending xfer complete\n", __func__);
751                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
752                 tasklet_schedule(&host->tasklet);
753         }
754 }
755
756 static void atmci_dma_cleanup(struct atmel_mci *host)
757 {
758         struct mmc_data                 *data = host->data;
759
760         if (data)
761                 dma_unmap_sg(host->dma.chan->device->dev,
762                                 data->sg, data->sg_len,
763                                 ((data->flags & MMC_DATA_WRITE)
764                                  ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
765 }
766
767 /*
768  * This function is called by the DMA driver from tasklet context.
769  */
770 static void atmci_dma_complete(void *arg)
771 {
772         struct atmel_mci        *host = arg;
773         struct mmc_data         *data = host->data;
774
775         dev_vdbg(&host->pdev->dev, "DMA complete\n");
776
777         if (host->caps.has_dma)
778                 /* Disable DMA hardware handshaking on MCI */
779                 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
780
781         atmci_dma_cleanup(host);
782
783         /*
784          * If the card was removed, data will be NULL. No point trying
785          * to send the stop command or waiting for NBUSY in this case.
786          */
787         if (data) {
788                 dev_dbg(&host->pdev->dev,
789                         "(%s) set pending xfer complete\n", __func__);
790                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
791                 tasklet_schedule(&host->tasklet);
792
793                 /*
794                  * Regardless of what the documentation says, we have
795                  * to wait for NOTBUSY even after block read
796                  * operations.
797                  *
798                  * When the DMA transfer is complete, the controller
799                  * may still be reading the CRC from the card, i.e.
800                  * the data transfer is still in progress and we
801                  * haven't seen all the potential error bits yet.
802                  *
803                  * The interrupt handler will schedule a different
804                  * tasklet to finish things up when the data transfer
805                  * is completely done.
806                  *
807                  * We may not complete the mmc request here anyway
808                  * because the mmc layer may call back and cause us to
809                  * violate the "don't submit new operations from the
810                  * completion callback" rule of the dma engine
811                  * framework.
812                  */
813                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
814         }
815 }
816
817 /*
818  * Returns a mask of interrupt flags to be enabled after the whole
819  * request has been prepared.
820  */
821 static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
822 {
823         u32 iflags;
824
825         data->error = -EINPROGRESS;
826
827         host->sg = data->sg;
828         host->data = data;
829         host->data_chan = NULL;
830
831         iflags = ATMCI_DATA_ERROR_FLAGS;
832
833         /*
834          * Errata: MMC data write operation with less than 12
835          * bytes is impossible.
836          *
837          * Errata: MCI Transmit Data Register (TDR) FIFO
838          * corruption when length is not multiple of 4.
839          */
840         if (data->blocks * data->blksz < 12
841                         || (data->blocks * data->blksz) & 3)
842                 host->need_reset = true;
843
844         host->pio_offset = 0;
845         if (data->flags & MMC_DATA_READ)
846                 iflags |= ATMCI_RXRDY;
847         else
848                 iflags |= ATMCI_TXRDY;
849
850         return iflags;
851 }
852
853 /*
854  * Set the interrupt flags and write the block length into the MCI mode
855  * register even though this value is also accessible through the MCI block
856  * register. This seems to be necessary on versions preceding the High Speed
857  * MCI. It also maps the sg list and configures the PDC registers.
858  */
859 static u32
860 atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
861 {
862         u32 iflags, tmp;
863         unsigned int sg_len;
864         enum dma_data_direction dir;
865         int i;
866
867         data->error = -EINPROGRESS;
868
869         host->data = data;
870         host->sg = data->sg;
871         iflags = ATMCI_DATA_ERROR_FLAGS;
872
873         /* Enable pdc mode */
874         atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
875
876         if (data->flags & MMC_DATA_READ) {
877                 dir = DMA_FROM_DEVICE;
878                 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
879         } else {
880                 dir = DMA_TO_DEVICE;
881                 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
882         }
883
884         /* Set BLKLEN */
885         tmp = atmci_readl(host, ATMCI_MR);
886         tmp &= 0x0000ffff;
887         tmp |= ATMCI_BLKLEN(data->blksz);
888         atmci_writel(host, ATMCI_MR, tmp);
889
890         /* Configure PDC */
891         host->data_size = data->blocks * data->blksz;
892         sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
893
894         if ((!host->caps.has_rwproof)
895             && (host->data->flags & MMC_DATA_WRITE)) {
896                 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
897                                   host->buffer, host->data_size);
898                 if (host->caps.has_bad_data_ordering)
899                         for (i = 0; i < host->data_size / 4; i++) /* words, not bytes */
900                                 host->buffer[i] = swab32(host->buffer[i]);
901         }
902
903         if (host->data_size)
904                 atmci_pdc_set_both_buf(host,
905                         ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
906
907         return iflags;
908 }
909
910 static u32
911 atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
912 {
913         struct dma_chan                 *chan;
914         struct dma_async_tx_descriptor  *desc;
915         struct scatterlist              *sg;
916         unsigned int                    i;
917         enum dma_data_direction         direction;
918         enum dma_transfer_direction     slave_dirn;
919         unsigned int                    sglen;
920         u32                             maxburst;
921         u32 iflags;
922
923         data->error = -EINPROGRESS;
924
925         WARN_ON(host->data);
926         host->sg = NULL;
927         host->data = data;
928
929         iflags = ATMCI_DATA_ERROR_FLAGS;
930
931         /*
932          * We don't do DMA on "complex" transfers, i.e. with
933          * non-word-aligned buffers or lengths. Also, we don't bother
934          * with all the DMA setup overhead for short transfers.
935          */
936         if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
937                 return atmci_prepare_data(host, data);
938         if (data->blksz & 3)
939                 return atmci_prepare_data(host, data);
940
941         for_each_sg(data->sg, sg, data->sg_len, i) {
942                 if (sg->offset & 3 || sg->length & 3)
943                         return atmci_prepare_data(host, data);
944         }
945
946         /* If we don't have a channel, we can't do DMA */
947         chan = host->dma.chan;
948         if (chan)
949                 host->data_chan = chan;
950
951         if (!chan)
952                 return -ENODEV;
953
954         if (data->flags & MMC_DATA_READ) {
955                 direction = DMA_FROM_DEVICE;
956                 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
957                 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
958         } else {
959                 direction = DMA_TO_DEVICE;
960                 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
961                 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
962         }
963
964         atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
965
966         sglen = dma_map_sg(chan->device->dev, data->sg,
967                         data->sg_len, direction);
968
969         dmaengine_slave_config(chan, &host->dma_conf);
970         desc = dmaengine_prep_slave_sg(chan,
971                         data->sg, sglen, slave_dirn,
972                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
973         if (!desc)
974                 goto unmap_exit;
975
976         host->dma.data_desc = desc;
977         desc->callback = atmci_dma_complete;
978         desc->callback_param = host;
979
980         return iflags;
981 unmap_exit:
982         dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
983         return -ENOMEM;
984 }
985
986 static void
987 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
988 {
989         return;
990 }
991
992 /*
993  * Start PDC according to transfer direction.
994  */
995 static void
996 atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
997 {
998         if (data->flags & MMC_DATA_READ)
999                 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1000         else
1001                 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1002 }
1003
1004 static void
1005 atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1006 {
1007         struct dma_chan                 *chan = host->data_chan;
1008         struct dma_async_tx_descriptor  *desc = host->dma.data_desc;
1009
1010         if (chan) {
1011                 dmaengine_submit(desc);
1012                 dma_async_issue_pending(chan);
1013         }
1014 }
1015
1016 static void atmci_stop_transfer(struct atmel_mci *host)
1017 {
1018         dev_dbg(&host->pdev->dev,
1019                 "(%s) set pending xfer complete\n", __func__);
1020         atmci_set_pending(host, EVENT_XFER_COMPLETE);
1021         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1022 }
1023
1024 /*
1025  * Stop the data transfer because error(s) occurred.
1026  */
1027 static void atmci_stop_transfer_pdc(struct atmel_mci *host)
1028 {
1029         atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
1030 }
1031
1032 static void atmci_stop_transfer_dma(struct atmel_mci *host)
1033 {
1034         struct dma_chan *chan = host->data_chan;
1035
1036         if (chan) {
1037                 dmaengine_terminate_all(chan);
1038                 atmci_dma_cleanup(host);
1039         } else {
1040                 /* Data transfer was stopped by the interrupt handler */
1041                 dev_dbg(&host->pdev->dev,
1042                         "(%s) set pending xfer complete\n", __func__);
1043                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1044                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1045         }
1046 }
1047
1048 /*
1049  * Start a request: prepare data if needed, prepare the command and activate
1050  * interrupts.
1051  */
1052 static void atmci_start_request(struct atmel_mci *host,
1053                 struct atmel_mci_slot *slot)
1054 {
1055         struct mmc_request      *mrq;
1056         struct mmc_command      *cmd;
1057         struct mmc_data         *data;
1058         u32                     iflags;
1059         u32                     cmdflags;
1060
1061         mrq = slot->mrq;
1062         host->cur_slot = slot;
1063         host->mrq = mrq;
1064
1065         host->pending_events = 0;
1066         host->completed_events = 0;
1067         host->cmd_status = 0;
1068         host->data_status = 0;
1069
1070         dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1071
1072         if (host->need_reset || host->caps.need_reset_after_xfer) {
1073                 iflags = atmci_readl(host, ATMCI_IMR);
1074                 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1075                 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1076                 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1077                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1078                 if (host->caps.has_cfg_reg)
1079                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1080                 atmci_writel(host, ATMCI_IER, iflags);
1081                 host->need_reset = false;
1082         }
1083         atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1084
1085         iflags = atmci_readl(host, ATMCI_IMR);
1086         if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1087                 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1088                                 iflags);
1089
1090         if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
1091                 /* Send init sequence (74 clock cycles) */
1092                 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1093                 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1094                         cpu_relax();
1095         }
1096         iflags = 0;
1097         data = mrq->data;
1098         if (data) {
1099                 atmci_set_timeout(host, slot, data);
1100
1101                 /* Must set block count/size before sending command */
1102                 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1103                                 | ATMCI_BLKLEN(data->blksz));
1104                 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1105                         ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1106
1107                 iflags |= host->prepare_data(host, data);
1108         }
1109
1110         iflags |= ATMCI_CMDRDY;
1111         cmd = mrq->cmd;
1112         cmdflags = atmci_prepare_command(slot->mmc, cmd);
1113         atmci_send_command(host, cmd, cmdflags);
1114
1115         if (data)
1116                 host->submit_data(host, data);
1117
1118         if (mrq->stop) {
1119                 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1120                 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1121                 if (!(data->flags & MMC_DATA_WRITE))
1122                         host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1123                 if (data->flags & MMC_DATA_STREAM)
1124                         host->stop_cmdr |= ATMCI_CMDR_STREAM;
1125                 else
1126                         host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1127         }
1128
1129         /*
1130          * We could have enabled interrupts earlier, but I suspect
1131          * that would open up a nice can of interesting race
1132          * conditions (e.g. command and data complete, but stop not
1133          * prepared yet.)
1134          */
1135         atmci_writel(host, ATMCI_IER, iflags);
1136
1137         mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1138 }
1139
1140 static void atmci_queue_request(struct atmel_mci *host,
1141                 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1142 {
1143         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1144                         host->state);
1145
1146         spin_lock_bh(&host->lock);
1147         slot->mrq = mrq;
1148         if (host->state == STATE_IDLE) {
1149                 host->state = STATE_SENDING_CMD;
1150                 atmci_start_request(host, slot);
1151         } else {
1152                 dev_dbg(&host->pdev->dev, "queue request\n");
1153                 list_add_tail(&slot->queue_node, &host->queue);
1154         }
1155         spin_unlock_bh(&host->lock);
1156 }
1157
1158 static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1159 {
1160         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1161         struct atmel_mci        *host = slot->host;
1162         struct mmc_data         *data;
1163
1164         WARN_ON(slot->mrq);
1165         dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1166
1167         /*
1168          * We may "know" the card is gone even though there's still an
1169          * electrical connection. If so, we really need to communicate
1170          * this to the MMC core since there won't be any more
1171          * interrupts as the card is completely removed. Otherwise,
1172          * the MMC core might believe the card is still there even
1173          * though the card was just removed very slowly.
1174          */
1175         if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1176                 mrq->cmd->error = -ENOMEDIUM;
1177                 mmc_request_done(mmc, mrq);
1178                 return;
1179         }
1180
1181         /* We don't support multiple blocks of weird lengths. */
1182         data = mrq->data;
1183         if (data && data->blocks > 1 && data->blksz & 3) {
1184                 mrq->cmd->error = -EINVAL;
1185                 mmc_request_done(mmc, mrq);
1186                 return;
1187         }
1188         atmci_queue_request(host, slot, mrq);
1189 }
1190
1191 static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1192 {
1193         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1194         struct atmel_mci        *host = slot->host;
1195         unsigned int            i;
1196
1197         slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1198         switch (ios->bus_width) {
1199         case MMC_BUS_WIDTH_1:
1200                 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1201                 break;
1202         case MMC_BUS_WIDTH_4:
1203                 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1204                 break;
1205         }
1206
1207         if (ios->clock) {
1208                 unsigned int clock_min = ~0U;
1209                 u32 clkdiv;
1210
1211                 spin_lock_bh(&host->lock);
1212                 if (!host->mode_reg) {
1213                         clk_enable(host->mck);
1214                         atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1215                         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1216                         if (host->caps.has_cfg_reg)
1217                                 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1218                 }
1219
1220                 /*
1221                  * Use mirror of ios->clock to prevent race with mmc
1222                  * core ios update when finding the minimum.
1223                  */
1224                 slot->clock = ios->clock;
1225                 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1226                         if (host->slot[i] && host->slot[i]->clock
1227                                         && host->slot[i]->clock < clock_min)
1228                                 clock_min = host->slot[i]->clock;
1229                 }
1230
1231                 /* Calculate clock divider */
1232                 if (host->caps.has_odd_clk_div) {
1233                         clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1234                         if (clkdiv > 511) {
1235                                 dev_warn(&mmc->class_dev,
1236                                          "clock %u too slow; using %lu\n",
1237                                          clock_min, host->bus_hz / (511 + 2));
1238                                 clkdiv = 511;
1239                         }
1240                         host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1241                                          | ATMCI_MR_CLKODD(clkdiv & 1);
1242                 } else {
1243                         clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1244                         if (clkdiv > 255) {
1245                                 dev_warn(&mmc->class_dev,
1246                                          "clock %u too slow; using %lu\n",
1247                                          clock_min, host->bus_hz / (2 * 256));
1248                                 clkdiv = 255;
1249                         }
1250                         host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1251                 }
1252
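                /*
                 * Worked example, illustrative only and with assumed
                 * rates: for a hypothetical bus_hz of 132 MHz and a
                 * requested 25 MHz card clock, the odd-divider formula
                 * gives clkdiv = DIV_ROUND_UP(132000000, 25000000) - 2
                 * = 4, i.e. {CLKDIV,CLKODD} = {2,0} and
                 * f = 132 MHz / (4 + 2) = 22 MHz, while the legacy
                 * formula gives clkdiv = DIV_ROUND_UP(132000000,
                 * 50000000) - 1 = 2 and f = 132 MHz / (2 * (2 + 1))
                 * = 22 MHz.
                 */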
1253                 /*
1254                  * WRPROOF and RDPROOF prevent overruns/underruns by
1255                  * stopping the clock when the FIFO is full/empty.
1256                  * This state is not expected to last for long.
1257                  */
1258                 if (host->caps.has_rwproof)
1259                         host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1260
1261                 if (host->caps.has_cfg_reg) {
1262                         /* set up High Speed mode according to the card's capabilities */
1263                         if (ios->timing == MMC_TIMING_SD_HS)
1264                                 host->cfg_reg |= ATMCI_CFG_HSMODE;
1265                         else
1266                                 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1267                 }
1268
1269                 if (list_empty(&host->queue)) {
1270                         atmci_writel(host, ATMCI_MR, host->mode_reg);
1271                         if (host->caps.has_cfg_reg)
1272                                 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1273                 } else {
1274                         host->need_clock_update = true;
1275                 }
1276
1277                 spin_unlock_bh(&host->lock);
1278         } else {
1279                 bool any_slot_active = false;
1280
1281                 spin_lock_bh(&host->lock);
1282                 slot->clock = 0;
1283                 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1284                         if (host->slot[i] && host->slot[i]->clock) {
1285                                 any_slot_active = true;
1286                                 break;
1287                         }
1288                 }
1289                 if (!any_slot_active) {
1290                         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1291                         if (host->mode_reg) {
1292                                 atmci_readl(host, ATMCI_MR);
1293                                 clk_disable(host->mck);
1294                         }
1295                         host->mode_reg = 0;
1296                 }
1297                 spin_unlock_bh(&host->lock);
1298         }
1299
1300         switch (ios->power_mode) {
1301         case MMC_POWER_UP:
1302                 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1303                 break;
1304         default:
1305                 /*
1306                  * TODO: None of the currently available AVR32-based
1307                  * boards allow MMC power to be turned off. Implement
1308                  * power control when this can be tested properly.
1309                  *
1310                  * We also need to hook this into the clock management
1311                  * somehow so that newly inserted cards aren't
1312                  * subjected to a fast clock before we have a chance
1313                  * to figure out what the maximum rate is. Currently,
1314                  * there's no way to avoid this, and there never will
1315                  * be for boards that don't support power control.
1316                  */
1317                 break;
1318         }
1319 }
1320
1321 static int atmci_get_ro(struct mmc_host *mmc)
1322 {
1323         int                     read_only = -ENOSYS;
1324         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1325
1326         if (gpio_is_valid(slot->wp_pin)) {
1327                 read_only = gpio_get_value(slot->wp_pin);
1328                 dev_dbg(&mmc->class_dev, "card is %s\n",
1329                                 read_only ? "read-only" : "read-write");
1330         }
1331
1332         return read_only;
1333 }
1334
1335 static int atmci_get_cd(struct mmc_host *mmc)
1336 {
1337         int                     present = -ENOSYS;
1338         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1339
1340         if (gpio_is_valid(slot->detect_pin)) {
1341                 present = !(gpio_get_value(slot->detect_pin) ^
1342                             slot->detect_is_active_high);
1343                 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1344                                 present ? "" : "not ");
1345         }
1346
1347         return present;
1348 }
1349
1350 static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1351 {
1352         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1353         struct atmel_mci        *host = slot->host;
1354
1355         if (enable)
1356                 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1357         else
1358                 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1359 }
1360
1361 static const struct mmc_host_ops atmci_ops = {
1362         .request        = atmci_request,
1363         .set_ios        = atmci_set_ios,
1364         .get_ro         = atmci_get_ro,
1365         .get_cd         = atmci_get_cd,
1366         .enable_sdio_irq = atmci_enable_sdio_irq,
1367 };
1368
1369 /* Called with host->lock held */
1370 static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1371         __releases(&host->lock)
1372         __acquires(&host->lock)
1373 {
1374         struct atmel_mci_slot   *slot = NULL;
1375         struct mmc_host         *prev_mmc = host->cur_slot->mmc;
1376
1377         WARN_ON(host->cmd || host->data);
1378
1379         /*
1380          * Update the MMC clock rate if necessary. This may be
1381          * necessary if set_ios() is called when a different slot is
1382          * busy transferring data.
1383          */
1384         if (host->need_clock_update) {
1385                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1386                 if (host->caps.has_cfg_reg)
1387                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1388         }
1389
1390         host->cur_slot->mrq = NULL;
1391         host->mrq = NULL;
1392         if (!list_empty(&host->queue)) {
1393                 slot = list_entry(host->queue.next,
1394                                 struct atmel_mci_slot, queue_node);
1395                 list_del(&slot->queue_node);
1396                 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1397                                 mmc_hostname(slot->mmc));
1398                 host->state = STATE_SENDING_CMD;
1399                 atmci_start_request(host, slot);
1400         } else {
1401                 dev_vdbg(&host->pdev->dev, "list empty\n");
1402                 host->state = STATE_IDLE;
1403         }
1404
1405         del_timer(&host->timer);
1406
1407         spin_unlock(&host->lock);
1408         mmc_request_done(prev_mmc, mrq);
1409         spin_lock(&host->lock);
1410 }
1411
1412 static void atmci_command_complete(struct atmel_mci *host,
1413                         struct mmc_command *cmd)
1414 {
1415         u32             status = host->cmd_status;
1416
1417         /* Read the response from the card (up to 16 bytes) */
1418         cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1419         cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1420         cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1421         cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1422
1423         if (status & ATMCI_RTOE)
1424                 cmd->error = -ETIMEDOUT;
1425         else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1426                 cmd->error = -EILSEQ;
1427         else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1428                 cmd->error = -EIO;
1429         else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1430                 if (host->caps.need_blksz_mul_4) {
1431                         cmd->error = -EINVAL;
1432                         host->need_reset = 1;
1433                 }
1434         } else
1435                 cmd->error = 0;
1436 }
1437
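/*
 * Card-detect debounce timer callback. Re-enables the detect GPIO IRQ,
 * re-reads the card presence and, if it changed, aborts any request that
 * is queued or in progress on the slot before notifying the MMC core.
 */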
1438 static void atmci_detect_change(unsigned long data)
1439 {
1440         struct atmel_mci_slot   *slot = (struct atmel_mci_slot *)data;
1441         bool                    present;
1442         bool                    present_old;
1443
1444         /*
1445          * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1446          * freeing the interrupt. We must not re-enable the interrupt
1447          * if it has been freed, and if we're shutting down, it
1448          * doesn't really matter whether the card is present or not.
1449          */
1450         smp_rmb();
1451         if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1452                 return;
1453
1454         enable_irq(gpio_to_irq(slot->detect_pin));
1455         present = !(gpio_get_value(slot->detect_pin) ^
1456                     slot->detect_is_active_high);
1457         present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1458
1459         dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1460                         present, present_old);
1461
1462         if (present != present_old) {
1463                 struct atmel_mci        *host = slot->host;
1464                 struct mmc_request      *mrq;
1465
1466                 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1467                         present ? "inserted" : "removed");
1468
1469                 spin_lock(&host->lock);
1470
1471                 if (!present)
1472                         clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1473                 else
1474                         set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1475
1476                 /* Clean up queue if present */
1477                 mrq = slot->mrq;
1478                 if (mrq) {
1479                         if (mrq == host->mrq) {
1480                                 /*
1481                                  * Reset controller to terminate any ongoing
1482                                  * commands or data transfers.
1483                                  */
1484                                 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1485                                 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1486                                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1487                                 if (host->caps.has_cfg_reg)
1488                                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1489
1490                                 host->data = NULL;
1491                                 host->cmd = NULL;
1492
1493                                 switch (host->state) {
1494                                 case STATE_IDLE:
1495                                         break;
1496                                 case STATE_SENDING_CMD:
1497                                         mrq->cmd->error = -ENOMEDIUM;
1498                                         if (mrq->data)
1499                                                 host->stop_transfer(host);
1500                                         break;
1501                                 case STATE_DATA_XFER:
1502                                         mrq->data->error = -ENOMEDIUM;
1503                                         host->stop_transfer(host);
1504                                         break;
1505                                 case STATE_WAITING_NOTBUSY:
1506                                         mrq->data->error = -ENOMEDIUM;
1507                                         break;
1508                                 case STATE_SENDING_STOP:
1509                                         mrq->stop->error = -ENOMEDIUM;
1510                                         break;
1511                                 case STATE_END_REQUEST:
1512                                         break;
1513                                 }
1514
1515                                 atmci_request_end(host, mrq);
1516                         } else {
1517                                 list_del(&slot->queue_node);
1518                                 mrq->cmd->error = -ENOMEDIUM;
1519                                 if (mrq->data)
1520                                         mrq->data->error = -ENOMEDIUM;
1521                                 if (mrq->stop)
1522                                         mrq->stop->error = -ENOMEDIUM;
1523
1524                                 spin_unlock(&host->lock);
1525                                 mmc_request_done(slot->mmc, mrq);
1526                                 spin_lock(&host->lock);
1527                         }
1528                 }
1529                 spin_unlock(&host->lock);
1530
1531                 mmc_detect_change(slot->mmc, 0);
1532         }
1533 }
1534
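/*
 * Bottom half of the interrupt handler: runs the request state machine.
 * Each pass consumes the events flagged as pending by atmci_interrupt()
 * and advances host->state until no further transition is possible.
 */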
1535 static void atmci_tasklet_func(unsigned long priv)
1536 {
1537         struct atmel_mci        *host = (struct atmel_mci *)priv;
1538         struct mmc_request      *mrq = host->mrq;
1539         struct mmc_data         *data = host->data;
1540         enum atmel_mci_state    state = host->state;
1541         enum atmel_mci_state    prev_state;
1542         u32                     status;
1543
1544         spin_lock(&host->lock);
1545
1546         state = host->state;
1547
1548         dev_vdbg(&host->pdev->dev,
1549                 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1550                 state, host->pending_events, host->completed_events,
1551                 atmci_readl(host, ATMCI_IMR));
1552
1553         do {
1554                 prev_state = state;
1555                 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1556
1557                 switch (state) {
1558                 case STATE_IDLE:
1559                         break;
1560
1561                 case STATE_SENDING_CMD:
1562                         /*
1563                          * The command has been sent; we are waiting for it
1564                          * to become ready. Three next states are possible:
1565                          * END_REQUEST by default, WAITING_NOTBUSY if the
1566                          * command requires it, or DATA_XFER if there is data.
1567                          */
1568                         dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1569                         if (!atmci_test_and_clear_pending(host,
1570                                                 EVENT_CMD_RDY))
1571                                 break;
1572
1573                         dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1574                         host->cmd = NULL;
1575                         atmci_set_completed(host, EVENT_CMD_RDY);
1576                         atmci_command_complete(host, mrq->cmd);
1577                         if (mrq->data) {
1578                                 dev_dbg(&host->pdev->dev,
1579                                         "command with data transfer\n");
1580                                 /*
1581                                  * If there is a command error don't start
1582                                  * data transfer.
1583                                  */
1584                                 if (mrq->cmd->error) {
1585                                         host->stop_transfer(host);
1586                                         host->data = NULL;
1587                                         atmci_writel(host, ATMCI_IDR,
1588                                                      ATMCI_TXRDY | ATMCI_RXRDY
1589                                                      | ATMCI_DATA_ERROR_FLAGS);
1590                                         state = STATE_END_REQUEST;
1591                                 } else
1592                                         state = STATE_DATA_XFER;
1593                         } else if (mrq->cmd->flags & MMC_RSP_BUSY) {
1594                                 dev_dbg(&host->pdev->dev,
1595                                         "command response needs to wait for notbusy\n");
1596                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1597                                 state = STATE_WAITING_NOTBUSY;
1598                         } else
1599                                 state = STATE_END_REQUEST;
1600
1601                         break;
1602
1603                 case STATE_DATA_XFER:
1604                         if (atmci_test_and_clear_pending(host,
1605                                                 EVENT_DATA_ERROR)) {
1606                                 dev_dbg(&host->pdev->dev, "set completed data error\n");
1607                                 atmci_set_completed(host, EVENT_DATA_ERROR);
1608                                 state = STATE_END_REQUEST;
1609                                 break;
1610                         }
1611
1612                         /*
1613                          * A data transfer is in progress. The event expected
1614                          * to move to the next state depends on the transfer
1615                          * type (PDC or DMA). Once the transfer is done we go
1616                          * to WAITING_NOTBUSY for writes (or reads needing the
1617                          * not-busy flag), or straight to SENDING_STOP.
1618                          */
1619                         dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1620                         if (!atmci_test_and_clear_pending(host,
1621                                                 EVENT_XFER_COMPLETE))
1622                                 break;
1623
1624                         dev_dbg(&host->pdev->dev,
1625                                 "(%s) set completed xfer complete\n",
1626                                 __func__);
1627                         atmci_set_completed(host, EVENT_XFER_COMPLETE);
1628
1629                         if (host->caps.need_notbusy_for_read_ops ||
1630                            (host->data->flags & MMC_DATA_WRITE)) {
1631                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1632                                 state = STATE_WAITING_NOTBUSY;
1633                         } else if (host->mrq->stop) {
1634                                 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1635                                 atmci_send_stop_cmd(host, data);
1636                                 state = STATE_SENDING_STOP;
1637                         } else {
1638                                 host->data = NULL;
1639                                 data->bytes_xfered = data->blocks * data->blksz;
1640                                 data->error = 0;
1641                                 state = STATE_END_REQUEST;
1642                         }
1643                         break;
1644
1645                 case STATE_WAITING_NOTBUSY:
1646                         /*
1647                          * We can be in this state for two reasons: a command
1648                          * that requires waiting for the not-busy signal (stop
1649                          * command included) or a data transfer. In the latter
1650                          * case, we may still need to send a stop command.
1651                          */
1652                         dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1653                         if (!atmci_test_and_clear_pending(host,
1654                                                 EVENT_NOTBUSY))
1655                                 break;
1656
1657                         dev_dbg(&host->pdev->dev, "set completed not busy\n");
1658                         atmci_set_completed(host, EVENT_NOTBUSY);
1659
1660                         if (host->data) {
1661                                 /*
1662                                  * For some commands such as CMD53, even if
1663                                  * there is data transfer, there is no stop
1664                                  * command to send.
1665                                  */
1666                                 if (host->mrq->stop) {
1667                                         atmci_writel(host, ATMCI_IER,
1668                                                      ATMCI_CMDRDY);
1669                                         atmci_send_stop_cmd(host, data);
1670                                         state = STATE_SENDING_STOP;
1671                                 } else {
1672                                         host->data = NULL;
1673                                         data->bytes_xfered = data->blocks
1674                                                              * data->blksz;
1675                                         data->error = 0;
1676                                         state = STATE_END_REQUEST;
1677                                 }
1678                         } else
1679                                 state = STATE_END_REQUEST;
1680                         break;
1681
1682                 case STATE_SENDING_STOP:
1683                         /*
1684                          * In this state, it is important to set host->data to
1685                          * NULL (which is tested in the waiting notbusy state)
1686                          * in order to go to the end request state instead of
1687                          * sending stop again.
1688                          */
1689                         dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1690                         if (!atmci_test_and_clear_pending(host,
1691                                                 EVENT_CMD_RDY))
1692                                 break;
1693
1694                         dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1695                         host->cmd = NULL;
1696                         data->bytes_xfered = data->blocks * data->blksz;
1697                         data->error = 0;
1698                         atmci_command_complete(host, mrq->stop);
1699                         if (mrq->stop->error) {
1700                                 host->stop_transfer(host);
1701                                 atmci_writel(host, ATMCI_IDR,
1702                                              ATMCI_TXRDY | ATMCI_RXRDY
1703                                              | ATMCI_DATA_ERROR_FLAGS);
1704                                 state = STATE_END_REQUEST;
1705                         } else {
1706                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1707                                 state = STATE_WAITING_NOTBUSY;
1708                         }
1709                         host->data = NULL;
1710                         break;
1711
1712                 case STATE_END_REQUEST:
1713                         atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1714                                            | ATMCI_DATA_ERROR_FLAGS);
1715                         status = host->data_status;
1716                         if (unlikely(status)) {
1717                                 host->stop_transfer(host);
1718                                 host->data = NULL;
1719                                 if (status & ATMCI_DTOE) {
1720                                         data->error = -ETIMEDOUT;
1721                                 } else if (status & ATMCI_DCRCE) {
1722                                         data->error = -EILSEQ;
1723                                 } else {
1724                                         data->error = -EIO;
1725                                 }
1726                         }
1727
1728                         atmci_request_end(host, host->mrq);
1729                         state = STATE_IDLE;
1730                         break;
1731                 }
1732         } while (state != prev_state);
1733
1734         host->state = state;
1735
1736         spin_unlock(&host->lock);
1737 }
1738
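/*
 * PIO read path: drain the receive register into the current scatterlist
 * entry, one 32-bit word at a time, as long as RXRDY stays set. Words that
 * straddle a scatterlist boundary are split across the two entries.
 */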
1739 static void atmci_read_data_pio(struct atmel_mci *host)
1740 {
1741         struct scatterlist      *sg = host->sg;
1742         void                    *buf = sg_virt(sg);
1743         unsigned int            offset = host->pio_offset;
1744         struct mmc_data         *data = host->data;
1745         u32                     value;
1746         u32                     status;
1747         unsigned int            nbytes = 0;
1748
1749         do {
1750                 value = atmci_readl(host, ATMCI_RDR);
1751                 if (likely(offset + 4 <= sg->length)) {
1752                         put_unaligned(value, (u32 *)(buf + offset));
1753
1754                         offset += 4;
1755                         nbytes += 4;
1756
1757                         if (offset == sg->length) {
1758                                 flush_dcache_page(sg_page(sg));
1759                                 host->sg = sg = sg_next(sg);
1760                                 if (!sg)
1761                                         goto done;
1762
1763                                 offset = 0;
1764                                 buf = sg_virt(sg);
1765                         }
1766                 } else {
1767                         unsigned int remaining = sg->length - offset;
1768                         memcpy(buf + offset, &value, remaining);
1769                         nbytes += remaining;
1770
1771                         flush_dcache_page(sg_page(sg));
1772                         host->sg = sg = sg_next(sg);
1773                         if (!sg)
1774                                 goto done;
1775
1776                         offset = 4 - remaining;
1777                         buf = sg_virt(sg);
1778                         memcpy(buf, (u8 *)&value + remaining, offset);
1779                         nbytes += offset;
1780                 }
1781
1782                 status = atmci_readl(host, ATMCI_SR);
1783                 if (status & ATMCI_DATA_ERROR_FLAGS) {
1784                         atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1785                                                 | ATMCI_DATA_ERROR_FLAGS));
1786                         host->data_status = status;
1787                         data->bytes_xfered += nbytes;
1788                         return;
1789                 }
1790         } while (status & ATMCI_RXRDY);
1791
1792         host->pio_offset = offset;
1793         data->bytes_xfered += nbytes;
1794
1795         return;
1796
1797 done:
1798         atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1799         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1800         data->bytes_xfered += nbytes;
1801         smp_wmb();
1802         atmci_set_pending(host, EVENT_XFER_COMPLETE);
1803 }
1804
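/*
 * PIO write path: feed the transmit register from the current scatterlist
 * entry, one 32-bit word at a time, as long as TXRDY stays set. The final,
 * possibly partial, word of an entry is merged with the start of the next.
 */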
1805 static void atmci_write_data_pio(struct atmel_mci *host)
1806 {
1807         struct scatterlist      *sg = host->sg;
1808         void                    *buf = sg_virt(sg);
1809         unsigned int            offset = host->pio_offset;
1810         struct mmc_data         *data = host->data;
1811         u32                     value;
1812         u32                     status;
1813         unsigned int            nbytes = 0;
1814
1815         do {
1816                 if (likely(offset + 4 <= sg->length)) {
1817                         value = get_unaligned((u32 *)(buf + offset));
1818                         atmci_writel(host, ATMCI_TDR, value);
1819
1820                         offset += 4;
1821                         nbytes += 4;
1822                         if (offset == sg->length) {
1823                                 host->sg = sg = sg_next(sg);
1824                                 if (!sg)
1825                                         goto done;
1826
1827                                 offset = 0;
1828                                 buf = sg_virt(sg);
1829                         }
1830                 } else {
1831                         unsigned int remaining = sg->length - offset;
1832
1833                         value = 0;
1834                         memcpy(&value, buf + offset, remaining);
1835                         nbytes += remaining;
1836
1837                         host->sg = sg = sg_next(sg);
1838                         if (!sg) {
1839                                 atmci_writel(host, ATMCI_TDR, value);
1840                                 goto done;
1841                         }
1842
1843                         offset = 4 - remaining;
1844                         buf = sg_virt(sg);
1845                         memcpy((u8 *)&value + remaining, buf, offset);
1846                         atmci_writel(host, ATMCI_TDR, value);
1847                         nbytes += offset;
1848                 }
1849
1850                 status = atmci_readl(host, ATMCI_SR);
1851                 if (status & ATMCI_DATA_ERROR_FLAGS) {
1852                         atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1853                                                 | ATMCI_DATA_ERROR_FLAGS));
1854                         host->data_status = status;
1855                         data->bytes_xfered += nbytes;
1856                         return;
1857                 }
1858         } while (status & ATMCI_TXRDY);
1859
1860         host->pio_offset = offset;
1861         data->bytes_xfered += nbytes;
1862
1863         return;
1864
1865 done:
1866         atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1867         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1868         data->bytes_xfered += nbytes;
1869         smp_wmb();
1870         atmci_set_pending(host, EVENT_XFER_COMPLETE);
1871 }
1872
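/* Signal a pending SDIO interrupt to every slot whose SDIO IRQ bit is set. */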
1873 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1874 {
1875         int     i;
1876
1877         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1878                 struct atmel_mci_slot *slot = host->slot[i];
1879                 if (slot && (status & slot->sdio_irq)) {
1880                         mmc_signal_sdio_irq(slot->mmc);
1881                 }
1882         }
1883 }
1884
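/*
 * Interrupt handler: acknowledge the enabled causes, record the relevant
 * status for the tasklet, reprogram the PDC buffers when a transfer is
 * still in progress and service the data registers for PIO transfers.
 * The loop is bounded (pass_count) to avoid spinning here indefinitely.
 */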
1886 static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1887 {
1888         struct atmel_mci        *host = dev_id;
1889         u32                     status, mask, pending;
1890         unsigned int            pass_count = 0;
1891
1892         do {
1893                 status = atmci_readl(host, ATMCI_SR);
1894                 mask = atmci_readl(host, ATMCI_IMR);
1895                 pending = status & mask;
1896                 if (!pending)
1897                         break;
1898
1899                 if (pending & ATMCI_DATA_ERROR_FLAGS) {
1900                         dev_dbg(&host->pdev->dev, "IRQ: data error\n");
1901                         atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
1902                                         | ATMCI_RXRDY | ATMCI_TXRDY
1903                                         | ATMCI_ENDRX | ATMCI_ENDTX
1904                                         | ATMCI_RXBUFF | ATMCI_TXBUFE);
1905
1906                         host->data_status = status;
1907                         dev_dbg(&host->pdev->dev, "set pending data error\n");
1908                         smp_wmb();
1909                         atmci_set_pending(host, EVENT_DATA_ERROR);
1910                         tasklet_schedule(&host->tasklet);
1911                 }
1912
1913                 if (pending & ATMCI_TXBUFE) {
1914                         dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
1915                         atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
1916                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1917                         /*
1918                          * We can receive this interrupt before the second PDC
1919                          * buffer has been configured, so reconfigure both the
1920                          * first and second buffers again.
1921                          */
1922                         if (host->data_size) {
1923                                 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
1924                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
1925                                 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
1926                         } else {
1927                                 atmci_pdc_complete(host);
1928                         }
1929                 } else if (pending & ATMCI_ENDTX) {
1930                         dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
1931                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1932
1933                         if (host->data_size) {
1934                                 atmci_pdc_set_single_buf(host,
1935                                                 XFER_TRANSMIT, PDC_SECOND_BUF);
1936                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
1937                         }
1938                 }
1939
1940                 if (pending & ATMCI_RXBUFF) {
1941                         dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
1942                         atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
1943                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1944                         /*
1945                          * We can receive this interrupt before the second PDC
1946                          * buffer has been configured, so reconfigure both the
1947                          * first and second buffers again.
1948                          */
1949                         if (host->data_size) {
1950                                 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
1951                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1952                                 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
1953                         } else {
1954                                 atmci_pdc_complete(host);
1955                         }
1956                 } else if (pending & ATMCI_ENDRX) {
1957                         dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
1958                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1959
1960                         if (host->data_size) {
1961                                 atmci_pdc_set_single_buf(host,
1962                                                 XFER_RECEIVE, PDC_SECOND_BUF);
1963                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1964                         }
1965                 }
1966
1967                 /*
1968                  * Early MCI IP revisions, mainly the ones using the PDC,
1969                  * have issues with the not-busy signal: it is not asserted
1970                  * after a data transmission unless a stop command has been
1971                  * sent. The workaround is to use the BLKE signal instead.
1972                  */
1973                 if (pending & ATMCI_BLKE) {
1974                         dev_dbg(&host->pdev->dev, "IRQ: blke\n");
1975                         atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
1976                         smp_wmb();
1977                         dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1978                         atmci_set_pending(host, EVENT_NOTBUSY);
1979                         tasklet_schedule(&host->tasklet);
1980                 }
1981
1982                 if (pending & ATMCI_NOTBUSY) {
1983                         dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
1984                         atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
1985                         smp_wmb();
1986                         dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1987                         atmci_set_pending(host, EVENT_NOTBUSY);
1988                         tasklet_schedule(&host->tasklet);
1989                 }
1990
1991                 if (pending & ATMCI_RXRDY)
1992                         atmci_read_data_pio(host);
1993                 if (pending & ATMCI_TXRDY)
1994                         atmci_write_data_pio(host);
1995
1996                 if (pending & ATMCI_CMDRDY) {
1997                         dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
1998                         atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1999                         host->cmd_status = status;
2000                         smp_wmb();
2001                         dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
2002                         atmci_set_pending(host, EVENT_CMD_RDY);
2003                         tasklet_schedule(&host->tasklet);
2004                 }
2005
2006                 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2007                         atmci_sdio_interrupt(host, status);
2008
2009         } while (pass_count++ < 5);
2010
2011         return pass_count ? IRQ_HANDLED : IRQ_NONE;
2012 }
2013
2014 static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2015 {
2016         struct atmel_mci_slot   *slot = dev_id;
2017
2018         /*
2019          * Disable interrupts until the pin has stabilized and check
2020          * the state then. Use mod_timer() since we may be in the
2021          * middle of the timer routine when this interrupt triggers.
2022          */
2023         disable_irq_nosync(irq);
2024         mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2025
2026         return IRQ_HANDLED;
2027 }
2028
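/*
 * Allocate and register one mmc_host for the given slot: derive its
 * capabilities from the controller version and platform data, and wire up
 * the optional card-detect and write-protect GPIOs.
 */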
2029 static int __init atmci_init_slot(struct atmel_mci *host,
2030                 struct mci_slot_pdata *slot_data, unsigned int id,
2031                 u32 sdc_reg, u32 sdio_irq)
2032 {
2033         struct mmc_host                 *mmc;
2034         struct atmel_mci_slot           *slot;
2035
2036         mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
2037         if (!mmc)
2038                 return -ENOMEM;
2039
2040         slot = mmc_priv(mmc);
2041         slot->mmc = mmc;
2042         slot->host = host;
2043         slot->detect_pin = slot_data->detect_pin;
2044         slot->wp_pin = slot_data->wp_pin;
2045         slot->detect_is_active_high = slot_data->detect_is_active_high;
2046         slot->sdc_reg = sdc_reg;
2047         slot->sdio_irq = sdio_irq;
2048
2049         mmc->ops = &atmci_ops;
2050         mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2051         mmc->f_max = host->bus_hz / 2;
2052         mmc->ocr_avail  = MMC_VDD_32_33 | MMC_VDD_33_34;
2053         if (sdio_irq)
2054                 mmc->caps |= MMC_CAP_SDIO_IRQ;
2055         if (host->caps.has_highspeed)
2056                 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2057         /*
2058          * Without the read/write proof capability, it is strongly recommended
2059          * to use only a 1-bit data bus, to prevent FIFO underruns and overruns
2060          * which would corrupt the data.
2061          */
2062         if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
2063                 mmc->caps |= MMC_CAP_4_BIT_DATA;
2064
2065         if (atmci_get_version(host) < 0x200) {
2066                 mmc->max_segs = 256;
2067                 mmc->max_blk_size = 4095;
2068                 mmc->max_blk_count = 256;
2069                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2070                 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2071         } else {
2072                 mmc->max_segs = 64;
2073                 mmc->max_req_size = 32768 * 512;
2074                 mmc->max_blk_size = 32768;
2075                 mmc->max_blk_count = 512;
2076         }
2077
2078         /* Assume card is present initially */
2079         set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2080         if (gpio_is_valid(slot->detect_pin)) {
2081                 if (gpio_request(slot->detect_pin, "mmc_detect")) {
2082                         dev_dbg(&mmc->class_dev, "no detect pin available\n");
2083                         slot->detect_pin = -EBUSY;
2084                 } else if (gpio_get_value(slot->detect_pin) ^
2085                                 slot->detect_is_active_high) {
2086                         clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2087                 }
2088         }
2089
2090         if (!gpio_is_valid(slot->detect_pin))
2091                 mmc->caps |= MMC_CAP_NEEDS_POLL;
2092
2093         if (gpio_is_valid(slot->wp_pin)) {
2094                 if (gpio_request(slot->wp_pin, "mmc_wp")) {
2095                         dev_dbg(&mmc->class_dev, "no WP pin available\n");
2096                         slot->wp_pin = -EBUSY;
2097                 }
2098         }
2099
2100         host->slot[id] = slot;
2101         mmc_add_host(mmc);
2102
2103         if (gpio_is_valid(slot->detect_pin)) {
2104                 int ret;
2105
2106                 setup_timer(&slot->detect_timer, atmci_detect_change,
2107                                 (unsigned long)slot);
2108
2109                 ret = request_irq(gpio_to_irq(slot->detect_pin),
2110                                 atmci_detect_interrupt,
2111                                 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2112                                 "mmc-detect", slot);
2113                 if (ret) {
2114                         dev_dbg(&mmc->class_dev,
2115                                 "could not request IRQ %d for detect pin\n",
2116                                 gpio_to_irq(slot->detect_pin));
2117                         gpio_free(slot->detect_pin);
2118                         slot->detect_pin = -EBUSY;
2119                 }
2120         }
2121
2122         atmci_init_debugfs(slot);
2123
2124         return 0;
2125 }
2126
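/*
 * Tear down one slot: flag the shutdown so the detect timer callback will
 * not re-enable the card-detect IRQ once it has been freed, then unregister
 * the mmc_host and release the GPIOs.
 */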
2127 static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2128                 unsigned int id)
2129 {
2130         /* Debugfs stuff is cleaned up by mmc core */
2131
2132         set_bit(ATMCI_SHUTDOWN, &slot->flags);
2133         smp_wmb();
2134
2135         mmc_remove_host(slot->mmc);
2136
2137         if (gpio_is_valid(slot->detect_pin)) {
2138                 int pin = slot->detect_pin;
2139
2140                 free_irq(gpio_to_irq(pin), slot);
2141                 del_timer_sync(&slot->detect_timer);
2142                 gpio_free(pin);
2143         }
2144         if (gpio_is_valid(slot->wp_pin))
2145                 gpio_free(slot->wp_pin);
2146
2147         slot->host->slot[id] = NULL;
2148         mmc_free_host(slot->mmc);
2149 }
2150
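/*
 * dmaengine channel filter: accept only the channel whose device matches
 * the DMA slave described in the platform data.
 */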
2151 static bool atmci_filter(struct dma_chan *chan, void *slave)
2152 {
2153         struct mci_dma_data     *sl = slave;
2154
2155         if (sl && find_slave_dev(sl) == chan->device->dev) {
2156                 chan->private = slave_data_ptr(sl);
2157                 return true;
2158         } else {
2159                 return false;
2160         }
2161 }
2162
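/*
 * Request the DMA slave channel described by the platform data, if any,
 * and pre-fill the slave configuration shared by both transfer directions.
 * Returns true when a channel was obtained and DMA can be used.
 */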
2163 static bool atmci_configure_dma(struct atmel_mci *host)
2164 {
2165         struct mci_platform_data        *pdata;
2166
2167         if (host == NULL)
2168                 return false;
2169
2170         pdata = host->pdev->dev.platform_data;
2171
2172         if (pdata && find_slave_dev(pdata->dma_slave)) {
2173                 dma_cap_mask_t mask;
2174
2175                 /* Try to grab a DMA channel */
2176                 dma_cap_zero(mask);
2177                 dma_cap_set(DMA_SLAVE, mask);
2178                 host->dma.chan =
2179                         dma_request_channel(mask, atmci_filter, pdata->dma_slave);
2180         }
2181         if (!host->dma.chan) {
2182                 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2183                 return false;
2184         } else {
2185                 dev_info(&host->pdev->dev,
2186                                         "using %s for DMA transfers\n",
2187                                         dma_chan_name(host->dma.chan));
2188
2189                 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2190                 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2191                 host->dma_conf.src_maxburst = 1;
2192                 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2193                 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2194                 host->dma_conf.dst_maxburst = 1;
2195                 host->dma_conf.device_fc = false;
2196                 return true;
2197         }
2198 }
2199
2200 /*
2201  * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
2202  * module: HSMCI provides DMA support and a new configuration register, but
2203  * it no longer supports the PDC.
2204  */
2205 static void __init atmci_get_cap(struct atmel_mci *host)
2206 {
2207         unsigned int version;
2208
2209         version = atmci_get_version(host);
2210         dev_info(&host->pdev->dev,
2211                         "version: 0x%x\n", version);
2212
2213         host->caps.has_dma = 0;
2214         host->caps.has_pdc = 1;
2215         host->caps.has_cfg_reg = 0;
2216         host->caps.has_cstor_reg = 0;
2217         host->caps.has_highspeed = 0;
2218         host->caps.has_rwproof = 0;
2219         host->caps.has_odd_clk_div = 0;
2220         host->caps.has_bad_data_ordering = 1;
2221         host->caps.need_reset_after_xfer = 1;
2222         host->caps.need_blksz_mul_4 = 1;
2223         host->caps.need_notbusy_for_read_ops = 0;
2224
2225         /* keep only major version number; cases below fall through on purpose */
2226         switch (version & 0xf00) {
2227         case 0x500:
2228                 host->caps.has_odd_clk_div = 1;
2229         case 0x400:
2230         case 0x300:
2231 #ifdef CONFIG_AT_HDMAC
2232                 host->caps.has_dma = 1;
2233 #else
2234                 dev_info(&host->pdev->dev,
2235                         "has DMA capability but no DMA engine is selected, using PIO instead\n");
2236 #endif
2237                 host->caps.has_pdc = 0;
2238                 host->caps.has_cfg_reg = 1;
2239                 host->caps.has_cstor_reg = 1;
2240                 host->caps.has_highspeed = 1;
2241         case 0x200:
2242                 host->caps.has_rwproof = 1;
2243                 host->caps.need_blksz_mul_4 = 0;
2244                 host->caps.need_notbusy_for_read_ops = 1;
2245         case 0x100:
2246                 host->caps.has_bad_data_ordering = 0;
2247                 host->caps.need_reset_after_xfer = 0;
2248         case 0x0:
2249                 break;
2250         default:
2251                 host->caps.has_pdc = 0;
2252                 dev_warn(&host->pdev->dev,
2253                                 "Unknown MCI version, assuming minimum capabilities\n");
2254                 break;
2255         }
2256 }
2257
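/*
 * Probe: map the registers, read the bus clock rate, install the interrupt
 * handler and tasklet, pick DMA, PDC or PIO transfer helpers according to
 * the detected capabilities, register the configured slots and, when the
 * controller lacks read/write proof, allocate the bounce buffer.
 */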
2258 static int __init atmci_probe(struct platform_device *pdev)
2259 {
2260         struct mci_platform_data        *pdata;
2261         struct atmel_mci                *host;
2262         struct resource                 *regs;
2263         unsigned int                    nr_slots;
2264         int                             irq;
2265         int                             ret;
2266
2267         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2268         if (!regs)
2269                 return -ENXIO;
2270         pdata = pdev->dev.platform_data;
2271         if (!pdata)
2272                 return -ENXIO;
2273         irq = platform_get_irq(pdev, 0);
2274         if (irq < 0)
2275                 return irq;
2276
2277         host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
2278         if (!host)
2279                 return -ENOMEM;
2280
2281         host->pdev = pdev;
2282         spin_lock_init(&host->lock);
2283         INIT_LIST_HEAD(&host->queue);
2284
2285         host->mck = clk_get(&pdev->dev, "mci_clk");
2286         if (IS_ERR(host->mck)) {
2287                 ret = PTR_ERR(host->mck);
2288                 goto err_clk_get;
2289         }
2290
2291         ret = -ENOMEM;
2292         host->regs = ioremap(regs->start, resource_size(regs));
2293         if (!host->regs)
2294                 goto err_ioremap;
2295
2296         clk_enable(host->mck);
2297         atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2298         host->bus_hz = clk_get_rate(host->mck);
2299         clk_disable(host->mck);
2300
2301         host->mapbase = regs->start;
2302
2303         tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2304
2305         ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2306         if (ret)
2307                 goto err_request_irq;
2308
2309         /* Get MCI capabilities and set operations according to it */
2310         atmci_get_cap(host);
2311         if (host->caps.has_dma && atmci_configure_dma(host)) {
2312                 host->prepare_data = &atmci_prepare_data_dma;
2313                 host->submit_data = &atmci_submit_data_dma;
2314                 host->stop_transfer = &atmci_stop_transfer_dma;
2315         } else if (host->caps.has_pdc) {
2316                 dev_info(&pdev->dev, "using PDC\n");
2317                 host->prepare_data = &atmci_prepare_data_pdc;
2318                 host->submit_data = &atmci_submit_data_pdc;
2319                 host->stop_transfer = &atmci_stop_transfer_pdc;
2320         } else {
2321                 dev_info(&pdev->dev, "using PIO\n");
2322                 host->prepare_data = &atmci_prepare_data;
2323                 host->submit_data = &atmci_submit_data;
2324                 host->stop_transfer = &atmci_stop_transfer;
2325         }
2326
2327         platform_set_drvdata(pdev, host);
2328
2329         setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2330
2331         /* We need at least one slot to succeed */
2332         nr_slots = 0;
2333         ret = -ENODEV;
2334         if (pdata->slot[0].bus_width) {
2335                 ret = atmci_init_slot(host, &pdata->slot[0],
2336                                 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2337                 if (!ret) {
2338                         nr_slots++;
2339                         host->buf_size = host->slot[0]->mmc->max_req_size;
2340                 }
2341         }
2342         if (pdata->slot[1].bus_width) {
2343                 ret = atmci_init_slot(host, &pdata->slot[1],
2344                                 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2345                 if (!ret) {
2346                         nr_slots++;
2347                         if (host->slot[1]->mmc->max_req_size > host->buf_size)
2348                                 host->buf_size =
2349                                         host->slot[1]->mmc->max_req_size;
2350                 }
2351         }
2352
2353         if (!nr_slots) {
2354                 dev_err(&pdev->dev, "init failed: no slot defined\n");
2355                 goto err_init_slot;
2356         }
2357
2358         if (!host->caps.has_rwproof) {
2359                 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2360                                                   &host->buf_phys_addr,
2361                                                   GFP_KERNEL);
2362                 if (!host->buffer) {
2363                         ret = -ENOMEM;
2364                         dev_err(&pdev->dev, "buffer allocation failed\n");
2365                         goto err_init_slot;
2366                 }
2367         }
2368
2369         dev_info(&pdev->dev,
2370                         "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2371                         host->mapbase, irq, nr_slots);
2372
2373         return 0;
2374
2375 err_init_slot:
2376         if (host->dma.chan)
2377                 dma_release_channel(host->dma.chan);
2378         free_irq(irq, host);
2379 err_request_irq:
2380         iounmap(host->regs);
2381 err_ioremap:
2382         clk_put(host->mck);
2383 err_clk_get:
2384         kfree(host);
2385         return ret;
2386 }
2387
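/*
 * Remove: free the bounce buffer, tear down the slots, disable the
 * controller and release the DMA channel, IRQ, MMIO mapping and clock.
 */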
2388 static int __exit atmci_remove(struct platform_device *pdev)
2389 {
2390         struct atmel_mci        *host = platform_get_drvdata(pdev);
2391         unsigned int            i;
2392
2393         platform_set_drvdata(pdev, NULL);
2394
2395         if (host->buffer)
2396                 dma_free_coherent(&pdev->dev, host->buf_size,
2397                                   host->buffer, host->buf_phys_addr);
2398
2399         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2400                 if (host->slot[i])
2401                         atmci_cleanup_slot(host->slot[i], i);
2402         }
2403
2404         clk_enable(host->mck);
2405         atmci_writel(host, ATMCI_IDR, ~0UL);
2406         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2407         atmci_readl(host, ATMCI_SR);
2408         clk_disable(host->mck);
2409
2411         if (host->dma.chan)
2412                 dma_release_channel(host->dma.chan);
2414
2415         free_irq(platform_get_irq(pdev, 0), host);
2416         iounmap(host->regs);
2417
2418         clk_put(host->mck);
2419         kfree(host);
2420
2421         return 0;
2422 }
2423
2424 #ifdef CONFIG_PM
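/*
 * Suspend every registered slot; if one of them fails, resume the slots
 * that were already suspended and propagate the error.
 */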
2425 static int atmci_suspend(struct device *dev)
2426 {
2427         struct atmel_mci *host = dev_get_drvdata(dev);
2428         int i;
2429
2430         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2431                 struct atmel_mci_slot *slot = host->slot[i];
2432                 int ret;
2433
2434                 if (!slot)
2435                         continue;
2436                 ret = mmc_suspend_host(slot->mmc);
2437                 if (ret < 0) {
2438                         while (--i >= 0) {
2439                                 slot = host->slot[i];
2440                                 if (slot
2441                                 && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
2442                                         mmc_resume_host(host->slot[i]->mmc);
2443                                         clear_bit(ATMCI_SUSPENDED, &slot->flags);
2444                                 }
2445                         }
2446                         return ret;
2447                 } else {
2448                         set_bit(ATMCI_SUSPENDED, &slot->flags);
2449                 }
2450         }
2451
2452         return 0;
2453 }
2454
2455 static int atmci_resume(struct device *dev)
2456 {
2457         struct atmel_mci *host = dev_get_drvdata(dev);
2458         int i;
2459         int ret = 0;
2460
2461         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2462                 struct atmel_mci_slot *slot = host->slot[i];
2463                 int err;
2464
2466                 if (!slot)
2467                         continue;
2468                 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2469                         continue;
2470                 err = mmc_resume_host(slot->mmc);
2471                 if (err < 0)
2472                         ret = err;
2473                 else
2474                         clear_bit(ATMCI_SUSPENDED, &slot->flags);
2475         }
2476
2477         return ret;
2478 }
2479 static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2480 #define ATMCI_PM_OPS    (&atmci_pm)
2481 #else
2482 #define ATMCI_PM_OPS    NULL
2483 #endif
2484
2485 static struct platform_driver atmci_driver = {
2486         .remove         = __exit_p(atmci_remove),
2487         .driver         = {
2488                 .name           = "atmel_mci",
2489                 .pm             = ATMCI_PM_OPS,
2490         },
2491 };
2492
2493 static int __init atmci_init(void)
2494 {
2495         return platform_driver_probe(&atmci_driver, atmci_probe);
2496 }
2497
2498 static void __exit atmci_exit(void)
2499 {
2500         platform_driver_unregister(&atmci_driver);
2501 }
2502
2503 late_initcall(atmci_init); /* try to load after the DMA driver when built-in */
2504 module_exit(atmci_exit);
2505
2506 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2507 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2508 MODULE_LICENSE("GPL v2");