/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

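/*
 * Build the CMD register value for an mmc_command: mark stop/abort
 * commands, request "wait previous data" for data commands, apply the
 * CMD11 voltage-switch special casing, and set the response, data and
 * direction bits.  Platform drv_data hooks get a final chance to tweak
 * the result.
 */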
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

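/*
 * Pre-compute a stop/abort command for a data command so it can be
 * issued later without further setup: CMD12 (STOP_TRANSMISSION) for
 * block reads/writes and tuning blocks, or a CCCR ABORT via CMD52 for
 * SDIO extended transfers.  Returns the CMD register value, or 0 if no
 * stop command applies.
 */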
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);

	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

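/*
 * Translate the DMA-mapped scatterlist into the IDMAC descriptor chain
 * (32-bit or 64-bit address variant): hand each descriptor its buffer
 * address and length with the OWN bit set, then mark the first and
 * last descriptors.  The final wmb() makes sure the descriptors hit
 * memory before the IDMAC is started.
 */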
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u64 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) *
				sizeof(struct idmac_desc_64addr);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u32 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des2 = mem_addr;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;
	}

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

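/*
 * Set up the IDMAC descriptor ring in the page at host->sg_cpu:
 * forward-link the descriptors, terminate the ring with an end-of-ring
 * descriptor, then program the descriptor base address and the
 * interrupt enables into the controller.
 */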
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
			p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
								(i + 1));

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = host->sg_dma;
		p->des0 = IDMAC_DES0_ER;
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

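/*
 * Map the scatterlist for DMA if the transfer qualifies.  Transfers
 * shorter than DW_MCI_DMA_THRESHOLD bytes, or with non-word-aligned
 * buffers or lengths, get -EINVAL and fall back to PIO.  When called
 * from pre_req (next == true) the mapped length is cached in
 * data->host_cookie so the hot path can reuse it.
 */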
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

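/*
 * Pick the DMA multiple-transaction size (MSIZE) and the RX/TX FIFO
 * watermarks for the current block size and write them to FIFOTH.
 * Falls back to MSIZE = 1 when the block size is not a multiple of the
 * FIFO width, or when no candidate MSIZE divides both the block depth
 * and the TX watermark remainder.
 */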
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_MMC_HS400 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

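/*
 * Try to hand the transfer to the DMA engine: map the scatterlist,
 * retune FIFOTH if the block size changed, enable the DMA interface
 * and mask the RX/TX FIFO interrupts.  Returns non-zero when the
 * caller must fall back to PIO.
 */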
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;

		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

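/*
 * Issue a command that only talks to the card interface unit (e.g. a
 * clock update) and busy-wait up to 500ms for the controller to accept
 * it, i.e. for SDMMC_CMD_START to clear.
 */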
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

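/*
 * Program the card clock for this slot: compute CLKDIV from bus_hz and
 * the requested rate, then go through the disable / update / re-enable
 * sequence, informing the CIU with a clock-update command at each
 * step.  Also sets the slot's bus width in CTYPE.
 */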
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock with the clock divisor reflected */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

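/*
 * Kick off a request on the controller: reset the per-request state,
 * program the timeout and byte/block counts for data commands, send
 * the (possibly SDMMC_CMD_INIT-flagged) command and pre-compute the
 * stop command.
 */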
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

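/*
 * Apply the core's I/O settings: bus width (CTYPE), DDR timing
 * (UHS_REG), clock, and the vmmc/vqmmc regulator sequencing for
 * power-up, power-on and power-off.
 */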
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d: %d - %d\n",
					 ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
			(slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
		read_only = 0;
	else if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -ENOSYS;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

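/*
 * Finish the current request and start the next queued slot, if any.
 * Called with host->lock held; the lock is dropped while calling
 * mmc_request_done().
 */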
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

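/*
 * Read back the command response and translate the raw interrupt
 * status into an errno.  Note that the 136-bit response registers are
 * read in reverse order: RESP3 holds the most significant word.
 */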
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

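/*
 * Translate the data-phase interrupt status into an errno and the
 * number of bytes transferred.  Any error triggers a controller reset,
 * since data may be left lingering in the FIFO.
 */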
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

1511 static void dw_mci_tasklet_func(unsigned long priv)
1512 {
1513         struct dw_mci *host = (struct dw_mci *)priv;
1514         struct mmc_data *data;
1515         struct mmc_command *cmd;
1516         struct mmc_request *mrq;
1517         enum dw_mci_state state;
1518         enum dw_mci_state prev_state;
1519         unsigned int err;
1520
1521         spin_lock(&host->lock);
1522
1523         state = host->state;
1524         data = host->data;
1525         mrq = host->mrq;
1526
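        /*
         * Run the state machine until it makes no further progress: each
         * pass consumes any pending events and may advance the state; once
         * a pass leaves the state unchanged, everything outstanding has
         * been handled.
         */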
1527         do {
1528                 prev_state = state;
1529
1530                 switch (state) {
1531                 case STATE_IDLE:
1532                 case STATE_WAITING_CMD11_DONE:
1533                         break;
1534
1535                 case STATE_SENDING_CMD11:
1536                 case STATE_SENDING_CMD:
1537                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1538                                                 &host->pending_events))
1539                                 break;
1540
1541                         cmd = host->cmd;
1542                         host->cmd = NULL;
1543                         set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1544                         err = dw_mci_command_complete(host, cmd);
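                        /*
                         * mrq->sbc is the SET_BLOCK_COUNT (CMD23)
                         * pre-command; once it completes successfully,
                         * kick off the actual data command.
                         */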
1545                         if (cmd == mrq->sbc && !err) {
1546                                 prev_state = state = STATE_SENDING_CMD;
1547                                 __dw_mci_start_request(host, host->cur_slot,
1548                                                        mrq->cmd);
1549                                 goto unlock;
1550                         }
1551
1552                         if (cmd->data && err) {
1553                                 dw_mci_stop_dma(host);
1554                                 send_stop_abort(host, data);
1555                                 state = STATE_SENDING_STOP;
1556                                 break;
1557                         }
1558
1559                         if (!cmd->data || err) {
1560                                 dw_mci_request_end(host, mrq);
1561                                 goto unlock;
1562                         }
1563
1564                         prev_state = state = STATE_SENDING_DATA;
1565                         /* fall through */
1566
1567                 case STATE_SENDING_DATA:
1568                         /*
1569                          * We could get a data error and never a transfer
1570                          * complete so we'd better check for it here.
1571                          *
1572                          * Note that we don't really care if we also got a
1573                          * transfer complete; stopping the DMA and sending an
1574                          * abort won't hurt.
1575                          */
1576                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1577                                                &host->pending_events)) {
1578                                 dw_mci_stop_dma(host);
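                                /*
                                 * Send a stop/abort unless the error was a
                                 * read timeout or end-bit error with no
                                 * stop command queued; in those cases the
                                 * card has presumably already stopped
                                 * transferring on its own.
                                 */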
1579                                 if (data->stop ||
1580                                     !(host->data_status & (SDMMC_INT_DRTO |
1581                                                            SDMMC_INT_EBE)))
1582                                         send_stop_abort(host, data);
1583                                 state = STATE_DATA_ERROR;
1584                                 break;
1585                         }
1586
1587                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1588                                                 &host->pending_events))
1589                                 break;
1590
1591                         set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1592
1593                         /*
1594                          * Handle an EVENT_DATA_ERROR that might have shown up
1595                          * before the transfer completed.  This might not have
1596                          * been caught by the check above because the interrupt
1597                          * could have gone off between the previous check and
1598                          * the check for transfer complete.
1599                          *
1600                          * Technically this ought not be needed assuming we
1601                          * get a DATA_COMPLETE eventually (we'll notice the
1602                          * error and end the request), but it shouldn't hurt.
1603                          *
1604                          * This has the advantage of sending the stop command.
1605                          */
1606                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1607                                                &host->pending_events)) {
1608                                 dw_mci_stop_dma(host);
1609                                 if (data->stop ||
1610                                     !(host->data_status & (SDMMC_INT_DRTO |
1611                                                            SDMMC_INT_EBE)))
1612                                         send_stop_abort(host, data);
1613                                 state = STATE_DATA_ERROR;
1614                                 break;
1615                         }
1616                         prev_state = state = STATE_DATA_BUSY;
1617
1618                         /* fall through */
1619
1620                 case STATE_DATA_BUSY:
1621                         if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1622                                                 &host->pending_events))
1623                                 break;
1624
1625                         host->data = NULL;
1626                         set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1627                         err = dw_mci_data_complete(host, data);
1628
1629                         if (!err) {
1630                                 if (!data->stop || mrq->sbc) {
1631                                         if (mrq->sbc && data->stop)
1632                                                 data->stop->error = 0;
1633                                         dw_mci_request_end(host, mrq);
1634                                         goto unlock;
1635                                 }
1636
1637                 /* stop command for an open-ended transfer */
1638                                 if (data->stop)
1639                                         send_stop_abort(host, data);
1640                         } else {
1641                                 /*
1642                                  * If we don't have a command complete now we'll
1643                                  * never get one since we just reset everything;
1644                                  * better end the request.
1645                                  *
1646                                  * If we do have a command complete we'll fall
1647                                  * through to the SENDING_STOP command and
1648                                  * everything will be peachy keen.
1649                                  */
1650                                 if (!test_bit(EVENT_CMD_COMPLETE,
1651                                               &host->pending_events)) {
1652                                         host->cmd = NULL;
1653                                         dw_mci_request_end(host, mrq);
1654                                         goto unlock;
1655                                 }
1656                         }
1657
1658                         /*
1659                          * If err is non-zero, a stop/abort command has
1660                          * already been issued.
1661                          */
1662                         prev_state = state = STATE_SENDING_STOP;
1663
1664                         /* fall through */
1665
1666                 case STATE_SENDING_STOP:
1667                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1668                                                 &host->pending_events))
1669                                 break;
1670
1671                         /* CMD error in data command */
1672                         if (mrq->cmd->error && mrq->data)
1673                                 dw_mci_reset(host);
1674
1675                         host->cmd = NULL;
1676                         host->data = NULL;
1677
1678                         if (mrq->stop)
1679                                 dw_mci_command_complete(host, mrq->stop);
1680                         else
1681                                 host->cmd_status = 0;
1682
1683                         dw_mci_request_end(host, mrq);
1684                         goto unlock;
1685
1686                 case STATE_DATA_ERROR:
1687                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1688                                                 &host->pending_events))
1689                                 break;
1690
1691                         state = STATE_DATA_BUSY;
1692                         break;
1693                 }
1694         } while (state != prev_state);
1695
1696         host->state = state;
1697 unlock:
1698         spin_unlock(&host->lock);
1699
1700 }
1701
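/*
 * PIO helpers: the data FIFO must be accessed in full host-data-width
 * units (2, 4 or 8 bytes, i.e. 1 << host->data_shift), so bytes left
 * over at buffer boundaries are staged in host->part_buf until a full
 * FIFO word is available (push) or has been consumed (pull).
 */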
1702 /* push final bytes to part_buf, only use during push */
1703 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1704 {
1705         memcpy((void *)&host->part_buf, buf, cnt);
1706         host->part_buf_count = cnt;
1707 }
1708
1709 /* append bytes to part_buf, only use during push */
1710 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1711 {
1712         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1713         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1714         host->part_buf_count += cnt;
1715         return cnt;
1716 }
1717
1718 /* pull first bytes from part_buf, only use during pull */
1719 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1720 {
1721         cnt = min(cnt, (int)host->part_buf_count);
1722         if (cnt) {
1723                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1724                        cnt);
1725                 host->part_buf_count -= cnt;
1726                 host->part_buf_start += cnt;
1727         }
1728         return cnt;
1729 }
1730
1731 /* pull final bytes from the part_buf, assuming it's just been filled */
1732 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1733 {
1734         memcpy(buf, &host->part_buf, cnt);
1735         host->part_buf_start = cnt;
1736         host->part_buf_count = (1 << host->data_shift) - cnt;
1737 }
1738
1739 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1740 {
1741         struct mmc_data *data = host->data;
1742         int init_cnt = cnt;
1743
1744         /* try and push anything in the part_buf */
1745         if (unlikely(host->part_buf_count)) {
1746                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1747                 buf += len;
1748                 cnt -= len;
1749                 if (host->part_buf_count == 2) {
1750                         mci_writew(host, DATA(host->data_offset),
1751                                         host->part_buf16);
1752                         host->part_buf_count = 0;
1753                 }
1754         }
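        /*
         * On architectures without efficient unaligned access, bounce
         * misaligned buffers through an aligned stack buffer instead of
         * dereferencing a misaligned pointer.
         */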
1755 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1756         if (unlikely((unsigned long)buf & 0x1)) {
1757                 while (cnt >= 2) {
1758                         u16 aligned_buf[64];
1759                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1760                         int items = len >> 1;
1761                         int i;
1762                         /* memcpy from input buffer into aligned buffer */
1763                         memcpy(aligned_buf, buf, len);
1764                         buf += len;
1765                         cnt -= len;
1766                         /* push data from aligned buffer into fifo */
1767                         for (i = 0; i < items; ++i)
1768                                 mci_writew(host, DATA(host->data_offset),
1769                                                 aligned_buf[i]);
1770                 }
1771         } else
1772 #endif
1773         {
1774                 u16 *pdata = buf;
1775                 for (; cnt >= 2; cnt -= 2)
1776                         mci_writew(host, DATA(host->data_offset), *pdata++);
1777                 buf = pdata;
1778         }
1779         /* put anything remaining in the part_buf */
1780         if (cnt) {
1781                 dw_mci_set_part_bytes(host, buf, cnt);
1782                 /* Push data if we have reached the expected data length */
1783                 if ((data->bytes_xfered + init_cnt) ==
1784                     (data->blksz * data->blocks))
1785                         mci_writew(host, DATA(host->data_offset),
1786                                    host->part_buf16);
1787         }
1788 }
1789
1790 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1791 {
1792 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1793         if (unlikely((unsigned long)buf & 0x1)) {
1794                 while (cnt >= 2) {
1795                         /* pull data from fifo into aligned buffer */
1796                         u16 aligned_buf[64];
1797                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1798                         int items = len >> 1;
1799                         int i;
1800                         for (i = 0; i < items; ++i)
1801                                 aligned_buf[i] = mci_readw(host,
1802                                                 DATA(host->data_offset));
1803                         /* memcpy from aligned buffer into output buffer */
1804                         memcpy(buf, aligned_buf, len);
1805                         buf += len;
1806                         cnt -= len;
1807                 }
1808         } else
1809 #endif
1810         {
1811                 u16 *pdata = buf;
1812                 for (; cnt >= 2; cnt -= 2)
1813                         *pdata++ = mci_readw(host, DATA(host->data_offset));
1814                 buf = pdata;
1815         }
1816         if (cnt) {
1817                 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1818                 dw_mci_pull_final_bytes(host, buf, cnt);
1819         }
1820 }
1821
1822 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1823 {
1824         struct mmc_data *data = host->data;
1825         int init_cnt = cnt;
1826
1827         /* try and push anything in the part_buf */
1828         if (unlikely(host->part_buf_count)) {
1829                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1830                 buf += len;
1831                 cnt -= len;
1832                 if (host->part_buf_count == 4) {
1833                         mci_writel(host, DATA(host->data_offset),
1834                                         host->part_buf32);
1835                         host->part_buf_count = 0;
1836                 }
1837         }
1838 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1839         if (unlikely((unsigned long)buf & 0x3)) {
1840                 while (cnt >= 4) {
1841                         u32 aligned_buf[32];
1842                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1843                         int items = len >> 2;
1844                         int i;
1845                         /* memcpy from input buffer into aligned buffer */
1846                         memcpy(aligned_buf, buf, len);
1847                         buf += len;
1848                         cnt -= len;
1849                         /* push data from aligned buffer into fifo */
1850                         for (i = 0; i < items; ++i)
1851                                 mci_writel(host, DATA(host->data_offset),
1852                                                 aligned_buf[i]);
1853                 }
1854         } else
1855 #endif
1856         {
1857                 u32 *pdata = buf;
1858                 for (; cnt >= 4; cnt -= 4)
1859                         mci_writel(host, DATA(host->data_offset), *pdata++);
1860                 buf = pdata;
1861         }
1862         /* put anything remaining in the part_buf */
1863         if (cnt) {
1864                 dw_mci_set_part_bytes(host, buf, cnt);
1865                 /* Push data if we have reached the expected data length */
1866                 if ((data->bytes_xfered + init_cnt) ==
1867                     (data->blksz * data->blocks))
1868                         mci_writel(host, DATA(host->data_offset),
1869                                    host->part_buf32);
1870         }
1871 }
1872
1873 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1874 {
1875 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1876         if (unlikely((unsigned long)buf & 0x3)) {
1877                 while (cnt >= 4) {
1878                         /* pull data from fifo into aligned buffer */
1879                         u32 aligned_buf[32];
1880                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1881                         int items = len >> 2;
1882                         int i;
1883                         for (i = 0; i < items; ++i)
1884                                 aligned_buf[i] = mci_readl(host,
1885                                                 DATA(host->data_offset));
1886                         /* memcpy from aligned buffer into output buffer */
1887                         memcpy(buf, aligned_buf, len);
1888                         buf += len;
1889                         cnt -= len;
1890                 }
1891         } else
1892 #endif
1893         {
1894                 u32 *pdata = buf;
1895                 for (; cnt >= 4; cnt -= 4)
1896                         *pdata++ = mci_readl(host, DATA(host->data_offset));
1897                 buf = pdata;
1898         }
1899         if (cnt) {
1900                 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1901                 dw_mci_pull_final_bytes(host, buf, cnt);
1902         }
1903 }
1904
1905 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1906 {
1907         struct mmc_data *data = host->data;
1908         int init_cnt = cnt;
1909
1910         /* try and push anything in the part_buf */
1911         if (unlikely(host->part_buf_count)) {
1912                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1913                 buf += len;
1914                 cnt -= len;
1915
1916                 if (host->part_buf_count == 8) {
1917                         mci_writeq(host, DATA(host->data_offset),
1918                                         host->part_buf);
1919                         host->part_buf_count = 0;
1920                 }
1921         }
1922 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1923         if (unlikely((unsigned long)buf & 0x7)) {
1924                 while (cnt >= 8) {
1925                         u64 aligned_buf[16];
1926                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1927                         int items = len >> 3;
1928                         int i;
1929                         /* memcpy from input buffer into aligned buffer */
1930                         memcpy(aligned_buf, buf, len);
1931                         buf += len;
1932                         cnt -= len;
1933                         /* push data from aligned buffer into fifo */
1934                         for (i = 0; i < items; ++i)
1935                                 mci_writeq(host, DATA(host->data_offset),
1936                                                 aligned_buf[i]);
1937                 }
1938         } else
1939 #endif
1940         {
1941                 u64 *pdata = buf;
1942                 for (; cnt >= 8; cnt -= 8)
1943                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1944                 buf = pdata;
1945         }
1946         /* put anything remaining in the part_buf */
1947         if (cnt) {
1948                 dw_mci_set_part_bytes(host, buf, cnt);
1949                 /* Push data if we have reached the expected data length */
1950                 if ((data->bytes_xfered + init_cnt) ==
1951                     (data->blksz * data->blocks))
1952                         mci_writeq(host, DATA(host->data_offset),
1953                                    host->part_buf);
1954         }
1955 }
1956
1957 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1958 {
1959 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1960         if (unlikely((unsigned long)buf & 0x7)) {
1961                 while (cnt >= 8) {
1962                         /* pull data from fifo into aligned buffer */
1963                         u64 aligned_buf[16];
1964                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1965                         int items = len >> 3;
1966                         int i;
1967                         for (i = 0; i < items; ++i)
1968                                 aligned_buf[i] = mci_readq(host,
1969                                                 DATA(host->data_offset));
1970                         /* memcpy from aligned buffer into output buffer */
1971                         memcpy(buf, aligned_buf, len);
1972                         buf += len;
1973                         cnt -= len;
1974                 }
1975         } else
1976 #endif
1977         {
1978                 u64 *pdata = buf;
1979                 for (; cnt >= 8; cnt -= 8)
1980                         *pdata++ = mci_readq(host, DATA(host->data_offset));
1981                 buf = pdata;
1982         }
1983         if (cnt) {
1984                 host->part_buf = mci_readq(host, DATA(host->data_offset));
1985                 dw_mci_pull_final_bytes(host, buf, cnt);
1986         }
1987 }
1988
1989 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1990 {
1991         int len;
1992
1993         /* get remaining partial bytes */
1994         len = dw_mci_pull_part_bytes(host, buf, cnt);
1995         if (unlikely(len == cnt))
1996                 return;
1997         buf += len;
1998         cnt -= len;
1999
2000         /* get the rest of the data */
2001         host->pull_data(host, buf, cnt);
2002 }
2003
2004 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2005 {
2006         struct sg_mapping_iter *sg_miter = &host->sg_miter;
2007         void *buf;
2008         unsigned int offset;
2009         struct mmc_data *data = host->data;
2010         int shift = host->data_shift;
2011         u32 status;
2012         unsigned int len;
2013         unsigned int remain, fcnt;
2014
2015         do {
2016                 if (!sg_miter_next(sg_miter))
2017                         goto done;
2018
2019                 host->sg = sg_miter->piter.sg;
2020                 buf = sg_miter->addr;
2021                 remain = sg_miter->length;
2022                 offset = 0;
2023
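                /*
                 * Bytes available to read: the FIFO word count scaled by
                 * the host data width, plus anything already staged in
                 * part_buf.
                 */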
2024                 do {
2025                         fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2026                                         << shift) + host->part_buf_count;
2027                         len = min(remain, fcnt);
2028                         if (!len)
2029                                 break;
2030                         dw_mci_pull_data(host, (void *)(buf + offset), len);
2031                         data->bytes_xfered += len;
2032                         offset += len;
2033                         remain -= len;
2034                 } while (remain);
2035
2036                 sg_miter->consumed = offset;
2037                 status = mci_readl(host, MINTSTS);
2038                 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2039         /* if RXDR is ready, read again; on DTO also drain the FIFO */
2040         } while ((status & SDMMC_INT_RXDR) ||
2041                  (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2042
2043         if (!remain) {
2044                 if (!sg_miter_next(sg_miter))
2045                         goto done;
2046                 sg_miter->consumed = 0;
2047         }
2048         sg_miter_stop(sg_miter);
2049         return;
2050
2051 done:
2052         sg_miter_stop(sg_miter);
2053         host->sg = NULL;
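        /* order the host->sg = NULL store before EVENT_XFER_COMPLETE is set */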
2054         smp_wmb();
2055         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2056 }
2057
2058 static void dw_mci_write_data_pio(struct dw_mci *host)
2059 {
2060         struct sg_mapping_iter *sg_miter = &host->sg_miter;
2061         void *buf;
2062         unsigned int offset;
2063         struct mmc_data *data = host->data;
2064         int shift = host->data_shift;
2065         u32 status;
2066         unsigned int len;
2067         unsigned int fifo_depth = host->fifo_depth;
2068         unsigned int remain, fcnt;
2069
2070         do {
2071                 if (!sg_miter_next(sg_miter))
2072                         goto done;
2073
2074                 host->sg = sg_miter->piter.sg;
2075                 buf = sg_miter->addr;
2076                 remain = sg_miter->length;
2077                 offset = 0;
2078
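                /*
                 * Room available in the FIFO: free slots scaled by the
                 * host data width, minus bytes already staged in part_buf.
                 */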
2079                 do {
2080                         fcnt = ((fifo_depth -
2081                                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2082                                         << shift) - host->part_buf_count;
2083                         len = min(remain, fcnt);
2084                         if (!len)
2085                                 break;
2086                         host->push_data(host, (void *)(buf + offset), len);
2087                         data->bytes_xfered += len;
2088                         offset += len;
2089                         remain -= len;
2090                 } while (remain);
2091
2092                 sg_miter->consumed = offset;
2093                 status = mci_readl(host, MINTSTS);
2094                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2095         } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
2096
2097         if (!remain) {
2098                 if (!sg_miter_next(sg_miter))
2099                         goto done;
2100                 sg_miter->consumed = 0;
2101         }
2102         sg_miter_stop(sg_miter);
2103         return;
2104
2105 done:
2106         sg_miter_stop(sg_miter);
2107         host->sg = NULL;
2108         smp_wmb();
2109         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2110 }
2111
2112 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2113 {
2114         if (!host->cmd_status)
2115                 host->cmd_status = status;
2116
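        /* make cmd_status visible before the tasklet sees the event bit */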
2117         smp_wmb();
2118
2119         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2120         tasklet_schedule(&host->tasklet);
2121 }
2122
2123 static void dw_mci_handle_cd(struct dw_mci *host)
2124 {
2125         int i;
2126
2127         for (i = 0; i < host->num_slots; i++) {
2128                 struct dw_mci_slot *slot = host->slot[i];
2129
2130                 if (!slot)
2131                         continue;
2132
2133                 if (slot->mmc->ops->card_event)
2134                         slot->mmc->ops->card_event(slot->mmc);
2135                 mmc_detect_change(slot->mmc,
2136                         msecs_to_jiffies(host->pdata->detect_delay_ms));
2137         }
2138 }
2139
2140 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2141 {
2142         struct dw_mci *host = dev_id;
2143         u32 pending;
2144         int i;
2145
2146         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2147
2148         /*
2149          * DTO fix - version 2.10a and below, and only if internal DMA
2150          * is configured.
2151          */
2152         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
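                /*
                 * STATUS[29:17] is the FIFO count: data sitting in the
                 * FIFO with no interrupt pending means the DTO interrupt
                 * was lost, so synthesize it.
                 */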
2153                 if (!pending &&
2154                     ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2155                         pending |= SDMMC_INT_DATA_OVER;
2156         }
2157
2158         if (pending) {
2159                 /* Check volt switch first, since it can look like an error */
2160                 if ((host->state == STATE_SENDING_CMD11) &&
2161                     (pending & SDMMC_INT_VOLT_SWITCH)) {
2162                         mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2163                         pending &= ~SDMMC_INT_VOLT_SWITCH;
2164                         dw_mci_cmd_interrupt(host, pending);
2165                 }
2166
2167                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2168                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2169                         host->cmd_status = pending;
2170                         smp_wmb();
2171                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2172                 }
2173
2174                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2175                         /* if there is an error report DATA_ERROR */
2176                         mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2177                         host->data_status = pending;
2178                         smp_wmb();
2179                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
2180                         tasklet_schedule(&host->tasklet);
2181                 }
2182
2183                 if (pending & SDMMC_INT_DATA_OVER) {
2184                         mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2185                         if (!host->data_status)
2186                                 host->data_status = pending;
2187                         smp_wmb();
2188                         if (host->dir_status == DW_MCI_RECV_STATUS) {
2189                                 if (host->sg != NULL)
2190                                         dw_mci_read_data_pio(host, true);
2191                         }
2192                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2193                         tasklet_schedule(&host->tasklet);
2194                 }
2195
2196                 if (pending & SDMMC_INT_RXDR) {
2197                         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2198                         if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2199                                 dw_mci_read_data_pio(host, false);
2200                 }
2201
2202                 if (pending & SDMMC_INT_TXDR) {
2203                         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2204                         if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2205                                 dw_mci_write_data_pio(host);
2206                 }
2207
2208                 if (pending & SDMMC_INT_CMD_DONE) {
2209                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2210                         dw_mci_cmd_interrupt(host, pending);
2211                 }
2212
2213                 if (pending & SDMMC_INT_CD) {
2214                         mci_writel(host, RINTSTS, SDMMC_INT_CD);
2215                         dw_mci_handle_cd(host);
2216                 }
2217
2218                 /* Handle SDIO Interrupts */
2219                 for (i = 0; i < host->num_slots; i++) {
2220                         struct dw_mci_slot *slot = host->slot[i];
2221
2222                         if (!slot)
2223                                 continue;
2224
2225                         if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2226                                 mci_writel(host, RINTSTS,
2227                                            SDMMC_INT_SDIO(slot->sdio_id));
2228                                 mmc_signal_sdio_irq(slot->mmc);
2229                         }
2230                 }
2231
2232         }
2233
2234 #ifdef CONFIG_MMC_DW_IDMAC
2235         /* Handle DMA interrupts */
2236         if (host->dma_64bit_address == 1) {
2237                 pending = mci_readl(host, IDSTS64);
2238                 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2239                         mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2240                                                         SDMMC_IDMAC_INT_RI);
2241                         mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2242                         host->dma_ops->complete(host);
2243                 }
2244         } else {
2245                 pending = mci_readl(host, IDSTS);
2246                 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2247                         mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2248                                                         SDMMC_IDMAC_INT_RI);
2249                         mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2250                         host->dma_ops->complete(host);
2251                 }
2252         }
2253 #endif
2254
2255         return IRQ_HANDLED;
2256 }
2257
2258 #ifdef CONFIG_OF
2259 /* given a slot id, find out the device node representing that slot */
2260 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2261 {
2262         struct device_node *np;
2263         const __be32 *addr;
2264         int len;
2265
2266         if (!dev || !dev->of_node)
2267                 return NULL;
2268
2269         for_each_child_of_node(dev->of_node, np) {
2270                 addr = of_get_property(np, "reg", &len);
2271                 if (!addr || (len < sizeof(int)))
2272                         continue;
2273                 if (be32_to_cpup(addr) == slot)
2274                         return np;
2275         }
2276         return NULL;
2277 }
2278
2279 static struct dw_mci_of_slot_quirks {
2280         char *quirk;
2281         int id;
2282 } of_slot_quirks[] = {
2283         {
2284                 .quirk  = "disable-wp",
2285                 .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2286         },
2287 };
2288
2289 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2290 {
2291         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2292         int quirks = 0;
2293         int idx;
2294
2295         /* get quirks */
2296         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2297                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2298                         dev_warn(dev, "Slot quirk %s is deprecated\n",
2299                                         of_slot_quirks[idx].quirk);
2300                         quirks |= of_slot_quirks[idx].id;
2301                 }
2302
2303         return quirks;
2304 }
2305 #else /* CONFIG_OF */
2306 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2307 {
2308         return 0;
2309 }
2310 #endif /* CONFIG_OF */
2311
2312 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2313 {
2314         struct mmc_host *mmc;
2315         struct dw_mci_slot *slot;
2316         const struct dw_mci_drv_data *drv_data = host->drv_data;
2317         int ctrl_id, ret;
2318         u32 freq[2];
2319
2320         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2321         if (!mmc)
2322                 return -ENOMEM;
2323
2324         slot = mmc_priv(mmc);
2325         slot->id = id;
2326         slot->sdio_id = host->sdio_id0 + id;
2327         slot->mmc = mmc;
2328         slot->host = host;
2329         host->slot[id] = slot;
2330
2331         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2332
2333         mmc->ops = &dw_mci_ops;
2334         if (of_property_read_u32_array(host->dev->of_node,
2335                                        "clock-freq-min-max", freq, 2)) {
2336                 mmc->f_min = DW_MCI_FREQ_MIN;
2337                 mmc->f_max = DW_MCI_FREQ_MAX;
2338         } else {
2339                 mmc->f_min = freq[0];
2340                 mmc->f_max = freq[1];
2341         }
2342
2343         /* if there are external regulators, get them */
2344         ret = mmc_regulator_get_supply(mmc);
2345         if (ret == -EPROBE_DEFER)
2346                 goto err_host_allocated;
2347
2348         if (!mmc->ocr_avail)
2349                 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2350
2351         if (host->pdata->caps)
2352                 mmc->caps = host->pdata->caps;
2353
2354         if (host->pdata->pm_caps)
2355                 mmc->pm_caps = host->pdata->pm_caps;
2356
2357         if (host->dev->of_node) {
2358                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2359                 if (ctrl_id < 0)
2360                         ctrl_id = 0;
2361         } else {
2362                 ctrl_id = to_platform_device(host->dev)->id;
2363         }
2364         if (drv_data && drv_data->caps)
2365                 mmc->caps |= drv_data->caps[ctrl_id];
2366
2367         if (host->pdata->caps2)
2368                 mmc->caps2 = host->pdata->caps2;
2369
2370         ret = mmc_of_parse(mmc);
2371         if (ret)
2372                 goto err_host_allocated;
2373
2374         if (host->pdata->blk_settings) {
2375                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2376                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2377                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2378                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2379                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2380         } else {
2381                 /* Useful defaults if platform data is unset. */
2382 #ifdef CONFIG_MMC_DW_IDMAC
2383                 mmc->max_segs = host->ring_size;
2384                 mmc->max_blk_size = 65536;
2385                 mmc->max_seg_size = 0x1000;
2386                 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2387                 mmc->max_blk_count = mmc->max_req_size / 512;
2388 #else
2389                 mmc->max_segs = 64;
2390                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2391                 mmc->max_blk_count = 512;
2392                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2393                 mmc->max_seg_size = mmc->max_req_size;
2394 #endif /* CONFIG_MMC_DW_IDMAC */
2395         }
2396
2397         if (dw_mci_get_cd(mmc))
2398                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2399         else
2400                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2401
2402         ret = mmc_add_host(mmc);
2403         if (ret)
2404                 goto err_host_allocated;
2405
2406 #if defined(CONFIG_DEBUG_FS)
2407         dw_mci_init_debugfs(slot);
2408 #endif
2409
2410         return 0;
2411
2412 err_host_allocated:
2413         mmc_free_host(mmc);
2414         return ret;
2415 }
2416
2417 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2418 {
2419         /* Debugfs stuff is cleaned up by mmc core */
2420         mmc_remove_host(slot->mmc);
2421         slot->host->slot[id] = NULL;
2422         mmc_free_host(slot->mmc);
2423 }
2424
2425 static void dw_mci_init_dma(struct dw_mci *host)
2426 {
2427         int addr_config;
2428         /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
2429         addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
2430
2431         if (addr_config == 1) {
2432                 /* host supports IDMAC in 64-bit address mode */
2433                 host->dma_64bit_address = 1;
2434                 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2435                 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2436                         dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2437         } else {
2438                 /* host supports IDMAC in 32-bit address mode */
2439                 host->dma_64bit_address = 0;
2440                 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2441         }
2442
2443         /* Alloc memory for sg translation */
2444         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2445                                           &host->sg_dma, GFP_KERNEL);
2446         if (!host->sg_cpu) {
2447                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2448                         __func__);
2449                 goto no_dma;
2450         }
2451
2452         /* Determine which DMA interface to use */
2453 #ifdef CONFIG_MMC_DW_IDMAC
2454         host->dma_ops = &dw_mci_idmac_ops;
2455         dev_info(host->dev, "Using internal DMA controller.\n");
2456 #endif
2457
2458         if (!host->dma_ops)
2459                 goto no_dma;
2460
2461         if (host->dma_ops->init && host->dma_ops->start &&
2462             host->dma_ops->stop && host->dma_ops->cleanup) {
2463                 if (host->dma_ops->init(host)) {
2464                         dev_err(host->dev, "%s: Unable to initialize "
2465                                 "DMA Controller.\n", __func__);
2466                         goto no_dma;
2467                 }
2468         } else {
2469                 dev_err(host->dev, "DMA initialization not found.\n");
2470                 goto no_dma;
2471         }
2472
2473         host->use_dma = 1;
2474         return;
2475
2476 no_dma:
2477         dev_info(host->dev, "Using PIO mode.\n");
2478         host->use_dma = 0;
2479         return;
2480 }
2481
2482 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2483 {
2484         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2485         u32 ctrl;
2486
2487         ctrl = mci_readl(host, CTRL);
2488         ctrl |= reset;
2489         mci_writel(host, CTRL, ctrl);
2490
2491         /* wait till resets clear */
2492         do {
2493                 ctrl = mci_readl(host, CTRL);
2494                 if (!(ctrl & reset))
2495                         return true;
2496         } while (time_before(jiffies, timeout));
2497
2498         dev_err(host->dev,
2499                 "Timeout resetting block (ctrl reset %#x)\n",
2500                 ctrl & reset);
2501
2502         return false;
2503 }
2504
2505 static bool dw_mci_reset(struct dw_mci *host)
2506 {
2507         u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2508         bool ret = false;
2509
2510         /*
2511          * Resetting generates a block interrupt, so set the
2512          * scatter-gather pointer to NULL here.
2513          */
2514         if (host->sg) {
2515                 sg_miter_stop(&host->sg_miter);
2516                 host->sg = NULL;
2517         }
2518
2519         if (host->use_dma)
2520                 flags |= SDMMC_CTRL_DMA_RESET;
2521
2522         if (dw_mci_ctrl_reset(host, flags)) {
2523                 /*
2524                  * In all cases we clear the RAWINTS register to clear any
2525                  * interrupts.
2526                  */
2527                 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2528
2529                 /* if using dma we wait for dma_req to clear */
2530                 if (host->use_dma) {
2531                         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2532                         u32 status;
2533                         do {
2534                                 status = mci_readl(host, STATUS);
2535                                 if (!(status & SDMMC_STATUS_DMA_REQ))
2536                                         break;
2537                                 cpu_relax();
2538                         } while (time_before(jiffies, timeout));
2539
2540                         if (status & SDMMC_STATUS_DMA_REQ) {
2541                                 dev_err(host->dev,
2542                                         "%s: Timeout waiting for dma_req to "
2543                                         "clear during reset\n", __func__);
2544                                 goto ciu_out;
2545                         }
2546
2547                         /* when using DMA next we reset the fifo again */
2548                         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2549                                 goto ciu_out;
2550                 }
2551         } else {
2552                 /* if the controller reset bit did clear, then set clock regs */
2553                 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2554                         dev_err(host->dev, "%s: fifo/dma reset bits didn't "
2555                                 "clear but ciu was reset, doing clock update\n",
2556                                 __func__);
2557                         goto ciu_out;
2558                 }
2559         }
2560
2561 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2562         /* It is also recommended that we reset and reprogram idmac */
2563         dw_mci_idmac_reset(host);
2564 #endif
2565
2566         ret = true;
2567
2568 ciu_out:
2569         /* After a CTRL reset we need to have CIU set clock registers */
2570         mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2571
2572         return ret;
2573 }
2574
2575 #ifdef CONFIG_OF
2576 static struct dw_mci_of_quirks {
2577         char *quirk;
2578         int id;
2579 } of_quirks[] = {
2580         {
2581                 .quirk  = "broken-cd",
2582                 .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2583         }, {
2584                 .quirk  = "disable-wp",
2585                 .id     = DW_MCI_QUIRK_NO_WRITE_PROTECT,
2586         },
2587 };
2588
2589 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2590 {
2591         struct dw_mci_board *pdata;
2592         struct device *dev = host->dev;
2593         struct device_node *np = dev->of_node;
2594         const struct dw_mci_drv_data *drv_data = host->drv_data;
2595         int idx, ret;
2596         u32 clock_frequency;
2597
2598         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2599         if (!pdata)
2600                 return ERR_PTR(-ENOMEM);
2601
2602         /* find out number of slots supported */
2603         if (of_property_read_u32(dev->of_node, "num-slots",
2604                                 &pdata->num_slots)) {
2605                 dev_info(dev, "num-slots property not found, "
2606                                 "assuming 1 slot is available\n");
2607                 pdata->num_slots = 1;
2608         }
2609
2610         /* get quirks */
2611         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2612                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2613                         pdata->quirks |= of_quirks[idx].id;
2614
2615         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2616                 dev_info(dev, "fifo-depth property not found, using "
2617                                 "value of FIFOTH register as default\n");
2618
2619         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2620
2621         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2622                 pdata->bus_hz = clock_frequency;
2623
2624         if (drv_data && drv_data->parse_dt) {
2625                 ret = drv_data->parse_dt(host);
2626                 if (ret)
2627                         return ERR_PTR(ret);
2628         }
2629
2630         if (of_find_property(np, "supports-highspeed", NULL))
2631                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2632
2633         return pdata;
2634 }
2635
2636 #else /* CONFIG_OF */
2637 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2638 {
2639         return ERR_PTR(-EINVAL);
2640 }
2641 #endif /* CONFIG_OF */
2642
2643 static void dw_mci_enable_cd(struct dw_mci *host)
2644 {
2645         struct dw_mci_board *brd = host->pdata;
2646         unsigned long irqflags;
2647         u32 temp;
2648         int i;
2649
2650         /* No need for CD if broken card detection */
2651         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
2652                 return;
2653
2654         /* No need for CD if all slots have a non-error GPIO */
2655         for (i = 0; i < host->num_slots; i++) {
2656                 struct dw_mci_slot *slot = host->slot[i];
2657
2658                 if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
2659                         break;
2660         }
2661         if (i == host->num_slots)
2662                 return;
2663
2664         spin_lock_irqsave(&host->irq_lock, irqflags);
2665         temp = mci_readl(host, INTMASK);
2666         temp  |= SDMMC_INT_CD;
2667         mci_writel(host, INTMASK, temp);
2668         spin_unlock_irqrestore(&host->irq_lock, irqflags);
2669 }
2670
2671 int dw_mci_probe(struct dw_mci *host)
2672 {
2673         const struct dw_mci_drv_data *drv_data = host->drv_data;
2674         int width, i, ret = 0;
2675         u32 fifo_size;
2676         int init_slots = 0;
2677
2678         if (!host->pdata) {
2679                 host->pdata = dw_mci_parse_dt(host);
2680                 if (IS_ERR(host->pdata)) {
2681                         dev_err(host->dev, "platform data not available\n");
2682                         return -EINVAL;
2683                 }
2684         }
2685
2686         if (host->pdata->num_slots > 1) {
2687                 dev_err(host->dev,
2688                         "Platform data with num_slots > 1 is not supported\n");
2689                 return -ENODEV;
2690         }
2691
2692         host->biu_clk = devm_clk_get(host->dev, "biu");
2693         if (IS_ERR(host->biu_clk)) {
2694                 dev_dbg(host->dev, "biu clock not available\n");
2695         } else {
2696                 ret = clk_prepare_enable(host->biu_clk);
2697                 if (ret) {
2698                         dev_err(host->dev, "failed to enable biu clock\n");
2699                         return ret;
2700                 }
2701         }
2702
2703         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2704         if (IS_ERR(host->ciu_clk)) {
2705                 dev_dbg(host->dev, "ciu clock not available\n");
2706                 host->bus_hz = host->pdata->bus_hz;
2707         } else {
2708                 ret = clk_prepare_enable(host->ciu_clk);
2709                 if (ret) {
2710                         dev_err(host->dev, "failed to enable ciu clock\n");
2711                         goto err_clk_biu;
2712                 }
2713
2714                 if (host->pdata->bus_hz) {
2715                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2716                         if (ret)
2717                                 dev_warn(host->dev,
2718                                          "Unable to set bus rate to %uHz\n",
2719                                          host->pdata->bus_hz);
2720                 }
2721                 host->bus_hz = clk_get_rate(host->ciu_clk);
2722         }
2723
2724         if (!host->bus_hz) {
2725                 dev_err(host->dev,
2726                         "Platform data must supply bus speed\n");
2727                 ret = -ENODEV;
2728                 goto err_clk_ciu;
2729         }
2730
2731         if (drv_data && drv_data->init) {
2732                 ret = drv_data->init(host);
2733                 if (ret) {
2734                         dev_err(host->dev,
2735                                 "implementation specific init failed\n");
2736                         goto err_clk_ciu;
2737                 }
2738         }
2739
2740         if (drv_data && drv_data->setup_clock) {
2741                 ret = drv_data->setup_clock(host);
2742                 if (ret) {
2743                         dev_err(host->dev,
2744                                 "implementation specific clock setup failed\n");
2745                         goto err_clk_ciu;
2746                 }
2747         }
2748
2749         host->quirks = host->pdata->quirks;
2750
2751         spin_lock_init(&host->lock);
2752         spin_lock_init(&host->irq_lock);
2753         INIT_LIST_HEAD(&host->queue);
2754
2755         /*
2756          * Get the host data width - this assumes that HCON has been set with
2757          * the correct values.
2758          */
2759         i = (mci_readl(host, HCON) >> 7) & 0x7;
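        /* HCON[9:7] gives the host data width: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit */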
2760         if (!i) {
2761                 host->push_data = dw_mci_push_data16;
2762                 host->pull_data = dw_mci_pull_data16;
2763                 width = 16;
2764                 host->data_shift = 1;
2765         } else if (i == 2) {
2766                 host->push_data = dw_mci_push_data64;
2767                 host->pull_data = dw_mci_pull_data64;
2768                 width = 64;
2769                 host->data_shift = 3;
2770         } else {
2771                 /* Check for a reserved value, and warn if it is */
2772                 WARN((i != 1),
2773                      "HCON reports a reserved host data width!\n"
2774                      "Defaulting to 32-bit access.\n");
2775                 host->push_data = dw_mci_push_data32;
2776                 host->pull_data = dw_mci_pull_data32;
2777                 width = 32;
2778                 host->data_shift = 2;
2779         }
2780
2781         /* Reset all blocks */
2782         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
2783                 /* don't leak the enabled biu/ciu clocks on failure */
                ret = -ENODEV;
                goto err_clk_ciu;
        }
2784
2785         host->dma_ops = host->pdata->dma_ops;
2786         dw_mci_init_dma(host);
2787
2788         /* Clear the interrupts for the host controller */
2789         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2790         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2791
2792         /* Put in max timeout */
2793         mci_writel(host, TMOUT, 0xFFFFFFFF);
2794
2795         /*
2796          * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
2797          * TX mark = fifo_size / 2, DMA size = 8
2798          */
2799         if (!host->pdata->fifo_depth) {
2800                 /*
2801                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2802                  * have been overwritten by the bootloader, just like we're
2803                  * about to do, so if you know the value for your hardware, you
2804                  * should put it in the platform data.
2805                  */
2806                 fifo_size = mci_readl(host, FIFOTH);
2807                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2808         } else {
2809                 fifo_size = host->pdata->fifo_depth;
2810         }
2811         host->fifo_depth = fifo_size;
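        /* MSIZE = 0x2 selects a DMA multiple-transaction size of 8 transfers */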
2812         host->fifoth_val =
2813                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2814         mci_writel(host, FIFOTH, host->fifoth_val);
2815
2816         /* disable clock to CIU */
2817         mci_writel(host, CLKENA, 0);
2818         mci_writel(host, CLKSRC, 0);
2819
2820         /*
2821          * In 2.40a spec, Data offset is changed.
2822          * Need to check the version-id and set data-offset for DATA register.
2823          */
2824         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2825         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2826
2827         if (host->verid < DW_MMC_240A)
2828                 host->data_offset = DATA_OFFSET;
2829         else
2830                 host->data_offset = DATA_240A_OFFSET;
2831
2832         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2833         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2834                                host->irq_flags, "dw-mci", host);
2835         if (ret)
2836                 goto err_dmaunmap;
2837
2838         if (host->pdata->num_slots)
2839                 host->num_slots = host->pdata->num_slots;
2840         else
2841                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
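	/*
	 * For reference (inferred from the shift above): HCON[5:1] is the
	 * NUM_CARDS field, which holds the slot count minus one, hence
	 * the "+ 1".
	 */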
2842
	/*
	 * Enable interrupts for command done, data over, data empty and
	 * receive ready, plus errors such as response/data timeouts and
	 * CRC errors.
	 */
2847         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2848         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2849                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2850                    DW_MCI_ERROR_FLAGS);
2851         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2852
	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);
2857
2858         /* We need at least one slot to succeed */
2859         for (i = 0; i < host->num_slots; i++) {
2860                 ret = dw_mci_init_slot(host, i);
2861                 if (ret)
2862                         dev_dbg(host->dev, "slot %d init failed\n", i);
2863                 else
2864                         init_slots++;
2865         }
2866
	/* Now that the slots are all set up, we can enable card detect */
2868         dw_mci_enable_cd(host);
2869
2870         if (init_slots) {
2871                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2872         } else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
2875                 goto err_dmaunmap;
2876         }
2877
2878         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2879                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2880
2881         return 0;
2882
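/*
 * Error unwind: these labels fall through from the most recently acquired
 * resource to the earliest, so jumping to any of them releases everything
 * acquired after that point.
 */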
2883 err_dmaunmap:
2884         if (host->use_dma && host->dma_ops->exit)
2885                 host->dma_ops->exit(host);
2886
2887 err_clk_ciu:
2888         if (!IS_ERR(host->ciu_clk))
2889                 clk_disable_unprepare(host->ciu_clk);
2890
2891 err_clk_biu:
2892         if (!IS_ERR(host->biu_clk))
2893                 clk_disable_unprepare(host->biu_clk);
2894
2895         return ret;
2896 }
2897 EXPORT_SYMBOL(dw_mci_probe);
2898
2899 void dw_mci_remove(struct dw_mci *host)
2900 {
2901         int i;
2902
2903         mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2905
2906         for (i = 0; i < host->num_slots; i++) {
2907                 dev_dbg(host->dev, "remove slot %d\n", i);
2908                 if (host->slot[i])
2909                         dw_mci_cleanup_slot(host->slot[i], i);
2910         }
2911
2912         /* disable clock to CIU */
2913         mci_writel(host, CLKENA, 0);
2914         mci_writel(host, CLKSRC, 0);
2915
2916         if (host->use_dma && host->dma_ops->exit)
2917                 host->dma_ops->exit(host);
2918
2919         if (!IS_ERR(host->ciu_clk))
2920                 clk_disable_unprepare(host->ciu_clk);
2921
2922         if (!IS_ERR(host->biu_clk))
2923                 clk_disable_unprepare(host->biu_clk);
2924 }
2925 EXPORT_SYMBOL(dw_mci_remove);
2926
2929 #ifdef CONFIG_PM_SLEEP
2930 /*
2931  * TODO: we should probably disable the clock to the card in the suspend path.
2932  */
2933 int dw_mci_suspend(struct dw_mci *host)
2934 {
2935         return 0;
2936 }
2937 EXPORT_SYMBOL(dw_mci_suspend);
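/*
 * A minimal sketch of what the TODO above might look like (an assumption,
 * not part of the original driver): gate the card clock before suspending,
 * mirroring what dw_mci_remove() does:
 *
 *	mci_writel(host, CLKENA, 0);
 *	mci_writel(host, CLKSRC, 0);
 *
 * dw_mci_resume() below already restores FIFOTH and reconfigures the bus,
 * so the clock would be re-enabled on the way back.
 */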
2938
2939 int dw_mci_resume(struct dw_mci *host)
2940 {
	int i;
2942
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;
2947
2948         if (host->use_dma && host->dma_ops->init)
2949                 host->dma_ops->init(host);
2950
	/*
	 * Restore the initial value of the FIFOTH register, and invalidate
	 * prev_blksz by resetting it to zero.
	 */
2955         mci_writel(host, FIFOTH, host->fifoth_val);
2956         host->prev_blksz = 0;
2957
2958         /* Put in max timeout */
2959         mci_writel(host, TMOUT, 0xFFFFFFFF);
2960
2961         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2962         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2963                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2964                    DW_MCI_ERROR_FLAGS);
2965         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2966
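	/*
	 * Slots flagged MMC_PM_KEEP_POWER stayed powered across suspend, so
	 * the core will not reinitialize them; restore their ios settings
	 * and force a bus setup here instead.
	 */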
2967         for (i = 0; i < host->num_slots; i++) {
2968                 struct dw_mci_slot *slot = host->slot[i];
2969                 if (!slot)
2970                         continue;
2971                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2972                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2973                         dw_mci_setup_bus(slot, true);
2974                 }
2975         }
2976
	/* Now that the slots are all set up, we can enable card detect */
2978         dw_mci_enable_cd(host);
2979
2980         return 0;
2981 }
2982 EXPORT_SYMBOL(dw_mci_resume);
2983 #endif /* CONFIG_PM_SLEEP */
2984
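/*
 * Note: dw_mci_probe()/dw_mci_remove() are exported for the bus glue
 * drivers (e.g. dw_mmc-pltfm.c and dw_mmc-pci.c), which handle device
 * registration; module init/exit therefore have nothing to register here.
 */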
2985 static int __init dw_mci_init(void)
2986 {
	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2988         return 0;
2989 }
2990
2991 static void __exit dw_mci_exit(void)
2992 {
2993 }
2994
2995 module_init(dw_mci_init);
2996 module_exit(dw_mci_exit);
2997
2998 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2999 MODULE_AUTHOR("NXP Semiconductor VietNam");
3000 MODULE_AUTHOR("Imagination Technologies Ltd");
3001 MODULE_LICENSE("GPL v2");