/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/sizes.h>

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
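/*
 * fmax above is the default maximum operating frequency in Hz; it can be
 * overridden with the "fmax" module parameter registered at the end of
 * this file.
 */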
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the datactrl register
 * @pwrreg_powerup: power up value for the MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			blksz_datactrl16;
	bool			signal_direction;
static struct variant_data variant_arm = {
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,

static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,

static struct variant_data variant_u300 = {
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,

static struct variant_data variant_nomadik = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,

static struct variant_data variant_ux500 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,

static struct variant_data variant_ux500v2 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
/* This must be called with host->lock held */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
	if (host->clk_reg != clk) {
		writel(clk, host->base + MMCICLOCK);

/* This must be called with host->lock held */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
	if (host->pwr_reg != pwr) {
		writel(pwr, host->base + MMCIPOWER);

/* This must be called with host->lock held */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired >= host->mclk) {
		clk = MCI_CLK_BYPASS;
		if (variant->st_clkdiv)
			clk |= MCI_ST_UX500_NEG_EDGE;
		host->cclk = host->mclk;
	} else if (variant->st_clkdiv) {
		/*
		 * DB8500 TRM says f = mclk / (clkdiv + 2)
		 * => clkdiv = (mclk / f) - 2
		 * Round the divider up so we don't exceed the max
		 */
		clk = DIV_ROUND_UP(host->mclk, desired) - 2;
		host->cclk = host->mclk / (clk + 2);

		/*
		 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
		 * => clkdiv = mclk / (2 * f) - 1
		 */
		clk = host->mclk / (2 * desired) - 1;
		host->cclk = host->mclk / (2 * (clk + 1));
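		/*
		 * Worked example (hypothetical numbers, not from the TRMs):
		 * with mclk = 100 MHz and a desired rate of 400 kHz, the ST
		 * divider gives clkdiv = DIV_ROUND_UP(100000000, 400000) - 2
		 * = 248 and cclk = 100 MHz / 250 = 400 kHz, while the PL180
		 * divider gives clkdiv = 100000000 / (2 * 400000) - 1 = 124
		 * and cclk = 100 MHz / (2 * 125) = 400 kHz.
		 */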
	clk |= variant->clkreg_enable;
	clk |= MCI_CLK_ENABLE;
	/* This hasn't proven to be worthwhile */
	/* clk |= MCI_CLK_PWRSAVE; */

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);

mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
	writel(0, host->base + MMCICOMMAND);

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;

		writel(mask0, base + MMCIMASK0);

	writel(mask, base + MMCIMASK1);

static void mmci_stop_data(struct mmci_host *host)
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;

		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
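	/*
	 * SG_MITER_ATOMIC is used because the iterator is advanced from the
	 * PIO interrupt handler with interrupts disabled; TO_SG/FROM_SG
	 * select whether FIFO data is copied into or out of the SG list.
	 */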
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;
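	/*
	 * A host_cookie of 0 means "not prepared"; valid cookies start at 1
	 * and are handed out (and bumped) by mmci_pre_request() below.
	 */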
	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,

		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,

		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");

		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;

	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;

/*
 * This is used in only a few places, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;

	/* Wait up to 1ms for the DMA to complete */
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);

	if (data->flags & MMC_DATA_WRITE) {

		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);

static void mmci_dma_data_error(struct mmci_host *host)
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)

	host->dma_current = NULL;
	host->dma_desc_current = NULL;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;

		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;

	/* If there's no DMA channel, fall back to PIO */

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);

	next->dma_chan = chan;
	next->dma_desc = desc;

	host->dma_current = chan;
	host->dma_desc_current = desc;

	dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
			   " host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;

	if (!data->host_cookie)

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (data->host_cookie) {
		data->host_cookie = 0;

	/* Only prepare if this request is set up for DMA */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;

			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
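		/*
		 * The cookie counter may eventually wrap; skipping negative
		 * values keeps a "prepared" cookie distinct from the 0 that
		 * means "not prepared".
		 */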
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;

		chan = host->dma_tx_channel;

	/* Only clean up if the request was set up for DMA */
		dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		mrq->data->host_cookie = 0;

/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)

static inline void mmci_dma_setup(struct mmci_host *host)

static inline void mmci_dma_release(struct mmci_host *host)

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)

static inline void mmci_dma_data_error(struct mmci_host *host)

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)

#define mmci_pre_request NULL
#define mmci_post_request NULL
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;
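	/*
	 * The data timeout is programmed in card clock cycles: the
	 * nanosecond part of the card's timeout is converted to cycles at
	 * the current card clock (cclk) and added to the cycle-based part.
	 */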
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);
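	/*
	 * The classic datactrl layout below encodes the block size as
	 * log2(blksz) in bits 4..7, hence the power-of-two requirement
	 * (already enforced in mmci_request()); blksz_datactrl16 variants
	 * take the raw block size in bits 16..30 instead.
	 */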
	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);

		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			 * The ST Micro variants have a special bit

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;

				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;

		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;

		c |= MCI_CPSM_INTERRUPT;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;

		data->bytes_xfered = round_down(success, data->blksz);

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		/* The error clause is handled above, success! */
		data->bytes_xfered = data->blksz * data->blocks;

		mmci_request_end(host, data->mrq);

		mmci_start_command(host, data->stop, 0);
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	void __iomem *base = host->base;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;

		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (!cmd->data || cmd->error) {
		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);
		mmci_stop_data(host);

		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
	void __iomem *base = host->base;
	int host_remain = host->size;

		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
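		/*
		 * MMCIFIFOCNT holds the number of 32-bit words still to be
		 * transferred, so the difference from the bytes left in this
		 * transfer is what is currently sitting in the FIFO.
		 */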
		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);

				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);

			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);

		host_remain -= count;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, and so on.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

		unsigned int remain, len;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))

		if (!sg_miter_next(sg_miter))

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		status = readl(base + MMCISTATUS);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
	struct mmci_host *host = dev_id;

	spin_lock(&host->lock);

		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);
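	/*
	 * For reads the data path is armed before the command is sent below,
	 * so the transfer can begin as soon as the card starts to respond.
	 */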
	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);

			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);

				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 */

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)

			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
static int mmci_get_ro(struct mmc_host *mmc)
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)

	return gpio_get_value_cansleep(host->gpio_wp);

static int mmci_get_cd(struct mmc_host *mmc)
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));

		status = !!gpio_get_value_cansleep(host->gpio_cd)

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,

static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;

		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);

		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
		/* No bus-width supplied. */

		pdata->capabilities |= MMC_CAP_4_BIT_DATA;

		pdata->capabilities |= MMC_CAP_8_BIT_DATA;

		pr_warn("%s: Unsupported bus width\n", np->full_name);

static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;

	/* Must have platform data or Device Tree. */
		dev_err(&dev->dev, "No plat data or DT found\n");

		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);

		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);

	host = mmc_priv(mmc);

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);

	ret = clk_prepare_enable(host->clk);

	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);

		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",

	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);

		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
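	/*
	 * With an 8-bit clkdiv field (max 255): the ST variants run the card
	 * clock at mclk / (clkdiv + 2), so the lowest rate is mclk / 257;
	 * the ARM variant runs it at mclk / (2 * (clkdiv + 1)), giving
	 * mclk / 512.
	 */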
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 */
		mmc->f_max = min(host->mclk, plat->f_max);

		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);

	/* enable pins to be muxed in and configured */
	if (!IS_ERR(host->pins_default)) {
		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
			dev_warn(&dev->dev, "could not set default pins\n");

		dev_warn(&dev->dev, "could not get default pinstate\n");

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))

		int mask = mmc_regulator_get_ocrmask(host->vcc);

			dev_err(&dev->dev, "error getting OCR mask (%d)\n",

			host->mmc->ocr_avail = (u32) mask;

				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");

	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
			ret = gpio_direction_input(plat->gpio_cd);
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME " (cd)", host);
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);

	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;

	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
			ret = gpio_direction_input(plat->gpio_wp);
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);

		host->singleirq = true;

		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);
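	/*
	 * Use runtime PM autosuspend so the host is only powered down after
	 * it has been idle for a short while (50 ms) rather than after
	 * every single request.
	 */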
	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	free_irq(dev->irq[0], host);

	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);

	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);

	iounmap(host->base);

	clk_disable_unprepare(host->clk);

	amba_release_regions(dev);
static int mmci_remove(struct amba_device *dev)
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);

			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

	amba_release_regions(dev);
#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);

static int mmci_resume(struct device *dev)
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
static struct amba_id mmci_ids[] = {
		.data	= &variant_arm,

		.data	= &variant_arm_extended_fifo,

		.data	= &variant_arm,

	/* ST Micro variants */
		.data	= &variant_u300,

		.data	= &variant_nomadik,

		.data	= &variant_u300,

		.data	= &variant_ux500,

		.data	= &variant_ux500v2,

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);
MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");