X-Git-Url: https://git.kernelconcepts.de/?p=karo-tx-uboot.git;a=blobdiff_plain;f=drivers%2Fspi%2Fmxs_spi.c;h=168dbe497e6dc73f70cdd38a3c98030e32b6dbe8;hp=4e6f14ee0722d102b2db33cdcb67ff1c02a1c272;hb=6e2fbdea1b26d75314d87c380a36b0015bf824cf;hpb=5c877b1ae0a4219ed6bd8d32cf3f7106b81ecb3b

diff --git a/drivers/spi/mxs_spi.c b/drivers/spi/mxs_spi.c
index 4e6f14ee07..168dbe497e 100644
--- a/drivers/spi/mxs_spi.c
+++ b/drivers/spi/mxs_spi.c
@@ -31,15 +31,31 @@
 #include
 #include
 #include
+#include
 
 #define	MXS_SPI_MAX_TIMEOUT	1000000
 #define	MXS_SPI_PORT_OFFSET	0x2000
+#define	MXS_SSP_CHIPSELECT_MASK		0x00300000
+#define	MXS_SSP_CHIPSELECT_SHIFT	20
+
+#define	MXSSSP_SMALL_TRANSFER	512
+
+/*
+ * CONFIG_MXS_SPI_DMA_ENABLE: Experimental mixed PIO/DMA support for the
+ * MXS SPI host. Use with utmost caution!
+ *
+ * Enabling this is not yet recommended, since the driver still does not
+ * support transfers to/from unaligned addresses. It will therefore not
+ * work, for example, when saving the environment. This is caused by the
+ * DMA alignment constraints on MXS.
+ */
 
 struct mxs_spi_slave {
 	struct spi_slave	slave;
 	uint32_t		max_khz;
 	uint32_t		mode;
-	struct mx28_ssp_regs	*regs;
+	struct mxs_ssp_regs	*regs;
 };
 
 static inline struct mxs_spi_slave *to_mxs_slave(struct spi_slave *slave)
@@ -51,30 +67,54 @@ void spi_init(void)
 {
 }
 
+int spi_cs_is_valid(unsigned int bus, unsigned int cs)
+{
+	/* MXS SPI: 4 ports and 3 chip selects maximum */
+	if (bus > 3 || cs > 2)
+		return 0;
+	else
+		return 1;
+}
+
 struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
 				  unsigned int max_hz, unsigned int mode)
 {
 	struct mxs_spi_slave *mxs_slave;
 	uint32_t addr;
+	struct mxs_ssp_regs *ssp_regs;
+	int reg;
 
-	if (bus > 3) {
-		printf("MXS SPI: Max bus number is 3\n");
+	if (!spi_cs_is_valid(bus, cs)) {
+		printf("mxs_spi: invalid bus %d / chip select %d\n", bus, cs);
 		return NULL;
 	}
 
-	mxs_slave = malloc(sizeof(struct mxs_spi_slave));
+	mxs_slave = calloc(sizeof(struct mxs_spi_slave), 1);
 	if (!mxs_slave)
 		return NULL;
 
+	if (mxs_dma_init_channel(bus))
+		goto err_init;
+
 	addr = MXS_SSP0_BASE + (bus * MXS_SPI_PORT_OFFSET);
 
 	mxs_slave->slave.bus = bus;
 	mxs_slave->slave.cs = cs;
 	mxs_slave->max_khz = max_hz / 1000;
 	mxs_slave->mode = mode;
-	mxs_slave->regs = (struct mx28_ssp_regs *)addr;
+	mxs_slave->regs = (struct mxs_ssp_regs *)addr;
+	ssp_regs = mxs_slave->regs;
+
+	reg = readl(&ssp_regs->hw_ssp_ctrl0);
+	reg &= ~(MXS_SSP_CHIPSELECT_MASK);
+	reg |= cs << MXS_SSP_CHIPSELECT_SHIFT;
+
+	writel(reg, &ssp_regs->hw_ssp_ctrl0);
 	return &mxs_slave->slave;
+
+err_init:
+	free(mxs_slave);
+	return NULL;
 }
 
 void spi_free_slave(struct spi_slave *slave)
@@ -86,10 +126,10 @@ void spi_free_slave(struct spi_slave *slave)
 int spi_claim_bus(struct spi_slave *slave)
 {
 	struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave);
-	struct mx28_ssp_regs *ssp_regs = mxs_slave->regs;
+	struct mxs_ssp_regs *ssp_regs = mxs_slave->regs;
 	uint32_t reg = 0;
 
-	mx28_reset_block(&ssp_regs->hw_ssp_ctrl0_reg);
+	mxs_reset_block(&ssp_regs->hw_ssp_ctrl0_reg);
 
 	writel(SSP_CTRL0_BUS_WIDTH_ONE_BIT, &ssp_regs->hw_ssp_ctrl0);
 
@@ -109,79 +149,63 @@ void spi_release_bus(struct spi_slave *slave)
 {
 }
 
-static void mxs_spi_start_xfer(struct mx28_ssp_regs *ssp_regs)
+static void mxs_spi_start_xfer(struct mxs_ssp_regs *ssp_regs)
 {
 	writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_set);
 	writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_clr);
 }
 
-static void mxs_spi_end_xfer(struct mx28_ssp_regs *ssp_regs)
+static void mxs_spi_end_xfer(struct mxs_ssp_regs *ssp_regs)
 {
 	writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_clr);
 	writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_set);
 }
 
-int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
-	     const void *dout, void *din, unsigned long flags)
+static int mxs_spi_xfer_pio(struct mxs_spi_slave *slave,
+			char *data, int length, int write, unsigned long flags)
 {
-	struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave);
-	struct mx28_ssp_regs *ssp_regs = mxs_slave->regs;
-	int len = bitlen / 8;
-	const char *tx = dout;
-	char *rx = din;
-	char dummy;
-
-	if (bitlen == 0) {
-		if (flags & SPI_XFER_END) {
-			rx = &dummy;
-			len = 1;
-		} else
-			return 0;
-	}
-
-	if (!rx && !tx)
-		return 0;
+	struct mxs_ssp_regs *ssp_regs = slave->regs;
 
 	if (flags & SPI_XFER_BEGIN)
 		mxs_spi_start_xfer(ssp_regs);
 
-	while (len--) {
+	while (length--) {
 		/* We transfer 1 byte */
 		writel(1, &ssp_regs->hw_ssp_xfer_size);
 
-		if ((flags & SPI_XFER_END) && !len)
+		if ((flags & SPI_XFER_END) && !length)
 			mxs_spi_end_xfer(ssp_regs);
 
-		if (tx)
+		if (write)
 			writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_clr);
 		else
 			writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_set);
 
 		writel(SSP_CTRL0_RUN, &ssp_regs->hw_ssp_ctrl0_set);
 
-		if (mx28_wait_mask_set(&ssp_regs->hw_ssp_ctrl0_reg,
+		if (mxs_wait_mask_set(&ssp_regs->hw_ssp_ctrl0_reg,
 			SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
 			printf("MXS SPI: Timeout waiting for start\n");
 			return -ETIMEDOUT;
 		}
 
-		if (tx)
-			writel(*tx++, &ssp_regs->hw_ssp_data);
+		if (write)
+			writel(*data++, &ssp_regs->hw_ssp_data);
 
 		writel(SSP_CTRL0_DATA_XFER, &ssp_regs->hw_ssp_ctrl0_set);
 
-		if (rx) {
-			if (mx28_wait_mask_clr(&ssp_regs->hw_ssp_status_reg,
+		if (!write) {
+			if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_status_reg,
 				SSP_STATUS_FIFO_EMPTY, MXS_SPI_MAX_TIMEOUT)) {
 				printf("MXS SPI: Timeout waiting for data\n");
 				return -ETIMEDOUT;
 			}
 
-			*rx = readl(&ssp_regs->hw_ssp_data);
-			rx++;
+			*data = readl(&ssp_regs->hw_ssp_data);
+			data++;
 		}
 
-		if (mx28_wait_mask_clr(&ssp_regs->hw_ssp_ctrl0_reg,
+		if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_ctrl0_reg,
 			SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
 			printf("MXS SPI: Timeout waiting for finish\n");
 			return -ETIMEDOUT;
@@ -190,3 +214,166 @@ int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
 
 	return 0;
 }
+
+static int mxs_spi_xfer_dma(struct mxs_spi_slave *slave,
+			char *data, int length, int write, unsigned long flags)
+{
+	const int xfer_max_sz = 0xff00;
+	const int desc_count = DIV_ROUND_UP(length, xfer_max_sz) + 1;
+	struct mxs_ssp_regs *ssp_regs = slave->regs;
+	struct mxs_dma_desc *dp;
+	uint32_t ctrl0;
+	uint32_t cache_data_count;
+	int dmach;
+	int tl;
+
+	ALLOC_CACHE_ALIGN_BUFFER(struct mxs_dma_desc, desc, desc_count);
+
+	memset(desc, 0, sizeof(struct mxs_dma_desc) * desc_count);
+
+	ctrl0 = readl(&ssp_regs->hw_ssp_ctrl0);
+	ctrl0 |= SSP_CTRL0_DATA_XFER;
+
+	if (flags & SPI_XFER_BEGIN)
+		ctrl0 |= SSP_CTRL0_LOCK_CS;
+	if (!write)
+		ctrl0 |= SSP_CTRL0_READ;
+
+	writel(length, &ssp_regs->hw_ssp_xfer_size);
+
+	if (length % ARCH_DMA_MINALIGN)
+		cache_data_count = roundup(length, ARCH_DMA_MINALIGN);
+	else
+		cache_data_count = length;
+
+	if (write)
+		/* Flush data to DRAM so DMA can pick them up */
+		flush_dcache_range((uint32_t)data,
+			(uint32_t)(data + cache_data_count));
+
+	dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + slave->slave.bus;
+
+	dp = desc;
+	while (length) {
+		dp->address = (dma_addr_t)dp;
+		dp->cmd.address = (dma_addr_t)data;
+
+		/*
+		 * This is correct, even though it does indeed look insane.
+		 * I hereby have to, wholeheartedly, thank Freescale Inc.,
+		 * for always inventing insane hardware and keeping me busy
+		 * and employed ;-)
+		 */
+		if (write)
+			dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
+		else
+			dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;
+
+		/*
+		 * The DMA controller can transfer large chunks (64kB) at
+		 * a time by setting the transfer length to 0. Setting tl
+		 * to 0x10000 overflows the shift below and makes .data
+		 * contain 0. Otherwise, 0xff00 is the transfer maximum.
+		 */
+		if (length >= 0x10000)
+			tl = 0x10000;
+		else
+			tl = min(length, xfer_max_sz);
+
+		dp->cmd.data |=
+			(tl << MXS_DMA_DESC_BYTES_OFFSET) |
+			(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
+			MXS_DMA_DESC_HALT_ON_TERMINATE |
+			MXS_DMA_DESC_TERMINATE_FLUSH;
+		dp->cmd.pio_words[0] = ctrl0;
+
+		data += tl;
+		length -= tl;
+
+		mxs_dma_desc_append(dmach, dp);
+
+		dp++;
+	}
+
+	dp->address = (dma_addr_t)dp;
+	dp->cmd.address = (dma_addr_t)0;
+	dp->cmd.data = MXS_DMA_DESC_COMMAND_NO_DMAXFER |
+		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
+		MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM;
+	if (flags & SPI_XFER_END) {
+		ctrl0 &= ~SSP_CTRL0_LOCK_CS;
+		dp->cmd.pio_words[0] = ctrl0 | SSP_CTRL0_IGNORE_CRC;
+	}
+	mxs_dma_desc_append(dmach, dp);
+
+	if (mxs_dma_go(dmach))
+		return -EINVAL;
+
+	/* The data has arrived in DRAM, invalidate the cache over it */
+	if (!write) {
+		invalidate_dcache_range((uint32_t)data,
+			(uint32_t)(data + cache_data_count));
+	}
+
+	return 0;
+}
+
+int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
+	     const void *dout, void *din, unsigned long flags)
+{
+	struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave);
+	struct mxs_ssp_regs *ssp_regs = mxs_slave->regs;
+	int len = bitlen / 8;
+	char dummy;
+	int write = 0;
+	char *data = NULL;
+
+#ifdef CONFIG_MXS_SPI_DMA_ENABLE
+	int dma = 1;
+#else
+	int dma = 0;
+#endif
+
+	if (bitlen == 0) {
+		if (flags & SPI_XFER_END) {
+			din = (void *)&dummy;
+			len = 1;
+		} else
+			return 0;
+	}
+
+	/* Half-duplex only */
+	if (din && dout)
+		return -EINVAL;
+	/* No data */
+	if (!din && !dout)
+		return 0;
+
+	if (dout) {
+		data = (char *)dout;
+		write = 1;
+	} else if (din) {
+		data = (char *)din;
+		write = 0;
+	}
+
+	/*
+	 * Check the alignment: if the buffer and length are DMA-aligned,
+	 * do a DMA transfer, otherwise fall back to PIO. This is a
+	 * temporary workaround until a proper bounce buffer is in place.
+	 */
+	if (dma) {
+		if (((uint32_t)data) & (ARCH_DMA_MINALIGN - 1))
+			dma = 0;
+		if (((uint32_t)len) & (ARCH_DMA_MINALIGN - 1))
+			dma = 0;
+	}
+
+	if (!dma || (len < MXSSSP_SMALL_TRANSFER)) {
+		writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_clr);
+		return mxs_spi_xfer_pio(mxs_slave, data, len, write, flags);
+	} else {
+		writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_set);
+		return mxs_spi_xfer_dma(mxs_slave, data, len, write, flags);
+	}
+}
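
For reference, below is a minimal sketch of how a caller might drive this half-duplex interface through U-Boot's legacy SPI API used in the patch (spi_setup_slave, spi_claim_bus, spi_xfer with SPI_XFER_BEGIN/SPI_XFER_END, spi_release_bus, spi_free_slave). The bus/chip-select numbers, clock rate, the 0x9f command byte and the 512-byte read length are illustrative assumptions only, not taken from the patch; the cache-aligned buffer reflects the ARCH_DMA_MINALIGN constraint that gates the DMA path, and the read length is chosen at MXSSSP_SMALL_TRANSFER so the payload may go via DMA while the one-byte command stays on PIO.

#include <common.h>
#include <spi.h>

/* Illustrative sketch only: bus 2 / cs 0, 40 MHz and command 0x9f are made up. */
static int example_spi_read(void)
{
	struct spi_slave *slave;
	const uint8_t cmd = 0x9f;	/* one byte, always transferred via PIO */
	int ret;

	/* 512 bytes, cache-aligned, so the read may take the DMA path */
	ALLOC_CACHE_ALIGN_BUFFER(uint8_t, buf, 512);

	slave = spi_setup_slave(2, 0, 40000000, SPI_MODE_0);
	if (!slave)
		return -1;

	ret = spi_claim_bus(slave);
	if (ret)
		goto out_free;

	/* Half-duplex: send the command first and keep CS asserted ... */
	ret = spi_xfer(slave, 8, &cmd, NULL, SPI_XFER_BEGIN);
	if (!ret)
		/* ... then read the payload and deassert CS at the end. */
		ret = spi_xfer(slave, 512 * 8, NULL, buf, SPI_XFER_END);

	spi_release_bus(slave);
out_free:
	spi_free_slave(slave);
	return ret;
}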