#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
dma_addr_t context_phys;
struct dma_device dma_device;
struct clk *clk;
- struct mutex channel_0_lock;
+ spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
};
}
/*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run channel 0 and wait till it's done
+ *
+ * Busy-polls the channel 0 bit of SDMA_H_INTR instead of sleeping on a
+ * completion, so it is safe to call with channel_0_lock held (atomic
+ * context).  Returns 0 on success, -ETIMEDOUT if channel 0 did not
+ * signal completion within SDMA_TIMEOUT_US microseconds.
 */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+#define SDMA_TIMEOUT_US	500
+
+static int sdma_run_channel0(struct sdma_engine *sdma)
{
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
	int ret;
+	unsigned long timeout = SDMA_TIMEOUT_US;
-	init_completion(&sdmac->done);
-
-	sdma_enable_channel(sdma, channel);
+	sdma_enable_channel(sdma, 0);
-	ret = wait_for_completion_timeout(&sdmac->done, HZ);
+	/*
+	 * timeout is unsigned, so a "<= 0" check could only ever mean
+	 * "== 0" (and trips -Wtype-limits); test for zero explicitly.
+	 */
+	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+		if (!timeout--)
+			break;
+		udelay(1);
+	}
+
+	if (ret)
+		/* ack channel 0 by writing its status bit back */
+		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+	else
+		dev_err(sdma->dev, "Timeout waiting for CH0 ready: %08x\n",
+			readl_relaxed(sdma->regs + SDMA_H_INTR));
+
	return ret ? 0 : -ETIMEDOUT;
}
void *buf_virt;
dma_addr_t buf_phys;
int ret;
-
- mutex_lock(&sdma->channel_0_lock);
+ unsigned long flags;
buf_virt = dma_alloc_coherent(NULL,
size,
&buf_phys, GFP_KERNEL);
if (!buf_virt) {
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
memcpy(buf_virt, buf, size);
- ret = sdma_run_channel(&sdma->channel[0]);
+ ret = sdma_run_channel0(sdma);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
-err_out:
- mutex_unlock(&sdma->channel_0_lock);
+ dma_free_coherent(NULL, size, buf_virt, buf_phys);
return ret;
}
{
complete(&sdmac->done);
- /* not interested in channel 0 interrupts */
- if (sdmac->channel == 0)
- return;
-
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
else
unsigned long stat;
stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ /* not interested in channel 0 interrupts */
+ stat &= ~1;
writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
while (stat) {
struct sdma_context_data *context = sdma->context;
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
+ unsigned long flags;
if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
- mutex_lock(&sdma->channel_0_lock);
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
- ret = sdma_run_channel(&sdma->channel[0]);
+ ret = sdma_run_channel0(sdma);
- mutex_unlock(&sdma->channel_0_lock);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
return ret;
}
dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
- clk_disable(sdma->clk);
+ clk_disable_unprepare(sdma->clk);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start;
- clk_enable(sdma->clk);
+ clk_prepare_enable(sdma->clk);
/* download the RAM image for SDMA */
sdma_load_script(sdma, ram_code,
header->ram_code_size,
addr->ram_code_start_addr);
- clk_disable(sdma->clk);
+ clk_disable_unprepare(sdma->clk);
sdma_add_scripts(sdma, addr);
return -ENODEV;
}
- clk_enable(sdma->clk);
+ clk_prepare_enable(sdma->clk);
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
/* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7);
- clk_disable(sdma->clk);
+ clk_disable_unprepare(sdma->clk);
return 0;
err_dma_alloc:
- clk_disable(sdma->clk);
+ clk_disable_unprepare(sdma->clk);
dev_err(sdma->dev, "initialisation failed with %d\n", ret);
return ret;
}
if (!sdma)
return -ENOMEM;
- mutex_init(&sdma->channel_0_lock);
+ spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
goto err_init;
}
+ dev_set_drvdata(&pdev->dev, sdma);
+
dev_info(sdma->dev, "initialized\n");
return 0;
return -EBUSY;
}
+#ifdef CONFIG_PM_SLEEP
+/* Bit 1 of SDMA_H_RESET forces the scheduler to re-evaluate channels. */
+#define SDMA_H_RESET_RESCHED	(1 << 1)
+
+/*
+ * Channel-enable state preserved across suspend.
+ *
+ * NOTE(review): this is a single file-scope instance, so it silently
+ * breaks if more than one SDMA engine is probed — it should live in
+ * struct sdma_engine.  TODO: move it there.
+ */
+static struct sdma_suspend_context {
+	u32 stop_stat;
+	u32 host_ovr;
+} sdma_regs;
+
+/* Record which channels are running, then stop them all. */
+static void sdma_save_regs(struct sdma_engine *sdma,
+			   struct sdma_suspend_context *ctx)
+{
+	/*
+	 * STATSTOP reads as the set of enabled channels; writing that
+	 * value back stops exactly those channels.
+	 */
+	ctx->stop_stat = readl_relaxed(sdma->regs + SDMA_H_STATSTOP);
+	writel_relaxed(ctx->stop_stat, sdma->regs + SDMA_H_STATSTOP);
+
+	ctx->host_ovr = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
+	writel_relaxed(0, sdma->regs + SDMA_H_HOSTOVR);
+}
+
+/*
+ * Restore the host override mask and restart the channels that were
+ * running at suspend time (writing the saved mask to SDMA_H_START).
+ */
+static void sdma_restore_regs(struct sdma_engine *sdma,
+			      struct sdma_suspend_context *ctx)
+{
+	writel_relaxed(ctx->host_ovr, sdma->regs + SDMA_H_HOSTOVR);
+	writel_relaxed(ctx->stop_stat, sdma->regs + SDMA_H_START);
+}
+
+static int sdma_suspend(struct device *dev)
+{
+	struct sdma_engine *sdma = dev_get_drvdata(dev);
+
+	/* save status and clear all channel enables */
+	sdma_save_regs(sdma, &sdma_regs);
+
+	/* reschedule so the engine drops any in-flight channel state */
+	writel_relaxed(SDMA_H_RESET_RESCHED, sdma->regs + SDMA_H_RESET);
+
+	clk_disable_unprepare(sdma->clk);
+	return 0;
+}
+
+static int sdma_resume(struct device *dev)
+{
+	struct sdma_engine *sdma = dev_get_drvdata(dev);
+	int ret;
+
+	/* clk_prepare_enable() can fail; don't touch registers if it does */
+	ret = clk_prepare_enable(sdma->clk);
+	if (ret)
+		return ret;
+
+	sdma_restore_regs(sdma, &sdma_regs);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sdma_pm_ops, sdma_suspend, sdma_resume);
+
static struct platform_driver sdma_driver = {
.driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
+ .pm = &sdma_pm_ops,
},
.id_table = sdma_devtypes,
.remove = __exit_p(sdma_remove),