#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
#include "shdma.h"
#define LOG2_DEFAULT_XFER_SIZE 2
-/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
+/* A bitmask with enough bits for all SH_DMA_SLAVE_NUMBER slave IDs */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
struct sh_dmae_device, common);
struct sh_dmae_pdata *pdata = shdev->pdata;
- struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
int shift = chan_pdata->dmars_bit;
return NULL;
}
-static struct sh_dmae_slave_config *sh_dmae_find_slave(
- struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+static const struct sh_dmae_slave_config *sh_dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
struct dma_device *dma_dev = sh_chan->common.device;
struct sh_dmae_device *shdev = container_of(dma_dev,
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
- if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+ if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
return NULL;
for (i = 0; i < pdata->slave_num; i++)
- if (pdata->slave[i].slave_id == slave_id)
+ if (pdata->slave[i].slave_id == param->slave_id)
return pdata->slave + i;
return NULL;
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
struct sh_desc *desc;
struct sh_dmae_slave *param = chan->private;
+ int ret;
pm_runtime_get_sync(sh_chan->dev);
* never runs concurrently with itself or free_chan_resources.
*/
if (param) {
- struct sh_dmae_slave_config *cfg;
+ const struct sh_dmae_slave_config *cfg;
- cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
- if (!cfg)
- return -EINVAL;
+ cfg = sh_dmae_find_slave(sh_chan, param);
+ if (!cfg) {
+ ret = -EINVAL;
+ goto efindslave;
+ }
- if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
- return -EBUSY;
+ if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
+ ret = -EBUSY;
+ goto etestused;
+ }
param->config = cfg;
}
spin_unlock_bh(&sh_chan->desc_lock);
- if (!sh_chan->descs_allocated)
- pm_runtime_put(sh_chan->dev);
+ if (!sh_chan->descs_allocated) {
+ ret = -ENOMEM;
+ goto edescalloc;
+ }
return sh_chan->descs_allocated;
+
+edescalloc:
+ if (param)
+ clear_bit(param->slave_id, sh_dmae_slave_used);
+etestused:
+efindslave:
+ pm_runtime_put(sh_chan->dev);
+ return ret;
}
/*
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
+ dma_addr_t slave_addr;
if (!chan)
return NULL;
sh_chan = to_sh_chan(chan);
param = chan->private;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
*/
- return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+ slave_addr = param->config->addr;
+
+ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
direction, flags);
}
- static void sh_dmae_terminate_all(struct dma_chan *chan)
+ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!chan)
- return;
+ return -EINVAL;
dmae_halt(sh_chan);
spin_unlock_bh(&sh_chan->desc_lock);
sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+ return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
sh_chan_xfer_ld_queue(sh_chan);
}
- static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
dma_cookie_t last_used;
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
BUG_ON(last_complete < 0);
-
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
spin_lock_bh(&sh_chan->desc_lock);
int irq, unsigned long flags)
{
int err;
- struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
struct platform_device *pdev = to_platform_device(shdev->common.dev);
struct sh_dmae_chan *new_sh_chan;
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
- shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+ shdev->common.device_tx_status = sh_dmae_tx_status;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_terminate_all = sh_dmae_terminate_all;
+ shdev->common.device_control = sh_dmae_control;
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
/* Interface clock */
struct clk *iclk;
- /* Data clock */
- struct clk *dclk;
+ /* Function clock */
+ struct clk *fclk;
struct list_head node;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct device *dma_dev;
- enum sh_dmae_slave_chan_id slave_tx;
- enum sh_dmae_slave_chan_id slave_rx;
+ unsigned int slave_tx;
+ unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
struct work_struct work_tx;
struct work_struct work_rx;
struct timer_list rx_timer;
+ unsigned int rx_timeout;
#endif
};
handle_error(port);
continue;
}
- } while (!(status & SCxSR_RDxF(port)));
+ break;
+ } while (1);
+
+ if (!(status & SCxSR_RDxF(port)))
+ return NO_POLL_CHAR;
c = sci_in(port, SCxRDR);
struct sci_port *s = to_sci_port(port);
if (s->chan_rx) {
- unsigned long tout;
u16 scr = sci_in(port, SCSCR);
u16 ssr = sci_in(port, SCxSR);
/* Disable future Rx interrupts */
- sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+ if (port->type == PORT_SCIFA) {
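+ /*
+ * On SCIFA, 0x4000 in SCSCR is the RDRQE bit (Rx data transfer
+ * request enable): with it set, received data raises DMA transfer
+ * requests instead of RXI interrupts, so mask the Rx IRQ and let
+ * the DMA engine take over reception.
+ */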
+ disable_irq_nosync(irq);
+ scr |= 0x4000;
+ } else {
+ scr &= ~SCI_CTRL_FLAGS_RIE;
+ }
+ sci_out(port, SCSCR, scr);
/* Clear current interrupt */
sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
- /* Calculate delay for 1.5 DMA buffers */
- tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
- port->fifosize / 2;
- dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
- tout * 1000 / HZ);
- if (tout < 2)
- tout = 2;
- mod_timer(&s->rx_timer, jiffies + tout);
+ dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+ jiffies, s->rx_timeout);
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
return IRQ_HANDLED;
}
(phase == CPUFREQ_RESUMECHANGE)) {
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
spin_unlock_irqrestore(&priv->lock, flags);
}
{
struct sci_port *sci_port = to_sci_port(port);
- clk_enable(sci_port->dclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
- if (sci_port->iclk)
- clk_enable(sci_port->iclk);
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
}
static void sci_clk_disable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- if (sci_port->iclk)
- clk_disable(sci_port->iclk);
-
- clk_disable(sci_port->dclk);
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
}
static int sci_request_irq(struct sci_port *port)
spin_lock_irqsave(&port->lock, flags);
- xmit->tail += s->sg_tx.length;
+ xmit->tail += sg_dma_len(&s->sg_tx);
xmit->tail &= UART_XMIT_SIZE - 1;
- port->icount.tx += s->sg_tx.length;
+ port->icount.tx += sg_dma_len(&s->sg_tx);
async_tx_ack(s->desc_tx);
s->cookie_tx = -EINVAL;
s->desc_tx = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
-
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_chars_pending(xmit))
+ if (!uart_circ_empty(xmit)) {
schedule_work(&s->work_tx);
+ } else if (port->type == PORT_SCIFA) {
+ u16 ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
}
/* Locking: called with port lock held */
unsigned long flags;
int count;
- dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
spin_lock_irqsave(&port->lock, flags);
count = sci_dma_rx_push(s, tty, s->buf_len_rx);
- mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
spin_unlock_irqrestore(&port->lock, flags);
sci_rx_dma_release(s, true);
return;
}
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+ s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
unsigned long flags;
int count;
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
sh_desc->partial, sh_desc->cookie);
return;
}
- dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
- s->cookie_rx[new], new);
-
s->active_rx = s->cookie_rx[!new];
+
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+ s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
*/
spin_lock_irq(&port->lock);
sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
- sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+ sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
sg->offset;
- sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+ sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- sg->dma_length = sg->length;
spin_unlock_irq(&port->lock);
- BUG_ON(!sg->length);
+ BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
sg, s->sg_len_tx, DMA_TO_DEVICE,
static void sci_start_tx(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
unsigned short ctrl;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct sci_port *s = to_sci_port(port);
-
- if (s->chan_tx) {
- if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
- schedule_work(&s->work_tx);
-
- return;
+ if (port->type == PORT_SCIFA) {
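+ /*
+ * On SCIFA, 0x8000 in SCSCR is the TDRQE bit (Tx data transfer
+ * request enable): set it while a Tx DMA channel is in use, clear
+ * it to fall back to TXI-interrupt-driven transmission.
+ */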
+ u16 new, scr = sci_in(port, SCSCR);
+ if (s->chan_tx)
+ new = scr | 0x8000;
+ else
+ new = scr & ~0x8000;
+ if (new != scr)
+ sci_out(port, SCSCR, new);
}
+ if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+ s->cookie_tx < 0)
+ schedule_work(&s->work_tx);
#endif
-
- /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = sci_in(port, SCSCR);
- ctrl |= SCI_CTRL_FLAGS_TIE;
- sci_out(port, SCSCR, ctrl);
+ if (!s->chan_tx || port->type == PORT_SCIFA) {
+ /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+ ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+ }
}
static void sci_stop_tx(struct uart_port *port)
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x8000;
ctrl &= ~SCI_CTRL_FLAGS_TIE;
sci_out(port, SCSCR, ctrl);
}
/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl |= sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
sci_out(port, SCSCR, ctrl);
}
/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
sci_out(port, SCSCR, ctrl);
}
{
struct sci_port *s = (struct sci_port *)arg;
struct uart_port *port = &s->port;
-
u16 scr = sci_in(port, SCSCR);
+
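+ /*
+ * The Rx DMA transfer timed out: clear RDRQE so SCIFA raises RXI
+ * interrupts again, and unmask the Rx IRQ that was disabled in the
+ * Rx interrupt handler before re-enabling RIE below.
+ */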
+ if (port->type == PORT_SCIFA) {
+ scr &= ~0x4000;
+ enable_irq(s->irqs[1]);
+ }
sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
schedule_work(&s->work_rx);
sg_init_table(sg, 1);
sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
(int)buf[i] & ~PAGE_MASK);
- sg->dma_address = dma[i];
- sg->dma_length = sg->length;
+ sg_dma_address(sg) = dma[i];
}
INIT_WORK(&s->work_rx, work_fn_rx);
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct sci_port *s = to_sci_port(port);
+#endif
unsigned int status, baud, smr_val, max_baud;
int t = -1;
+ u16 scfcr = 0;
/*
* earlyprintk comes here early on with port->uartclk set to zero.
sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
if (port->type != PORT_SCI)
- sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
if ((termios->c_cflag & CSIZE) == CS7)
}
sci_init_pins(port, termios->c_cflag);
- sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+ sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
sci_out(port, SCSCR, SCSCR_INIT(port));
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ /*
+ * Calculate delay for 1.5 DMA buffers: see
+ * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+ * (CS8), 250Hz, 115200 baud and a 64-byte FIFO, the above function
+ * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)."
+ * The formula below then yields 3 jiffies (12ms) for 1.5 DMA buffers
+ * (3 FIFO sizes), but experimentally that turns out not to be enough:
+ * the driver too often runs into a needless DMA timeout. A minimum of
+ * 20ms works reliably.
+ */
+ if (s->chan_rx) {
+ s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+ port->fifosize / 2;
+ dev_dbg(port->dev,
+ "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ s->rx_timeout * 1000 / HZ, port->timeout);
+ if (s->rx_timeout < msecs_to_jiffies(20))
+ s->rx_timeout = msecs_to_jiffies(20);
+ }
+#endif
+
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
}
#endif
};
-static void __devinit sci_init_single(struct platform_device *dev,
- struct sci_port *sci_port,
- unsigned int index,
- struct plat_sci_port *p)
+static int __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
}
if (dev) {
- sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
- sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
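+ /*
+ * Prefer a dedicated interface clock ("sci_ick"); fall back to the
+ * generic "peripheral_clk" on SoCs that do not provide one.
+ */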
+ sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+ if (IS_ERR(sci_port->iclk)) {
+ sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+ if (IS_ERR(sci_port->iclk)) {
+ dev_err(&dev->dev, "can't get iclk\n");
+ return PTR_ERR(sci_port->iclk);
+ }
+ }
+
+ /*
+ * The function clock is optional; just do without it if we
+ * can't find it.
+ */
+ sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+ if (IS_ERR(sci_port->fclk))
+ sci_port->fclk = NULL;
+
sci_port->enable = sci_clk_enable;
sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
#endif
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+ return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
+ list_for_each_entry(p, &priv->ports, node) {
uart_remove_one_port(&sci_uart_driver, &p->port);
+ clk_put(p->iclk);
+ clk_put(p->fclk);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
kfree(priv);
return 0;
}
- sci_init_single(dev, sciport, index, p);
+ ret = sci_init_single(dev, sciport, index, p);
+ if (ret)
+ return ret;
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret)