/*
 * Driver for AMBA serial ports
4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
6 * Copyright 1999 ARM Limited
7 * Copyright (C) 2000 Deep Blue Solutions Ltd.
8 * Copyright (C) 2010 ST-Ericsson SA
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * This is a generic driver for ARM AMBA-type serial ports. They
25 * have a lot of 16550-like features, but are not register compatible.
26 * Note that although they do have CTS, DCD and DSR inputs, they do
27 * not have an RI input, nor do they have DTR or RTS outputs. If
28 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
33 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
37 #include <linux/module.h>
38 #include <linux/ioport.h>
39 #include <linux/init.h>
40 #include <linux/console.h>
41 #include <linux/sysrq.h>
42 #include <linux/device.h>
43 #include <linux/tty.h>
44 #include <linux/tty_flip.h>
45 #include <linux/serial_core.h>
46 #include <linux/serial.h>
47 #include <linux/amba/bus.h>
48 #include <linux/amba/serial.h>
49 #include <linux/clk.h>
50 #include <linux/slab.h>
51 #include <linux/dmaengine.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/scatterlist.h>
54 #include <linux/delay.h>
55 #include <linux/types.h>
57 #include <linux/of_device.h>
58 #include <linux/pinctrl/consumer.h>
59 #include <linux/sizes.h>
61 #include <linux/acpi.h>
65 #define SERIAL_AMBA_MAJOR 204
66 #define SERIAL_AMBA_MINOR 64
67 #define SERIAL_AMBA_NR UART_NR
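/*
 * Bounds the number of passes through the service loop in pl011_int(),
 * so that a continuous stream of interrupts cannot livelock the CPU
 * while the port lock is held.
 */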
69 #define AMBA_ISR_PASS_LIMIT 256
71 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
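/*
 * A flag bit above the data and error bits of the data register; it is
 * ORed into every character taken from the FIFO so that setting the
 * same bit in ignore_status_mask (see pl011_setup_status_masks()) lets
 * uart_insert_char() drop everything received while CREAD is clear.
 */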
72 #define UART_DUMMY_DR_RX (1 << 16)
74 /* There is by now at least one vendor with differing details, so handle it */
81 bool cts_event_workaround;
85 unsigned int (*get_fifosize)(struct amba_device *dev);
90 REG_RSR = UART01x_RSR,
91 REG_ST_DMAWM = ST_UART011_DMAWM,
93 REG_ST_LCRH_RX = ST_UART011_LCRH_RX,
94 REG_ILPR = UART01x_ILPR,
95 REG_IBRD = UART011_IBRD,
96 REG_FBRD = UART011_FBRD,
97 REG_LCRH = UART011_LCRH,
99 REG_IFLS = UART011_IFLS,
100 REG_IMSC = UART011_IMSC,
101 REG_RIS = UART011_RIS,
102 REG_MIS = UART011_MIS,
103 REG_ICR = UART011_ICR,
104 REG_DMACR = UART011_DMACR,
107 static unsigned int get_fifosize_arm(struct amba_device *dev)
109 return amba_rev(dev) < 3 ? 16 : 32;
112 static struct vendor_data vendor_arm = {
113 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
116 .oversampling = false,
117 .dma_threshold = false,
118 .cts_event_workaround = false,
119 .always_enabled = false,
120 .fixed_options = false,
121 .get_fifosize = get_fifosize_arm,
124 static struct vendor_data vendor_sbsa = {
125 .oversampling = false,
126 .dma_threshold = false,
127 .cts_event_workaround = false,
128 .always_enabled = true,
129 .fixed_options = true,
132 static unsigned int get_fifosize_st(struct amba_device *dev)
137 static struct vendor_data vendor_st = {
138 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
140 .lcrh_rx = REG_ST_LCRH_RX,
141 .oversampling = true,
142 .dma_threshold = true,
143 .cts_event_workaround = true,
144 .always_enabled = false,
145 .fixed_options = false,
146 .get_fifosize = get_fifosize_st,
149 /* Deals with DMA transactions */
152 struct scatterlist sg;
156 struct pl011_dmarx_data {
157 struct dma_chan *chan;
158 struct completion complete;
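/*
 * Two RX buffers are used ping-pong fashion: while the DMA engine
 * fills one, the other is drained into the TTY layer; use_buf_b
 * selects the buffer currently owned by the hardware (see
 * pl011_dma_rx_callback()).
 */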
160 struct pl011_sgbuf sgbuf_a;
161 struct pl011_sgbuf sgbuf_b;
164 struct timer_list timer;
165 unsigned int last_residue;
166 unsigned long last_jiffies;
168 unsigned int poll_rate;
169 unsigned int poll_timeout;
172 struct pl011_dmatx_data {
173 struct dma_chan *chan;
174 struct scatterlist sg;
/*
 * We wrap our port structure around the generic uart_port.
 */
182 struct uart_amba_port {
183 struct uart_port port;
185 const struct vendor_data *vendor;
186 unsigned int dmacr; /* dma control reg */
187 unsigned int im; /* interrupt mask */
188 unsigned int old_status;
189 unsigned int fifosize; /* vendor-specific */
190 unsigned int lcrh_tx; /* vendor-specific */
191 unsigned int lcrh_rx; /* vendor-specific */
192 unsigned int old_cr; /* state during shutdown */
194 unsigned int fixed_baud; /* vendor-set fixed baud rate */
196 #ifdef CONFIG_DMA_ENGINE
200 struct pl011_dmarx_data dmarx;
201 struct pl011_dmatx_data dmatx;
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
211 static int pl011_fifo_to_tty(struct uart_amba_port *uap)
214 unsigned int flag, max_count = 256;
217 while (max_count--) {
218 status = readw(uap->port.membase + REG_FR);
if (status & UART01x_FR_RXFE)
	break;
222 /* Take chars from the FIFO and update status */
ch = readw(uap->port.membase + REG_DR) |
	UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
226 uap->port.icount.rx++;
229 if (unlikely(ch & UART_DR_ERROR)) {
230 if (ch & UART011_DR_BE) {
231 ch &= ~(UART011_DR_FE | UART011_DR_PE);
232 uap->port.icount.brk++;
if (uart_handle_break(&uap->port))
	continue;
235 } else if (ch & UART011_DR_PE)
236 uap->port.icount.parity++;
237 else if (ch & UART011_DR_FE)
238 uap->port.icount.frame++;
239 if (ch & UART011_DR_OE)
240 uap->port.icount.overrun++;
242 ch &= uap->port.read_status_mask;
if (ch & UART011_DR_BE)
	flag = TTY_BREAK;
else if (ch & UART011_DR_PE)
	flag = TTY_PARITY;
else if (ch & UART011_DR_FE)
	flag = TTY_FRAME;
if (uart_handle_sysrq_char(&uap->port, ch & 255))
	continue;
255 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
267 #ifdef CONFIG_DMA_ENGINE
269 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
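/*
 * Each buffer is a single page of coherent memory described by a
 * one-entry scatterlist, since the dmaengine slave API works on
 * scatterlists; sg_dma_address/sg_dma_len are filled in by hand below,
 * so no separate dma_map_sg() step is needed for the RX buffers.
 */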
271 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
272 enum dma_data_direction dir)
276 sg->buf = dma_alloc_coherent(chan->device->dev,
			     PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
if (!sg->buf)
	return -ENOMEM;
281 sg_init_table(&sg->sg, 1);
282 sg_set_page(&sg->sg, phys_to_page(dma_addr),
283 PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
284 sg_dma_address(&sg->sg) = dma_addr;
285 sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
290 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
291 enum dma_data_direction dir)
294 dma_free_coherent(chan->device->dev,
295 PL011_DMA_BUFFER_SIZE, sg->buf,
296 sg_dma_address(&sg->sg));
300 static void pl011_dma_probe(struct uart_amba_port *uap)
302 /* DMA is the sole user of the platform data right now */
303 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
304 struct device *dev = uap->port.dev;
305 struct dma_slave_config tx_conf = {
306 .dst_addr = uap->port.mapbase + REG_DR,
307 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
308 .direction = DMA_MEM_TO_DEV,
309 .dst_maxburst = uap->fifosize >> 1,
312 struct dma_chan *chan;
315 uap->dma_probed = true;
316 chan = dma_request_slave_channel_reason(dev, "tx");
318 if (PTR_ERR(chan) == -EPROBE_DEFER) {
	uap->dma_probed = false;
	return;
}
323 /* We need platform data */
324 if (!plat || !plat->dma_filter) {
325 dev_info(uap->port.dev, "no DMA platform data\n");
329 /* Try to acquire a generic DMA engine slave TX channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, plat->dma_filter,
			   plat->dma_tx_param);
if (!chan) {
	dev_err(uap->port.dev, "no TX DMA channel!\n");
	return;
}
341 dmaengine_slave_config(chan, &tx_conf);
342 uap->dmatx.chan = chan;
344 dev_info(uap->port.dev, "DMA channel TX %s\n",
345 dma_chan_name(uap->dmatx.chan));
347 /* Optionally make use of an RX channel as well */
348 chan = dma_request_slave_channel(dev, "rx");
if (!chan && plat && plat->dma_rx_param) {
	chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

	if (!chan) {
		dev_err(uap->port.dev, "no RX DMA channel!\n");
		return;
	}
}
360 struct dma_slave_config rx_conf = {
361 .src_addr = uap->port.mapbase + REG_DR,
362 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
363 .direction = DMA_DEV_TO_MEM,
364 .src_maxburst = uap->fifosize >> 2,
367 struct dma_slave_caps caps;
/*
 * Some DMA controllers provide information on their capabilities.
 * If the controller does, check for suitable residue processing,
 * otherwise assume all is well.
 */
374 if (0 == dma_get_slave_caps(chan, &caps)) {
375 if (caps.residue_granularity ==
376 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
377 dma_release_channel(chan);
378 dev_info(uap->port.dev,
379 "RX DMA disabled - no residue processing\n");
383 dmaengine_slave_config(chan, &rx_conf);
384 uap->dmarx.chan = chan;
386 uap->dmarx.auto_poll_rate = false;
387 if (plat && plat->dma_rx_poll_enable) {
388 /* Set poll rate if specified. */
389 if (plat->dma_rx_poll_rate) {
390 uap->dmarx.auto_poll_rate = false;
391 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
/*
 * The poll rate defaults to 100 ms if not
 * specified. It will be adjusted with
 * the baud rate at set_termios.
 */
398 uap->dmarx.auto_poll_rate = true;
399 uap->dmarx.poll_rate = 100;
/* poll_timeout defaults to 3 seconds if not specified. */
402 if (plat->dma_rx_poll_timeout)
403 uap->dmarx.poll_timeout =
404 plat->dma_rx_poll_timeout;
else
	uap->dmarx.poll_timeout = 3000;
407 } else if (!plat && dev->of_node) {
408 uap->dmarx.auto_poll_rate = of_property_read_bool(
409 dev->of_node, "auto-poll");
410 if (uap->dmarx.auto_poll_rate) {
u32 x;

if (0 == of_property_read_u32(dev->of_node,
		"poll-rate-ms", &x))
	uap->dmarx.poll_rate = x;
else
	uap->dmarx.poll_rate = 100;
418 if (0 == of_property_read_u32(dev->of_node,
419 "poll-timeout-ms", &x))
	uap->dmarx.poll_timeout = x;
else
	uap->dmarx.poll_timeout = 3000;
425 dev_info(uap->port.dev, "DMA channel RX %s\n",
426 dma_chan_name(uap->dmarx.chan));
430 static void pl011_dma_remove(struct uart_amba_port *uap)
433 dma_release_channel(uap->dmatx.chan);
435 dma_release_channel(uap->dmarx.chan);
438 /* Forward declare these for the refill routine */
439 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
440 static void pl011_start_tx_pio(struct uart_amba_port *uap);
443 * The current DMA TX buffer has been sent.
444 * Try to queue up another DMA buffer.
446 static void pl011_dma_tx_callback(void *data)
448 struct uart_amba_port *uap = data;
449 struct pl011_dmatx_data *dmatx = &uap->dmatx;
453 spin_lock_irqsave(&uap->port.lock, flags);
454 if (uap->dmatx.queued)
455 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
459 uap->dmacr = dmacr & ~UART011_TXDMAE;
460 writew(uap->dmacr, uap->port.membase + REG_DMACR);
463 * If TX DMA was disabled, it means that we've stopped the DMA for
464 * some reason (eg, XOFF received, or we want to send an X-char.)
466 * Note: we need to be careful here of a potential race between DMA
467 * and the rest of the driver - if the driver disables TX DMA while
* a TX buffer is completing, we must update the TX queued status to
469 * get further refills (hence we check dmacr).
471 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
472 uart_circ_empty(&uap->port.state->xmit)) {
473 uap->dmatx.queued = false;
474 spin_unlock_irqrestore(&uap->port.lock, flags);
478 if (pl011_dma_tx_refill(uap) <= 0)
480 * We didn't queue a DMA buffer for some reason, but we
481 * have data pending to be sent. Re-enable the TX IRQ.
483 pl011_start_tx_pio(uap);
485 spin_unlock_irqrestore(&uap->port.lock, flags);
489 * Try to refill the TX DMA buffer.
490 * Locking: called with port lock held and IRQs disabled.
492 * 1 if we queued up a TX DMA buffer.
493 * 0 if we didn't want to handle this by DMA
496 static int pl011_dma_tx_refill(struct uart_amba_port *uap)
498 struct pl011_dmatx_data *dmatx = &uap->dmatx;
499 struct dma_chan *chan = dmatx->chan;
500 struct dma_device *dma_dev = chan->device;
501 struct dma_async_tx_descriptor *desc;
502 struct circ_buf *xmit = &uap->port.state->xmit;
506 * Try to avoid the overhead involved in using DMA if the
507 * transaction fits in the first half of the FIFO, by using
508 * the standard interrupt handling. This ensures that we
509 * issue a uart_write_wakeup() at the appropriate time.
511 count = uart_circ_chars_pending(xmit);
512 if (count < (uap->fifosize >> 1)) {
513 uap->dmatx.queued = false;
518 * Bodge: don't send the last character by DMA, as this
519 * will prevent XON from notifying us to restart DMA.
523 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
524 if (count > PL011_DMA_BUFFER_SIZE)
525 count = PL011_DMA_BUFFER_SIZE;
527 if (xmit->tail < xmit->head)
528 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
530 size_t first = UART_XMIT_SIZE - xmit->tail;
535 second = count - first;
537 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
539 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
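/*
 * Example of the wrapped case above, assuming UART_XMIT_SIZE == 4096
 * (one page on most configurations): with xmit->tail == 4090 and
 * count == 10, "first" is the 6 bytes at the end of the ring and
 * "second" the remaining 4 bytes from its start.
 */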
542 dmatx->sg.length = count;
544 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
545 uap->dmatx.queued = false;
546 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
550 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
551 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
	dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
	uap->dmatx.queued = false;
	/*
	 * If DMA cannot be used right now, we complete this
	 * transaction via IRQ and let the TTY layer retry.
	 */
	dev_dbg(uap->port.dev, "TX DMA busy\n");
	return -EBUSY;
}
563 /* Some data to go along to the callback */
564 desc->callback = pl011_dma_tx_callback;
565 desc->callback_param = uap;
567 /* All errors should happen at prepare time */
568 dmaengine_submit(desc);
570 /* Fire the DMA transaction */
571 dma_dev->device_issue_pending(chan);
573 uap->dmacr |= UART011_TXDMAE;
574 writew(uap->dmacr, uap->port.membase + REG_DMACR);
575 uap->dmatx.queued = true;
578 * Now we know that DMA will fire, so advance the ring buffer
579 * with the stuff we just dispatched.
581 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
582 uap->port.icount.tx += count;
584 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
585 uart_write_wakeup(&uap->port);
591 * We received a transmit interrupt without a pending X-char but with
592 * pending characters.
593 * Locking: called with port lock held and IRQs disabled.
595 * false if we want to use PIO to transmit
596 * true if we queued a DMA buffer
598 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
600 if (!uap->using_tx_dma)
604 * If we already have a TX buffer queued, but received a
605 * TX interrupt, it will be because we've just sent an X-char.
606 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
608 if (uap->dmatx.queued) {
609 uap->dmacr |= UART011_TXDMAE;
610 writew(uap->dmacr, uap->port.membase + REG_DMACR);
611 uap->im &= ~UART011_TXIM;
612 writew(uap->im, uap->port.membase + REG_IMSC);
617 * We don't have a TX buffer queued, so try to queue one.
618 * If we successfully queued a buffer, mask the TX IRQ.
620 if (pl011_dma_tx_refill(uap) > 0) {
621 uap->im &= ~UART011_TXIM;
622 writew(uap->im, uap->port.membase + REG_IMSC);
629 * Stop the DMA transmit (eg, due to received XOFF).
630 * Locking: called with port lock held and IRQs disabled.
632 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
634 if (uap->dmatx.queued) {
635 uap->dmacr &= ~UART011_TXDMAE;
636 writew(uap->dmacr, uap->port.membase + REG_DMACR);
641 * Try to start a DMA transmit, or in the case of an XON/OFF
642 * character queued for send, try to get that character out ASAP.
643 * Locking: called with port lock held and IRQs disabled.
645 * false if we want the TX IRQ to be enabled
646 * true if we have a buffer queued
648 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
652 if (!uap->using_tx_dma)
655 if (!uap->port.x_char) {
656 /* no X-char, try to push chars out in DMA mode */
659 if (!uap->dmatx.queued) {
660 if (pl011_dma_tx_refill(uap) > 0) {
661 uap->im &= ~UART011_TXIM;
662 writew(uap->im, uap->port.membase +
666 } else if (!(uap->dmacr & UART011_TXDMAE)) {
667 uap->dmacr |= UART011_TXDMAE;
669 uap->port.membase + REG_DMACR);
675 * We have an X-char to send. Disable DMA to prevent it loading
676 * the TX fifo, and then see if we can stuff it into the FIFO.
679 uap->dmacr &= ~UART011_TXDMAE;
680 writew(uap->dmacr, uap->port.membase + REG_DMACR);
682 if (readw(uap->port.membase + REG_FR) & UART01x_FR_TXFF) {
	/*
	 * No space in the FIFO, so enable the transmit interrupt
	 * so we know when there is space. Note that once we've
	 * loaded the character, we should just re-enable DMA.
	 */
	return false;
}
691 writew(uap->port.x_char, uap->port.membase + REG_DR);
692 uap->port.icount.tx++;
693 uap->port.x_char = 0;
/* Success - restore the DMA state */
uap->dmacr = dmacr;
697 writew(dmacr, uap->port.membase + REG_DMACR);
703 * Flush the transmit buffer.
704 * Locking: called with port lock held and IRQs disabled.
706 static void pl011_dma_flush_buffer(struct uart_port *port)
707 __releases(&uap->port.lock)
708 __acquires(&uap->port.lock)
710 struct uart_amba_port *uap =
711 container_of(port, struct uart_amba_port, port);
713 if (!uap->using_tx_dma)
716 /* Avoid deadlock with the DMA engine callback */
717 spin_unlock(&uap->port.lock);
718 dmaengine_terminate_all(uap->dmatx.chan);
719 spin_lock(&uap->port.lock);
720 if (uap->dmatx.queued) {
721 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
723 uap->dmatx.queued = false;
724 uap->dmacr &= ~UART011_TXDMAE;
725 writew(uap->dmacr, uap->port.membase + REG_DMACR);
729 static void pl011_dma_rx_callback(void *data);
731 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
733 struct dma_chan *rxchan = uap->dmarx.chan;
734 struct pl011_dmarx_data *dmarx = &uap->dmarx;
735 struct dma_async_tx_descriptor *desc;
736 struct pl011_sgbuf *sgbuf;
741 /* Start the RX DMA job */
742 sgbuf = uap->dmarx.use_buf_b ?
743 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
744 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
746 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
 * If the DMA engine is busy and cannot prepare a
 * channel, no big deal, the driver will fall back
 * to interrupt mode as a result of this error code.
 */
if (!desc) {
	uap->dmarx.running = false;
	dmaengine_terminate_all(rxchan);
	return -EBUSY;
}
758 /* Some data to go along to the callback */
759 desc->callback = pl011_dma_rx_callback;
760 desc->callback_param = uap;
761 dmarx->cookie = dmaengine_submit(desc);
762 dma_async_issue_pending(rxchan);
764 uap->dmacr |= UART011_RXDMAE;
765 writew(uap->dmacr, uap->port.membase + REG_DMACR);
766 uap->dmarx.running = true;
768 uap->im &= ~UART011_RXIM;
769 writew(uap->im, uap->port.membase + REG_IMSC);
/*
 * This is called when either the DMA job is complete, or the FIFO
 * timeout interrupt has occurred. This must be called with the port
 * spinlock uap->port.lock held.
 */
779 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
780 u32 pending, bool use_buf_b,
783 struct tty_port *port = &uap->port.state->port;
784 struct pl011_sgbuf *sgbuf = use_buf_b ?
785 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
787 u32 fifotaken = 0; /* only used for vdbg() */
789 struct pl011_dmarx_data *dmarx = &uap->dmarx;
792 if (uap->dmarx.poll_rate) {
793 /* The data can be taken by polling */
794 dmataken = sgbuf->sg.length - dmarx->last_residue;
795 /* Recalculate the pending size */
if (pending >= dmataken)
	pending -= dmataken;
/* Pick up the remaining data from the DMA */
/*
 * First take all chars in the DMA pipe, then look in the FIFO.
 * Note that tty_insert_flip_string() tries to take as many chars
 * as it can.
 */
dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				   pending);
811 uap->port.icount.rx += dma_count;
812 if (dma_count < pending)
813 dev_warn(uap->port.dev,
814 "couldn't insert all characters (TTY is full?)\n");
817 /* Reset the last_residue for Rx DMA poll */
818 if (uap->dmarx.poll_rate)
819 dmarx->last_residue = sgbuf->sg.length;
822 * Only continue with trying to read the FIFO if all DMA chars have
825 if (dma_count == pending && readfifo) {
826 /* Clear any error flags */
827 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
828 uap->port.membase + REG_ICR);
831 * If we read all the DMA'd characters, and we had an
832 * incomplete buffer, that could be due to an rx error, or
* maybe we just timed out. Read any pending chars and check
* the rx error.
836 * Error conditions will only occur in the FIFO, these will
837 * trigger an immediate interrupt and stop the DMA job, so we
838 * will always find the error in the FIFO, never in the DMA
841 fifotaken = pl011_fifo_to_tty(uap);
844 spin_unlock(&uap->port.lock);
845 dev_vdbg(uap->port.dev,
846 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
847 dma_count, fifotaken);
848 tty_flip_buffer_push(port);
849 spin_lock(&uap->port.lock);
852 static void pl011_dma_rx_irq(struct uart_amba_port *uap)
854 struct pl011_dmarx_data *dmarx = &uap->dmarx;
855 struct dma_chan *rxchan = dmarx->chan;
856 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
857 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
859 struct dma_tx_state state;
860 enum dma_status dmastat;
/*
 * Pause the transfer so we can trust the current counter,
 * do this before we pause the PL011 block, else we may
 * overflow the FIFO.
 */
867 if (dmaengine_pause(rxchan))
868 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
869 dmastat = rxchan->device->device_tx_status(rxchan,
870 dmarx->cookie, &state);
871 if (dmastat != DMA_PAUSED)
872 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
874 /* Disable RX DMA - incoming data will wait in the FIFO */
875 uap->dmacr &= ~UART011_RXDMAE;
876 writew(uap->dmacr, uap->port.membase + REG_DMACR);
877 uap->dmarx.running = false;
879 pending = sgbuf->sg.length - state.residue;
880 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
881 /* Then we terminate the transfer - we now know our residue */
882 dmaengine_terminate_all(rxchan);
/*
 * This will take the chars we have so far and insert
 * them into the TTY framework.
 */
888 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
890 /* Switch buffer & re-trigger DMA job */
891 dmarx->use_buf_b = !dmarx->use_buf_b;
892 if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev,
	"could not retrigger RX DMA job, falling back to interrupt mode\n");
895 uap->im |= UART011_RXIM;
896 writew(uap->im, uap->port.membase + REG_IMSC);
900 static void pl011_dma_rx_callback(void *data)
902 struct uart_amba_port *uap = data;
903 struct pl011_dmarx_data *dmarx = &uap->dmarx;
904 struct dma_chan *rxchan = dmarx->chan;
905 bool lastbuf = dmarx->use_buf_b;
906 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
907 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
909 struct dma_tx_state state;
913 * This completion interrupt occurs typically when the
914 * RX buffer is totally stuffed but no timeout has yet
915 * occurred. When that happens, we just want the RX
916 * routine to flush out the secondary DMA buffer while
917 * we immediately trigger the next DMA job.
919 spin_lock_irq(&uap->port.lock);
921 * Rx data can be taken by the UART interrupts during
922 * the DMA irq handler. So we check the residue here.
924 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
925 pending = sgbuf->sg.length - state.residue;
926 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
927 /* Then we terminate the transfer - we now know our residue */
928 dmaengine_terminate_all(rxchan);
930 uap->dmarx.running = false;
931 dmarx->use_buf_b = !lastbuf;
932 ret = pl011_dma_rx_trigger_dma(uap);
934 pl011_dma_rx_chars(uap, pending, lastbuf, false);
935 spin_unlock_irq(&uap->port.lock);
937 * Do this check after we picked the DMA chars so we don't
938 * get some IRQ immediately from RX.
dev_dbg(uap->port.dev,
	"could not retrigger RX DMA job, falling back to interrupt mode\n");
943 uap->im |= UART011_RXIM;
944 writew(uap->im, uap->port.membase + REG_IMSC);
949 * Stop accepting received characters, when we're shutting down or
950 * suspending this port.
951 * Locking: called with port lock held and IRQs disabled.
953 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
955 /* FIXME. Just disable the DMA enable */
956 uap->dmacr &= ~UART011_RXDMAE;
957 writew(uap->dmacr, uap->port.membase + REG_DMACR);
/*
 * Timer handler for Rx DMA polling.
 * On each poll it checks the residue in the DMA buffer, transfers
 * data to the TTY, and updates last_residue for the next poll.
 */
965 static void pl011_dma_rx_poll(unsigned long args)
967 struct uart_amba_port *uap = (struct uart_amba_port *)args;
968 struct tty_port *port = &uap->port.state->port;
969 struct pl011_dmarx_data *dmarx = &uap->dmarx;
970 struct dma_chan *rxchan = uap->dmarx.chan;
971 unsigned long flags = 0;
972 unsigned int dmataken = 0;
973 unsigned int size = 0;
974 struct pl011_sgbuf *sgbuf;
976 struct dma_tx_state state;
978 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
979 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
980 if (likely(state.residue < dmarx->last_residue)) {
981 dmataken = sgbuf->sg.length - dmarx->last_residue;
982 size = dmarx->last_residue - state.residue;
dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				   size);
985 if (dma_count == size)
986 dmarx->last_residue = state.residue;
987 dmarx->last_jiffies = jiffies;
989 tty_flip_buffer_push(port);
992 * If no data is received in poll_timeout, the driver will fall back
993 * to interrupt mode. We will retrigger DMA at the first interrupt.
995 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
996 > uap->dmarx.poll_timeout) {
998 spin_lock_irqsave(&uap->port.lock, flags);
999 pl011_dma_rx_stop(uap);
1000 uap->im |= UART011_RXIM;
1001 writew(uap->im, uap->port.membase + REG_IMSC);
1002 spin_unlock_irqrestore(&uap->port.lock, flags);
1004 uap->dmarx.running = false;
1005 dmaengine_terminate_all(rxchan);
1006 del_timer(&uap->dmarx.timer);
1008 mod_timer(&uap->dmarx.timer,
1009 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
1013 static void pl011_dma_startup(struct uart_amba_port *uap)
1017 if (!uap->dma_probed)
1018 pl011_dma_probe(uap);
1020 if (!uap->dmatx.chan)
1023 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
1024 if (!uap->dmatx.buf) {
1025 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
1026 uap->port.fifosize = uap->fifosize;
1030 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
1032 /* The DMA buffer is now the FIFO the TTY subsystem can use */
1033 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1034 uap->using_tx_dma = true;
1036 if (!uap->dmarx.chan)
1039 /* Allocate and map DMA RX buffers */
1040 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1043 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1044 "RX buffer A", ret);
1048 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
1051 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1052 "RX buffer B", ret);
1053 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1058 uap->using_rx_dma = true;
1061 /* Turn on DMA error (RX/TX will be enabled on demand) */
1062 uap->dmacr |= UART011_DMAONERR;
1063 writew(uap->dmacr, uap->port.membase + REG_DMACR);
/*
 * ST Micro variants have a specific DMA burst threshold
 * compensation. Set this to 16 bytes, so bursts will only
 * be issued above/below 16 bytes.
 */
1070 if (uap->vendor->dma_threshold)
1071 writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1072 uap->port.membase + REG_ST_DMAWM);
1075 if (uap->using_rx_dma) {
1076 if (pl011_dma_rx_trigger_dma(uap))
dev_dbg(uap->port.dev,
	"could not trigger initial RX DMA job, falling back to interrupt mode\n");
1079 if (uap->dmarx.poll_rate) {
1080 init_timer(&(uap->dmarx.timer));
1081 uap->dmarx.timer.function = pl011_dma_rx_poll;
1082 uap->dmarx.timer.data = (unsigned long)uap;
1083 mod_timer(&uap->dmarx.timer,
1085 msecs_to_jiffies(uap->dmarx.poll_rate));
1086 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1087 uap->dmarx.last_jiffies = jiffies;
1092 static void pl011_dma_shutdown(struct uart_amba_port *uap)
1094 if (!(uap->using_tx_dma || uap->using_rx_dma))
1097 /* Disable RX and TX DMA */
1098 while (readw(uap->port.membase + REG_FR) & UART01x_FR_BUSY)
1101 spin_lock_irq(&uap->port.lock);
1102 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1103 writew(uap->dmacr, uap->port.membase + REG_DMACR);
1104 spin_unlock_irq(&uap->port.lock);
1106 if (uap->using_tx_dma) {
1107 /* In theory, this should already be done by pl011_dma_flush_buffer */
1108 dmaengine_terminate_all(uap->dmatx.chan);
1109 if (uap->dmatx.queued) {
1110 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
1112 uap->dmatx.queued = false;
1115 kfree(uap->dmatx.buf);
1116 uap->using_tx_dma = false;
1119 if (uap->using_rx_dma) {
1120 dmaengine_terminate_all(uap->dmarx.chan);
1121 /* Clean up the RX DMA */
1122 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
1123 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
1124 if (uap->dmarx.poll_rate)
1125 del_timer_sync(&uap->dmarx.timer);
1126 uap->using_rx_dma = false;
1130 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1132 return uap->using_rx_dma;
1135 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1137 return uap->using_rx_dma && uap->dmarx.running;
1141 /* Blank functions if the DMA engine is not available */
1142 static inline void pl011_dma_probe(struct uart_amba_port *uap)
1146 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1150 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1154 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1158 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1163 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1167 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1172 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1176 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1180 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1185 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1190 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1195 #define pl011_dma_flush_buffer NULL
1198 static void pl011_stop_tx(struct uart_port *port)
1200 struct uart_amba_port *uap =
1201 container_of(port, struct uart_amba_port, port);
1203 uap->im &= ~UART011_TXIM;
1204 writew(uap->im, uap->port.membase + REG_IMSC);
1205 pl011_dma_tx_stop(uap);
1208 static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
1210 /* Start TX with programmed I/O only (no DMA) */
1211 static void pl011_start_tx_pio(struct uart_amba_port *uap)
1213 uap->im |= UART011_TXIM;
1214 writew(uap->im, uap->port.membase + REG_IMSC);
1215 pl011_tx_chars(uap, false);
1218 static void pl011_start_tx(struct uart_port *port)
1220 struct uart_amba_port *uap =
1221 container_of(port, struct uart_amba_port, port);
1223 if (!pl011_dma_tx_start(uap))
1224 pl011_start_tx_pio(uap);
1227 static void pl011_stop_rx(struct uart_port *port)
1229 struct uart_amba_port *uap =
1230 container_of(port, struct uart_amba_port, port);
1232 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1233 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1234 writew(uap->im, uap->port.membase + REG_IMSC);
1236 pl011_dma_rx_stop(uap);
1239 static void pl011_enable_ms(struct uart_port *port)
1241 struct uart_amba_port *uap =
1242 container_of(port, struct uart_amba_port, port);
1244 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1245 writew(uap->im, uap->port.membase + REG_IMSC);
1248 static void pl011_rx_chars(struct uart_amba_port *uap)
1249 __releases(&uap->port.lock)
1250 __acquires(&uap->port.lock)
1252 pl011_fifo_to_tty(uap);
1254 spin_unlock(&uap->port.lock);
1255 tty_flip_buffer_push(&uap->port.state->port);
/*
 * If we were temporarily out of DMA mode, attempt to
 * switch back to DMA mode again.
 */
1260 if (pl011_dma_rx_available(uap)) {
1261 if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev,
	"could not trigger RX DMA job, falling back to interrupt mode again\n");
1264 uap->im |= UART011_RXIM;
1265 writew(uap->im, uap->port.membase + REG_IMSC);
1267 #ifdef CONFIG_DMA_ENGINE
1268 /* Start Rx DMA poll */
1269 if (uap->dmarx.poll_rate) {
1270 uap->dmarx.last_jiffies = jiffies;
1271 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1272 mod_timer(&uap->dmarx.timer,
1274 msecs_to_jiffies(uap->dmarx.poll_rate));
1279 spin_lock(&uap->port.lock);
1282 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1285 if (unlikely(!from_irq) &&
1286 readw(uap->port.membase + REG_FR) & UART01x_FR_TXFF)
1287 return false; /* unable to transmit character */
1289 writew(c, uap->port.membase + REG_DR);
1290 uap->port.icount.tx++;
1295 static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
1297 struct circ_buf *xmit = &uap->port.state->xmit;
1298 int count = uap->fifosize >> 1;
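/*
 * When called from the interrupt handler we may write up to half the
 * FIFO without polling TXFF: TXIS asserts at the TX watermark
 * programmed into REG_IFLS (both vendor structs in this file use a
 * half-FIFO TX threshold), so at least fifosize/2 slots are known to
 * be free; hence pl011_tx_char() only checks TXFF in the !from_irq
 * case.
 */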
1300 if (uap->port.x_char) {
	if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
		return;
	uap->port.x_char = 0;
	--count;
}
1306 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
	pl011_stop_tx(&uap->port);
	return;
}
1311 /* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
	return;

do {
	if (likely(from_irq) && count-- == 0)
		break;

	if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
		break;
1322 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1323 } while (!uart_circ_empty(xmit));
1325 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1326 uart_write_wakeup(&uap->port);
1328 if (uart_circ_empty(xmit))
1329 pl011_stop_tx(&uap->port);
1332 static void pl011_modem_status(struct uart_amba_port *uap)
1334 unsigned int status, delta;
1336 status = readw(uap->port.membase + REG_FR) & UART01x_FR_MODEM_ANY;
1338 delta = status ^ uap->old_status;
1339 uap->old_status = status;
1344 if (delta & UART01x_FR_DCD)
1345 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1347 if (delta & UART01x_FR_DSR)
1348 uap->port.icount.dsr++;
1350 if (delta & UART01x_FR_CTS)
1351 uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
1353 wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1356 static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
1358 unsigned int dummy_read;
1360 if (!uap->vendor->cts_event_workaround)
/* workaround to make sure that all bits are unlocked. */
1364 writew(0x00, uap->port.membase + REG_ICR);
/*
 * Workaround: introduce a 26 ns (1 UART clock) delay before W1C;
 * a single APB access incurs a 2 PCLK (133.12 MHz) delay,
 * so add 2 dummy reads.
 */
1371 dummy_read = readw(uap->port.membase + REG_ICR);
1372 dummy_read = readw(uap->port.membase + REG_ICR);
1375 static irqreturn_t pl011_int(int irq, void *dev_id)
1377 struct uart_amba_port *uap = dev_id;
1378 unsigned long flags;
1379 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1383 spin_lock_irqsave(&uap->port.lock, flags);
1384 imsc = readw(uap->port.membase + REG_IMSC);
1385 status = readw(uap->port.membase + REG_RIS) & imsc;
1388 check_apply_cts_event_workaround(uap);
1390 writew(status & ~(UART011_TXIS|UART011_RTIS|
1392 uap->port.membase + REG_ICR);
1394 if (status & (UART011_RTIS|UART011_RXIS)) {
1395 if (pl011_dma_rx_running(uap))
1396 pl011_dma_rx_irq(uap);
1398 pl011_rx_chars(uap);
1400 if (status & (UART011_DSRMIS|UART011_DCDMIS|
1401 UART011_CTSMIS|UART011_RIMIS))
1402 pl011_modem_status(uap);
1403 if (status & UART011_TXIS)
1404 pl011_tx_chars(uap, true);
1406 if (pass_counter-- == 0)
1409 status = readw(uap->port.membase + REG_RIS) & imsc;
1410 } while (status != 0);
1414 spin_unlock_irqrestore(&uap->port.lock, flags);
1416 return IRQ_RETVAL(handled);
1419 static unsigned int pl011_tx_empty(struct uart_port *port)
1421 struct uart_amba_port *uap =
1422 container_of(port, struct uart_amba_port, port);
1423 unsigned int status = readw(uap->port.membase + REG_FR);
1424 return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1427 static unsigned int pl011_get_mctrl(struct uart_port *port)
1429 struct uart_amba_port *uap =
1430 container_of(port, struct uart_amba_port, port);
1431 unsigned int result = 0;
1432 unsigned int status = readw(uap->port.membase + REG_FR);
#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit
1438 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1439 TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1440 TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1441 TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1446 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1448 struct uart_amba_port *uap =
1449 container_of(port, struct uart_amba_port, port);
1452 cr = readw(uap->port.membase + REG_CR);
#define TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit
1460 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1461 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1462 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1463 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1464 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1467 /* We need to disable auto-RTS if we want to turn RTS off */
1468 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1472 writew(cr, uap->port.membase + REG_CR);
1475 static void pl011_break_ctl(struct uart_port *port, int break_state)
1477 struct uart_amba_port *uap =
1478 container_of(port, struct uart_amba_port, port);
1479 unsigned long flags;
1482 spin_lock_irqsave(&uap->port.lock, flags);
1483 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1484 if (break_state == -1)
1485 lcr_h |= UART01x_LCRH_BRK;
1487 lcr_h &= ~UART01x_LCRH_BRK;
1488 writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1489 spin_unlock_irqrestore(&uap->port.lock, flags);
1492 #ifdef CONFIG_CONSOLE_POLL
1494 static void pl011_quiesce_irqs(struct uart_port *port)
1496 struct uart_amba_port *uap =
1497 container_of(port, struct uart_amba_port, port);
1498 unsigned char __iomem *regs = uap->port.membase;
1500 writew(readw(regs + REG_MIS), regs + REG_ICR);
1502 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1503 * we simply mask it. start_tx() will unmask it.
1505 * Note we can race with start_tx(), and if the race happens, the
1506 * polling user might get another interrupt just after we clear it.
* But it should be OK and can happen even without the race, e.g. the
* controller immediately got some new data and raised the IRQ.
1510 * And whoever uses polling routines assumes that it manages the device
* (including tx queue), so we're also fine with start_tx()'s caller
* side.
1514 writew(readw(regs + REG_IMSC) & ~UART011_TXIM, regs + REG_IMSC);
1517 static int pl011_get_poll_char(struct uart_port *port)
1519 struct uart_amba_port *uap =
1520 container_of(port, struct uart_amba_port, port);
1521 unsigned int status;
/*
 * The caller might need IRQs lowered, e.g. if used with the KDB NMI
 * debugger.
 */
1527 pl011_quiesce_irqs(port);
1529 status = readw(uap->port.membase + REG_FR);
1530 if (status & UART01x_FR_RXFE)
1531 return NO_POLL_CHAR;
1533 return readw(uap->port.membase + REG_DR);
1536 static void pl011_put_poll_char(struct uart_port *port,
1539 struct uart_amba_port *uap =
1540 container_of(port, struct uart_amba_port, port);
1542 while (readw(uap->port.membase + REG_FR) & UART01x_FR_TXFF)
1545 writew(ch, uap->port.membase + REG_DR);
1548 #endif /* CONFIG_CONSOLE_POLL */
1550 static int pl011_hwinit(struct uart_port *port)
1552 struct uart_amba_port *uap =
1553 container_of(port, struct uart_amba_port, port);
/* Optionally enable pins to be muxed in and configured */
1557 pinctrl_pm_select_default_state(port->dev);
1560 * Try to enable the clock producer.
1562 retval = clk_prepare_enable(uap->clk);
1566 uap->port.uartclk = clk_get_rate(uap->clk);
1568 /* Clear pending error and receive interrupts */
1569 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
1570 UART011_RTIS | UART011_RXIS, uap->port.membase + REG_ICR);
/*
 * Save the interrupt enable mask, and enable RX interrupts in case
 * the interrupt is used for NMI entry.
 */
1576 uap->im = readw(uap->port.membase + REG_IMSC);
1577 writew(UART011_RTIM | UART011_RXIM, uap->port.membase + REG_IMSC);
1579 if (dev_get_platdata(uap->port.dev)) {
1580 struct amba_pl011_data *plat;
1582 plat = dev_get_platdata(uap->port.dev);
1589 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1591 writew(lcr_h, uap->port.membase + uap->lcrh_rx);
1592 if (uap->lcrh_rx != uap->lcrh_tx) {
int i;
/*
 * Wait 10 PCLKs before writing the LCRH_TX register;
 * to get this delay, write the read-only MIS register 10 times.
 */
1598 for (i = 0; i < 10; ++i)
1599 writew(0xff, uap->port.membase + REG_MIS);
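/*
 * REG_MIS is a read-only (masked interrupt status) register, so the
 * writes above change no state; each one merely consumes an APB cycle
 * to produce the required delay.
 */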
1600 writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1604 static int pl011_allocate_irq(struct uart_amba_port *uap)
1606 writew(uap->im, uap->port.membase + REG_IMSC);
1608 return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
/*
 * Enable interrupts; only timeouts when using DMA.
 * If the initial RX DMA job failed, start in interrupt mode
 * as well.
 */
1616 static void pl011_enable_interrupts(struct uart_amba_port *uap)
1618 spin_lock_irq(&uap->port.lock);
1620 /* Clear out any spuriously appearing RX interrupts */
1621 writew(UART011_RTIS | UART011_RXIS,
1622 uap->port.membase + REG_ICR);
1623 uap->im = UART011_RTIM;
1624 if (!pl011_dma_rx_running(uap))
1625 uap->im |= UART011_RXIM;
1626 writew(uap->im, uap->port.membase + REG_IMSC);
1627 spin_unlock_irq(&uap->port.lock);
1630 static int pl011_startup(struct uart_port *port)
1632 struct uart_amba_port *uap =
1633 container_of(port, struct uart_amba_port, port);
1637 retval = pl011_hwinit(port);
1641 retval = pl011_allocate_irq(uap);
1645 writew(uap->vendor->ifls, uap->port.membase + REG_IFLS);
1647 spin_lock_irq(&uap->port.lock);
1649 /* restore RTS and DTR */
1650 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1651 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1652 writew(cr, uap->port.membase + REG_CR);
1654 spin_unlock_irq(&uap->port.lock);
1657 * initialise the old status of the modem signals
1659 uap->old_status = readw(uap->port.membase + REG_FR) &
1660 UART01x_FR_MODEM_ANY;
1663 pl011_dma_startup(uap);
1665 pl011_enable_interrupts(uap);
1670 clk_disable_unprepare(uap->clk);
1674 static int sbsa_uart_startup(struct uart_port *port)
1676 struct uart_amba_port *uap =
1677 container_of(port, struct uart_amba_port, port);
1680 retval = pl011_hwinit(port);
1684 retval = pl011_allocate_irq(uap);
1688 /* The SBSA UART does not support any modem status lines. */
1689 uap->old_status = 0;
1691 pl011_enable_interrupts(uap);
1696 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1701 val = readw(uap->port.membase + lcrh);
1702 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1703 writew(val, uap->port.membase + lcrh);
/*
 * Disable the port. It should not disable RTS and DTR;
 * their state should be preserved so it can be restored
 * during startup().
 */
1711 static void pl011_disable_uart(struct uart_amba_port *uap)
1715 uap->autorts = false;
1716 spin_lock_irq(&uap->port.lock);
1717 cr = readw(uap->port.membase + REG_CR);
1719 cr &= UART011_CR_RTS | UART011_CR_DTR;
1720 cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1721 writew(cr, uap->port.membase + REG_CR);
1722 spin_unlock_irq(&uap->port.lock);
1725 * disable break condition and fifos
1727 pl011_shutdown_channel(uap, uap->lcrh_rx);
1728 if (uap->lcrh_rx != uap->lcrh_tx)
1729 pl011_shutdown_channel(uap, uap->lcrh_tx);
1732 static void pl011_disable_interrupts(struct uart_amba_port *uap)
1734 spin_lock_irq(&uap->port.lock);
1736 /* mask all interrupts and clear all pending ones */
1738 writew(uap->im, uap->port.membase + REG_IMSC);
1739 writew(0xffff, uap->port.membase + REG_ICR);
1741 spin_unlock_irq(&uap->port.lock);
1744 static void pl011_shutdown(struct uart_port *port)
1746 struct uart_amba_port *uap =
1747 container_of(port, struct uart_amba_port, port);
1749 pl011_disable_interrupts(uap);
1751 pl011_dma_shutdown(uap);
1753 free_irq(uap->port.irq, uap);
1755 pl011_disable_uart(uap);
1758 * Shut down the clock producer
1760 clk_disable_unprepare(uap->clk);
1761 /* Optionally let pins go into sleep states */
1762 pinctrl_pm_select_sleep_state(port->dev);
1764 if (dev_get_platdata(uap->port.dev)) {
1765 struct amba_pl011_data *plat;
1767 plat = dev_get_platdata(uap->port.dev);
1772 if (uap->port.ops->flush_buffer)
1773 uap->port.ops->flush_buffer(port);
1776 static void sbsa_uart_shutdown(struct uart_port *port)
1778 struct uart_amba_port *uap =
1779 container_of(port, struct uart_amba_port, port);
1781 pl011_disable_interrupts(uap);
1783 free_irq(uap->port.irq, uap);
1785 if (uap->port.ops->flush_buffer)
1786 uap->port.ops->flush_buffer(port);
1790 pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
1792 port->read_status_mask = UART011_DR_OE | 255;
1793 if (termios->c_iflag & INPCK)
1794 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1795 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1796 port->read_status_mask |= UART011_DR_BE;
1799 * Characters to ignore
1801 port->ignore_status_mask = 0;
1802 if (termios->c_iflag & IGNPAR)
1803 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1804 if (termios->c_iflag & IGNBRK) {
1805 port->ignore_status_mask |= UART011_DR_BE;
1807 * If we're ignoring parity and break indicators,
1808 * ignore overruns too (for real raw support).
1810 if (termios->c_iflag & IGNPAR)
1811 port->ignore_status_mask |= UART011_DR_OE;
1815 * Ignore all characters if CREAD is not set.
1817 if ((termios->c_cflag & CREAD) == 0)
1818 port->ignore_status_mask |= UART_DUMMY_DR_RX;
1822 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1823 struct ktermios *old)
1825 struct uart_amba_port *uap =
1826 container_of(port, struct uart_amba_port, port);
1827 unsigned int lcr_h, old_cr;
1828 unsigned long flags;
1829 unsigned int baud, quot, clkdiv;
if (uap->vendor->oversampling)
	clkdiv = 8;
else
	clkdiv = 16;
1837 * Ask the core to calculate the divisor for us.
1839 baud = uart_get_baud_rate(port, termios, old, 0,
1840 port->uartclk / clkdiv);
1841 #ifdef CONFIG_DMA_ENGINE
1843 * Adjust RX DMA polling rate with baud rate if not specified.
1845 if (uap->dmarx.auto_poll_rate)
1846 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
1849 if (baud > port->uartclk/16)
	quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
else
	quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
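/*
 * quot is the baud divisor scaled by 64, i.e. the combined
 * {IBRD, 6-bit FBRD} value programmed below. Worked example, assuming
 * normal 16x sampling with a 24 MHz uartclk at 115200 baud:
 * quot = 24000000 * 4 / 115200 = 833 = 13 * 64 + 1,
 * so IBRD = 833 >> 6 = 13 and FBRD = 833 & 0x3f = 1.
 */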
switch (termios->c_cflag & CSIZE) {
case CS5:
	lcr_h = UART01x_LCRH_WLEN_5;
	break;
case CS6:
	lcr_h = UART01x_LCRH_WLEN_6;
	break;
case CS7:
	lcr_h = UART01x_LCRH_WLEN_7;
	break;
default: /* CS8 */
	lcr_h = UART01x_LCRH_WLEN_8;
	break;
}
1868 if (termios->c_cflag & CSTOPB)
1869 lcr_h |= UART01x_LCRH_STP2;
1870 if (termios->c_cflag & PARENB) {
1871 lcr_h |= UART01x_LCRH_PEN;
1872 if (!(termios->c_cflag & PARODD))
1873 lcr_h |= UART01x_LCRH_EPS;
1875 if (uap->fifosize > 1)
1876 lcr_h |= UART01x_LCRH_FEN;
1878 spin_lock_irqsave(&port->lock, flags);
1881 * Update the per-port timeout.
1883 uart_update_timeout(port, termios->c_cflag, baud);
1885 pl011_setup_status_masks(port, termios);
1887 if (UART_ENABLE_MS(port, termios->c_cflag))
1888 pl011_enable_ms(port);
1890 /* first, disable everything */
1891 old_cr = readw(port->membase + REG_CR);
1892 writew(0, port->membase + REG_CR);
1894 if (termios->c_cflag & CRTSCTS) {
1895 if (old_cr & UART011_CR_RTS)
1896 old_cr |= UART011_CR_RTSEN;
1898 old_cr |= UART011_CR_CTSEN;
	uap->autorts = true;
} else {
	old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
	uap->autorts = false;
}
1905 if (uap->vendor->oversampling) {
1906 if (baud > port->uartclk / 16)
		old_cr |= ST_UART011_CR_OVSFACT;
	else
		old_cr &= ~ST_UART011_CR_OVSFACT;
}
/*
 * Workaround for the ST Micro oversampling variants to
 * increase the bitrate slightly, by lowering the divisor,
 * to avoid delayed sampling of the start bit at high speeds,
 * otherwise we see data corruption.
 */
1918 if (uap->vendor->oversampling) {
if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
	quot -= 1;
else if ((baud > 3250000) && (quot > 2))
	quot -= 2;
1925 writew(quot & 0x3f, port->membase + REG_FBRD);
1926 writew(quot >> 6, port->membase + REG_IBRD);
1929 * ----------v----------v----------v----------v-----
1930 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
1931 * REG_FBRD & REG_IBRD.
1932 * ----------^----------^----------^----------^-----
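/*
 * Per the ARM PL011 TRM, the IBRD and FBRD contents only take effect
 * on a subsequent write to LCRH, which is why this ordering is
 * mandatory.
 */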
1934 pl011_write_lcr_h(uap, lcr_h);
1935 writew(old_cr, port->membase + REG_CR);
1937 spin_unlock_irqrestore(&port->lock, flags);
1941 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
1942 struct ktermios *old)
1944 struct uart_amba_port *uap =
1945 container_of(port, struct uart_amba_port, port);
1946 unsigned long flags;
1948 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
1950 /* The SBSA UART only supports 8n1 without hardware flow control. */
1951 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
1952 termios->c_cflag &= ~(CMSPAR | CRTSCTS);
1953 termios->c_cflag |= CS8 | CLOCAL;
1955 spin_lock_irqsave(&port->lock, flags);
1956 uart_update_timeout(port, CS8, uap->fixed_baud);
1957 pl011_setup_status_masks(port, termios);
1958 spin_unlock_irqrestore(&port->lock, flags);
1961 static const char *pl011_type(struct uart_port *port)
1963 struct uart_amba_port *uap =
1964 container_of(port, struct uart_amba_port, port);
1965 return uap->port.type == PORT_AMBA ? uap->type : NULL;
1969 * Release the memory region(s) being used by 'port'
1971 static void pl011_release_port(struct uart_port *port)
1973 release_mem_region(port->mapbase, SZ_4K);
1977 * Request the memory region(s) being used by 'port'
1979 static int pl011_request_port(struct uart_port *port)
1981 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1982 != NULL ? 0 : -EBUSY;
1986 * Configure/autoconfigure the port.
1988 static void pl011_config_port(struct uart_port *port, int flags)
1990 if (flags & UART_CONFIG_TYPE) {
1991 port->type = PORT_AMBA;
1992 pl011_request_port(port);
1997 * verify the new serial_struct (for TIOCSSERIAL).
1999 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
	ret = -EINVAL;
if (ser->irq < 0 || ser->irq >= nr_irqs)
	ret = -EINVAL;
if (ser->baud_base < 9600)
	ret = -EINVAL;
2011 static struct uart_ops amba_pl011_pops = {
2012 .tx_empty = pl011_tx_empty,
2013 .set_mctrl = pl011_set_mctrl,
2014 .get_mctrl = pl011_get_mctrl,
2015 .stop_tx = pl011_stop_tx,
2016 .start_tx = pl011_start_tx,
2017 .stop_rx = pl011_stop_rx,
2018 .enable_ms = pl011_enable_ms,
2019 .break_ctl = pl011_break_ctl,
2020 .startup = pl011_startup,
2021 .shutdown = pl011_shutdown,
2022 .flush_buffer = pl011_dma_flush_buffer,
2023 .set_termios = pl011_set_termios,
2025 .release_port = pl011_release_port,
2026 .request_port = pl011_request_port,
2027 .config_port = pl011_config_port,
2028 .verify_port = pl011_verify_port,
2029 #ifdef CONFIG_CONSOLE_POLL
2030 .poll_init = pl011_hwinit,
2031 .poll_get_char = pl011_get_poll_char,
2032 .poll_put_char = pl011_put_poll_char,
2036 static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
2040 static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
2045 static const struct uart_ops sbsa_uart_pops = {
2046 .tx_empty = pl011_tx_empty,
2047 .set_mctrl = sbsa_uart_set_mctrl,
2048 .get_mctrl = sbsa_uart_get_mctrl,
2049 .stop_tx = pl011_stop_tx,
2050 .start_tx = pl011_start_tx,
2051 .stop_rx = pl011_stop_rx,
2052 .startup = sbsa_uart_startup,
2053 .shutdown = sbsa_uart_shutdown,
2054 .set_termios = sbsa_uart_set_termios,
2056 .release_port = pl011_release_port,
2057 .request_port = pl011_request_port,
2058 .config_port = pl011_config_port,
2059 .verify_port = pl011_verify_port,
2060 #ifdef CONFIG_CONSOLE_POLL
2061 .poll_init = pl011_hwinit,
2062 .poll_get_char = pl011_get_poll_char,
2063 .poll_put_char = pl011_put_poll_char,
2067 static struct uart_amba_port *amba_ports[UART_NR];
2069 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
2071 static void pl011_console_putchar(struct uart_port *port, int ch)
2073 struct uart_amba_port *uap =
2074 container_of(port, struct uart_amba_port, port);
2076 while (readw(uap->port.membase + REG_FR) & UART01x_FR_TXFF)
2078 writew(ch, uap->port.membase + REG_DR);
2082 pl011_console_write(struct console *co, const char *s, unsigned int count)
2084 struct uart_amba_port *uap = amba_ports[co->index];
2085 unsigned int status, old_cr = 0, new_cr;
2086 unsigned long flags;
2089 clk_enable(uap->clk);
2091 local_irq_save(flags);
2092 if (uap->port.sysrq)
2094 else if (oops_in_progress)
2095 locked = spin_trylock(&uap->port.lock);
2097 spin_lock(&uap->port.lock);
/*
 * First save the CR, then disable the interrupts.
 */
2102 if (!uap->vendor->always_enabled) {
2103 old_cr = readw(uap->port.membase + REG_CR);
2104 new_cr = old_cr & ~UART011_CR_CTSEN;
2105 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
2106 writew(new_cr, uap->port.membase + REG_CR);
2109 uart_console_write(&uap->port, s, count, pl011_console_putchar);
/*
 * Finally, wait for the transmitter to become empty
 * and restore the CR.
 */
2116 status = readw(uap->port.membase + REG_FR);
2117 } while (status & UART01x_FR_BUSY);
2118 if (!uap->vendor->always_enabled)
2119 writew(old_cr, uap->port.membase + REG_CR);
2122 spin_unlock(&uap->port.lock);
2123 local_irq_restore(flags);
2125 clk_disable(uap->clk);
2129 pl011_console_get_options(struct uart_amba_port *uap, int *baud,
2130 int *parity, int *bits)
2132 if (readw(uap->port.membase + REG_CR) & UART01x_CR_UARTEN) {
2133 unsigned int lcr_h, ibrd, fbrd;
2135 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
2138 if (lcr_h & UART01x_LCRH_PEN) {
	if (lcr_h & UART01x_LCRH_EPS)
		*parity = 'e';
	else
		*parity = 'o';
}

if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
	*bits = 7;
else
	*bits = 8;
2150 ibrd = readw(uap->port.membase + REG_IBRD);
2151 fbrd = readw(uap->port.membase + REG_FBRD);
2153 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
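/*
 * This inverts the divisor programmed in pl011_set_termios(), where
 * quot == 64 * IBRD + FBRD and quot = uartclk * 4 / baud, hence
 * baud = uartclk * 4 / (64 * ibrd + fbrd).
 */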
2155 if (uap->vendor->oversampling) {
2156 if (readw(uap->port.membase + REG_CR)
		  & ST_UART011_CR_OVSFACT)
		*baud *= 2;
2163 static int __init pl011_console_setup(struct console *co, char *options)
2165 struct uart_amba_port *uap;
/*
 * Check whether an invalid uart number has been specified, and
 * if so, search for the first available port that does have
 * console support.
 */
if (co->index >= UART_NR)
	co->index = 0;
uap = amba_ports[co->index];
if (!uap)
	return -ENODEV;
2183 /* Allow pins to be muxed in and configured */
2184 pinctrl_pm_select_default_state(uap->port.dev);
2186 ret = clk_prepare(uap->clk);
2190 if (dev_get_platdata(uap->port.dev)) {
2191 struct amba_pl011_data *plat;
2193 plat = dev_get_platdata(uap->port.dev);
2198 uap->port.uartclk = clk_get_rate(uap->clk);
2200 if (uap->vendor->fixed_options) {
2201 baud = uap->fixed_baud;
2204 uart_parse_options(options,
2205 &baud, &parity, &bits, &flow);
2207 pl011_console_get_options(uap, &baud, &parity, &bits);
2210 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2213 static struct uart_driver amba_reg;
2214 static struct console amba_console = {
2216 .write = pl011_console_write,
2217 .device = uart_console_device,
2218 .setup = pl011_console_setup,
2219 .flags = CON_PRINTBUFFER,
2224 #define AMBA_CONSOLE (&amba_console)
2226 static void pl011_putc(struct uart_port *port, int c)
2228 while (readl(port->membase + REG_FR) & UART01x_FR_TXFF)
2230 writeb(c, port->membase + REG_DR);
2231 while (readl(port->membase + REG_FR) & UART01x_FR_BUSY)
2235 static void pl011_early_write(struct console *con, const char *s, unsigned n)
2237 struct earlycon_device *dev = con->data;
2239 uart_console_write(&dev->port, s, n, pl011_putc);
2242 static int __init pl011_early_console_setup(struct earlycon_device *device,
2245 if (!device->port.membase)
2248 device->con->write = pl011_early_write;
2251 EARLYCON_DECLARE(pl011, pl011_early_console_setup);
2252 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2255 #define AMBA_CONSOLE NULL
2258 static struct uart_driver amba_reg = {
2259 .owner = THIS_MODULE,
2260 .driver_name = "ttyAMA",
2261 .dev_name = "ttyAMA",
2262 .major = SERIAL_AMBA_MAJOR,
2263 .minor = SERIAL_AMBA_MINOR,
2265 .cons = AMBA_CONSOLE,
2268 static int pl011_probe_dt_alias(int index, struct device *dev)
2270 struct device_node *np;
2271 static bool seen_dev_with_alias = false;
2272 static bool seen_dev_without_alias = false;
2275 if (!IS_ENABLED(CONFIG_OF))
2282 ret = of_alias_get_id(np, "serial");
2283 if (IS_ERR_VALUE(ret)) {
2284 seen_dev_without_alias = true;
2287 seen_dev_with_alias = true;
2288 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2289 dev_warn(dev, "requested serial port %d not available.\n", ret);
2294 if (seen_dev_with_alias && seen_dev_without_alias)
2295 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
/* also unregisters the driver if no more ports are left */
2301 static void pl011_unregister_port(struct uart_amba_port *uap)
2306 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2307 if (amba_ports[i] == uap)
2308 amba_ports[i] = NULL;
2309 else if (amba_ports[i])
2312 pl011_dma_remove(uap);
2314 uart_unregister_driver(&amba_reg);
2317 static int pl011_find_free_port(void)
2321 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2322 if (amba_ports[i] == NULL)
2328 static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2329 struct resource *mmiobase, int index)
2333 base = devm_ioremap_resource(dev, mmiobase);
2335 return PTR_ERR(base);
2337 index = pl011_probe_dt_alias(index, dev);
2340 uap->port.dev = dev;
2341 uap->port.mapbase = mmiobase->start;
2342 uap->port.membase = base;
2343 uap->port.iotype = UPIO_MEM;
2344 uap->port.fifosize = uap->fifosize;
2345 uap->port.flags = UPF_BOOT_AUTOCONF;
2346 uap->port.line = index;
2348 amba_ports[index] = uap;
2353 static int pl011_register_port(struct uart_amba_port *uap)
2357 /* Ensure interrupts from this UART are masked and cleared */
2358 writew(0, uap->port.membase + REG_IMSC);
2359 writew(0xffff, uap->port.membase + REG_ICR);
2361 if (!amba_reg.state) {
2362 ret = uart_register_driver(&amba_reg);
2364 dev_err(uap->port.dev,
2365 "Failed to register AMBA-PL011 driver\n");
2370 ret = uart_add_one_port(&amba_reg, &uap->port);
2372 pl011_unregister_port(uap);
2377 static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2379 struct uart_amba_port *uap;
2380 struct vendor_data *vendor = id->data;
2383 portnr = pl011_find_free_port();
2387 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2392 uap->clk = devm_clk_get(&dev->dev, NULL);
2393 if (IS_ERR(uap->clk))
2394 return PTR_ERR(uap->clk);
2396 uap->vendor = vendor;
2397 uap->lcrh_rx = vendor->lcrh_rx;
2398 uap->lcrh_tx = vendor->lcrh_tx;
2399 uap->fifosize = vendor->get_fifosize(dev);
2400 uap->port.irq = dev->irq[0];
2401 uap->port.ops = &amba_pl011_pops;
2403 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2405 ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
2409 amba_set_drvdata(dev, uap);
2411 return pl011_register_port(uap);
2414 static int pl011_remove(struct amba_device *dev)
2416 struct uart_amba_port *uap = amba_get_drvdata(dev);
2418 uart_remove_one_port(&amba_reg, &uap->port);
2419 pl011_unregister_port(uap);
2423 #ifdef CONFIG_PM_SLEEP
2424 static int pl011_suspend(struct device *dev)
2426 struct uart_amba_port *uap = dev_get_drvdata(dev);
2431 return uart_suspend_port(&amba_reg, &uap->port);
2434 static int pl011_resume(struct device *dev)
2436 struct uart_amba_port *uap = dev_get_drvdata(dev);
2441 return uart_resume_port(&amba_reg, &uap->port);
2445 static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2447 static int sbsa_uart_probe(struct platform_device *pdev)
2449 struct uart_amba_port *uap;
2455 * Check the mandatory baud rate parameter in the DT node early
2456 * so that we can easily exit with the error.
2458 if (pdev->dev.of_node) {
2459 struct device_node *np = pdev->dev.of_node;
2461 ret = of_property_read_u32(np, "current-speed", &baudrate);
2468 portnr = pl011_find_free_port();
2472 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
2477 uap->vendor = &vendor_sbsa;
2479 uap->port.irq = platform_get_irq(pdev, 0);
2480 uap->port.ops = &sbsa_uart_pops;
2481 uap->fixed_baud = baudrate;
2483 snprintf(uap->type, sizeof(uap->type), "SBSA");
2485 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2487 ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
2491 platform_set_drvdata(pdev, uap);
2493 return pl011_register_port(uap);
2496 static int sbsa_uart_remove(struct platform_device *pdev)
2498 struct uart_amba_port *uap = platform_get_drvdata(pdev);
2500 uart_remove_one_port(&amba_reg, &uap->port);
2501 pl011_unregister_port(uap);
2505 static const struct of_device_id sbsa_uart_of_match[] = {
2506 { .compatible = "arm,sbsa-uart", },
2509 MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
2511 static const struct acpi_device_id sbsa_uart_acpi_match[] = {
2515 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
2517 static struct platform_driver arm_sbsa_uart_platform_driver = {
2518 .probe = sbsa_uart_probe,
2519 .remove = sbsa_uart_remove,
2521 .name = "sbsa-uart",
2522 .of_match_table = of_match_ptr(sbsa_uart_of_match),
2523 .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
2527 static struct amba_id pl011_ids[] = {
2531 .data = &vendor_arm,
2541 MODULE_DEVICE_TABLE(amba, pl011_ids);
2543 static struct amba_driver pl011_driver = {
2545 .name = "uart-pl011",
2546 .pm = &pl011_dev_pm_ops,
2548 .id_table = pl011_ids,
2549 .probe = pl011_probe,
2550 .remove = pl011_remove,
2553 static int __init pl011_init(void)
2555 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2557 if (platform_driver_register(&arm_sbsa_uart_platform_driver))
2558 pr_warn("could not register SBSA UART platform driver\n");
2559 return amba_driver_register(&pl011_driver);
2562 static void __exit pl011_exit(void)
2564 platform_driver_unregister(&arm_sbsa_uart_platform_driver);
2565 amba_driver_unregister(&pl011_driver);
/*
 * While this can be a module, if built in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
2572 arch_initcall(pl011_init);
2573 module_exit(pl011_exit);
2575 MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2576 MODULE_DESCRIPTION("ARM AMBA serial port driver");
2577 MODULE_LICENSE("GPL");