/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

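/*
 * TX DMA completion callback: sync the transmit buffer back for CPU
 * access, advance the circular buffer tail past the chunk just sent,
 * wake up writers once the buffer drains below WAKEUP_CHARS, and try
 * to start the next transfer. If that fails, fall back to the normal
 * THRI interrupt-driven transmit path.
 */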
static void __dma_tx_complete(void *param)
{
        struct uart_8250_port   *p = param;
        struct uart_8250_dma    *dma = p->dma;
        struct circ_buf         *xmit = &p->port.state->xmit;
        unsigned long   flags;
        int             ret;

        dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
                                UART_XMIT_SIZE, DMA_TO_DEVICE);

        spin_lock_irqsave(&p->port.lock, flags);

        dma->tx_running = 0;

        xmit->tail += dma->tx_size;
        xmit->tail &= UART_XMIT_SIZE - 1;
        p->port.icount.tx += dma->tx_size;

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&p->port);

        ret = serial8250_tx_dma(p);
        if (ret) {
                p->ier |= UART_IER_THRI;
                serial_port_out(&p->port, UART_IER, p->ier);
        }

        spin_unlock_irqrestore(&p->port.lock, flags);
}

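/*
 * RX DMA completion callback: sync the RX bounce buffer for CPU access,
 * use the dmaengine residue to work out how many bytes actually arrived,
 * and push them into the tty flip buffer.
 */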
static void __dma_rx_complete(void *param)
{
        struct uart_8250_port   *p = param;
        struct uart_8250_dma    *dma = p->dma;
        struct tty_port         *tty_port = &p->port.state->port;
        struct dma_tx_state     state;
        int                     count;

        dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
                                dma->rx_size, DMA_FROM_DEVICE);

        dma->rx_running = 0;
        dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

        count = dma->rx_size - state.residue;

        tty_insert_flip_string(tty_port, dma->rx_buf, count);
        p->port.icount.rx += count;

        tty_flip_buffer_push(tty_port);
}

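/*
 * Kick off a TX DMA transfer for the data queued in the circular transmit
 * buffer. Only the contiguous run up to the end of the buffer is sent per
 * transfer; the completion callback restarts the engine for any wrapped
 * remainder. Transfers shorter than the FIFO size are rejected so that
 * small writes stay on the interrupt-driven path.
 */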
int serial8250_tx_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma            *dma = p->dma;
        struct circ_buf                 *xmit = &p->port.state->xmit;
        struct dma_async_tx_descriptor  *desc;
        int ret;

        if (uart_tx_stopped(&p->port) || dma->tx_running ||
            uart_circ_empty(xmit))
                return 0;

        dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
        if (dma->tx_size < p->port.fifosize) {
                ret = -EINVAL;
                goto err;
        }

        desc = dmaengine_prep_slave_single(dma->txchan,
                                           dma->tx_addr + xmit->tail,
                                           dma->tx_size, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                ret = -EBUSY;
                goto err;
        }

        dma->tx_running = 1;
        desc->callback = __dma_tx_complete;
        desc->callback_param = p;

        dma->tx_cookie = dmaengine_submit(desc);

        dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
                                   UART_XMIT_SIZE, DMA_TO_DEVICE);

        dma_async_issue_pending(dma->txchan);
        if (dma->tx_err) {
                dma->tx_err = 0;
                if (p->ier & UART_IER_THRI) {
                        p->ier &= ~UART_IER_THRI;
                        serial_out(p, UART_IER, p->ier);
                }
        }
        return 0;
err:
        dma->tx_err = 1;
        return ret;
}

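/*
 * Handle an RX interrupt. Line status interrupts are left to 8250_core so
 * it can process errors and breaks. On a character timeout, any running
 * transfer is paused, completed with whatever has been received so far and
 * then terminated, letting 8250_core drain the rest of the FIFO. For other
 * sources a new RX DMA transfer is set up unless one is already running.
 */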
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
        struct uart_8250_dma            *dma = p->dma;
        struct dma_async_tx_descriptor  *desc;

        switch (iir & 0x3f) {
        case UART_IIR_RLSI:
                /* 8250_core handles errors and break interrupts */
                return -EIO;
        case UART_IIR_RX_TIMEOUT:
                /*
                 * If RCVR FIFO trigger level was not reached, complete the
                 * transfer and let 8250_core copy the remaining data.
                 */
                if (dma->rx_running) {
                        dmaengine_pause(dma->rxchan);
                        __dma_rx_complete(p);
                        dmaengine_terminate_all(dma->rxchan);
                }
                return -ETIMEDOUT;
        default:
                break;
        }

        if (dma->rx_running)
                return 0;

        desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
                                           dma->rx_size, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EBUSY;

        dma->rx_running = 1;
        desc->callback = __dma_rx_complete;
        desc->callback_param = p;

        dma->rx_cookie = dmaengine_submit(desc);

        dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
                                   dma->rx_size, DMA_FROM_DEVICE);

        dma_async_issue_pending(dma->rxchan);

        return 0;
}

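/*
 * Acquire RX and TX DMA channels and set up the default slave
 * configuration, a coherent RX bounce buffer (one page unless dma->rx_size
 * was preset) and a streaming DMA mapping of the port's transmit buffer.
 * The filter function and parameters in dma->fn, dma->rx_param and
 * dma->tx_param are used by dma_request_slave_channel_compat() as a
 * fallback when no firmware-described channel is available.
 */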
int serial8250_request_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma    *dma = p->dma;
        dma_cap_mask_t          mask;

        /* Default slave configuration parameters */
        dma->rxconf.direction           = DMA_DEV_TO_MEM;
        dma->rxconf.src_addr_width      = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->rxconf.src_addr            = p->port.mapbase + UART_RX;

        dma->txconf.direction           = DMA_MEM_TO_DEV;
        dma->txconf.dst_addr_width      = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->txconf.dst_addr            = p->port.mapbase + UART_TX;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Get a channel for RX */
        dma->rxchan = dma_request_slave_channel_compat(mask,
                                                       dma->fn, dma->rx_param,
                                                       p->port.dev, "rx");
        if (!dma->rxchan)
                return -ENODEV;

        dmaengine_slave_config(dma->rxchan, &dma->rxconf);

        /* Get a channel for TX */
        dma->txchan = dma_request_slave_channel_compat(mask,
                                                       dma->fn, dma->tx_param,
                                                       p->port.dev, "tx");
        if (!dma->txchan) {
                dma_release_channel(dma->rxchan);
                return -ENODEV;
        }

        dmaengine_slave_config(dma->txchan, &dma->txconf);

        /* RX buffer */
        if (!dma->rx_size)
                dma->rx_size = PAGE_SIZE;

        dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
                                        &dma->rx_addr, GFP_KERNEL);
        if (!dma->rx_buf)
                goto err;

        /* TX buffer */
        dma->tx_addr = dma_map_single(dma->txchan->device->dev,
                                        p->port.state->xmit.buf,
                                        UART_XMIT_SIZE,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
                dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
                                  dma->rx_buf, dma->rx_addr);
                goto err;
        }

        dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

        return 0;
err:
        dma_release_channel(dma->rxchan);
        dma_release_channel(dma->txchan);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

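/*
 * Undo serial8250_request_dma(): stop any transfers in flight, free the RX
 * bounce buffer, unmap the transmit buffer and release both channels.
 */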
void serial8250_release_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;

        if (!dma)
                return;

        /* Release RX resources */
        dmaengine_terminate_all(dma->rxchan);
        dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
                          dma->rx_addr);
        dma_release_channel(dma->rxchan);
        dma->rxchan = NULL;

        /* Release TX resources */
        dmaengine_terminate_all(dma->txchan);
        dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
                         UART_XMIT_SIZE, DMA_TO_DEVICE);
        dma_release_channel(dma->txchan);
        dma->txchan = NULL;
        dma->tx_running = 0;

        dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);
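
/*
 * Minimal usage sketch (not part of this file, names are hypothetical): an
 * 8250 driver would typically embed a struct uart_8250_dma in its private
 * data, point uart_8250_port::dma at it before the port is registered and,
 * for controllers without firmware-described channels, provide a filter
 * callback; the 8250 core then calls serial8250_request_dma() and
 * serial8250_release_dma() from its startup and shutdown paths.
 *
 *	static bool my_dma_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->chan_id == *(int *)param;
 *	}
 *
 *	struct my_8250_priv {
 *		struct uart_8250_dma	dma;
 *		int			rx_req, tx_req;
 *	};
 *
 *	static void my_setup_dma(struct my_8250_priv *priv,
 *				 struct uart_8250_port *up)
 *	{
 *		priv->dma.fn = my_dma_filter;
 *		priv->dma.rx_param = &priv->rx_req;
 *		priv->dma.tx_param = &priv->tx_req;
 *		priv->dma.rx_size = PAGE_SIZE;
 *		up->dma = &priv->dma;
 *	}
 */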