/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

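/*
 * The sDMA hardware describes a transfer as ES-sized elements grouped
 * into frames: EN elements per frame and FN frames per scatterlist
 * entry, so each entry moves ES * EN * FN bytes.  For example, 4096
 * bytes of 32-bit (S32) elements with EN = 16 gives
 * FN = 4096 / (4 * 16) = 64 frames.
 */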
struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

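/*
 * Start a transfer on the channel: reset the progress register (CPC on
 * OMAP15xx, CDAC elsewhere), set up channel linking for cyclic
 * transfers, clear any stale status, program the interrupt mask from
 * the descriptor and finally set CCR_EN.
 */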
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val &= ~(1 << 14);

		val |= c->dma_ch | 1 << 15;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		val |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	val |= OMAP_DMA_CCR_EN;
	mb();
	c->plat->dma_write(val, CCR, c->dma_ch);
}

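/*
 * Stop the channel by clearing CCR_EN.  On controllers with the i541
 * erratum, source/destination synchronised channels must be disabled
 * with the sDMA module forced out of idle (MIDLEMODE = no-idle) so the
 * FIFO can drain; the read/write-active bits are then polled until the
 * drain completes.
 */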
static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_i541 &&
	    val & OMAP_DMA_CCR_SEL_SRC_DST_SYNC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~(1 << 15); /* Clear the ENABLE_LNK bit */

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	c->plat->dma_write(d->ccr, CCR, c->dma_ch);
	if (dma_omap1())
		c->plat->dma_write(d->ccr >> 16, CCR2, c->dma_ch);

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	c->plat->dma_write(d->csdp, CSDP, c->dma_ch);

	omap_dma_start_sg(c, d, 0);
}

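/*
 * Per-channel completion callback, registered with omap_request_dma()
 * and invoked from the legacy omap DMA interrupt handler.  For
 * scatter-gather transfers this chains the next sg entry (or the next
 * queued descriptor); for cyclic transfers it just signals the period
 * callback and lets the hardware loop.
 */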
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

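/*
 * Bytes still to be transferred, counted from the current hardware
 * position 'addr': entries before the one containing 'addr' contribute
 * nothing, the entry containing it contributes its remainder, and all
 * later entries contribute in full.
 */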
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr)) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (c->plat->dma_read(CDAC, c->dma_ch))
			addr = c->plat->dma_read(CSAC, c->dma_ch);
		else
			addr = c->plat->dma_read(CSSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CDAC, c->dma_ch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
		addr = c->plat->dma_read(CDAC, c->dma_ch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (addr == 0)
			addr = c->plat->dma_read(CDSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

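/*
 * Kick a newly issued descriptor.  Slave sg transfers are handed to
 * the omap_dma_sched() tasklet; cyclic transfers (used by audio) are
 * started immediately to avoid the tasklet latency.
 */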
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio and in this case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);

			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and set up the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = 0;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= OMAP_DMA_AMODE_POST_INC << 14 |
			  OMAP_DMA_AMODE_CONSTANT << 12;
	else
		d->ccr |= OMAP_DMA_AMODE_CONSTANT << 14 |
			  OMAP_DMA_AMODE_POST_INC << 12;

	d->cicr = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
	d->csdp = es;

	if (dma_omap1()) {
		d->ccr |= 1 << 5; /* frame sync */
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= 1 << 10; /* disable 3.0/3.1 compatibility mode */
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= OMAP1_DMA_TOUT_IRQ;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= OMAP_DMA_PORT_EMIFF << 9 |
				   OMAP_DMA_PORT_TIPB << 2;
		else
			d->csdp |= OMAP_DMA_PORT_TIPB << 9 |
				   OMAP_DMA_PORT_EMIFF << 2;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;
		d->ccr |= 1 << 5; /* frame sync */

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= 1 << 24; /* source synch */

		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;
	}

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
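	/*
	 * Example: a 2-byte (S16) device with maxburst = 32 gives
	 * frame_bytes = 2 * 32 = 64, so a 1024-byte sg entry is split
	 * into FN = 16 frames of EN = 32 elements each.
	 */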
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

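/*
 * Cyclic transfers map the whole buffer onto a single sg entry: one
 * period per frame (EN = period_len / element size, FN = number of
 * periods), with a frame interrupt raised at each period boundary when
 * DMA_PREP_INTERRUPT is set.
 */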
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and set up the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = 0;
	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr = 3 << 8;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= OMAP_DMA_AMODE_POST_INC << 14 |
			  OMAP_DMA_AMODE_CONSTANT << 12;
	else
		d->ccr |= OMAP_DMA_AMODE_CONSTANT << 14 |
			  OMAP_DMA_AMODE_POST_INC << 12;

	d->cicr = OMAP_DMA_DROP_IRQ;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= OMAP_DMA_FRAME_IRQ;

	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= 1 << 10; /* disable 3.0/3.1 compatibility mode */
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= OMAP1_DMA_TOUT_IRQ;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= OMAP_DMA_PORT_EMIFF << 9 |
				   OMAP_DMA_PORT_MPUI << 2;
		else
			d->csdp |= OMAP_DMA_PORT_MPUI << 9 |
				   OMAP_DMA_PORT_EMIFF << 2;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (burst)
			d->ccr |= 1 << 18 | 1 << 5; /* packet */

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= 1 << 24; /* source synch */

		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

		/* src and dst burst mode 16 */
		d->csdp |= 3 << 14 | 3 << 7;
	}

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

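/*
 * The sDMA hardware handles 8, 16 and 32-bit elements only (see
 * es_bytes[]), so reject 64-bit slave bus widths up front.
 */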
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

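/*
 * Channel filter for dmaengine clients: matches the channel whose sDMA
 * request line equals *(unsigned *)param.  Illustrative client usage
 * (names are hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned req = MY_SDMA_REQUEST;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);
 */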
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");