2 * EDMA3 support for DaVinci
4 * Copyright (C) 2006-2009 Texas Instruments.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 #include <linux/err.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/platform_device.h>
27 #include <linux/slab.h>
28 #include <linux/edma.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/of_address.h>
31 #include <linux/of_device.h>
32 #include <linux/of_irq.h>
33 #include <linux/pm_runtime.h>
35 #include <linux/platform_data/edma.h>
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20	/* one PaRAM slot is 32 bytes */

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits*/
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

/* Byte offset of PaRAM slot @param_no (each slot is 32 bytes) */
#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */

/* CCCFG register decode helpers */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512
116 /*****************************************************************************/
121 /* how many dma resources of each type */
122 unsigned num_channels;
126 enum dma_event_q default_queue;
128 /* list of channels with no even trigger; terminated by "-1" */
131 struct edma_soc_info *info;
133 bool unused_chan_list_done;
134 /* The edma_inuse bit for each PaRAM slot is clear unless the
135 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
137 DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
139 /* The edma_unused bit for each channel is clear unless
140 * it is not being used on this platform. It uses a bit
141 * of SOC-specific initialization code.
143 DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
145 struct dma_interrupt_data {
146 void (*callback)(unsigned channel, unsigned short ch_status,
149 } intr_data[EDMA_MAX_DMACH];
151 /*****************************************************************************/
153 static inline unsigned int edma_read(struct edma *cc, int offset)
155 return (unsigned int)__raw_readl(cc->base + offset);
158 static inline void edma_write(struct edma *cc, int offset, int val)
160 __raw_writel(val, cc->base + offset);
162 static inline void edma_modify(struct edma *cc, int offset, unsigned and,
165 unsigned val = edma_read(cc, offset);
168 edma_write(cc, offset, val);
170 static inline void edma_and(struct edma *cc, int offset, unsigned and)
172 unsigned val = edma_read(cc, offset);
174 edma_write(cc, offset, val);
176 static inline void edma_or(struct edma *cc, int offset, unsigned or)
178 unsigned val = edma_read(cc, offset);
180 edma_write(cc, offset, val);
182 static inline unsigned int edma_read_array(struct edma *cc, int offset, int i)
184 return edma_read(cc, offset + (i << 2));
186 static inline void edma_write_array(struct edma *cc, int offset, int i,
189 edma_write(cc, offset + (i << 2), val);
191 static inline void edma_modify_array(struct edma *cc, int offset, int i,
192 unsigned and, unsigned or)
194 edma_modify(cc, offset + (i << 2), and, or);
196 static inline void edma_or_array(struct edma *cc, int offset, int i, unsigned or)
198 edma_or(cc, offset + (i << 2), or);
200 static inline void edma_or_array2(struct edma *cc, int offset, int i, int j,
203 edma_or(cc, offset + ((i*2 + j) << 2), or);
205 static inline void edma_write_array2(struct edma *cc, int offset, int i, int j,
208 edma_write(cc, offset + ((i*2 + j) << 2), val);
210 static inline unsigned int edma_shadow0_read(struct edma *cc, int offset)
212 return edma_read(cc, EDMA_SHADOW0 + offset);
214 static inline unsigned int edma_shadow0_read_array(struct edma *cc, int offset,
217 return edma_read(cc, EDMA_SHADOW0 + offset + (i << 2));
219 static inline void edma_shadow0_write(struct edma *cc, int offset, unsigned val)
221 edma_write(cc, EDMA_SHADOW0 + offset, val);
223 static inline void edma_shadow0_write_array(struct edma *cc, int offset, int i,
226 edma_write(cc, EDMA_SHADOW0 + offset + (i << 2), val);
228 static inline unsigned int edma_parm_read(struct edma *cc, int offset,
231 return edma_read(cc, EDMA_PARM + offset + (param_no << 5));
233 static inline void edma_parm_write(struct edma *cc, int offset, int param_no,
236 edma_write(cc, EDMA_PARM + offset + (param_no << 5), val);
238 static inline void edma_parm_modify(struct edma *cc, int offset, int param_no,
239 unsigned and, unsigned or)
241 edma_modify(cc, EDMA_PARM + offset + (param_no << 5), and, or);
243 static inline void edma_parm_and(struct edma *cc, int offset, int param_no,
246 edma_and(cc, EDMA_PARM + offset + (param_no << 5), and);
248 static inline void edma_parm_or(struct edma *cc, int offset, int param_no,
251 edma_or(cc, EDMA_PARM + offset + (param_no << 5), or);
254 static inline void set_bits(int offset, int len, unsigned long *p)
256 for (; len > 0; len--)
257 set_bit(offset + (len - 1), p);
260 static inline void clear_bits(int offset, int len, unsigned long *p)
262 for (; len > 0; len--)
263 clear_bit(offset + (len - 1), p);
266 /*****************************************************************************/
267 static int arch_num_cc;
269 /* dummy param set used to (re)initialize parameter RAM slots */
270 static const struct edmacc_param dummy_paramset = {
271 .link_bcntrld = 0xffff,
275 static const struct of_device_id edma_of_ids[] = {
276 { .compatible = "ti,edma3", },
280 /*****************************************************************************/
282 static void map_dmach_queue(struct edma *cc, unsigned ch_no,
283 enum dma_event_q queue_no)
285 int bit = (ch_no & 0x7) * 4;
287 /* default to low priority queue */
288 if (queue_no == EVENTQ_DEFAULT)
289 queue_no = cc->default_queue;
292 edma_modify_array(cc, EDMA_DMAQNUM, (ch_no >> 3),
293 ~(0x7 << bit), queue_no << bit);
296 static void assign_priority_to_queue(struct edma *cc, int queue_no,
299 int bit = queue_no * 4;
300 edma_modify(cc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
304 * map_dmach_param - Maps channel number to param entry number
306 * This maps the dma channel number to param entry numberter. In
307 * other words using the DMA channel mapping registers a param entry
308 * can be mapped to any channel
310 * Callers are responsible for ensuring the channel mapping logic is
311 * included in that particular EDMA variant (Eg : dm646x)
314 static void map_dmach_param(struct edma *cc)
317 for (i = 0; i < EDMA_MAX_DMACH; i++)
318 edma_write_array(cc, EDMA_DCHMAP , i , (i << 5));
321 static inline void setup_dma_interrupt(struct edma *cc, unsigned lch,
322 void (*callback)(unsigned channel, u16 ch_status, void *data),
325 lch = EDMA_CHAN_SLOT(lch);
328 edma_shadow0_write_array(cc, SH_IECR, lch >> 5,
331 cc->intr_data[lch].callback = callback;
332 cc->intr_data[lch].data = data;
335 edma_shadow0_write_array(cc, SH_ICR, lch >> 5, BIT(lch & 0x1f));
336 edma_shadow0_write_array(cc, SH_IESR, lch >> 5,
341 /******************************************************************************
343 * DMA interrupt handler
345 *****************************************************************************/
346 static irqreturn_t dma_irq_handler(int irq, void *data)
348 struct edma *cc = data;
358 dev_dbg(cc->dev, "dma_irq_handler\n");
360 sh_ipr = edma_shadow0_read_array(cc, SH_IPR, 0);
362 sh_ipr = edma_shadow0_read_array(cc, SH_IPR, 1);
365 sh_ier = edma_shadow0_read_array(cc, SH_IER, 1);
368 sh_ier = edma_shadow0_read_array(cc, SH_IER, 0);
376 dev_dbg(cc->dev, "IPR%d %08x\n", bank, sh_ipr);
378 slot = __ffs(sh_ipr);
379 sh_ipr &= ~(BIT(slot));
381 if (sh_ier & BIT(slot)) {
382 channel = (bank << 5) | slot;
383 /* Clear the corresponding IPR bits */
384 edma_shadow0_write_array(cc, SH_ICR, bank, BIT(slot));
385 if (cc->intr_data[channel].callback)
386 cc->intr_data[channel].callback(
387 EDMA_CTLR_CHAN(ctlr, channel),
389 cc->intr_data[channel].data);
393 edma_shadow0_write(cc, SH_IEVAL, 1);
397 /******************************************************************************
399 * DMA error interrupt handler
401 *****************************************************************************/
402 static irqreturn_t dma_ccerr_handler(int irq, void *data)
404 struct edma *cc = data;
407 unsigned int cnt = 0;
413 dev_dbg(cc->dev, "dma_ccerr_handler\n");
415 if ((edma_read_array(cc, EDMA_EMR, 0) == 0) &&
416 (edma_read_array(cc, EDMA_EMR, 1) == 0) &&
417 (edma_read(cc, EDMA_QEMR) == 0) &&
418 (edma_read(cc, EDMA_CCERR) == 0))
423 if (edma_read_array(cc, EDMA_EMR, 0))
425 else if (edma_read_array(cc, EDMA_EMR, 1))
428 dev_dbg(cc->dev, "EMR%d %08x\n", j,
429 edma_read_array(cc, EDMA_EMR, j));
430 for (i = 0; i < 32; i++) {
431 int k = (j << 5) + i;
432 if (edma_read_array(cc, EDMA_EMR, j) &
434 /* Clear the corresponding EMR bits */
435 edma_write_array(cc, EDMA_EMCR, j,
438 edma_shadow0_write_array(cc, SH_SECR,
440 if (cc->intr_data[k].callback) {
441 cc->intr_data[k].callback(
442 EDMA_CTLR_CHAN(ctlr, k),
444 cc->intr_data[k].data);
448 } else if (edma_read(cc, EDMA_QEMR)) {
449 dev_dbg(cc->dev, "QEMR %02x\n",
450 edma_read(cc, EDMA_QEMR));
451 for (i = 0; i < 8; i++) {
452 if (edma_read(cc, EDMA_QEMR) & BIT(i)) {
453 /* Clear the corresponding IPR bits */
454 edma_write(cc, EDMA_QEMCR, BIT(i));
455 edma_shadow0_write(cc, SH_QSECR,
458 /* NOTE: not reported!! */
461 } else if (edma_read(cc, EDMA_CCERR)) {
462 dev_dbg(cc->dev, "CCERR %08x\n",
463 edma_read(cc, EDMA_CCERR));
464 /* FIXME: CCERR.BIT(16) ignored! much better
465 * to just write CCERRCLR with CCERR value...
467 for (i = 0; i < 8; i++) {
468 if (edma_read(cc, EDMA_CCERR) & BIT(i)) {
469 /* Clear the corresponding IPR bits */
470 edma_write(cc, EDMA_CCERRCLR, BIT(i));
472 /* NOTE: not reported!! */
476 if ((edma_read_array(cc, EDMA_EMR, 0) == 0) &&
477 (edma_read_array(cc, EDMA_EMR, 1) == 0) &&
478 (edma_read(cc, EDMA_QEMR) == 0) &&
479 (edma_read(cc, EDMA_CCERR) == 0))
485 edma_write(cc, EDMA_EEVAL, 1);
489 static int prepare_unused_channel_list(struct device *dev, void *data)
491 struct platform_device *pdev = to_platform_device(dev);
492 struct edma *cc = data;
494 struct of_phandle_args dma_spec;
497 struct platform_device *dma_pdev;
499 count = of_property_count_strings(dev->of_node, "dma-names");
502 for (i = 0; i < count; i++) {
504 if (of_parse_phandle_with_args(dev->of_node, "dmas",
509 if (!of_match_node(edma_of_ids, dma_spec.np)) {
510 of_node_put(dma_spec.np);
514 dma_pdev = of_find_device_by_node(dma_spec.np);
515 if (&dma_pdev->dev != cc->dev)
518 clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
520 of_node_put(dma_spec.np);
525 /* For non-OF case */
526 for (i = 0; i < pdev->num_resources; i++) {
527 struct resource *res = &pdev->resource[i];
529 if ((res->flags & IORESOURCE_DMA) && (int)res->start >= 0) {
530 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
538 /*-----------------------------------------------------------------------*/
540 /* Resource alloc/free: dma channels, parameter RAM slots */
543 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
544 * @channel: specific channel to allocate; negative for "any unmapped channel"
545 * @callback: optional; to be issued on DMA completion or errors
546 * @data: passed to callback
547 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
548 * Controller (TC) executes requests using this channel. Use
549 * EVENTQ_DEFAULT unless you really need a high priority queue.
551 * This allocates a DMA channel and its associated parameter RAM slot.
552 * The parameter RAM is initialized to hold a dummy transfer.
554 * Normal use is to pass a specific channel number as @channel, to make
555 * use of hardware events mapped to that channel. When the channel will
556 * be used only for software triggering or event chaining, channels not
557 * mapped to hardware events (or mapped to unused events) are preferable.
559 * DMA transfers start from a channel using edma_start(), or by
560 * chaining. When the transfer described in that channel's parameter RAM
561 * slot completes, that slot's data may be reloaded through a link.
563 * DMA errors are only reported to the @callback associated with the
564 * channel driving that transfer, but transfer completion callbacks can
565 * be sent to another channel under control of the TCC field in
566 * the option word of the transfer's parameter RAM set. Drivers must not
567 * use DMA transfer completion callbacks for channels they did not allocate.
568 * (The same applies to TCC codes used in transfer chaining.)
570 * Returns the number of the channel, else negative errno.
572 int edma_alloc_channel(struct edma *cc, int channel,
573 void (*callback)(unsigned channel, u16 ch_status, void *data),
575 enum dma_event_q eventq_no)
580 if (!cc->unused_chan_list_done) {
582 * Scan all the platform devices to find out the EDMA channels
583 * used and clear them in the unused list, making the rest
584 * available for ARM usage.
586 ret = bus_for_each_dev(&platform_bus_type, NULL, cc,
587 prepare_unused_channel_list);
591 cc->unused_chan_list_done = true;
595 if (cc->id != EDMA_CTLR(channel)) {
596 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n",
597 __func__, cc->id, EDMA_CTLR(channel));
600 channel = EDMA_CHAN_SLOT(channel);
606 channel = find_next_bit(cc->edma_unused,
607 cc->num_channels, channel);
608 if (channel == cc->num_channels)
610 if (!test_and_set_bit(channel, cc->edma_inuse)) {
618 } else if (channel >= cc->num_channels) {
620 } else if (test_and_set_bit(channel, cc->edma_inuse)) {
624 /* ensure access through shadow region 0 */
625 edma_or_array2(cc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
627 /* ensure no events are pending */
628 edma_stop(cc, EDMA_CTLR_CHAN(cc->id, channel));
629 memcpy_toio(cc->base + PARM_OFFSET(channel), &dummy_paramset,
633 setup_dma_interrupt(cc, EDMA_CTLR_CHAN(cc->id, channel),
636 map_dmach_queue(cc, channel, eventq_no);
638 return EDMA_CTLR_CHAN(cc->id, channel);
640 EXPORT_SYMBOL(edma_alloc_channel);
644 * edma_free_channel - deallocate DMA channel
645 * @channel: dma channel returned from edma_alloc_channel()
647 * This deallocates the DMA channel and associated parameter RAM slot
648 * allocated by edma_alloc_channel().
650 * Callers are responsible for ensuring the channel is inactive, and
651 * will not be reactivated by linking, chaining, or software calls to
654 void edma_free_channel(struct edma *cc, unsigned channel)
657 if (cc->id != EDMA_CTLR(channel)) {
658 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
659 cc->id, EDMA_CTLR(channel));
662 channel = EDMA_CHAN_SLOT(channel);
664 if (channel >= cc->num_channels)
667 setup_dma_interrupt(cc, channel, NULL, NULL);
668 /* REVISIT should probably take out of shadow region 0 */
670 memcpy_toio(cc->base + PARM_OFFSET(channel), &dummy_paramset,
672 clear_bit(channel, cc->edma_inuse);
674 EXPORT_SYMBOL(edma_free_channel);
677 * edma_alloc_slot - allocate DMA parameter RAM
678 * @slot: specific slot to allocate; negative for "any unused slot"
680 * This allocates a parameter RAM slot, initializing it to hold a
681 * dummy transfer. Slots allocated using this routine have not been
682 * mapped to a hardware DMA channel, and will normally be used by
683 * linking to them from a slot associated with a DMA channel.
685 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
686 * slots may be allocated on behalf of DSP firmware.
688 * Returns the number of the slot, else negative errno.
690 int edma_alloc_slot(struct edma *cc, int slot)
693 slot = EDMA_CHAN_SLOT(slot);
695 slot = cc->num_channels;
697 slot = find_next_zero_bit(cc->edma_inuse, cc->num_slots,
699 if (slot == cc->num_slots)
701 if (!test_and_set_bit(slot, cc->edma_inuse))
704 } else if (slot < cc->num_channels || slot >= cc->num_slots) {
706 } else if (test_and_set_bit(slot, cc->edma_inuse)) {
710 memcpy_toio(cc->base + PARM_OFFSET(slot), &dummy_paramset, PARM_SIZE);
714 EXPORT_SYMBOL(edma_alloc_slot);
717 * edma_free_slot - deallocate DMA parameter RAM
718 * @slot: parameter RAM slot returned from edma_alloc_slot()
720 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
721 * Callers are responsible for ensuring the slot is inactive, and will
724 void edma_free_slot(struct edma *cc, unsigned slot)
727 slot = EDMA_CHAN_SLOT(slot);
728 if (slot < cc->num_channels || slot >= cc->num_slots)
731 memcpy_toio(cc->base + PARM_OFFSET(slot), &dummy_paramset, PARM_SIZE);
732 clear_bit(slot, cc->edma_inuse);
734 EXPORT_SYMBOL(edma_free_slot);
736 /*-----------------------------------------------------------------------*/
738 /* Parameter RAM operations (i) -- read/write partial slots */
741 * edma_get_position - returns the current transfer point
742 * @slot: parameter RAM slot being examined
743 * @dst: true selects the dest position, false the source
745 * Returns the position of the current active slot
747 dma_addr_t edma_get_position(struct edma *cc, unsigned slot, bool dst)
751 slot = EDMA_CHAN_SLOT(slot);
752 offs = PARM_OFFSET(slot);
753 offs += dst ? PARM_DST : PARM_SRC;
755 return edma_read(cc, offs);
759 * edma_link - link one parameter RAM slot to another
760 * @from: parameter RAM slot originating the link
761 * @to: parameter RAM slot which is the link target
763 * The originating slot should not be part of any active DMA transfer.
765 void edma_link(struct edma *cc, unsigned from, unsigned to)
767 from = EDMA_CHAN_SLOT(from);
768 to = EDMA_CHAN_SLOT(to);
769 if (from >= cc->num_slots || to >= cc->num_slots)
772 edma_parm_modify(cc, PARM_LINK_BCNTRLD, from, 0xffff0000,
775 EXPORT_SYMBOL(edma_link);
777 /*-----------------------------------------------------------------------*/
779 /* Parameter RAM operations (ii) -- read/write whole parameter sets */
782 * edma_write_slot - write parameter RAM data for slot
783 * @slot: number of parameter RAM slot being modified
784 * @param: data to be written into parameter RAM slot
786 * Use this to assign all parameters of a transfer at once. This
787 * allows more efficient setup of transfers than issuing multiple
788 * calls to set up those parameters in small pieces, and provides
789 * complete control over all transfer options.
791 void edma_write_slot(struct edma *cc, unsigned slot,
792 const struct edmacc_param *param)
794 slot = EDMA_CHAN_SLOT(slot);
795 if (slot >= cc->num_slots)
797 memcpy_toio(cc->base + PARM_OFFSET(slot), param, PARM_SIZE);
799 EXPORT_SYMBOL(edma_write_slot);
802 * edma_read_slot - read parameter RAM data from slot
803 * @slot: number of parameter RAM slot being copied
804 * @param: where to store copy of parameter RAM data
806 * Use this to read data from a parameter RAM slot, perhaps to
807 * save them as a template for later reuse.
809 void edma_read_slot(struct edma *cc, unsigned slot, struct edmacc_param *param)
811 slot = EDMA_CHAN_SLOT(slot);
812 if (slot >= cc->num_slots)
814 memcpy_fromio(param, cc->base + PARM_OFFSET(slot), PARM_SIZE);
816 EXPORT_SYMBOL(edma_read_slot);
818 /*-----------------------------------------------------------------------*/
820 /* Various EDMA channel control operations */
823 * edma_pause - pause dma on a channel
824 * @channel: on which edma_start() has been called
826 * This temporarily disables EDMA hardware events on the specified channel,
827 * preventing them from triggering new transfers on its behalf
829 void edma_pause(struct edma *cc, unsigned channel)
831 if (cc->id != EDMA_CTLR(channel)) {
832 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
833 cc->id, EDMA_CTLR(channel));
836 channel = EDMA_CHAN_SLOT(channel);
838 if (channel < cc->num_channels) {
839 unsigned int mask = BIT(channel & 0x1f);
841 edma_shadow0_write_array(cc, SH_EECR, channel >> 5, mask);
844 EXPORT_SYMBOL(edma_pause);
847 * edma_resume - resumes dma on a paused channel
848 * @channel: on which edma_pause() has been called
850 * This re-enables EDMA hardware events on the specified channel.
852 void edma_resume(struct edma *cc, unsigned channel)
854 if (cc->id != EDMA_CTLR(channel)) {
855 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
856 cc->id, EDMA_CTLR(channel));
859 channel = EDMA_CHAN_SLOT(channel);
861 if (channel < cc->num_channels) {
862 unsigned int mask = BIT(channel & 0x1f);
864 edma_shadow0_write_array(cc, SH_EESR, channel >> 5, mask);
867 EXPORT_SYMBOL(edma_resume);
869 int edma_trigger_channel(struct edma *cc, unsigned channel)
873 if (cc->id != EDMA_CTLR(channel)) {
874 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
875 cc->id, EDMA_CTLR(channel));
878 channel = EDMA_CHAN_SLOT(channel);
879 mask = BIT(channel & 0x1f);
881 edma_shadow0_write_array(cc, SH_ESR, (channel >> 5), mask);
883 pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
884 edma_shadow0_read_array(cc, SH_ESR, (channel >> 5)));
887 EXPORT_SYMBOL(edma_trigger_channel);
890 * edma_start - start dma on a channel
891 * @channel: channel being activated
893 * Channels with event associations will be triggered by their hardware
894 * events, and channels without such associations will be triggered by
895 * software. (At this writing there is no interface for using software
896 * triggers except with channels that don't support hardware triggers.)
898 * Returns zero on success, else negative errno.
900 int edma_start(struct edma *cc, unsigned channel)
902 if (cc->id != EDMA_CTLR(channel)) {
903 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
904 cc->id, EDMA_CTLR(channel));
907 channel = EDMA_CHAN_SLOT(channel);
909 if (channel < cc->num_channels) {
910 int j = channel >> 5;
911 unsigned int mask = BIT(channel & 0x1f);
913 /* EDMA channels without event association */
914 if (test_bit(channel, cc->edma_unused)) {
915 pr_debug("EDMA: ESR%d %08x\n", j,
916 edma_shadow0_read_array(cc, SH_ESR, j));
917 edma_shadow0_write_array(cc, SH_ESR, j, mask);
921 /* EDMA channel with event association */
922 pr_debug("EDMA: ER%d %08x\n", j,
923 edma_shadow0_read_array(cc, SH_ER, j));
924 /* Clear any pending event or error */
925 edma_write_array(cc, EDMA_ECR, j, mask);
926 edma_write_array(cc, EDMA_EMCR, j, mask);
928 edma_shadow0_write_array(cc, SH_SECR, j, mask);
929 edma_shadow0_write_array(cc, SH_EESR, j, mask);
930 pr_debug("EDMA: EER%d %08x\n", j,
931 edma_shadow0_read_array(cc, SH_EER, j));
937 EXPORT_SYMBOL(edma_start);
940 * edma_stop - stops dma on the channel passed
941 * @channel: channel being deactivated
943 * When @lch is a channel, any active transfer is paused and
944 * all pending hardware events are cleared. The current transfer
945 * may not be resumed, and the channel's Parameter RAM should be
946 * reinitialized before being reused.
948 void edma_stop(struct edma *cc, unsigned channel)
950 if (cc->id != EDMA_CTLR(channel)) {
951 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
952 cc->id, EDMA_CTLR(channel));
955 channel = EDMA_CHAN_SLOT(channel);
957 if (channel < cc->num_channels) {
958 int j = channel >> 5;
959 unsigned int mask = BIT(channel & 0x1f);
961 edma_shadow0_write_array(cc, SH_EECR, j, mask);
962 edma_shadow0_write_array(cc, SH_ECR, j, mask);
963 edma_shadow0_write_array(cc, SH_SECR, j, mask);
964 edma_write_array(cc, EDMA_EMCR, j, mask);
966 /* clear possibly pending completion interrupt */
967 edma_shadow0_write_array(cc, SH_ICR, j, mask);
969 pr_debug("EDMA: EER%d %08x\n", j,
970 edma_shadow0_read_array(cc, SH_EER, j));
972 /* REVISIT: consider guarding against inappropriate event
973 * chaining by overwriting with dummy_paramset.
977 EXPORT_SYMBOL(edma_stop);
979 /******************************************************************************
981 * It cleans ParamEntry qand bring back EDMA to initial state if media has
982 * been removed before EDMA has finished.It is usedful for removable media.
986 * Return: zero on success, or corresponding error no on failure
988 * FIXME this should not be needed ... edma_stop() should suffice.
990 *****************************************************************************/
992 void edma_clean_channel(struct edma *cc, unsigned channel)
994 if (cc->id != EDMA_CTLR(channel)) {
995 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
996 cc->id, EDMA_CTLR(channel));
999 channel = EDMA_CHAN_SLOT(channel);
1001 if (channel < cc->num_channels) {
1002 int j = (channel >> 5);
1003 unsigned int mask = BIT(channel & 0x1f);
1005 pr_debug("EDMA: EMR%d %08x\n", j,
1006 edma_read_array(cc, EDMA_EMR, j));
1007 edma_shadow0_write_array(cc, SH_ECR, j, mask);
1008 /* Clear the corresponding EMR bits */
1009 edma_write_array(cc, EDMA_EMCR, j, mask);
1011 edma_shadow0_write_array(cc, SH_SECR, j, mask);
1012 edma_write(cc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
1015 EXPORT_SYMBOL(edma_clean_channel);
1018 * edma_assign_channel_eventq - move given channel to desired eventq
1020 * channel - channel number
1021 * eventq_no - queue to move the channel
1023 * Can be used to move a channel to a selected event queue.
1025 void edma_assign_channel_eventq(struct edma *cc, unsigned channel,
1026 enum dma_event_q eventq_no)
1028 if (cc->id != EDMA_CTLR(channel)) {
1029 dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
1030 cc->id, EDMA_CTLR(channel));
1033 channel = EDMA_CHAN_SLOT(channel);
1035 if (channel >= cc->num_channels)
1038 /* default to low priority queue */
1039 if (eventq_no == EVENTQ_DEFAULT)
1040 eventq_no = cc->default_queue;
1041 if (eventq_no >= cc->num_tc)
1044 map_dmach_queue(cc, channel, eventq_no);
1046 EXPORT_SYMBOL(edma_assign_channel_eventq);
1048 struct edma *edma_get_data(struct device *edma_dev)
1050 return dev_get_drvdata(edma_dev);
1054 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
1055 struct edma *edma_cc, int cc_id)
1059 s8 (*queue_priority_map)[2];
1061 /* Decode the eDMA3 configuration from CCCFG register */
1062 cccfg = edma_read(edma_cc, EDMA_CCCFG);
1064 value = GET_NUM_REGN(cccfg);
1065 edma_cc->num_region = BIT(value);
1067 value = GET_NUM_DMACH(cccfg);
1068 edma_cc->num_channels = BIT(value + 1);
1070 value = GET_NUM_PAENTRY(cccfg);
1071 edma_cc->num_slots = BIT(value + 4);
1073 value = GET_NUM_EVQUE(cccfg);
1074 edma_cc->num_tc = value + 1;
1076 dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
1078 dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
1079 dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
1080 dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
1081 dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);
1083 /* Nothing need to be done if queue priority is provided */
1084 if (pdata->queue_priority_mapping)
1088 * Configure TC/queue priority as follows:
1093 * The meaning of priority numbers: 0 highest priority, 7 lowest
1094 * priority. So Q0 is the highest priority queue and the last queue has
1095 * the lowest priority.
1097 queue_priority_map = devm_kzalloc(dev,
1098 (edma_cc->num_tc + 1) * sizeof(s8),
1100 if (!queue_priority_map)
1103 for (i = 0; i < edma_cc->num_tc; i++) {
1104 queue_priority_map[i][0] = i;
1105 queue_priority_map[i][1] = i;
1107 queue_priority_map[i][0] = -1;
1108 queue_priority_map[i][1] = -1;
1110 pdata->queue_priority_mapping = queue_priority_map;
1111 /* Default queue has the lowest priority */
1112 pdata->default_queue = i - 1;
1117 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
1119 static int edma_xbar_event_map(struct device *dev, struct device_node *node,
1120 struct edma_soc_info *pdata, size_t sz)
1122 const char pname[] = "ti,edma-xbar-event-map";
1123 struct resource res;
1125 s16 (*xbar_chans)[2];
1126 size_t nelm = sz / sizeof(s16);
1127 u32 shift, offset, mux;
1130 xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
1134 ret = of_address_to_resource(node, 1, &res);
1138 xbar = devm_ioremap(dev, res.start, resource_size(&res));
1142 ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
1146 /* Invalidate last entry for the other user of this mess */
1148 xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
1150 for (i = 0; i < nelm; i++) {
1151 shift = (xbar_chans[i][1] & 0x03) << 3;
1152 offset = xbar_chans[i][1] & 0xfffffffc;
1153 mux = readl(xbar + offset);
1154 mux &= ~(0xff << shift);
1155 mux |= xbar_chans[i][0] << shift;
1156 writel(mux, (xbar + offset));
1159 pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
1163 static int edma_of_parse_dt(struct device *dev,
1164 struct device_node *node,
1165 struct edma_soc_info *pdata)
1168 struct property *prop;
1170 struct edma_rsv_info *rsv_info;
1172 rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
1175 pdata->rsv = rsv_info;
1177 prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
1179 ret = edma_xbar_event_map(dev, node, pdata, sz);
1184 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1185 struct device_node *node)
1187 struct edma_soc_info *info;
1190 info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
1192 return ERR_PTR(-ENOMEM);
1194 ret = edma_of_parse_dt(dev, node, info);
1196 return ERR_PTR(ret);
1201 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1202 struct device_node *node)
1204 return ERR_PTR(-ENOSYS);
/*
 * edma_probe - initialize one EDMA3 channel controller (CC) instance.
 *
 * Gathers configuration (platform_data or device tree), powers the block
 * via runtime PM, maps the CC registers, reads the IP's capabilities,
 * resets all PaRAM slots, applies channel/slot reservations and crossbar
 * mappings, wires the completion (ccint) and error (ccerrint) interrupts,
 * programs event-queue priorities and region access, and finally registers
 * the "edma-dma-engine" dmaengine child device.
 *
 * NOTE(review): this listing elides many lines (error-path returns, braces,
 * several local declarations); comments below describe only what is
 * visible.
 */
1208 static int edma_probe(struct platform_device *pdev)
1210 struct edma_soc_info *info = pdev->dev.platform_data;
1211 s8 (*queue_priority_mapping)[2];
1213 const s16 (*rsv_chans)[2];
1214 const s16 (*rsv_slots)[2];
1215 const s16 (*xbar_chans)[2];
1218 struct resource *mem;
1219 struct device_node *node = pdev->dev.of_node;
1220 struct device *dev = &pdev->dev;
1221 int dev_id = pdev->id;
/* Child device that exposes this CC through the dmaengine framework. */
1224 struct platform_device_info edma_dev_info = {
1225 .name = "edma-dma-engine",
1226 .dma_mask = DMA_BIT_MASK(32),
1227 .parent = &pdev->dev,
1230 /* When booting with DT the pdev->id is -1 */
1232 dev_id = arch_num_cc;
/* Reject instance ids beyond what the driver's static tables support. */
1234 if (dev_id >= EDMA_MAX_CC) {
1236 "eDMA3 with device id 0 and 1 is supported (id: %d)\n",
1242 /* Check if this is a second instance registered */
1244 dev_err(dev, "only one EDMA instance is supported via DT\n");
/* DT boot: build the soc_info from the device node instead of pdata. */
1248 info = edma_setup_info_from_dt(dev, node);
1250 dev_err(dev, "failed to get DT data\n");
1251 return PTR_ERR(info);
/* Power up the controller before touching any of its registers. */
1258 pm_runtime_enable(dev);
1259 ret = pm_runtime_get_sync(dev);
1261 dev_err(dev, "pm_runtime_get_sync() failed\n");
/* Prefer the named "edma3_cc" register window, fall back to index 0. */
1265 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
1267 dev_dbg(dev, "mem resource not found, using index 0\n");
1268 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1270 dev_err(dev, "no mem resource?\n");
/* Per-instance state, devm-managed and published via drvdata. */
1275 cc = devm_kzalloc(dev, sizeof(struct edma), GFP_KERNEL);
1281 dev_set_drvdata(dev, cc);
1283 cc->base = devm_ioremap_resource(dev, mem);
1284 if (IS_ERR(cc->base))
1285 return PTR_ERR(cc->base);
1287 /* Get eDMA3 configuration from IP */
1288 ret = edma_setup_from_hw(dev, info, cc, dev_id);
1292 cc->default_queue = info->default_queue;
/* Reset every PaRAM slot to the dummy template (MMIO, hence memcpy_toio). */
1294 for (i = 0; i < cc->num_slots; i++)
1295 memcpy_toio(cc->base + PARM_OFFSET(i), &dummy_paramset,
1298 /* Mark all channels as unused */
1299 memset(cc->edma_unused, 0xff, sizeof(cc->edma_unused));
1303 /* Clear the reserved channels in unused list */
1304 rsv_chans = info->rsv->rsv_chans;
/* Entries are (start, count) pairs; the table is terminated by -1. */
1306 for (i = 0; rsv_chans[i][0] != -1; i++) {
1307 off = rsv_chans[i][0];
1308 ln = rsv_chans[i][1];
1309 clear_bits(off, ln, cc->edma_unused);
1313 /* Set the reserved slots in inuse list */
1314 rsv_slots = info->rsv->rsv_slots;
1316 for (i = 0; rsv_slots[i][0] != -1; i++) {
1317 off = rsv_slots[i][0];
1318 ln = rsv_slots[i][1];
1319 set_bits(off, ln, cc->edma_inuse);
1324 /* Clear the xbar mapped channels in unused list */
1325 xbar_chans = info->xbar_chans;
/* Entries are (event, channel); terminated by channel == -1. */
1327 for (i = 0; xbar_chans[i][1] != -1; i++) {
1328 off = xbar_chans[i][1];
1329 clear_bits(off, 1, cc->edma_unused);
/* Completion interrupt: named resource first, then DT interrupt index 0. */
1333 irq = platform_get_irq_byname(pdev, "edma3_ccint");
1334 if (irq < 0 && node)
1335 irq = irq_of_parse_and_map(node, 0);
1338 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
1340 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
1343 dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
/* Error interrupt comes from DT index 2; index 1 is presumably another
 * CC interrupt (memory-protection?) -- TODO confirm against the binding. */
1348 irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
1349 if (irq < 0 && node)
1350 irq = irq_of_parse_and_map(node, 2);
1353 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
1355 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
1358 dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
/* Route every DMA channel to the default event queue to start with. */
1363 for (i = 0; i < cc->num_channels; i++)
1364 map_dmach_queue(cc, i, info->default_queue);
1366 queue_priority_mapping = info->queue_priority_mapping;
1368 /* Event queue priority mapping */
1369 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1370 assign_priority_to_queue(cc, queue_priority_mapping[i][0],
1371 queue_priority_mapping[i][1]);
1373 /* Map the channel to param entry if channel mapping logic exist */
1374 if (edma_read(cc, EDMA_CCCFG) & CHMAP_EXIST)
1375 map_dmach_param(cc);
/* Start with no shadow region granted access to any channel (DRAE) or
 * QDMA channel (QRAE); access is re-granted as channels are allocated. */
1377 for (i = 0; i < cc->num_region; i++) {
1378 edma_write_array2(cc, EDMA_DRAE, i, 0, 0x0);
1379 edma_write_array2(cc, EDMA_DRAE, i, 1, 0x0);
1380 edma_write_array(cc, EDMA_QRAE, i, 0x0);
/* Expose this CC to clients through the dmaengine wrapper device. */
1385 edma_dev_info.id = dev_id;
1387 platform_device_register_full(&edma_dev_info);
1392 #ifdef CONFIG_PM_SLEEP
/*
 * edma_pm_resume - restore EDMA CC register state lost across system sleep.
 *
 * Re-applies the event-queue priority map, re-enables channel-to-PaRAM
 * mapping when the controller supports it (CHMAP_EXIST), and for every
 * channel marked in-use re-grants shadow region 0 access (DRAE) and
 * re-installs the saved interrupt callback.
 *
 * NOTE(review): braces, the loop-variable declaration, and the final
 * return are elided from this listing.
 */
1393 static int edma_pm_resume(struct device *dev)
1395 struct edma *cc = dev_get_drvdata(dev);
1397 s8 (*queue_priority_mapping)[2];
1399 queue_priority_mapping = cc->info->queue_priority_mapping;
1401 /* Event queue priority mapping */
/* Table of (queue, priority) pairs, terminated by queue == -1. */
1402 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1403 assign_priority_to_queue(cc, queue_priority_mapping[i][0],
1404 queue_priority_mapping[i][1]);
1406 /* Map the channel to param entry if channel mapping logic */
1407 if (edma_read(cc, EDMA_CCCFG) & CHMAP_EXIST)
1408 map_dmach_param(cc);
/* Only channels that were allocated before suspend need restoring. */
1410 for (i = 0; i < cc->num_channels; i++) {
1411 if (test_bit(i, cc->edma_inuse)) {
1412 /* ensure access through shadow region 0 */
1413 edma_or_array2(cc, EDMA_DRAE, 0, i >> 5, BIT(i & 0x1f));
/* Re-arm the per-channel completion callback saved in intr_data. */
1415 setup_dma_interrupt(cc, EDMA_CTLR_CHAN(cc->id, i),
1416 cc->intr_data[i].callback,
1417 cc->intr_data[i].data);
/* Re-program the hardware late in resume only; no suspend hook is needed
 * since all restored state is already cached in struct edma. */
1425 static const struct dev_pm_ops edma_pm_ops = {
1426 SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
/*
 * Platform driver glue.  .of_match_table binds the DT compatible strings
 * from edma_of_ids.  NOTE(review): the .driver.name/.pm fields and the
 * closing brace are elided from this listing.
 */
1429 static struct platform_driver edma_driver = {
1433 .of_match_table = edma_of_ids,
1435 .probe = edma_probe,
/*
 * edma_init - register the driver at arch_initcall time.
 *
 * platform_driver_probe() binds only devices already registered at this
 * point and opts out of deferred probing; the EDMA platform device is
 * created earlier by board or DT core code.
 */
1438 static int __init edma_init(void)
1440 return platform_driver_probe(&edma_driver, edma_probe);
/* Early initcall: DMA must be up before dependent peripheral drivers. */
1442 arch_initcall(edma_init);