/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <brcmu_utils.h>

/*
 * Each descriptor ring must be 8kB aligned, and fit within a
 * contiguous 8kB physical address.
 */
#define D64RINGALIGN_BITS 13
#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
#define D64RINGALIGN (1 << D64RINGALIGN_BITS)

#define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
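/*
 * Worked example (illustrative): the ring alignment and maximum ring size
 * are both 1 << 13 = 8192 bytes, so with a 16-byte struct dma64desc (four
 * u32 fields, see below) D64MAXDD evaluates to 8192 / 16 = 512 descriptors
 * per ring.
 */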
/* transmit channel control */
#define D64_XC_XE 0x00000001 /* transmit enable */
#define D64_XC_SE 0x00000002 /* transmit suspend request */
#define D64_XC_LE 0x00000004 /* loopback enable */
#define D64_XC_FL 0x00000010 /* flush request */
#define D64_XC_PD 0x00000800 /* parity check disable */
#define D64_XC_AE 0x00030000 /* address extension bits */
#define D64_XC_AE_SHIFT 16

/* transmit descriptor table pointer */
#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
#define D64_XS0_XS_SHIFT 28
#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */

#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
#define D64_XS1_XE_SHIFT 28
#define D64_XS1_XE_NOERR 0x00000000 /* no error */
#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
#define D64_XS1_XE_COREE 0x50000000 /* core error */
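/*
 * Illustrative decode (not part of the register layout above): the transmit
 * state and error code are obtained by masking the status registers, e.g.
 *
 *	state = R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK;
 *	error = R_REG(&di->d64txregs->status1) & D64_XS1_XE_MASK;
 *
 * and comparing against the D64_XS0_XS_* / D64_XS1_XE_* values, as
 * dma_txreset() below does for the state field.
 */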
/* receive channel control */
#define D64_RC_RE 0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK 0x000000fe
#define D64_RC_RO_SHIFT 1
/* direct fifo receive (pio) mode */
#define D64_RC_FM 0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH 0x00000200
/* overflow continue */
#define D64_RC_OC 0x00000400
/* parity check disable */
#define D64_RC_PD 0x00000800
/* address extension bits */
#define D64_RC_AE 0x00030000
#define D64_RC_AE_SHIFT 16

/* flags for dma controller */
#define DMA_CTRL_PEN (1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC (1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI (1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED (1 << 3)

/* receive descriptor table pointer */
#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
#define D64_RS0_RS_SHIFT 28
#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */

#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
#define D64_RS1_RE_SHIFT 28
#define D64_RS1_RE_NOERR 0x00000000 /* no error */
#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
#define D64_RS1_RE_COREE 0x50000000 /* core error */

#define D64_FA_OFF_MASK 0xffff /* offset */
#define D64_FA_SEL_MASK 0xf0000 /* select */
#define D64_FA_SEL_SHIFT 16
#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
#define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
#define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
#define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */

/* descriptor control flags 2 */
/* buffer byte count. real data len must be <= 16KB */
#define D64_CTRL2_BC_MASK 0x00007fff
/* address extension bits */
#define D64_CTRL2_AE 0x00030000
#define D64_CTRL2_AE_SHIFT 16
#define D64_CTRL2_PARITY 0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK 0x0ff00000

#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */

#define DMA64_DD_PARITY(dd) \
	parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
 * packet headroom necessary to accommodate the largest header
 * in the system, (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in

#define BCMEXTRAHDROOM 172
#define DMA_ERROR(args) \
	if (!(*di->msg_level & 1)) \
#define DMA_TRACE(args) \
	if (!(*di->msg_level & 2)) \

#define DMA_ERROR(args)
#define DMA_TRACE(args)

#define DMA_NONE(args)

#define d64txregs dregs.d64_u.txregs_64
#define d64rxregs dregs.d64_u.rxregs_64
#define txd64 dregs.d64_u.txd_64
#define rxd64 dregs.d64_u.rxd_64

#define MAXNAMEL 8 /* 8 char names */

#define DI_INFO(dmah) ((dma_info_t *)dmah)

/* descriptor bumping macros */
/* faster than %, but n must be power of 2 */
#define XXD(x, n) ((x) & ((n) - 1))

#define TXD(x) XXD((x), di->ntxd)
#define RXD(x) XXD((x), di->nrxd)
#define NEXTTXD(i) TXD((i) + 1)
#define PREVTXD(i) TXD((i) - 1)
#define NEXTRXD(i) RXD((i) + 1)
#define PREVRXD(i) RXD((i) - 1)

#define NTXDACTIVE(h, t) TXD((t) - (h))
#define NRXDACTIVE(h, t) RXD((t) - (h))
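/*
 * Worked example (illustrative): with di->ntxd = 64, XXD() reduces to a
 * cheap "& 63", so TXD(64) == 0 and NEXTTXD(63) == 0 (the ring wraps),
 * while NTXDACTIVE(60, 3) == (3 - 60) & 63 == 7 descriptors in flight.
 */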
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type) ((bytes) / sizeof(type))
#define I2B(index, type) ((index) * sizeof(type))
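/*
 * Example (illustrative): with a 16-byte struct dma64desc,
 * I2B(5, struct dma64desc) is a byte offset of 80 and
 * B2I(80, struct dma64desc) is index 5 again; these convert between ring
 * indexes and the byte offsets held in the chip ptr/status registers.
 */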
#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */

#define PCI64ADDR_HIGH 0x80000000 /* address[63] */
#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
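/*
 * Address extension example (illustrative): for a 32-bit physical address
 * with either of bits 31:30 set, e.g. pa = 0xC1234567, dma64_dd_upd()
 * below moves those bits into the AE field:
 *
 *	ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;	-> 3
 *	pa &= ~PCI32ADDR_HIGH;					-> 0x01234567
 *
 * so the descriptor carries the low 30 bits while AE supplies bits 31:30.
 */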
/*
 * Descriptors are only read by the hardware, never written back.
 */
	u32 ctrl1;	/* misc control bits & bufcount */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */

/* dma engine software state */
	struct dma_pub dma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	struct pci_dev *pbus;	/* bus handle */

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	struct dma64regs *txregs_64;
	/* 64-bit dma rx engine registers */
	struct dma64regs *rxregs_64;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd_64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd_64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;	/* # tx descriptors tunable */
	u16 txin;	/* index of next descriptor to reclaim */
	u16 txout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper
				 * stack, e.g. some rx pkt buffers will be
				 * bridged to tx side without byte copying.
				 * The extra headroom needs to be large enough
				 * to fit txheader needs. Some dongle driver may

	uint nrxpost;	/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	/* add to get dma address of data buffer, low 32 bits */
	/* descriptor base needs to be aligned or not */

/*
 * default dma message level (if input msg_level
 * pointer is null in dma_attach())
 */
static uint dma_msg_level;
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
	uint dmactrlflags = di->dma.dmactrlflags;

	DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control,
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;

	di->dma.dmactrlflags = dmactrlflags;

static bool _dma64_addrext(struct dma64regs *dma64regs)
	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;

 * return true if this dma engine supports DmaExtendedAddrChanges,
static bool _dma_isaddrext(struct dma_info *di)
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->d64txregs))
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
				   "AE set\n", di->name));
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->d64rxregs))
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
				   "AE set\n", di->name));

static bool _dma_descriptor_align(struct dma_info *di)
	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(&di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64txregs->addrlow);
	} else if (di->d64rxregs != NULL) {
		W_REG(&di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64rxregs->addrlow);
void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
			   uint *alloced, dma_addr_t *pap)
	u16 align = (1 << align_bits);

	if (!IS_ALIGNED(PAGE_SIZE, align))

	return pci_alloc_consistent(pdev, size, pap);

u8 dma_align_sizetobits(uint size)
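{
	/*
	 * A minimal sketch of the body, assuming the function returns the
	 * bit position of the highest set bit of size, i.e. the number of
	 * alignment bits to use for an allocation of 'size' bytes.
	 */
	u8 bitpos = 0;

	while (size >>= 1)
		bitpos++;
	return bitpos;
}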
/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation crosses a page boundary,
 * it is freed and a new allocation is made at a location aligned to the
 * descriptor ring size, which guarantees that the ring does not cross a
 * page boundary.
 */
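/*
 * Worked example (illustrative): dma64_alloc() calls this with
 * boundary = D64RINGALIGN (0x2000), so the check below compares bit 13 of
 * the first and last ring bytes. If the ring starts at 0x1F00 with
 * size = 0x200, the last byte is at 0x20FF and the two addresses differ in
 * bit 13, so the buffer is freed and re-allocated with
 * *alignbits = dma_align_sizetobits(size); that stronger alignment keeps
 * the ring inside a single 8 kB region.
 */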
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,

static bool dma64_alloc(struct dma_info *di, uint direction)
	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
			   " failed\n", di->name));
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
		    roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
			   " failed\n", di->name));
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
		    roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;

static bool _dma_alloc(struct dma_info *di, uint direction)
	return dma64_alloc(di, direction);
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   void *dmaregstx, void *dmaregsrx, uint ntxd,
			   uint nrxd, uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level)
	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	printk(KERN_ERR "dma_attach: out of memory\n");

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointer */
	di->d64txregs = (struct dma64regs *) dmaregstx;
	di->d64rxregs = (struct dma64regs *) dmaregsrx;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
		   di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 * PCI/PCIE: they map silicon backplane address to zero
	 * based memory, need offset
	 * Other bus: use zero
	 * SI_BUS BIGENDIAN kludge: use sdram swapped region for
	 * data buffer, not descriptor
	 */
	di->dataoffsetlow = 0;
	/* add offset for pcie with DMA64 bus */
	di->ddoffsethigh = SI_PCIE_DMA_H32;
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))

	di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for a smaller dd table, HW relaxes the alignment requirement */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	size = ntxd * sizeof(void *);
	di->txp = kzalloc(size, GFP_ATOMIC);
	if (di->txp == NULL) {
		DMA_ERROR(("%s: dma_attach: out of tx memory\n",

	/* allocate rx packet pointer vector */
	size = nrxd * sizeof(void *);
	di->rxp = kzalloc(size, GFP_ATOMIC);
	if (di->rxp == NULL) {
		DMA_ERROR(("%s: dma_attach: out of rx memory\n",

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (!_dma_alloc(di, DMA_TX))

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (!_dma_alloc(di, DMA_RX))

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
				   "supported\n", di->name, (u32)di->txdpa));
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
				   "supported\n", di->name, (u32)di->rxdpa));

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,

	return (struct dma_pub *) di;

	dma_detach((struct dma_pub *)di);

/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
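{
	/*
	 * A minimal sketch, assuming a standard xor-fold that reduces the
	 * word to its parity bit, consistent with the comment above.
	 */
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}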
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
		/* address extension for 32-bit PCI */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);

	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			    cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* free dma descriptor rings */
	pci_free_consistent(di->pbus, di->txdalloc,
			    ((s8 *)di->txd64 - di->txdalign),
	pci_free_consistent(di->pbus, di->rxdalloc,
			    ((s8 *)di->rxd64 - di->rxdalign),

	/* free packet pointer vectors */

	/* free our private info structure */

/* initialize descriptor table base address */
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
		/* DMA64 32-bit address extension */
		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
static void _dma_rxenable(struct dma_info *di)
	uint dmactrlflags = di->dma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));

void dma_rxinit(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* A DMA engine without the alignment requirement requires the table
	 * to be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
	/* return if no packets posted */

	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))

	/* get the packet pointer that corresponds to the rx descriptor */

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

	di->rxd64[i].addrlow = 0xdeadbeef;
	di->rxd64[i].addrhigh = 0xdeadbeef;

	di->rxin = NEXTRXD(i);

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
	return dma64_getnextrxp(di, forceall);
/*
 * !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 * supported as a chain of packets
 * otherwise, a frame spanning buffers is treated as a giant packet and
 * will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer's data. After it reaches the max buffer size, the data continues
 * in the next DMA descriptor buffer WITHOUT a DMA header
 */
struct sk_buff *dma_rx(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p, *head, *tail;

	head = _dma_getnextrxp(di, false);

	len = le16_to_cpu(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
	dma_spin_for_len(len, head);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
		pkt_len = min_t(uint, resid, di->rxbufsize);
		__skb_trim(p, pkt_len);
		resid -= di->rxbufsize;

		    B2I(((R_REG(&di->d64rxregs->status0) &
			 di->rcvptrbase) & D64_RS0_CD_MASK,
		DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
			   di->rxin, di->rxout, cur));

	if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
		DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
		brcmu_pkt_buf_free_skb(head);
static bool dma64_rxidle(struct dma_info *di)
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));

/*
 * post receive buffers
 * return false if the refill failed completely and the ring is empty; this
 * will stall the rx dma and the user might want to call rxfill again asap.
 * This is unlikely to happen on a memory-rich NIC, but does happen often on
 * a memory-constrained dongle
 */
bool dma_rxfill(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;
	uint extra_offset = 0;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
		if (i == 0 && dma64_rxidle(di)) {
			DMA_ERROR(("%s: rxfill64: ring is empty !\n",

		/* reserve extra headroom, if applicable */
		skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		/* save the free packet pointer */

		/* reset flags for each descriptor */
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
		rxout = NEXTRXD(rxout);

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, struct dma64desc));
void dma_rxreclaim(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);

void dma_counterreset(struct dma_pub *pub)
	/* reset all software counters */

/* get the address of the var so it can be changed later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
	struct dma_info *di = (struct dma_info *)pub;

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);

/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* A DMA engine without the alignment requirement requires the table
	 * to be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* A DMA engine with the alignment requirement requires the table
	 * to be initialized before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

void dma_txsuspend(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	OR_REG(&di->d64txregs->control, D64_XC_SE);

void dma_txresume(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_txresume\n", di->name));

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);

bool dma_txsuspended(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	return (di->ntxd == 0) ||
	       ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == DMA_RANGE_ALL) ? "all" :
		   DMA_RANGE_TRANSMITTED) ? "transmitted" :

	if (di->txin == di->txout)

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);

bool dma_txreset(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
		  (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
		  (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */

	return status == D64_XS0_XS_DISABLED;

bool dma_rxreset(struct dma_pub *pub)
	struct dma_info *di = (struct dma_info *)pub;

	W_REG(&di->d64rxregs->control, 0);
		  (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;

/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 * an error (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems
 */
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p, *next;
	unsigned char *data;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)

		/* get physical address of buffer start */
		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

		flags |= D64_CTRL1_SOF;

		/* With a DMA segment list, the descriptor table is filled
		 * using the segment list instead of looping over
		 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
		 * is when the end of the segment list is reached.
		 */
		flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
		if (txout == (di->ntxd - 1))
			flags |= D64_CTRL1_EOT;

		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

		txout = NEXTTXD(txout);

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		di->txd64[PREVTXD(txout)].ctrl1 =
		    cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */

	W_REG(&di->d64txregs->ptr,
	      di->xmtptrbase + I2B(txout, struct dma64desc));

	/* tx flow control */
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	brcmu_pkt_buf_free_skb(p0);
	di->dma.txavail = 0;
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == DMA_RANGE_ALL) ? "all" :
		   DMA_RANGE_TRANSMITTED) ? "transmitted" :

	if (range == DMA_RANGE_ALL)
		struct dma64regs *dregs = di->d64txregs;

		end = (u16) (B2I(((R_REG(&dregs->status0) &
				  di->xmtptrbase) & D64_XS0_CD_MASK,

		if (range == DMA_RANGE_TRANSFERED) {
			    (u16) (R_REG(&dregs->status1) &
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = PREVTXD(active_desc);

	if ((start == 0) && (end > di->txout))

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		    (le32_to_cpu(di->txd64[i].ctrl2) &

		di->txd64[i].addrlow = 0xdeadbeef;
		di->txd64[i].addrhigh = 0xdeadbeef;

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);

	/* tx flow control */
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
		  "force %d\n", start, end, di->txout, forceall));
/*
 * Mac80211-initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller-specified DMA chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
	struct dma_info *di = (struct dma_info *) dmah;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	skb = (struct sk_buff *)di->txp[i];
	tx_info = (struct ieee80211_tx_info *)skb->cb;
	(callback_fnc)(tx_info, arg_a);