2 * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
4 * This software may be used and distributed according to the terms of the
5 * GNU General Public License.
7 * The author may be reached as romieu@cogenit.fr.
8 * Specific bug reports/Asian food will be welcome.
10 * Special thanks to the nice people at CS-Telecom for the hardware and the
11 * access to the test/measure tools.
16 * I. Board Compatibility
18 * This device driver is designed for the Siemens PEB20534 4 ports serial
19 * controller as found on Etinc PCISYNC cards. The documentation for the
20 * chipset is available at http://www.infineon.com:
21 * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
22 * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
23 * - Application Hint "Management of DSCC4 on-chip FIFO resources".
24 * - Errata sheet DS5 (courtesy of Michael Skerritt).
25 * Jens David has built an adapter based on the same chipset. Take a look
26 * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
28 * Sample code (2 revisions) is available at Infineon.
30 * II. Board-specific settings
32 * PCISYNC cards can transmit a clock signal to the outside world on the
33 * *first two* ports, provided you fit a quartz and a line driver on them
34 * and remove the jumpers. The operation is described on Etinc's web site.
35 * If you go DCE on these ports, don't forget to use an adequate cable.
37 * Sharing of the PCI interrupt line for this board is possible.
39 * III. Driver operation
41 * The rx/tx operations are based on a linked list of descriptors. The driver
42 * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
43 * I tried to fix it, the more it started to look like a (convoluted) software
44 * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
45 * this an RFC 2119 MUST.
48 * When the tx ring is full, the xmit routine issues a call to netif_stop_queue().
49 * The device is supposed to be enabled again during an ALLS irq (we could
50 * use HI but as it's easy to lose events, it's fscked).
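 *
 * A rough sketch of the resulting flow control, using the names found later
 * in this file:
 *   dscc4_start_xmit: tx ring full          -> netif_stop_queue()
 *   dscc4_tx_irq:     descriptors reclaimed -> netif_wake_queue()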
53 * Received frames aren't supposed to span multiple receive areas.
54 * I may implement it some day but it isn't the highest-ranked item.
57 * The current error (XDU, RFO) recovery code is untested.
58 * So far, RDO takes its RX channel down and the right sequence to enable it
59 * again is still a mystery. If RDO happens, plan a reboot. More details
60 * in the code (NB: when this happens, TX still works).
61 * Don't mess with the cables during operation, especially on DTE ports. I
62 * don't suggest it for DCE either, but at least one gets some messages
63 * instead of a complete instant freeze.
64 * Tests were done on Rev. 20 of the silicon. The RDO handling changes with
65 * the documentation/chipset releases.
69 * - use polling at high irq/s,
70 * - performance analysis,
73 * 2001/12/10 Daniela Squassoni <daniela@cyclades.com>
74 * - Contribution to support the new generic HDLC layer.
77 * - old style interface removal
78 * - dscc4_release_ring fix (related to DMA mapping)
79 * - hard_start_xmit fix (hint: TxSizeMax)
83 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
85 #include <linux/module.h>
86 #include <linux/sched.h>
87 #include <linux/types.h>
88 #include <linux/errno.h>
89 #include <linux/list.h>
90 #include <linux/ioport.h>
91 #include <linux/pci.h>
92 #include <linux/kernel.h>
94 #include <linux/slab.h>
96 #include <asm/cache.h>
97 #include <asm/byteorder.h>
98 #include <asm/uaccess.h>
102 #include <linux/init.h>
103 #include <linux/interrupt.h>
104 #include <linux/string.h>
106 #include <linux/if_arp.h>
107 #include <linux/netdevice.h>
108 #include <linux/skbuff.h>
109 #include <linux/delay.h>
110 #include <linux/hdlc.h>
111 #include <linux/mutex.h>
114 static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
118 #ifdef CONFIG_DSCC4_PCI_RST
119 static DEFINE_MUTEX(dscc4_mutex);
120 static u32 dscc4_pci_config_store[16];
123 #define DRV_NAME "dscc4"
127 /* Module parameters */
129 MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
130 MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
131 MODULE_LICENSE("GPL");
132 module_param(debug, int, 0);
133 MODULE_PARM_DESC(debug, "Enable/disable extra messages");
134 module_param(quartz, int, 0);
135 MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");
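
/*
 * Illustrative module load (the values below are examples, not
 * recommendations):
 *
 *	modprobe dscc4 debug=1 quartz=14745600
 *
 * i.e. extra messages enabled and a 14.7456 MHz on-board oscillator declared
 * for baud rate generation.
 */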
149 u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
150 /* FWIW, datasheet calls that "dummy" and says that card
151 * never looks at it; neither does the driver */
162 #define DUMMY_SKB_SIZE 64
164 #define TX_RING_SIZE 32
165 #define RX_RING_SIZE 32
166 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD)
167 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD)
168 #define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */
169 #define TX_TIMEOUT (HZ/10)
170 #define DSCC4_HZ_MAX 33000000
171 #define BRR_DIVIDER_MAX 64*0x00004000 /* Cf errata DS5 p.10 */
172 #define dev_per_card 4
173 #define SCC_REGISTERS_MAX 23 /* Cf errata DS5 p.4 */
175 #define SOURCE_ID(flags) (((flags) >> 28) & 0x03)
176 #define TO_SIZE(state) (((state) >> 16) & 0x1fff)
179 * Given the operating range of Linux HDLC, the 2 defines below could be
180 * made simpler. However, they are a fine reminder of the limitations of
181 * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
183 #define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16)
184 #define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
185 #define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */
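
/*
 * Worked example (illustrative, assuming HDLC_MAX_MRU is 1600): RX_MAX(1600)
 * = ((1600 >> 5) + 1) << 5 = 51 * 32 = 1632, i.e. the receive buffer length
 * is rounded up to the next multiple of 32 bytes. TO_STATE_RX() then folds
 * that value modulo RxSizeMax into bits 16..28 of the descriptor state word,
 * while SOURCE_ID()/TO_SIZE() do the reverse extraction on a state word:
 * SOURCE_ID(0x2123abcd) is 2 and TO_SIZE(0x2123abcd) is 0x0123.
 */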
186 #define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
188 struct dscc4_pci_priv {
192 struct pci_dev *pdev;
194 struct dscc4_dev_priv *root;
195 dma_addr_t iqcfg_dma;
199 struct dscc4_dev_priv {
200 struct sk_buff *rx_skbuff[RX_RING_SIZE];
201 struct sk_buff *tx_skbuff[TX_RING_SIZE];
208 /* FIXME: check all the volatile are required */
209 volatile u32 tx_current;
214 volatile u32 tx_dirty;
219 dma_addr_t tx_fd_dma;
220 dma_addr_t rx_fd_dma;
224 u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */
226 struct timer_list timer;
228 struct dscc4_pci_priv *pci_priv;
235 unsigned short encoding;
236 unsigned short parity;
237 struct net_device *dev;
238 sync_serial_settings settings;
239 void __iomem *base_addr;
240 u32 __pad __attribute__ ((aligned (4)));
243 /* GLOBAL registers definitions */
264 /* SCC registers definitions */
265 #define SCC_START 0x0100
266 #define SCC_OFFSET 0x80
278 #define GPDATA 0x0404
282 #define EncodingMask 0x00700000
283 #define CrcMask 0x00000003
285 #define IntRxScc0 0x10000000
286 #define IntTxScc0 0x01000000
288 #define TxPollCmd 0x00000400
289 #define RxActivate 0x08000000
290 #define MTFi 0x04000000
291 #define Rdr 0x00400000
292 #define Rdt 0x00200000
293 #define Idr 0x00100000
294 #define Idt 0x00080000
295 #define TxSccRes 0x01000000
296 #define RxSccRes 0x00010000
297 #define TxSizeMax 0x1fff /* Datasheet DS1 - 11.1.1.1 */
298 #define RxSizeMax 0x1ffc /* Datasheet DS1 - 11.1.2.1 */
300 #define Ccr0ClockMask 0x0000003f
301 #define Ccr1LoopMask 0x00000200
302 #define IsrMask 0x000fffff
303 #define BrrExpMask 0x00000f00
304 #define BrrMultMask 0x0000003f
305 #define EncodingMask 0x00700000
306 #define Hold cpu_to_le32(0x40000000)
307 #define SccBusy 0x10000000
308 #define PowerUp 0x80000000
309 #define Vis 0x00001000
310 #define FrameOk (FrameVfr | FrameCrc)
311 #define FrameVfr 0x80
312 #define FrameRdo 0x40
313 #define FrameCrc 0x20
314 #define FrameRab 0x10
315 #define FrameAborted cpu_to_le32(0x00000200)
316 #define FrameEnd cpu_to_le32(0x80000000)
317 #define DataComplete cpu_to_le32(0x40000000)
318 #define LengthCheck 0x00008000
319 #define SccEvt 0x02000000
320 #define NoAck 0x00000200
321 #define Action 0x00000001
322 #define HiDesc cpu_to_le32(0x20000000)
325 #define RxEvt 0xf0000000
326 #define TxEvt 0x0f000000
327 #define Alls 0x00040000
328 #define Xdu 0x00010000
329 #define Cts 0x00004000
330 #define Xmr 0x00002000
331 #define Xpr 0x00001000
332 #define Rdo 0x00000080
333 #define Rfs 0x00000040
334 #define Cd 0x00000004
335 #define Rfo 0x00000002
336 #define Flex 0x00000001
338 /* DMA core events */
339 #define Cfg 0x00200000
340 #define Hi 0x00040000
341 #define Fi 0x00020000
342 #define Err 0x00010000
343 #define Arf 0x00000002
344 #define ArAck 0x00000001
347 #define Ready 0x00000000
348 #define NeedIDR 0x00000001
349 #define NeedIDT 0x00000002
350 #define RdoSet 0x00000004
351 #define FakeReset 0x00000008
353 /* Don't mask RDO. Ever. */
354 #ifdef DSCC4_POLLING
355 #define EventsMask 0xfffeef7f
356 #else
357 #define EventsMask 0xfffa8f7a
358 #endif
360 /* Functions prototypes */
361 static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
362 static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
363 static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
364 static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
365 static int dscc4_open(struct net_device *);
366 static netdev_tx_t dscc4_start_xmit(struct sk_buff *,
367 struct net_device *);
368 static int dscc4_close(struct net_device *);
369 static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
370 static int dscc4_init_ring(struct net_device *);
371 static void dscc4_release_ring(struct dscc4_dev_priv *);
372 static void dscc4_timer(unsigned long);
373 static void dscc4_tx_timeout(struct net_device *);
374 static irqreturn_t dscc4_irq(int irq, void *dev_id);
375 static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
376 static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
378 static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
381 static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
383 return dev_to_hdlc(dev)->priv;
386 static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
391 static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
392 struct net_device *dev, int offset)
396 /* Cf scc_writel for concern regarding thread-safety */
397 state = dpriv->scc_regs[offset >> 2];
400 dpriv->scc_regs[offset >> 2] = state;
401 writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
404 static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
405 struct net_device *dev, int offset)
409 * As of 2002/02/16, there are no threads racing for access.
411 dpriv->scc_regs[offset >> 2] = bits;
412 writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
415 static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
417 return dpriv->scc_regs[offset >> 2];
420 static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
422 /* Cf errata DS5 p.4 */
423 readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
424 return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
427 static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
428 struct net_device *dev)
430 dpriv->ltda = dpriv->tx_fd_dma +
431 ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
432 writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
433 /* Flush posted writes *NOW* */
434 readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
437 static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
438 struct net_device *dev)
440 dpriv->lrda = dpriv->rx_fd_dma +
441 ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
442 writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
445 static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
447 return dpriv->tx_current == dpriv->tx_dirty;
450 static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
451 struct net_device *dev)
453 return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
456 static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
457 struct net_device *dev, const char *msg)
462 if (SOURCE_ID(state) != dpriv->dev_id) {
463 printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
464 dev->name, msg, SOURCE_ID(state), state);
467 if (state & 0x0df80c00) {
468 printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
469 dev->name, msg, state);
476 static void dscc4_tx_print(struct net_device *dev,
477 struct dscc4_dev_priv *dpriv,
480 printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
481 dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
484 static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
486 struct pci_dev *pdev = dpriv->pci_priv->pdev;
487 struct TxFD *tx_fd = dpriv->tx_fd;
488 struct RxFD *rx_fd = dpriv->rx_fd;
489 struct sk_buff **skbuff;
492 pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
493 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
495 skbuff = dpriv->tx_skbuff;
496 for (i = 0; i < TX_RING_SIZE; i++) {
498 pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
499 (*skbuff)->len, PCI_DMA_TODEVICE);
500 dev_kfree_skb(*skbuff);
506 skbuff = dpriv->rx_skbuff;
507 for (i = 0; i < RX_RING_SIZE; i++) {
509 pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
510 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
511 dev_kfree_skb(*skbuff);
518 static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
519 struct net_device *dev)
521 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
522 struct RxFD *rx_fd = dpriv->rx_fd + dirty;
523 const int len = RX_MAX(HDLC_MAX_MRU);
527 skb = dev_alloc_skb(len);
528 dpriv->rx_skbuff[dirty] = skb;
530 skb->protocol = hdlc_type_trans(skb, dev);
531 rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
532 skb->data, len, PCI_DMA_FROMDEVICE));
541 * IRQ/thread/whatever safe
543 static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
544 struct net_device *dev, char *msg)
549 if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
550 printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
554 schedule_timeout_uninterruptible(10);
557 netdev_err(dev, "%s timeout\n", msg);
559 return (i >= 0) ? i : -EAGAIN;
562 static int dscc4_do_action(struct net_device *dev, char *msg)
564 void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
567 writel(Action, ioaddr + GCMDR);
570 u32 state = readl(ioaddr);
573 netdev_dbg(dev, "%s ack\n", msg);
574 writel(ArAck, ioaddr);
576 } else if (state & Arf) {
577 netdev_err(dev, "%s failed\n", msg);
584 netdev_err(dev, "%s timeout\n", msg);
589 static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
591 int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
595 if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
596 (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
599 schedule_timeout_uninterruptible(10);
602 return (i >= 0) ? i : -EAGAIN;
605 #if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */
606 static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
610 spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
611 /* Cf errata DS5 p.6 */
612 writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
613 scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
614 readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
615 writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
616 writel(Action, dpriv->base_addr + GCMDR);
617 spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
623 static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
627 /* Cf errata DS5 p.7 */
628 scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
629 scc_writel(0x00050000, dpriv, dev, CCR2);
631 * Must be longer than the time required to fill the fifo.
633 while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
638 writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
639 if (dscc4_do_action(dev, "Rdt") < 0)
640 netdev_err(dev, "Tx reset failed\n");
644 /* TODO: (ab)use this function to refill a completely depleted RX ring. */
645 static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
646 struct net_device *dev)
648 struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
649 struct pci_dev *pdev = dpriv->pci_priv->pdev;
653 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
655 printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
658 pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
659 pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
660 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
661 if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
662 dev->stats.rx_packets++;
663 dev->stats.rx_bytes += pkt_len;
664 skb_put(skb, pkt_len);
665 if (netif_running(dev))
666 skb->protocol = hdlc_type_trans(skb, dev);
669 if (skb->data[pkt_len] & FrameRdo)
670 dev->stats.rx_fifo_errors++;
671 else if (!(skb->data[pkt_len] & FrameCrc))
672 dev->stats.rx_crc_errors++;
673 else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
674 (FrameVfr | FrameRab))
675 dev->stats.rx_length_errors++;
676 dev->stats.rx_errors++;
677 dev_kfree_skb_irq(skb);
680 while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
681 if (try_get_rx_skb(dpriv, dev) < 0)
685 dscc4_rx_update(dpriv, dev);
686 rx_fd->state2 = 0x00000000;
687 rx_fd->end = cpu_to_le32(0xbabeface);
690 static void dscc4_free1(struct pci_dev *pdev)
692 struct dscc4_pci_priv *ppriv;
693 struct dscc4_dev_priv *root;
696 ppriv = pci_get_drvdata(pdev);
699 for (i = 0; i < dev_per_card; i++)
700 unregister_hdlc_device(dscc4_to_dev(root + i));
702 pci_set_drvdata(pdev, NULL);
704 for (i = 0; i < dev_per_card; i++)
705 free_netdev(root[i].dev);
710 static int __devinit dscc4_init_one(struct pci_dev *pdev,
711 const struct pci_device_id *ent)
713 struct dscc4_pci_priv *priv;
714 struct dscc4_dev_priv *dpriv;
715 void __iomem *ioaddr;
718 printk(KERN_DEBUG "%s", version);
720 rc = pci_enable_device(pdev);
724 rc = pci_request_region(pdev, 0, "registers");
726 pr_err("can't reserve MMIO region (regs)\n");
729 rc = pci_request_region(pdev, 1, "LBI interface");
731 pr_err("can't reserve MMIO region (lbi)\n");
732 goto err_free_mmio_region_1;
735 ioaddr = pci_ioremap_bar(pdev, 0);
737 pr_err("cannot remap MMIO region %llx @ %llx\n",
738 (unsigned long long)pci_resource_len(pdev, 0),
739 (unsigned long long)pci_resource_start(pdev, 0));
741 goto err_free_mmio_regions_2;
743 printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
744 (unsigned long long)pci_resource_start(pdev, 0),
745 (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);
747 /* Cf errata DS5 p.2 */
748 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
749 pci_set_master(pdev);
751 rc = dscc4_found1(pdev, ioaddr);
755 priv = pci_get_drvdata(pdev);
757 rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
759 pr_warn("IRQ %d busy\n", pdev->irq);
763 /* power up/little endian/dma core controlled via lrda/ltda */
764 writel(0x00000001, ioaddr + GMODE);
765 /* Shared interrupt queue */
769 bits = (IRQ_RING_SIZE >> 5) - 1;
773 writel(bits, ioaddr + IQLENR0);
775 /* Global interrupt queue */
776 writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
780 priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
781 IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
784 writel(priv->iqcfg_dma, ioaddr + IQCFG);
787 * SCC 0-3 private rx/tx irq structures
788 * IQRX/TXi needs to be set soon. Learned it the hard way...
790 for (i = 0; i < dev_per_card; i++) {
791 dpriv = priv->root + i;
792 dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
793 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
795 goto err_free_iqtx_6;
796 writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
798 for (i = 0; i < dev_per_card; i++) {
799 dpriv = priv->root + i;
800 dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
801 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
803 goto err_free_iqrx_7;
804 writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
807 /* Cf application hint. Beware of hard-lock condition on threshold. */
808 writel(0x42104000, ioaddr + FIFOCR1);
809 //writel(0x9ce69800, ioaddr + FIFOCR2);
810 writel(0xdef6d800, ioaddr + FIFOCR2);
811 //writel(0x11111111, ioaddr + FIFOCR4);
812 writel(0x18181818, ioaddr + FIFOCR4);
813 // FIXME: should depend on the chipset revision
814 writel(0x0000000e, ioaddr + FIFOCR3);
816 writel(0xff200001, ioaddr + GCMDR);
824 dpriv = priv->root + i;
825 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
826 dpriv->iqrx, dpriv->iqrx_dma);
831 dpriv = priv->root + i;
832 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
833 dpriv->iqtx, dpriv->iqtx_dma);
835 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
838 free_irq(pdev->irq, priv->root);
843 err_free_mmio_regions_2:
844 pci_release_region(pdev, 1);
845 err_free_mmio_region_1:
846 pci_release_region(pdev, 0);
848 pci_disable_device(pdev);
853 * Let's hope the default values are decent enough to protect my
854 * feet from the user's gun - Ueimor
856 static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
857 struct net_device *dev)
859 /* No interrupts, SCC core disabled. Let's relax */
860 scc_writel(0x00000000, dpriv, dev, CCR0);
862 scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);
865 * No address recognition/crc-CCITT/cts enabled
866 * Shared flags transmission disabled - cf errata DS5 p.11
867 * Carrier detect disabled - cf errata p.14
868 * FIXME: carrier detection/polarity may be handled more gracefully.
870 scc_writel(0x02408000, dpriv, dev, CCR1);
872 /* crc not forwarded - Cf errata DS5 p.11 */
873 scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
875 //scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
878 static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
882 if ((hz < 0) || (hz > DSCC4_HZ_MAX))
885 dpriv->pci_priv->xtal_hz = hz;
890 static const struct net_device_ops dscc4_ops = {
891 .ndo_open = dscc4_open,
892 .ndo_stop = dscc4_close,
893 .ndo_change_mtu = hdlc_change_mtu,
894 .ndo_start_xmit = hdlc_start_xmit,
895 .ndo_do_ioctl = dscc4_ioctl,
896 .ndo_tx_timeout = dscc4_tx_timeout,
899 static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
901 struct dscc4_pci_priv *ppriv;
902 struct dscc4_dev_priv *root;
903 int i, ret = -ENOMEM;
905 root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
909 for (i = 0; i < dev_per_card; i++) {
910 root[i].dev = alloc_hdlcdev(root + i);
915 ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
920 spin_lock_init(&ppriv->lock);
922 for (i = 0; i < dev_per_card; i++) {
923 struct dscc4_dev_priv *dpriv = root + i;
924 struct net_device *d = dscc4_to_dev(dpriv);
925 hdlc_device *hdlc = dev_to_hdlc(d);
927 d->base_addr = (unsigned long)ioaddr;
929 d->netdev_ops = &dscc4_ops;
930 d->watchdog_timeo = TX_TIMEOUT;
931 SET_NETDEV_DEV(d, &pdev->dev);
934 dpriv->pci_priv = ppriv;
935 dpriv->base_addr = ioaddr;
936 spin_lock_init(&dpriv->lock);
938 hdlc->xmit = dscc4_start_xmit;
939 hdlc->attach = dscc4_hdlc_attach;
941 dscc4_init_registers(dpriv, d);
942 dpriv->parity = PARITY_CRC16_PR0_CCITT;
943 dpriv->encoding = ENCODING_NRZ;
945 ret = dscc4_init_ring(d);
949 ret = register_hdlc_device(d);
951 pr_err("unable to register\n");
952 dscc4_release_ring(dpriv);
957 ret = dscc4_set_quartz(root, quartz);
961 pci_set_drvdata(pdev, ppriv);
966 dscc4_release_ring(root + i);
967 unregister_hdlc_device(dscc4_to_dev(root + i));
973 free_netdev(root[i].dev);
979 /* FIXME: get rid of the unneeded code */
980 static void dscc4_timer(unsigned long data)
982 struct net_device *dev = (struct net_device *)data;
983 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
984 // struct dscc4_pci_priv *ppriv;
988 dpriv->timer.expires = jiffies + TX_TIMEOUT;
989 add_timer(&dpriv->timer);
992 static void dscc4_tx_timeout(struct net_device *dev)
994 /* FIXME: something is missing there */
997 static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
999 sync_serial_settings *settings = &dpriv->settings;
1001 if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
1002 struct net_device *dev = dscc4_to_dev(dpriv);
1004 netdev_info(dev, "loopback requires clock\n");
1010 #ifdef CONFIG_DSCC4_PCI_RST
1012 * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together
1013 * so as to provide a safe way to reset the ASIC but not the whole machine.
1016 * This code doesn't need to be efficient. Keep It Simple
1018 static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1022 mutex_lock(&dscc4_mutex);
1023 for (i = 0; i < 16; i++)
1024 pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
1026 /* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
1027 writel(0x001c0000, ioaddr + GMODE);
1028 /* Configure GPIO port as output */
1029 writel(0x0000ffff, ioaddr + GPDIR);
1030 /* Disable interrupts */
1031 writel(0x0000ffff, ioaddr + GPIM);
1033 writel(0x0000ffff, ioaddr + GPDATA);
1034 writel(0x00000000, ioaddr + GPDATA);
1036 /* Flush posted writes */
1037 readl(ioaddr + GSTAR);
1039 schedule_timeout_uninterruptible(10);
1041 for (i = 0; i < 16; i++)
1042 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
1043 mutex_unlock(&dscc4_mutex);
1046 #define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
1047 #endif /* CONFIG_DSCC4_PCI_RST */
1049 static int dscc4_open(struct net_device *dev)
1051 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1052 struct dscc4_pci_priv *ppriv;
1055 if ((dscc4_loopback_check(dpriv) < 0))
1058 if ((ret = hdlc_open(dev)))
1061 ppriv = dpriv->pci_priv;
1064 * Due to various bugs, there is no way to reliably reset a
1065 * specific port (the manufacturer-dependent special PCI #RST wiring
1066 * apart: it affects all ports). Thus the device goes into the quietest
1067 * mode possible at dscc4_close() time and simply claims to
1068 * be up if it's opened again. It still isn't possible to change
1069 * the HDLC configuration without rebooting, but at least the ports
1070 * can be up/down ifconfig'ed without killing the host.
1072 if (dpriv->flags & FakeReset) {
1073 dpriv->flags &= ~FakeReset;
1074 scc_patchl(0, PowerUp, dpriv, dev, CCR0);
1075 scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
1076 scc_writel(EventsMask, dpriv, dev, IMR);
1077 netdev_info(dev, "up again\n");
1081 /* IDT+IDR during XPR */
1082 dpriv->flags = NeedIDR | NeedIDT;
1084 scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);
1087 * The following is a bit paranoid...
1089 * NB: the datasheet "...CEC will stay active if the SCC is in
1090 * power-down mode or..." and CCR2.RAC = 1 are two different
1093 if (scc_readl_star(dpriv, dev) & SccBusy) {
1094 netdev_err(dev, "busy - try later\n");
1098 netdev_info(dev, "available - good\n");
1100 scc_writel(EventsMask, dpriv, dev, IMR);
1102 /* Posted write is flushed in the wait_ack loop */
1103 scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);
1105 if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
1106 goto err_disable_scc_events;
1109 * I would expect XPR near CE completion (before ? after ?).
1110 * At worst, this code won't see a late XPR and people
1111 * will have to re-issue an ifconfig (this is harmless).
1112 * WARNING, a really missing XPR usually means a hardware
1113 * reset is needed. Suggestions anyone ?
1115 if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
1116 pr_err("XPR timeout\n");
1117 goto err_disable_scc_events;
1121 dscc4_tx_print(dev, dpriv, "Open");
1124 netif_start_queue(dev);
1126 init_timer(&dpriv->timer);
1127 dpriv->timer.expires = jiffies + 10*HZ;
1128 dpriv->timer.data = (unsigned long)dev;
1129 dpriv->timer.function = dscc4_timer;
1130 add_timer(&dpriv->timer);
1131 netif_carrier_on(dev);
1135 err_disable_scc_events:
1136 scc_writel(0xffffffff, dpriv, dev, IMR);
1137 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
1144 #ifdef DSCC4_POLLING
1145 static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
1147 /* FIXME: it's gonna be easy (TM), for sure */
1149 #endif /* DSCC4_POLLING */
1151 static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
1152 struct net_device *dev)
1154 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1155 struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
1159 next = dpriv->tx_current%TX_RING_SIZE;
1160 dpriv->tx_skbuff[next] = skb;
1161 tx_fd = dpriv->tx_fd + next;
1162 tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
1163 tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
1165 tx_fd->complete = 0x00000000;
1166 tx_fd->jiffies = jiffies;
1169 #ifdef DSCC4_POLLING
1170 spin_lock(&dpriv->lock);
1171 while (dscc4_tx_poll(dpriv, dev));
1172 spin_unlock(&dpriv->lock);
1176 dscc4_tx_print(dev, dpriv, "Xmit");
1177 /* To be cleaned(unsigned int)/optimized. Later, ok ? */
1178 if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
1179 netif_stop_queue(dev);
1181 if (dscc4_tx_quiescent(dpriv, dev))
1182 dscc4_do_tx(dpriv, dev);
1184 return NETDEV_TX_OK;
1187 static int dscc4_close(struct net_device *dev)
1189 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1191 del_timer_sync(&dpriv->timer);
1192 netif_stop_queue(dev);
1194 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
1195 scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
1196 scc_writel(0xffffffff, dpriv, dev, IMR);
1198 dpriv->flags |= FakeReset;
1205 static inline int dscc4_check_clock_ability(int port)
1209 #ifdef CONFIG_DSCC4_PCISYNC
1217 * DS1 p.137: "There are a total of 13 different clocking modes..."
1220 * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a).
1221 * Clock mode 3b _should_ work but the testing seems to make this point
1222 * dubious (DIY testing requires setting CCR0 at 0x00000033).
1223 * This is supposed to provide least surprise "DTE like" behavior.
1224 * - if line rate is specified, clocks are assumed to be locally generated.
1225 * A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing
1226 * between these is done automagically according to the required frequency
1227 * scaling. Of course some rounding may take place.
1228 * - no high speed mode (40Mb/s). May be trivial to do but I don't have an
1229 * appropriate external clocking device for testing.
1230 * - no time-slot/clock mode 5: shameless laziness.
1232 * The clock signal wiring can be (is ?) manufacturer-dependent. Good luck.
1234 * BIG FAT WARNING: if the device isn't given an adequate clock signal, it
1235 * won't pass the init sequence. For example, straight back-to-back DTE without
1236 * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is
1239 * Typos lurk in the datasheet (missing divider in clock mode 7a figure 51 p.153
1242 * Clock mode related bits of CCR0:
1243 * +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only)
1244 * | +---------- SSEL: sub-mode select 0 -> a, 1 -> b
1245 * | | +-------- High Speed: say 0
1246 * | | | +-+-+-- Clock Mode: 0..7
1249 * x|x|5|4|3|2|1|0| lower bits
1251 * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b)
1252 * +-+-+-+------------------ M (0..15)
1253 * | | | | +-+-+-+-+-+-- N (0..63)
1254 * 0 0 0 0 | | | | 0 0 | | | | | |
1255 * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1256 * f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits
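
/*
 * Worked example (figures are illustrative, not from the datasheet): with a
 * 14.7456 MHz quartz and a requested rate of 9600 bps, divider = 14745600 /
 * 9600 = 1536. 1536 does not fit in the 6 bit N field, so it is normalized
 * as 1536 = 48 x 2^5, i.e. M = 5 and N + 1 = 48 in the k = (N+1)x2^M formula
 * above; the resulting rate is 14745600 / 1536 = 9600 bps, with no rounding.
 */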
1259 static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
1261 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1265 *state &= ~Ccr0ClockMask;
1266 if (*bps) { /* Clock generated - required for DCE */
1267 u32 n = 0, m = 0, divider;
1270 xtal = dpriv->pci_priv->xtal_hz;
1273 if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
1275 divider = xtal / *bps;
1276 if (divider > BRR_DIVIDER_MAX) {
1278 *state |= 0x00000036; /* Clock mode 6b (BRG/16) */
1280 *state |= 0x00000037; /* Clock mode 7b (BRG) */
1281 if (divider >> 22) {
1284 } else if (divider) {
1285 /* Extraction of the 6 highest weighted bits */
1287 while (0xffffffc0 & divider) {
1295 if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
1297 *bps = xtal / divider;
1300 * External clock - DTE
1301 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
1302 * Nothing more to be done
1306 scc_writel(brr, dpriv, dev, BRR);
1312 static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1314 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
1315 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1316 const size_t size = sizeof(dpriv->settings);
1319 if (dev->flags & IFF_UP)
1322 if (cmd != SIOCWANDEV)
1325 switch(ifr->ifr_settings.type) {
1327 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
1328 if (ifr->ifr_settings.size < size) {
1329 ifr->ifr_settings.size = size; /* data size wanted */
1332 if (copy_to_user(line, &dpriv->settings, size))
1336 case IF_IFACE_SYNC_SERIAL:
1337 if (!capable(CAP_NET_ADMIN))
1340 if (dpriv->flags & FakeReset) {
1341 netdev_info(dev, "please reset the device before this command\n");
1344 if (copy_from_user(&dpriv->settings, line, size))
1346 ret = dscc4_set_iface(dpriv, dev);
1350 ret = hdlc_ioctl(dev, ifr, cmd);
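
/*
 * Minimal user-space sketch (not part of this driver) of how a configuration
 * tool could drive the SIOCWANDEV/IF_IFACE_SYNC_SERIAL path handled above.
 * The interface name and the rate are assumptions; error handling is omitted
 * and the interface must be down (cf. the IFF_UP check above).
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <linux/if.h>
#include <linux/hdlc/ioctl.h>

/* sock is any datagram socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
static int dscc4_example_set_clock(int sock)
{
	sync_serial_settings settings = {
		.clock_rate = 64000,		/* ask the board to generate 64 kbps (DCE) */
		.clock_type = CLOCK_INT,	/* internally generated clock */
		.loopback   = 0,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
	ifr.ifr_settings.size = sizeof(settings);
	ifr.ifr_settings.ifs_ifsu.sync = &settings;

	return ioctl(sock, SIOCWANDEV, &ifr);
}
#endif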
1357 static int dscc4_match(const struct thingie *p, int value)
1361 for (i = 0; p[i].define != -1; i++) {
1362 if (value == p[i].define)
1365 if (p[i].define == -1)
1371 static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
1372 struct net_device *dev)
1374 sync_serial_settings *settings = &dpriv->settings;
1375 int ret = -EOPNOTSUPP;
1378 bps = settings->clock_rate;
1379 state = scc_readl(dpriv, CCR0);
1380 if (dscc4_set_clock(dev, &bps, &state) < 0)
1382 if (bps) { /* DCE */
1383 printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
1384 if (settings->clock_rate != bps) {
1385 printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
1386 dev->name, settings->clock_rate, bps);
1387 settings->clock_rate = bps;
1390 state |= PowerUp | Vis;
1391 printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
1393 scc_writel(state, dpriv, dev, CCR0);
1399 static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
1400 struct net_device *dev)
1402 static const struct thingie encoding[] = {
1403 { ENCODING_NRZ, 0x00000000 },
1404 { ENCODING_NRZI, 0x00200000 },
1405 { ENCODING_FM_MARK, 0x00400000 },
1406 { ENCODING_FM_SPACE, 0x00500000 },
1407 { ENCODING_MANCHESTER, 0x00600000 },
1412 i = dscc4_match(encoding, dpriv->encoding);
1414 scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
1420 static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
1421 struct net_device *dev)
1423 sync_serial_settings *settings = &dpriv->settings;
1426 state = scc_readl(dpriv, CCR1);
1427 if (settings->loopback) {
1428 printk(KERN_DEBUG "%s: loopback\n", dev->name);
1429 state |= 0x00000100;
1431 printk(KERN_DEBUG "%s: normal\n", dev->name);
1432 state &= ~0x00000100;
1434 scc_writel(state, dpriv, dev, CCR1);
1438 static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
1439 struct net_device *dev)
1441 static const struct thingie crc[] = {
1442 { PARITY_CRC16_PR0_CCITT, 0x00000010 },
1443 { PARITY_CRC16_PR1_CCITT, 0x00000000 },
1444 { PARITY_CRC32_PR0_CCITT, 0x00000011 },
1445 { PARITY_CRC32_PR1_CCITT, 0x00000001 }
1449 i = dscc4_match(crc, dpriv->parity);
1451 scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
1457 static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
1460 int (*action)(struct dscc4_dev_priv *, struct net_device *);
1461 } *p, do_setting[] = {
1462 { dscc4_encoding_setting },
1463 { dscc4_clock_setting },
1464 { dscc4_loopback_setting },
1465 { dscc4_crc_setting },
1470 for (p = do_setting; p->action; p++) {
1471 if ((ret = p->action(dpriv, dev)) < 0)
1477 static irqreturn_t dscc4_irq(int irq, void *token)
1479 struct dscc4_dev_priv *root = token;
1480 struct dscc4_pci_priv *priv;
1481 struct net_device *dev;
1482 void __iomem *ioaddr;
1484 unsigned long flags;
1487 priv = root->pci_priv;
1488 dev = dscc4_to_dev(root);
1490 spin_lock_irqsave(&priv->lock, flags);
1492 ioaddr = root->base_addr;
1494 state = readl(ioaddr + GSTAR);
1500 printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
1501 writel(state, ioaddr + GSTAR);
1504 netdev_err(dev, "failure (Arf). Harass the maintainer\n");
1510 printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
1511 if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
1512 netdev_err(dev, "CFG failed\n");
1513 if (!(state &= ~Cfg))
1516 if (state & RxEvt) {
1517 i = dev_per_card - 1;
1519 dscc4_rx_irq(priv, root + i);
1523 if (state & TxEvt) {
1524 i = dev_per_card - 1;
1526 dscc4_tx_irq(priv, root + i);
1531 spin_unlock_irqrestore(&priv->lock, flags);
1532 return IRQ_RETVAL(handled);
1535 static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
1536 struct dscc4_dev_priv *dpriv)
1538 struct net_device *dev = dscc4_to_dev(dpriv);
1543 cur = dpriv->iqtx_current%IRQ_RING_SIZE;
1544 state = le32_to_cpu(dpriv->iqtx[cur]);
1547 printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
1549 if ((debug > 1) && (loop > 1))
1550 printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
1551 if (loop && netif_queue_stopped(dev))
1552 if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
1553 netif_wake_queue(dev);
1555 if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
1556 !dscc4_tx_done(dpriv))
1557 dscc4_do_tx(dpriv, dev);
1561 dpriv->iqtx[cur] = 0;
1562 dpriv->iqtx_current++;
1564 if (state_check(state, dpriv, dev, "Tx") < 0)
1567 if (state & SccEvt) {
1569 struct sk_buff *skb;
1573 dscc4_tx_print(dev, dpriv, "Alls");
1575 * DataComplete can't be trusted for Tx completion.
1578 cur = dpriv->tx_dirty%TX_RING_SIZE;
1579 tx_fd = dpriv->tx_fd + cur;
1580 skb = dpriv->tx_skbuff[cur];
1582 pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
1583 skb->len, PCI_DMA_TODEVICE);
1584 if (tx_fd->state & FrameEnd) {
1585 dev->stats.tx_packets++;
1586 dev->stats.tx_bytes += skb->len;
1588 dev_kfree_skb_irq(skb);
1589 dpriv->tx_skbuff[cur] = NULL;
1593 netdev_err(dev, "Tx: NULL skb %d\n",
1597 * If the driver ends up sending crap on the wire, it
1598 * will be way easier to diagnose than the (not so)
1599 * random freeze induced by null-sized tx frames.
1601 tx_fd->data = tx_fd->next;
1602 tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
1603 tx_fd->complete = 0x00000000;
1606 if (!(state &= ~Alls))
1610 * Transmit Data Underrun
1613 netdev_err(dev, "Tx Data Underrun. Ask maintainer\n");
1614 dpriv->flags = NeedIDT;
1617 dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
1618 writel(Action, dpriv->base_addr + GCMDR);
1622 netdev_info(dev, "CTS transition\n");
1623 if (!(state &= ~Cts)) /* DEBUG */
1627 /* Frame needs to be sent again - FIXME */
1628 netdev_err(dev, "Tx ReTx. Ask maintainer\n");
1629 if (!(state &= ~Xmr)) /* DEBUG */
1633 void __iomem *scc_addr;
1638 * - the busy condition happens (sometimes);
1639 * - it doesn't seem to make the handler unreliable.
1641 for (i = 1; i; i <<= 1) {
1642 if (!(scc_readl_star(dpriv, dev) & SccBusy))
1646 netdev_info(dev, "busy in irq\n");
1648 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
1649 /* Keep this order: IDT before IDR */
1650 if (dpriv->flags & NeedIDT) {
1652 dscc4_tx_print(dev, dpriv, "Xpr");
1653 ring = dpriv->tx_fd_dma +
1654 (dpriv->tx_dirty%TX_RING_SIZE)*
1655 sizeof(struct TxFD);
1656 writel(ring, scc_addr + CH0BTDA);
1657 dscc4_do_tx(dpriv, dev);
1658 writel(MTFi | Idt, scc_addr + CH0CFG);
1659 if (dscc4_do_action(dev, "IDT") < 0)
1661 dpriv->flags &= ~NeedIDT;
1663 if (dpriv->flags & NeedIDR) {
1664 ring = dpriv->rx_fd_dma +
1665 (dpriv->rx_current%RX_RING_SIZE)*
1666 sizeof(struct RxFD);
1667 writel(ring, scc_addr + CH0BRDA);
1668 dscc4_rx_update(dpriv, dev);
1669 writel(MTFi | Idr, scc_addr + CH0CFG);
1670 if (dscc4_do_action(dev, "IDR") < 0)
1672 dpriv->flags &= ~NeedIDR;
1674 /* Activate receiver and misc */
1675 scc_writel(0x08050008, dpriv, dev, CCR2);
1678 if (!(state &= ~Xpr))
1683 netdev_info(dev, "CD transition\n");
1684 if (!(state &= ~Cd)) /* DEBUG */
1687 } else { /* ! SccEvt */
1689 #ifdef DSCC4_POLLING
1690 while (!dscc4_tx_poll(dpriv, dev));
1692 netdev_info(dev, "Tx Hi\n");
1696 netdev_info(dev, "Tx ERR\n");
1697 dev->stats.tx_errors++;
1704 static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
1705 struct dscc4_dev_priv *dpriv)
1707 struct net_device *dev = dscc4_to_dev(dpriv);
1712 cur = dpriv->iqrx_current%IRQ_RING_SIZE;
1713 state = le32_to_cpu(dpriv->iqrx[cur]);
1716 dpriv->iqrx[cur] = 0;
1717 dpriv->iqrx_current++;
1719 if (state_check(state, dpriv, dev, "Rx") < 0)
1722 if (!(state & SccEvt)){
1726 printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
1728 state &= 0x00ffffff;
1729 if (state & Err) { /* Hold or reset */
1730 printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
1731 cur = dpriv->rx_current%RX_RING_SIZE;
1732 rx_fd = dpriv->rx_fd + cur;
1734 * Presume we're not facing a DMAC receiver reset.
1735 * As we use the rx size-filtering feature of the
1736 * DSCC4, the beginning of a new frame is waiting in
1737 * the rx fifo. I bet a Receive Data Overflow will
1738 * happen most of the time but let's try and avoid it.
1739 * Btw (as for RDO) if one experiences ERR while
1740 * the system looks rather idle, there may be a
1741 * problem with latency. In this case, increasing
1742 * RX_RING_SIZE may help.
1744 //while (dpriv->rx_needs_refill) {
1745 while (!(rx_fd->state1 & Hold)) {
1748 if (!(cur = cur%RX_RING_SIZE))
1749 rx_fd = dpriv->rx_fd;
1751 //dpriv->rx_needs_refill--;
1752 try_get_rx_skb(dpriv, dev);
1755 rx_fd->state1 &= ~Hold;
1756 rx_fd->state2 = 0x00000000;
1757 rx_fd->end = cpu_to_le32(0xbabeface);
1762 dscc4_rx_skb(dpriv, dev);
1765 if (state & Hi ) { /* HI bit */
1766 netdev_info(dev, "Rx Hi\n");
1770 } else { /* SccEvt */
1772 //FIXME: verify that all the events are handled here
1775 const char *irq_name;
1777 { 0x00008000, "TIN"},
1778 { 0x00000020, "RSC"},
1779 { 0x00000010, "PCE"},
1780 { 0x00000008, "PLLA"},
1784 for (evt = evts; evt->irq_name; evt++) {
1785 if (state & evt->mask) {
1786 printk(KERN_DEBUG "%s: %s\n",
1787 dev->name, evt->irq_name);
1788 if (!(state &= ~evt->mask))
1793 if (!(state &= ~0x0000c03c))
1797 netdev_info(dev, "CTS transition\n");
1798 if (!(state &= ~Cts)) /* DEBUG */
1802 * Receive Data Overflow (FIXME: fscked)
1806 void __iomem *scc_addr;
1810 // dscc4_rx_dump(dpriv);
1811 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
1813 scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
1815 * This has no effect. Why ?
1816 * ORed with TxSccRes, one sees the CFG ack (for
1817 * the TX part only).
1819 scc_writel(RxSccRes, dpriv, dev, CMDR);
1820 dpriv->flags |= RdoSet;
1823 * Let's try and save something in the received data.
1824 * rx_current must be incremented at least once to
1825 * avoid HOLD in the BRDA-to-be-pointed desc.
1828 cur = dpriv->rx_current++%RX_RING_SIZE;
1829 rx_fd = dpriv->rx_fd + cur;
1830 if (!(rx_fd->state2 & DataComplete))
1832 if (rx_fd->state2 & FrameAborted) {
1833 dev->stats.rx_over_errors++;
1834 rx_fd->state1 |= Hold;
1835 rx_fd->state2 = 0x00000000;
1836 rx_fd->end = cpu_to_le32(0xbabeface);
1838 dscc4_rx_skb(dpriv, dev);
1842 if (dpriv->flags & RdoSet)
1844 "%s: no RDO in Rx data\n", DRV_NAME);
1846 #ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
1848 * FIXME: must the reset be this violent ?
1850 #warning "FIXME: CH0BRDA"
1851 writel(dpriv->rx_fd_dma +
1852 (dpriv->rx_current%RX_RING_SIZE)*
1853 sizeof(struct RxFD), scc_addr + CH0BRDA);
1854 writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
1855 if (dscc4_do_action(dev, "RDR") < 0) {
1856 netdev_err(dev, "RDO recovery failed(RDR)\n");
1859 writel(MTFi|Idr, scc_addr + CH0CFG);
1860 if (dscc4_do_action(dev, "IDR") < 0) {
1861 netdev_err(dev, "RDO recovery failed(IDR)\n");
1866 scc_patchl(0, RxActivate, dpriv, dev, CCR2);
1870 netdev_info(dev, "CD transition\n");
1871 if (!(state &= ~Cd)) /* DEBUG */
1875 printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
1876 if (!(state &= ~Flex))
1883 * I had expected the following to work for the first descriptor
1884 * (tx_fd->state = 0xc0000000)
1885 * - Hold=1 (don't try and branch to the next descriptor);
1886 * - No=0 (I want an empty data section, i.e. size=0);
1887 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
1888 * It failed and locked solid. Thus the introduction of a dummy skb.
1889 * Problem is acknowledged in errata sheet DS5. Joy :o/
1891 static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
1893 struct sk_buff *skb;
1895 skb = dev_alloc_skb(DUMMY_SKB_SIZE);
1897 int last = dpriv->tx_dirty%TX_RING_SIZE;
1898 struct TxFD *tx_fd = dpriv->tx_fd + last;
1900 skb->len = DUMMY_SKB_SIZE;
1901 skb_copy_to_linear_data(skb, version,
1902 strlen(version) % DUMMY_SKB_SIZE);
1903 tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
1904 tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
1905 skb->data, DUMMY_SKB_SIZE,
1907 dpriv->tx_skbuff[last] = skb;
1912 static int dscc4_init_ring(struct net_device *dev)
1914 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1915 struct pci_dev *pdev = dpriv->pci_priv->pdev;
1921 ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
1924 dpriv->rx_fd = rx_fd = (struct RxFD *) ring;
1926 ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
1928 goto err_free_dma_rx;
1929 dpriv->tx_fd = tx_fd = (struct TxFD *) ring;
1931 memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
1932 dpriv->tx_dirty = 0xffffffff;
1933 i = dpriv->tx_current = 0;
1935 tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
1936 tx_fd->complete = 0x00000000;
1937 /* FIXME: NULL should be ok - to be tried */
1938 tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
1939 (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
1940 (++i%TX_RING_SIZE)*sizeof(*tx_fd));
1941 } while (i < TX_RING_SIZE);
1943 if (!dscc4_init_dummy_skb(dpriv))
1944 goto err_free_dma_tx;
1946 memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
1947 i = dpriv->rx_dirty = dpriv->rx_current = 0;
1949 /* size set by the host. Multiple of 4 bytes please */
1950 rx_fd->state1 = HiDesc;
1951 rx_fd->state2 = 0x00000000;
1952 rx_fd->end = cpu_to_le32(0xbabeface);
1953 rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
1954 // FIXME: return value is checked but the error handling looks suspect
1955 if (try_get_rx_skb(dpriv, dev) >= 0)
1957 (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
1958 (++i%RX_RING_SIZE)*sizeof(*rx_fd));
1959 } while (i < RX_RING_SIZE);
1964 pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
1966 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
1971 static void __devexit dscc4_remove_one(struct pci_dev *pdev)
1973 struct dscc4_pci_priv *ppriv;
1974 struct dscc4_dev_priv *root;
1975 void __iomem *ioaddr;
1978 ppriv = pci_get_drvdata(pdev);
1981 ioaddr = root->base_addr;
1983 dscc4_pci_reset(pdev, ioaddr);
1985 free_irq(pdev->irq, root);
1986 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
1988 for (i = 0; i < dev_per_card; i++) {
1989 struct dscc4_dev_priv *dpriv = root + i;
1991 dscc4_release_ring(dpriv);
1992 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
1993 dpriv->iqrx, dpriv->iqrx_dma);
1994 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
1995 dpriv->iqtx, dpriv->iqtx_dma);
2002 pci_release_region(pdev, 1);
2003 pci_release_region(pdev, 0);
2005 pci_disable_device(pdev);
2008 static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
2009 unsigned short parity)
2011 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
2013 if (encoding != ENCODING_NRZ &&
2014 encoding != ENCODING_NRZI &&
2015 encoding != ENCODING_FM_MARK &&
2016 encoding != ENCODING_FM_SPACE &&
2017 encoding != ENCODING_MANCHESTER)
2020 if (parity != PARITY_NONE &&
2021 parity != PARITY_CRC16_PR0_CCITT &&
2022 parity != PARITY_CRC16_PR1_CCITT &&
2023 parity != PARITY_CRC32_PR0_CCITT &&
2024 parity != PARITY_CRC32_PR1_CCITT)
2027 dpriv->encoding = encoding;
2028 dpriv->parity = parity;
2033 static int __init dscc4_setup(char *str)
2035 int *args[] = { &debug, &quartz, NULL }, **p = args;
2037 while (*p && (get_option(&str, *p) == 2))
2042 __setup("dscc4.setup=", dscc4_setup);
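
/*
 * Illustrative built-in equivalent of the module parameters above (values
 * are examples only): booting with "dscc4.setup=1,14745600" on the kernel
 * command line sets debug to 1 and quartz to 14745600 Hz.
 */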
2045 static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
2046 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
2047 PCI_ANY_ID, PCI_ANY_ID, },
2050 MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);
2052 static struct pci_driver dscc4_driver = {
2054 .id_table = dscc4_pci_tbl,
2055 .probe = dscc4_init_one,
2056 .remove = __devexit_p(dscc4_remove_one),
2059 module_pci_driver(dscc4_driver);