/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define DRV_MODULE_VERSION      "2.0"
#define DRV_DESCRIPTION         "Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

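/* Free TX descriptors, accounting for ring wrap.  For example, with
 * tx_pending = 511: tx_cons = 10 and tx_prod = 20 leave
 * 10 + 511 - 20 = 501 slots, while a wrapped producer (tx_cons = 500,
 * tx_prod = 4) leaves 500 - 4 - (512 - 511) = 495.
 */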
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)
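/* Each RX buffer thus holds the chip-written rx_header, two bytes of
 * padding (which keeps the IP header word-aligned), and up to 1536
 * bytes of frame data.
 */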

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42
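/* Each of the 16 wakeup patterns is up to 128 (0x80) bytes long and is
 * paired with a 16-byte (128-bit) mask, one mask bit per pattern byte.
 */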

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

#ifdef CONFIG_B44_PCI
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
        { 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
        SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3
#define B44_CHIP_RESET_FULL     4
#define B44_CHIP_RESET_PARTIAL  5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

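/* Sync a single descriptor at dma_base + offset.  Needed when a ring
 * was set up with dma_map_single() (the *_RING_HACK case) instead of
 * a coherent allocation, so CPU/device ownership changes by hand.
 */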
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
                                   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
                                dma_desc_sync_size, dir);
}

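/* Register accessors: the core can sit behind different SSB host buses,
 * so all MMIO goes through the ssb layer rather than raw readl/writel.
 */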
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        ssb_write32(bp->sdev, reg, val);
}

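/* Poll @reg until @bit reads clear (if @clear) or set, in 10us steps;
 * the total wait is therefore roughly @timeout * 10us before -ENODEV.
 */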
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                if (net_ratelimit())
                        netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
                                   bit, reg, clear ? "clear" : "set");

                return -ENODEV;
        }
        return 0;
}

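/* The CAM stores a station address split across two registers: bytes
 * 2-5 live in CAM_DATA_LO (most significant first) and bytes 0-1 in
 * CAM_DATA_HI next to the valid bit.
 */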
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
                            (index << CAM_CTRL_INDEX_SHIFT)));

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

        val = br32(bp, B44_CAM_DATA_LO);

        data[2] = (val >> 24) & 0xFF;
        data[3] = (val >> 16) & 0xFF;
        data[4] = (val >> 8) & 0xFF;
        data[5] = (val >> 0) & 0xFF;

        val = br32(bp, B44_CAM_DATA_HI);

        data[0] = (val >> 8) & 0xFF;
        data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

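/* Compose an MDIO management frame (opcode, PHY address, register,
 * turnaround) in a single register write, then wait for the EMAC to
 * signal MII completion before latching the 16-bit result.
 */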
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        netdev_err(bp->dev, "PHY Reset would not complete\n");
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
        char buf[20];
        u32 val;
        int err;

        /*
         * workaround for bad hardware design in Linksys WAP54G v1.0
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
        if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
                return;
        if (simple_strtoul(buf, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
                if (!(val & BMCR_ISOLATE))
                        return;
                val &= ~BMCR_ISOLATE;
                err = __b44_writephy(bp, 0, MII_BMCR, val);
                if (err)
                        goto error;
        }
        return;
error:
        pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        b44_wap54g10_workaround(bp);

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u64 *val;

        val = &bp->hw_stats.tx_good_octets;
        u64_stats_update_begin(&bp->hw_stats.syncp);

        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                netdev_info(bp->dev, "Link is down\n");
        } else {
                netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                            (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
                            (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                            (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
                bp->flags |= B44_FLAG_100_BASE_T;
                bp->flags |= B44_FLAG_FULL_DUPLEX;
                if (!netif_carrier_ok(bp->dev)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        val |= TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                }
                return;
        }

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        netdev_warn(bp->dev, "Remote fault detected in PHY\n");
                if (bmsr & BMSR_JCD)
                        netdev_warn(bp->dev, "Jabber detected in PHY\n");
        }
}

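/* Once-a-second housekeeping: poll PHY/link state and fold the chip's
 * clear-on-read MIB counters into the 64-bit software totals.
 */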
static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                dma_unmap_single(bp->sdev->dma_dev,
                                 rp->mapping,
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                /* Sigh... */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
                    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
                bp->force_copybreak = 1;
        }

        rh = (struct rx_header *) skb->data;

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                            dest_idx * sizeof(*dp),
                                            DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                         src_idx * sizeof(*src_desc),
                                         DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
                                   RX_PKT_BUF_SZ,
                                   DMA_FROM_DEVICE);
}

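/* Consume completed RX descriptors up to the chip's current descriptor
 * pointer.  Small frames are copied into a fresh skb and their buffer
 * recycled in place; larger ones are handed up whole and replaced.
 */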
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
                                        RX_PKT_BUF_SZ,
                                        DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->dev->stats.rx_dropped++;
                        goto next_pkt;
                }

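                /* rh->len is written back by the chip; if it still reads
                 * zero the header update has not landed yet, so poll
                 * briefly before giving up and dropping the frame.
                 */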
                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        dma_unmap_single(bp->sdev->dma_dev, map,
                                         skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        int work_done;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
                bp->istat &= ~ISTAT_RFO;
                b44_disable_ints(bp);
                ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                napi_complete(napi);
                b44_enable_ints(bp);
        }

        return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        netdev_info(dev, "late interrupt\n");
                        goto irq_ack;
                }

                if (napi_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __napi_schedule(&bp->napi);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netdev_err(dev, "transmit timed out, resetting\n");

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;
        unsigned long flags;

        len = skb->len;
        spin_lock_irqsave(&bp->lock, flags);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                goto err_out;
        }

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);

                bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                     len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                            entry * sizeof(bp->tx_ring[0]),
                                            DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

out_unlock:
        spin_unlock_irqrestore(&bp->lock, flags);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                          bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES, DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                          bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, gfp);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, gfp);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
                                         &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, gfp);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
                        rx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
                                         &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, gfp);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
                        tx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
        struct ssb_device *sdev = bp->sdev;
        bool was_enabled;

        was_enabled = ssb_device_is_enabled(bp->sdev);

        ssb_device_enable(bp->sdev, 0);
        ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        if (was_enabled) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        }

        b44_clear_stats(bp);

        /*
         * Don't enable the PHY if we are doing a partial reset;
         * we are probably going to power down.
         */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

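        /* Program the MDIO management clock: on native SSB the divisor
         * is derived from the backplane clock via B44_MDC_RATIO, while
         * PCI hosts use a fixed divisor of 0x0d.
         */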
        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
                                        B44_MDC_RATIO)
                     & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SDIO:
                WARN_ON(1); /* A device with this bus does not exist. */
                break;
        }

        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        /* reset PHY */
        b44_phy_reset(bp);
        /* power down PHY */
        netdev_info(bp->dev, "powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
        /* now reset the chip, but without enabling the MAC&PHY
         * part of it. This has to be done _after_ we shut down the PHY */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;
        u32 val;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);

        val = br32(bp, B44_RXCONFIG);
        if (!(val & RXCONFIG_CAM_ABSENT))
                __b44_set_mac_addr(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

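/* Build a Wake-on-LAN magic-packet match: six 0xff sync bytes at
 * @offset, then as many copies of the MAC address as fit in the
 * 128-byte pattern, with a mask bit set for every byte that must
 * match.  Returns the pattern length minus one, the encoding the
 * B44_WKUP_LEN register expects.
 */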
1482 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1483 {
1484         int magicsync = 6;
1485         int k, j, len = offset;
1486         int ethaddr_bytes = ETH_ALEN;
1487
1488         memset(ppattern + offset, 0xff, magicsync);
1489         for (j = 0; j < magicsync; j++)
1490                 set_bit(len++, (unsigned long *) pmask);
1491
1492         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1493                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1494                         ethaddr_bytes = ETH_ALEN;
1495                 else
1496                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1497                 if (ethaddr_bytes <=0)
1498                         break;
1499                 for (k = 0; k< ethaddr_bytes; k++) {
1500                         ppattern[offset + magicsync +
1501                                 (j * ETH_ALEN) + k] = macaddr[k];
1502                         set_bit(len++, (unsigned long *) pmask);
1503                 }
1504         }
1505         return len - 1;
1506 }
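
/* b44_magic_pattern() reproduces the canonical Wake-on-LAN "magic"
 * payload inside the match pattern, starting at `offset` so that the
 * encapsulating headers are skipped.  Roughly:
 *
 *     offset: FF FF FF FF FF FF | MAC | MAC | ... | MAC
 *
 * i.e. a six-byte 0xFF sync sequence followed by up to
 * B44_MAX_PATTERNS copies of the station MAC, as many as fit in the
 * 0x80-byte pattern.  Every byte written is also flagged in the byte
 * mask, so the masked-out header bytes never take part in the
 * comparison.  The return value is the pattern length minus one, which
 * is the encoding the B44_WKUP_LEN register expects.
 */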
1507
1508 /* Set up magic packet patterns in the b44 WOL
1509  * pattern matching filter.
1510  */
1511 static void b44_setup_pseudo_magicp(struct b44 *bp)
1512 {
1513
1514         u32 val;
1515         int plen0, plen1, plen2;
1516         u8 *pwol_pattern;
1517         u8 pwol_mask[B44_PMASK_SIZE];
1518
1519         pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1520         if (!pwol_pattern)
1521                 return;
1522
1523         /* IPv4 magic packet pattern - pattern 0. */
1524         memset(pwol_mask, 0, B44_PMASK_SIZE);
1525         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1526                                   B44_ETHIPV4UDP_HLEN);
1527
1528         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1529         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1530
1531         /* Raw Ethernet II magic packet pattern - pattern 1 */
1532         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1533         memset(pwol_mask, 0, B44_PMASK_SIZE);
1534         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1535                                   ETH_HLEN);
1536
1537         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1538                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1539         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1540                        B44_PMASK_BASE + B44_PMASK_SIZE);
1541
1542         /* IPv6 magic packet pattern - pattern 2 */
1543         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1544         memset(pwol_mask, 0, B44_PMASK_SIZE);
1545         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1546                                   B44_ETHIPV6UDP_HLEN);
1547
1548         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1549                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1550         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1551                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1552
1553         kfree(pwol_pattern);
1554
1555         /* Set these patterns' lengths: one less than each real length. */
1556         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1557         bw32(bp, B44_WKUP_LEN, val);
1558
1559         /* enable wakeup pattern matching */
1560         val = br32(bp, B44_DEVCTRL);
1561         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1562
1563 }
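
/* The three pseudo patterns land in consecutive filter slots: pattern
 * i occupies B44_PATTERN_BASE + i * B44_PATTERN_SIZE, and its byte
 * mask B44_PMASK_BASE + i * B44_PMASK_SIZE.  The offsets handed to
 * b44_magic_pattern() position the magic payload where it would sit
 * behind an Ethernet+IPv4+UDP header, a bare Ethernet II header, and
 * an Ethernet+IPv6+UDP header respectively, so the wakeup frame is
 * recognized in any of those encapsulations.  B44_WKUP_LEN packs the
 * three lengths (each already stored as length - 1) into its low
 * three bytes.
 */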
1564
1565 #ifdef CONFIG_B44_PCI
1566 static void b44_setup_wol_pci(struct b44 *bp)
1567 {
1568         u16 val;
1569
1570         if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1571                 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1572                 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1573                 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1574         }
1575 }
1576 #else
1577 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1578 #endif /* CONFIG_B44_PCI */
1579
1580 static void b44_setup_wol(struct b44 *bp)
1581 {
1582         u32 val;
1583
1584         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1585
1586         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1587
1588                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1589
1590                 val = bp->dev->dev_addr[2] << 24 |
1591                         bp->dev->dev_addr[3] << 16 |
1592                         bp->dev->dev_addr[4] << 8 |
1593                         bp->dev->dev_addr[5];
1594                 bw32(bp, B44_ADDR_LO, val);
1595
1596                 val = bp->dev->dev_addr[0] << 8 |
1597                         bp->dev->dev_addr[1];
1598                 bw32(bp, B44_ADDR_HI, val);
1599
1600                 val = br32(bp, B44_DEVCTRL);
1601                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1602
1603         } else {
1604                 b44_setup_pseudo_magicp(bp);
1605         }
1606         b44_setup_wol_pci(bp);
1607 }
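
/* Two WOL strategies are used above.  B0-and-later cores
 * (B44_FLAG_B0_ANDLATER, set for core revision >= 7) have a dedicated
 * magic-packet matcher: the station MAC is loaded into
 * B44_ADDR_HI/B44_ADDR_LO and DEVCTRL_MPM arms it, so the pattern
 * filter is not needed and B44_WKUP_LEN is explicitly disabled.  Older
 * cores emulate magic-packet detection with the three pseudo patterns
 * built by b44_setup_pseudo_magicp().
 */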
1608
1609 static int b44_close(struct net_device *dev)
1610 {
1611         struct b44 *bp = netdev_priv(dev);
1612
1613         netif_stop_queue(dev);
1614
1615         napi_disable(&bp->napi);
1616
1617         del_timer_sync(&bp->timer);
1618
1619         spin_lock_irq(&bp->lock);
1620
1621         b44_halt(bp);
1622         b44_free_rings(bp);
1623         netif_carrier_off(dev);
1624
1625         spin_unlock_irq(&bp->lock);
1626
1627         free_irq(dev->irq, dev);
1628
1629         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1630                 b44_init_hw(bp, B44_PARTIAL_RESET);
1631                 b44_setup_wol(bp);
1632         }
1633
1634         b44_free_consistent(bp);
1635
1636         return 0;
1637 }
1638
1639 static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1640                                         struct rtnl_link_stats64 *nstat)
1641 {
1642         struct b44 *bp = netdev_priv(dev);
1643         struct b44_hw_stats *hwstat = &bp->hw_stats;
1644         unsigned int start;
1645
1646         do {
1647                 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
1648
1649                 /* Convert HW stats into rtnl_link_stats64 stats. */
1650                 nstat->rx_packets = hwstat->rx_pkts;
1651                 nstat->tx_packets = hwstat->tx_pkts;
1652                 nstat->rx_bytes   = hwstat->rx_octets;
1653                 nstat->tx_bytes   = hwstat->tx_octets;
1654                 nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1655                                      hwstat->tx_oversize_pkts +
1656                                      hwstat->tx_underruns +
1657                                      hwstat->tx_excessive_cols +
1658                                      hwstat->tx_late_cols);
1659                 nstat->multicast  = hwstat->rx_multicast_pkts;
1660                 nstat->collisions = hwstat->tx_total_cols;
1661
1662                 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1663                                            hwstat->rx_undersize);
1664                 nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1665                 nstat->rx_frame_errors  = hwstat->rx_align_errs;
1666                 nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1667                 nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1668                                            hwstat->rx_oversize_pkts +
1669                                            hwstat->rx_missed_pkts +
1670                                            hwstat->rx_crc_align_errs +
1671                                            hwstat->rx_undersize +
1672                                            hwstat->rx_crc_errs +
1673                                            hwstat->rx_align_errs +
1674                                            hwstat->rx_symbol_errs);
1675
1676                 nstat->tx_aborted_errors = hwstat->tx_underruns;
1677 #if 0
1678                 /* Carrier lost counter seems to be broken for some devices */
1679                 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1680 #endif
1681         } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
1682
1683         return nstat;
1684 }
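
/* The u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair
 * samples the stats seqcount, so the copy above is retried whenever
 * the softirq-side updater raced with it.  On 64-bit builds the
 * helpers compile away; on 32-bit SMP they are what prevents a 64-bit
 * counter from being observed half-updated.
 */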
1685
1686 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1687 {
1688         struct netdev_hw_addr *ha;
1689         int i, num_ents;
1690
1691         num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1692         i = 0;
1693         netdev_for_each_mc_addr(ha, dev) {
1694                 if (i == num_ents)
1695                         break;
1696                 __b44_cam_write(bp, ha->addr, i++ + 1);
1697         }
1698         return i + 1;
1699 }
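
/* CAM entry 0 is reserved for the unicast address (written by
 * __b44_set_mac_addr()), so multicast entries are filed starting at
 * index 1.  The value returned is the first unused CAM index; the
 * caller zeroes the remaining entries from there up to 63.
 */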
1700
1701 static void __b44_set_rx_mode(struct net_device *dev)
1702 {
1703         struct b44 *bp = netdev_priv(dev);
1704         u32 val;
1705
1706         val = br32(bp, B44_RXCONFIG);
1707         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1708         if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1709                 val |= RXCONFIG_PROMISC;
1710                 bw32(bp, B44_RXCONFIG, val);
1711         } else {
1712                 unsigned char zero[ETH_ALEN] = { 0 };
1713                 int i = 1;
1714
1715                 __b44_set_mac_addr(bp);
1716
1717                 if ((dev->flags & IFF_ALLMULTI) ||
1718                     (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1719                         val |= RXCONFIG_ALLMULTI;
1720                 else
1721                         i = __b44_load_mcast(bp, dev);
1722
1723                 for (; i < 64; i++)
1724                         __b44_cam_write(bp, zero, i);
1725
1726                 bw32(bp, B44_RXCONFIG, val);
1727                 val = br32(bp, B44_CAM_CTRL);
1728                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1729         }
1730 }
1731
1732 static void b44_set_rx_mode(struct net_device *dev)
1733 {
1734         struct b44 *bp = netdev_priv(dev);
1735
1736         spin_lock_irq(&bp->lock);
1737         __b44_set_rx_mode(dev);
1738         spin_unlock_irq(&bp->lock);
1739 }
1740
1741 static u32 b44_get_msglevel(struct net_device *dev)
1742 {
1743         struct b44 *bp = netdev_priv(dev);
1744         return bp->msg_enable;
1745 }
1746
1747 static void b44_set_msglevel(struct net_device *dev, u32 value)
1748 {
1749         struct b44 *bp = netdev_priv(dev);
1750         bp->msg_enable = value;
1751 }
1752
1753 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1754 {
1755         struct b44 *bp = netdev_priv(dev);
1756         struct ssb_bus *bus = bp->sdev->bus;
1757
1758         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1759         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1760         switch (bus->bustype) {
1761         case SSB_BUSTYPE_PCI:
1762                 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1763                 break;
1764         case SSB_BUSTYPE_SSB:
1765                 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1766                 break;
1767         case SSB_BUSTYPE_PCMCIA:
1768         case SSB_BUSTYPE_SDIO:
1769                 WARN_ON(1); /* No b44 device exists on these bus types. */
1770                 break;
1771         }
1772 }
1773
1774 static int b44_nway_reset(struct net_device *dev)
1775 {
1776         struct b44 *bp = netdev_priv(dev);
1777         u32 bmcr;
1778         int r;
1779
1780         spin_lock_irq(&bp->lock);
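        /* MII_BMCR is read twice below; the first read is presumably a
         * dummy read to flush a stale value out of the PHY interface.
         * The double read has been in the driver historically and is
         * kept as-is here.
         */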
1781         b44_readphy(bp, MII_BMCR, &bmcr);
1782         b44_readphy(bp, MII_BMCR, &bmcr);
1783         r = -EINVAL;
1784         if (bmcr & BMCR_ANENABLE) {
1785                 b44_writephy(bp, MII_BMCR,
1786                              bmcr | BMCR_ANRESTART);
1787                 r = 0;
1788         }
1789         spin_unlock_irq(&bp->lock);
1790
1791         return r;
1792 }
1793
1794 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1795 {
1796         struct b44 *bp = netdev_priv(dev);
1797
1798         cmd->supported = (SUPPORTED_Autoneg);
1799         cmd->supported |= (SUPPORTED_100baseT_Half |
1800                           SUPPORTED_100baseT_Full |
1801                           SUPPORTED_10baseT_Half |
1802                           SUPPORTED_10baseT_Full |
1803                           SUPPORTED_MII);
1804
1805         cmd->advertising = 0;
1806         if (bp->flags & B44_FLAG_ADV_10HALF)
1807                 cmd->advertising |= ADVERTISED_10baseT_Half;
1808         if (bp->flags & B44_FLAG_ADV_10FULL)
1809                 cmd->advertising |= ADVERTISED_10baseT_Full;
1810         if (bp->flags & B44_FLAG_ADV_100HALF)
1811                 cmd->advertising |= ADVERTISED_100baseT_Half;
1812         if (bp->flags & B44_FLAG_ADV_100FULL)
1813                 cmd->advertising |= ADVERTISED_100baseT_Full;
1814         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1815         ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1816                                     SPEED_100 : SPEED_10));
1817         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1818                 DUPLEX_FULL : DUPLEX_HALF;
1819         cmd->port = 0;
1820         cmd->phy_address = bp->phy_addr;
1821         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1822                 XCVR_INTERNAL : XCVR_EXTERNAL;
1823         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1824                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1825         if (cmd->autoneg == AUTONEG_ENABLE)
1826                 cmd->advertising |= ADVERTISED_Autoneg;
1827         if (!netif_running(dev)) {
1828                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1829                 cmd->duplex = DUPLEX_UNKNOWN;
1830         }
1831         cmd->maxtxpkt = 0;
1832         cmd->maxrxpkt = 0;
1833         return 0;
1834 }
1835
1836 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1837 {
1838         struct b44 *bp = netdev_priv(dev);
1839         u32 speed = ethtool_cmd_speed(cmd);
1840
1841         /* We do not support gigabit. */
1842         if (cmd->autoneg == AUTONEG_ENABLE) {
1843                 if (cmd->advertising &
1844                     (ADVERTISED_1000baseT_Half |
1845                      ADVERTISED_1000baseT_Full))
1846                         return -EINVAL;
1847         } else if ((speed != SPEED_100 &&
1848                     speed != SPEED_10) ||
1849                    (cmd->duplex != DUPLEX_HALF &&
1850                     cmd->duplex != DUPLEX_FULL)) {
1851                 return -EINVAL;
1852         }
1853
1854         spin_lock_irq(&bp->lock);
1855
1856         if (cmd->autoneg == AUTONEG_ENABLE) {
1857                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1858                                B44_FLAG_100_BASE_T |
1859                                B44_FLAG_FULL_DUPLEX |
1860                                B44_FLAG_ADV_10HALF |
1861                                B44_FLAG_ADV_10FULL |
1862                                B44_FLAG_ADV_100HALF |
1863                                B44_FLAG_ADV_100FULL);
1864                 if (cmd->advertising == 0) {
1865                         bp->flags |= (B44_FLAG_ADV_10HALF |
1866                                       B44_FLAG_ADV_10FULL |
1867                                       B44_FLAG_ADV_100HALF |
1868                                       B44_FLAG_ADV_100FULL);
1869                 } else {
1870                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1871                                 bp->flags |= B44_FLAG_ADV_10HALF;
1872                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1873                                 bp->flags |= B44_FLAG_ADV_10FULL;
1874                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1875                                 bp->flags |= B44_FLAG_ADV_100HALF;
1876                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1877                                 bp->flags |= B44_FLAG_ADV_100FULL;
1878                 }
1879         } else {
1880                 bp->flags |= B44_FLAG_FORCE_LINK;
1881                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1882                 if (speed == SPEED_100)
1883                         bp->flags |= B44_FLAG_100_BASE_T;
1884                 if (cmd->duplex == DUPLEX_FULL)
1885                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1886         }
1887
1888         if (netif_running(dev))
1889                 b44_setup_phy(bp);
1890
1891         spin_unlock_irq(&bp->lock);
1892
1893         return 0;
1894 }
1895
1896 static void b44_get_ringparam(struct net_device *dev,
1897                               struct ethtool_ringparam *ering)
1898 {
1899         struct b44 *bp = netdev_priv(dev);
1900
1901         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1902         ering->rx_pending = bp->rx_pending;
1903
1904         /* XXX the tx ring size is not reported to ethtool here */
1905 }
1906
1907 static int b44_set_ringparam(struct net_device *dev,
1908                              struct ethtool_ringparam *ering)
1909 {
1910         struct b44 *bp = netdev_priv(dev);
1911
1912         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1913             (ering->rx_mini_pending != 0) ||
1914             (ering->rx_jumbo_pending != 0) ||
1915             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1916                 return -EINVAL;
1917
1918         spin_lock_irq(&bp->lock);
1919
1920         bp->rx_pending = ering->rx_pending;
1921         bp->tx_pending = ering->tx_pending;
1922
1923         b44_halt(bp);
1924         b44_init_rings(bp);
1925         b44_init_hw(bp, B44_FULL_RESET);
1926         netif_wake_queue(bp->dev);
1927         spin_unlock_irq(&bp->lock);
1928
1929         b44_enable_ints(bp);
1930
1931         return 0;
1932 }
1933
1934 static void b44_get_pauseparam(struct net_device *dev,
1935                                 struct ethtool_pauseparam *epause)
1936 {
1937         struct b44 *bp = netdev_priv(dev);
1938
1939         epause->autoneg =
1940                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1941         epause->rx_pause =
1942                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1943         epause->tx_pause =
1944                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1945 }
1946
1947 static int b44_set_pauseparam(struct net_device *dev,
1948                                 struct ethtool_pauseparam *epause)
1949 {
1950         struct b44 *bp = netdev_priv(dev);
1951
1952         spin_lock_irq(&bp->lock);
1953         if (epause->autoneg)
1954                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1955         else
1956                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1957         if (epause->rx_pause)
1958                 bp->flags |= B44_FLAG_RX_PAUSE;
1959         else
1960                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1961         if (epause->tx_pause)
1962                 bp->flags |= B44_FLAG_TX_PAUSE;
1963         else
1964                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1965         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1966                 b44_halt(bp);
1967                 b44_init_rings(bp);
1968                 b44_init_hw(bp, B44_FULL_RESET);
1969         } else {
1970                 __b44_set_flow_ctrl(bp, bp->flags);
1971         }
1972         spin_unlock_irq(&bp->lock);
1973
1974         b44_enable_ints(bp);
1975
1976         return 0;
1977 }
1978
1979 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1980 {
1981         switch (stringset) {
1982         case ETH_SS_STATS:
1983                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1984                 break;
1985         }
1986 }
1987
1988 static int b44_get_sset_count(struct net_device *dev, int sset)
1989 {
1990         switch (sset) {
1991         case ETH_SS_STATS:
1992                 return ARRAY_SIZE(b44_gstrings);
1993         default:
1994                 return -EOPNOTSUPP;
1995         }
1996 }
1997
1998 static void b44_get_ethtool_stats(struct net_device *dev,
1999                                   struct ethtool_stats *stats, u64 *data)
2000 {
2001         struct b44 *bp = netdev_priv(dev);
2002         struct b44_hw_stats *hwstat = &bp->hw_stats;
2003         u64 *data_src, *data_dst;
2004         unsigned int start;
2005         u32 i;
2006
2007         spin_lock_irq(&bp->lock);
2008         b44_stats_update(bp);
2009         spin_unlock_irq(&bp->lock);
2010
2011         do {
2012                 data_src = &hwstat->tx_good_octets;
2013                 data_dst = data;
2014                 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
2015
2016                 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2017                         *data_dst++ = *data_src++;
2018
2019         } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
2020 }
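
/* The flat copy above, starting at &hwstat->tx_good_octets, relies on
 * struct b44_hw_stats declaring its u64 counters contiguously and in
 * exactly the order of the b44_gstrings[] name table; both are
 * generated from the same B44_STAT_REG_DECLARE list, which keeps them
 * in sync.
 */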
2021
2022 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2023 {
2024         struct b44 *bp = netdev_priv(dev);
2025
2026         wol->supported = WAKE_MAGIC;
2027         if (bp->flags & B44_FLAG_WOL_ENABLE)
2028                 wol->wolopts = WAKE_MAGIC;
2029         else
2030                 wol->wolopts = 0;
2031         memset(&wol->sopass, 0, sizeof(wol->sopass));
2032 }
2033
2034 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2035 {
2036         struct b44 *bp = netdev_priv(dev);
2037
2038         spin_lock_irq(&bp->lock);
2039         if (wol->wolopts & WAKE_MAGIC)
2040                 bp->flags |= B44_FLAG_WOL_ENABLE;
2041         else
2042                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2043         spin_unlock_irq(&bp->lock);
2044
2045         return 0;
2046 }
2047
2048 static const struct ethtool_ops b44_ethtool_ops = {
2049         .get_drvinfo            = b44_get_drvinfo,
2050         .get_settings           = b44_get_settings,
2051         .set_settings           = b44_set_settings,
2052         .nway_reset             = b44_nway_reset,
2053         .get_link               = ethtool_op_get_link,
2054         .get_wol                = b44_get_wol,
2055         .set_wol                = b44_set_wol,
2056         .get_ringparam          = b44_get_ringparam,
2057         .set_ringparam          = b44_set_ringparam,
2058         .get_pauseparam         = b44_get_pauseparam,
2059         .set_pauseparam         = b44_set_pauseparam,
2060         .get_msglevel           = b44_get_msglevel,
2061         .set_msglevel           = b44_set_msglevel,
2062         .get_strings            = b44_get_strings,
2063         .get_sset_count         = b44_get_sset_count,
2064         .get_ethtool_stats      = b44_get_ethtool_stats,
2065 };
2066
2067 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2068 {
2069         struct mii_ioctl_data *data = if_mii(ifr);
2070         struct b44 *bp = netdev_priv(dev);
2071         int err = -EINVAL;
2072
2073         if (!netif_running(dev))
2074                 goto out;
2075
2076         spin_lock_irq(&bp->lock);
2077         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2078         spin_unlock_irq(&bp->lock);
2079 out:
2080         return err;
2081 }
2082
2083 static int b44_get_invariants(struct b44 *bp)
2084 {
2085         struct ssb_device *sdev = bp->sdev;
2086         int err = 0;
2087         u8 *addr;
2088
2089         bp->dma_offset = ssb_dma_translation(sdev);
2090
2091         if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2092             instance > 1) {
2093                 addr = sdev->bus->sprom.et1mac;
2094                 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2095         } else {
2096                 addr = sdev->bus->sprom.et0mac;
2097                 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2098         }
2099         /* Some ROMs have buggy PHY addresses with the high
2100          * bits set (sign extension?). Truncate them to a
2101          * valid PHY address. */
2102         bp->phy_addr &= 0x1F;
2103
2104         memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2105
2106         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2107                 pr_err("Invalid MAC address found in EEPROM\n");
2108                 return -EINVAL;
2109         }
2110
2111         bp->imask = IMASK_DEF;
2112
2113         /* XXX - really required?
2114            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2115         */
2116
2117         if (bp->sdev->id.revision >= 7)
2118                 bp->flags |= B44_FLAG_B0_ANDLATER;
2119
2120         return err;
2121 }
2122
2123 static const struct net_device_ops b44_netdev_ops = {
2124         .ndo_open               = b44_open,
2125         .ndo_stop               = b44_close,
2126         .ndo_start_xmit         = b44_start_xmit,
2127         .ndo_get_stats64        = b44_get_stats64,
2128         .ndo_set_rx_mode        = b44_set_rx_mode,
2129         .ndo_set_mac_address    = b44_set_mac_addr,
2130         .ndo_validate_addr      = eth_validate_addr,
2131         .ndo_do_ioctl           = b44_ioctl,
2132         .ndo_tx_timeout         = b44_tx_timeout,
2133         .ndo_change_mtu         = b44_change_mtu,
2134 #ifdef CONFIG_NET_POLL_CONTROLLER
2135         .ndo_poll_controller    = b44_poll_controller,
2136 #endif
2137 };
2138
2139 static int b44_init_one(struct ssb_device *sdev,
2140                         const struct ssb_device_id *ent)
2141 {
2142         struct net_device *dev;
2143         struct b44 *bp;
2144         int err;
2145
2146         instance++;
2147
2148         pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2149
2150         dev = alloc_etherdev(sizeof(*bp));
2151         if (!dev) {
2152                 err = -ENOMEM;
2153                 goto out;
2154         }
2155
2156         SET_NETDEV_DEV(dev, sdev->dev);
2157
2158         /* No interesting netdevice features in this card... */
2159         dev->features |= 0;
2160
2161         bp = netdev_priv(dev);
2162         bp->sdev = sdev;
2163         bp->dev = dev;
2164         bp->force_copybreak = 0;
2165
2166         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2167
2168         spin_lock_init(&bp->lock);
2169
2170         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2171         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2172
2173         dev->netdev_ops = &b44_netdev_ops;
2174         netif_napi_add(dev, &bp->napi, b44_poll, 64);
2175         dev->watchdog_timeo = B44_TX_TIMEOUT;
2176         dev->irq = sdev->irq;
2177         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2178
2179         err = ssb_bus_powerup(sdev->bus, 0);
2180         if (err) {
2181                 dev_err(sdev->dev,
2182                         "Failed to power up the bus\n");
2183                 goto err_out_free_dev;
2184         }
2185
2186         err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
2187         if (err) {
2188                 dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n");
2189                 goto err_out_powerdown;
2190         }
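
        /* The 44xx DMA engine can only address the low 1 GB of the
         * physical address space, hence the hard 30-bit mask above.
         * Ring addresses are additionally rebased by bp->dma_offset
         * (the SSB core's DMA translation, fetched in
         * b44_get_invariants()) before being written to the
         * DMARX/DMATX address registers.
         */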
2191
2192         err = b44_get_invariants(bp);
2193         if (err) {
2194                 dev_err(sdev->dev,
2195                         "Problem fetching invariants of chip, aborting\n");
2196                 goto err_out_powerdown;
2197         }
2198
2199         bp->mii_if.dev = dev;
2200         bp->mii_if.mdio_read = b44_mii_read;
2201         bp->mii_if.mdio_write = b44_mii_write;
2202         bp->mii_if.phy_id = bp->phy_addr;
2203         bp->mii_if.phy_id_mask = 0x1f;
2204         bp->mii_if.reg_num_mask = 0x1f;
2205
2206         /* By default, advertise all speed/duplex settings. */
2207         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2208                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2209
2210         /* By default, auto-negotiate PAUSE. */
2211         bp->flags |= B44_FLAG_PAUSE_AUTO;
2212
2213         err = register_netdev(dev);
2214         if (err) {
2215                 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2216                 goto err_out_powerdown;
2217         }
2218
2219         netif_carrier_off(dev);
2220
2221         ssb_set_drvdata(sdev, dev);
2222
2223         /* Chip reset provides power to the b44 MAC & PCI cores, which
2224          * is necessary for MAC register access.
2225          */
2226         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2227
2228         /* Do a PHY reset to test if there is an active PHY. */
2229         if (b44_phy_reset(bp) < 0)
2230                 bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2231
2232         netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2233
2234         return 0;
2235
2236 err_out_powerdown:
2237         ssb_bus_may_powerdown(sdev->bus);
2238
2239 err_out_free_dev:
2240         free_netdev(dev);
2241
2242 out:
2243         return err;
2244 }
2245
2246 static void b44_remove_one(struct ssb_device *sdev)
2247 {
2248         struct net_device *dev = ssb_get_drvdata(sdev);
2249
2250         unregister_netdev(dev);
2251         ssb_device_disable(sdev, 0);
2252         ssb_bus_may_powerdown(sdev->bus);
2253         free_netdev(dev);
2254         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2255         ssb_set_drvdata(sdev, NULL);
2256 }
2257
2258 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2259 {
2260         struct net_device *dev = ssb_get_drvdata(sdev);
2261         struct b44 *bp = netdev_priv(dev);
2262
2263         if (!netif_running(dev))
2264                 return 0;
2265
2266         del_timer_sync(&bp->timer);
2267
2268         spin_lock_irq(&bp->lock);
2269
2270         b44_halt(bp);
2271         netif_carrier_off(bp->dev);
2272         netif_device_detach(bp->dev);
2273         b44_free_rings(bp);
2274
2275         spin_unlock_irq(&bp->lock);
2276
2277         free_irq(dev->irq, dev);
2278         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2279                 b44_init_hw(bp, B44_PARTIAL_RESET);
2280                 b44_setup_wol(bp);
2281         }
2282
2283         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2284         return 0;
2285 }
2286
2287 static int b44_resume(struct ssb_device *sdev)
2288 {
2289         struct net_device *dev = ssb_get_drvdata(sdev);
2290         struct b44 *bp = netdev_priv(dev);
2291         int rc = 0;
2292
2293         rc = ssb_bus_powerup(sdev->bus, 0);
2294         if (rc) {
2295                 dev_err(sdev->dev,
2296                         "Failed to power up the bus\n");
2297                 return rc;
2298         }
2299
2300         if (!netif_running(dev))
2301                 return 0;
2302
2303         spin_lock_irq(&bp->lock);
2304         b44_init_rings(bp);
2305         b44_init_hw(bp, B44_FULL_RESET);
2306         spin_unlock_irq(&bp->lock);
2307
2308         /*
2309          * As a shared interrupt, the handler can be called immediately. To be
2310          * able to check the interrupt status the hardware must already be
2311          * powered back on (b44_init_hw).
2312          */
2313         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2314         if (rc) {
2315                 netdev_err(dev, "request_irq failed\n");
2316                 spin_lock_irq(&bp->lock);
2317                 b44_halt(bp);
2318                 b44_free_rings(bp);
2319                 spin_unlock_irq(&bp->lock);
2320                 return rc;
2321         }
2322
2323         netif_device_attach(bp->dev);
2324
2325         b44_enable_ints(bp);
2326         netif_wake_queue(dev);
2327
2328         mod_timer(&bp->timer, jiffies + 1);
2329
2330         return 0;
2331 }
2332
2333 static struct ssb_driver b44_ssb_driver = {
2334         .name           = DRV_MODULE_NAME,
2335         .id_table       = b44_ssb_tbl,
2336         .probe          = b44_init_one,
2337         .remove         = b44_remove_one,
2338         .suspend        = b44_suspend,
2339         .resume         = b44_resume,
2340 };
2341
2342 static inline int __init b44_pci_init(void)
2343 {
2344         int err = 0;
2345 #ifdef CONFIG_B44_PCI
2346         err = ssb_pcihost_register(&b44_pci_driver);
2347 #endif
2348         return err;
2349 }
2350
2351 static inline void b44_pci_exit(void)
2352 {
2353 #ifdef CONFIG_B44_PCI
2354         ssb_pcihost_unregister(&b44_pci_driver);
2355 #endif
2356 }
2357
2358 static int __init b44_init(void)
2359 {
2360         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2361         int err;
2362
2363         /* Set up parameters for syncing RX/TX DMA descriptors */
2364         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2365
2366         err = b44_pci_init();
2367         if (err)
2368                 return err;
2369         err = ssb_driver_register(&b44_ssb_driver);
2370         if (err)
2371                 b44_pci_exit();
2372         return err;
2373 }
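
/* dma_desc_sync_size is the granularity used when syncing a single
 * RX/TX descriptor for DMA.  Taking the maximum of the cache line
 * size and sizeof(struct dma_desc) is intended to ensure that, on
 * non-coherent platforms, a descriptor sync never covers a fragment
 * of a cache line or of a descriptor.
 */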
2374
2375 static void __exit b44_cleanup(void)
2376 {
2377         ssb_driver_unregister(&b44_ssb_driver);
2378         b44_pci_exit();
2379 }
2380
2381 module_init(b44_init);
2382 module_exit(b44_cleanup);
2383