2 * Copyright (C) 2001,2002,2003 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * This driver is designed for the Broadcom SiByte SOC built-in
20 * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/timer.h>
26 #include <linux/errno.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/init.h>
34 #include <linux/config.h>
35 #include <linux/bitops.h>
36 #include <asm/processor.h> /* Processor type for cache alignment. */
38 #include <asm/cache.h>
/* This is only here until the firmware is ready.  Once it is,
   the firmware will leave the ethernet address in the register for us. */
42 #ifdef CONFIG_SIBYTE_STANDALONE
43 #define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
44 #define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
45 #define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
49 /* These identify the driver base version and may not be removed. */
51 static char version1[] __devinitdata =
52 "sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
56 /* Operational parameters that usually are not changed. */
58 #define CONFIG_SBMAC_COALESCE
60 #define MAX_UNITS 3 /* More are supported, limit only on options */
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (2*HZ)
66 MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
67 MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
69 /* A few user-configurable values which may be modified when a driver
72 /* 1 normal messages, 0 quiet .. 7 verbose. */
74 module_param(debug, int, S_IRUGO);
75 MODULE_PARM_DESC(debug, "Debug messages");
78 static int noisy_mii = 1;
79 module_param(noisy_mii, int, S_IRUGO);
80 MODULE_PARM_DESC(noisy_mii, "MII status messages");
82 /* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver interoperability.
85 The media type is usually passed in 'options[]'.
88 static int options[MAX_UNITS] = {-1, -1, -1};
89 module_param_array(options, int, NULL, S_IRUGO);
90 MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS));
92 static int full_duplex[MAX_UNITS] = {-1, -1, -1};
93 module_param_array(full_duplex, int, NULL, S_IRUGO);
94 MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
97 #ifdef CONFIG_SBMAC_COALESCE
98 static int int_pktcnt = 0;
99 module_param(int_pktcnt, int, S_IRUGO);
100 MODULE_PARM_DESC(int_pktcnt, "Packet count");
102 static int int_timeout = 0;
103 module_param(int_timeout, int, S_IRUGO);
104 MODULE_PARM_DESC(int_timeout, "Timeout value");
107 #include <asm/sibyte/sb1250.h>
108 #include <asm/sibyte/sb1250_defs.h>
109 #include <asm/sibyte/sb1250_regs.h>
110 #include <asm/sibyte/sb1250_mac.h>
111 #include <asm/sibyte/sb1250_dma.h>
112 #include <asm/sibyte/sb1250_int.h>
113 #include <asm/sibyte/sb1250_scd.h>
116 /**********************************************************************
118 ********************************************************************* */
121 typedef enum { sbmac_speed_auto, sbmac_speed_10,
122 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
124 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
125 sbmac_duplex_full } sbmac_duplex_t;
127 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
128 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
130 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
131 sbmac_state_broken } sbmac_state_t;
134 /**********************************************************************
136 ********************************************************************* */
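/*
 * Advance a driver ring pointer (field 'f' of DMA context 'd') to the
 * next descriptor, wrapping back to the start of the descriptor table
 * when the end is reached.
 */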
139 #define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
140 (d)->sbdma_dscrtable : (d)->f+1)
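/*
 * Number of cache lines needed to cover 'x' bytes, rounded up to a
 * whole SMP_CACHE_BYTES-sized block.
 */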
143 #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
145 #define SBMAC_MAX_TXDESCR 32
146 #define SBMAC_MAX_RXDESCR 32
148 #define ETHER_ALIGN 2
149 #define ETHER_ADDR_LEN 6
150 #define ENET_PACKET_SIZE 1518
151 /*#define ENET_PACKET_SIZE 9216 */
153 /**********************************************************************
154 * DMA Descriptor structure
155 ********************************************************************* */
typedef struct sbdmadscr_s {
	uint64_t dscr_a;		/* buffer physical address and flags */
	uint64_t dscr_b;		/* packet length and options */
} sbdmadscr_t;
162 typedef unsigned long paddr_t;
164 /**********************************************************************
165 * DMA Controller structure
166 ********************************************************************* */
168 typedef struct sbmacdma_s {
171 * This stuff is used to identify the channel and the registers
172 * associated with it.
175 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
176 int sbdma_channel; /* channel number */
177 int sbdma_txdir; /* direction (1=transmit) */
178 int sbdma_maxdescr; /* total # of descriptors in ring */
179 #ifdef CONFIG_SBMAC_COALESCE
180 int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt*/
181 int sbdma_int_timeout; /* # usec rx/tx interrupt */
184 volatile void __iomem *sbdma_config0; /* DMA config register 0 */
185 volatile void __iomem *sbdma_config1; /* DMA config register 1 */
186 volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */
187 volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */
188 volatile void __iomem *sbdma_curdscr; /* current descriptor address */
191 * This stuff is for maintenance of the ring
194 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
195 sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */
197 struct sk_buff **sbdma_ctxtable; /* context table, one per descr */
199 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
200 sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */
201 sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */
205 /**********************************************************************
206 * Ethernet softc structure
207 ********************************************************************* */
212 * Linux-specific things
215 struct net_device *sbm_dev; /* pointer to linux device */
216 spinlock_t sbm_lock; /* spin lock */
217 struct timer_list sbm_timer; /* for monitoring MII */
218 struct net_device_stats sbm_stats;
219 int sbm_devflags; /* current device flags */
222 int sbm_phy_oldanlpar;
223 int sbm_phy_oldk1stsr;
224 int sbm_phy_oldlinkstat;
227 unsigned char sbm_phys[2];
230 * Controller-specific things
233 volatile void __iomem *sbm_base; /* MAC's base address */
234 sbmac_state_t sbm_state; /* current state */
236 volatile void __iomem *sbm_macenable; /* MAC Enable Register */
237 volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */
238 volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */
239 volatile void __iomem *sbm_framecfg; /* Frame configuration register */
240 volatile void __iomem *sbm_rxfilter; /* receive filter register */
241 volatile void __iomem *sbm_isr; /* Interrupt status register */
242 volatile void __iomem *sbm_imr; /* Interrupt mask register */
243 volatile void __iomem *sbm_mdio; /* MDIO register */
245 sbmac_speed_t sbm_speed; /* current speed */
246 sbmac_duplex_t sbm_duplex; /* current duplex */
247 sbmac_fc_t sbm_fc; /* current flow control setting */
249 unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
251 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
252 sbmacdma_t sbm_rxdma;
258 /**********************************************************************
260 ********************************************************************* */
262 /**********************************************************************
264 ********************************************************************* */
266 static void sbdma_initctx(sbmacdma_t *d,
267 struct sbmac_softc *s,
271 static void sbdma_channel_start(sbmacdma_t *d, int rxtx);
272 static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
273 static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
274 static void sbdma_emptyring(sbmacdma_t *d);
275 static void sbdma_fillring(sbmacdma_t *d);
276 static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d);
277 static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d);
278 static int sbmac_initctx(struct sbmac_softc *s);
279 static void sbmac_channel_start(struct sbmac_softc *s);
280 static void sbmac_channel_stop(struct sbmac_softc *s);
281 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t);
282 static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff);
283 static uint64_t sbmac_addr2reg(unsigned char *ptr);
284 static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs);
285 static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
286 static void sbmac_setmulti(struct sbmac_softc *sc);
287 static int sbmac_init(struct net_device *dev, int idx);
288 static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed);
289 static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc);
291 static int sbmac_open(struct net_device *dev);
292 static void sbmac_timer(unsigned long data);
293 static void sbmac_tx_timeout (struct net_device *dev);
294 static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
295 static void sbmac_set_rx_mode(struct net_device *dev);
296 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
297 static int sbmac_close(struct net_device *dev);
298 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
300 static void sbmac_mii_sync(struct sbmac_softc *s);
301 static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
302 static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx);
303 static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
304 unsigned int regval);
307 /**********************************************************************
309 ********************************************************************* */
311 static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
314 /**********************************************************************
316 ********************************************************************* */
318 #define MII_COMMAND_START 0x01
319 #define MII_COMMAND_READ 0x02
320 #define MII_COMMAND_WRITE 0x01
321 #define MII_COMMAND_ACK 0x02
323 #define BMCR_RESET 0x8000
324 #define BMCR_LOOPBACK 0x4000
325 #define BMCR_SPEED0 0x2000
326 #define BMCR_ANENABLE 0x1000
327 #define BMCR_POWERDOWN 0x0800
328 #define BMCR_ISOLATE 0x0400
329 #define BMCR_RESTARTAN 0x0200
330 #define BMCR_DUPLEX 0x0100
331 #define BMCR_COLTEST 0x0080
332 #define BMCR_SPEED1 0x0040
333 #define BMCR_SPEED1000 BMCR_SPEED1
334 #define BMCR_SPEED100 BMCR_SPEED0
335 #define BMCR_SPEED10 0
337 #define BMSR_100BT4 0x8000
338 #define BMSR_100BT_FDX 0x4000
339 #define BMSR_100BT_HDX 0x2000
340 #define BMSR_10BT_FDX 0x1000
341 #define BMSR_10BT_HDX 0x0800
342 #define BMSR_100BT2_FDX 0x0400
343 #define BMSR_100BT2_HDX 0x0200
344 #define BMSR_1000BT_XSR 0x0100
345 #define BMSR_PRESUP 0x0040
346 #define BMSR_ANCOMPLT 0x0020
347 #define BMSR_REMFAULT 0x0010
348 #define BMSR_AUTONEG 0x0008
349 #define BMSR_LINKSTAT 0x0004
350 #define BMSR_JABDETECT 0x0002
351 #define BMSR_EXTCAPAB 0x0001
353 #define PHYIDR1 0x2000
354 #define PHYIDR2 0x5C60
356 #define ANAR_NP 0x8000
357 #define ANAR_RF 0x2000
358 #define ANAR_ASYPAUSE 0x0800
359 #define ANAR_PAUSE 0x0400
360 #define ANAR_T4 0x0200
361 #define ANAR_TXFD 0x0100
362 #define ANAR_TXHD 0x0080
363 #define ANAR_10FD 0x0040
364 #define ANAR_10HD 0x0020
365 #define ANAR_PSB 0x0001
367 #define ANLPAR_NP 0x8000
368 #define ANLPAR_ACK 0x4000
369 #define ANLPAR_RF 0x2000
370 #define ANLPAR_ASYPAUSE 0x0800
371 #define ANLPAR_PAUSE 0x0400
372 #define ANLPAR_T4 0x0200
373 #define ANLPAR_TXFD 0x0100
374 #define ANLPAR_TXHD 0x0080
375 #define ANLPAR_10FD 0x0040
376 #define ANLPAR_10HD 0x0020
377 #define ANLPAR_PSB 0x0001 /* 802.3 */
379 #define ANER_PDF 0x0010
380 #define ANER_LPNPABLE 0x0008
381 #define ANER_NPABLE 0x0004
382 #define ANER_PAGERX 0x0002
383 #define ANER_LPANABLE 0x0001
385 #define ANNPTR_NP 0x8000
386 #define ANNPTR_MP 0x2000
387 #define ANNPTR_ACK2 0x1000
388 #define ANNPTR_TOGTX 0x0800
389 #define ANNPTR_CODE 0x0008
391 #define ANNPRR_NP 0x8000
392 #define ANNPRR_MP 0x2000
393 #define ANNPRR_ACK3 0x1000
394 #define ANNPRR_TOGTX 0x0800
395 #define ANNPRR_CODE 0x0008
397 #define K1TCR_TESTMODE 0x0000
398 #define K1TCR_MSMCE 0x1000
399 #define K1TCR_MSCV 0x0800
400 #define K1TCR_RPTR 0x0400
401 #define K1TCR_1000BT_FDX 0x200
402 #define K1TCR_1000BT_HDX 0x100
404 #define K1STSR_MSMCFLT 0x8000
405 #define K1STSR_MSCFGRES 0x4000
406 #define K1STSR_LRSTAT 0x2000
407 #define K1STSR_RRSTAT 0x1000
408 #define K1STSR_LP1KFD 0x0800
409 #define K1STSR_LP1KHD 0x0400
410 #define K1STSR_LPASMDIR 0x0200
412 #define K1SCR_1KX_FDX 0x8000
413 #define K1SCR_1KX_HDX 0x4000
414 #define K1SCR_1KT_FDX 0x2000
415 #define K1SCR_1KT_HDX 0x1000
417 #define STRAP_PHY1 0x0800
418 #define STRAP_NCMODE 0x0400
419 #define STRAP_MANMSCFG 0x0200
420 #define STRAP_ANENABLE 0x0100
421 #define STRAP_MSVAL 0x0080
422 #define STRAP_1KHDXADV 0x0010
423 #define STRAP_1KFDXADV 0x0008
424 #define STRAP_100ADV 0x0004
425 #define STRAP_SPEEDSEL 0x0000
426 #define STRAP_SPEED100 0x0001
428 #define PHYSUP_SPEED1000 0x10
429 #define PHYSUP_SPEED100 0x08
430 #define PHYSUP_SPEED10 0x00
431 #define PHYSUP_LINKUP 0x04
432 #define PHYSUP_FDX 0x02
434 #define MII_BMCR 0x00 /* Basic mode control register (rw) */
435 #define MII_BMSR 0x01 /* Basic mode status register (ro) */
436 #define MII_K1STSR 0x0A /* 1K Status Register (ro) */
437 #define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */
440 #define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */
445 /**********************************************************************
448 * Synchronize with the MII - send a pattern of bits to the MII
449 * that will guarantee that it is ready to accept a command.
452 * s - sbmac structure
456 ********************************************************************* */
458 static void sbmac_mii_sync(struct sbmac_softc *s)
464 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
466 bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
468 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
470 for (cnt = 0; cnt < 32; cnt++) {
471 __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
472 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
476 /**********************************************************************
477 * SBMAC_MII_SENDDATA(s,data,bitcnt)
479 * Send some bits to the MII. The bits to be sent are right-
480 * justified in the 'data' parameter.
483 * s - sbmac structure
484 * data - data to send
485 * bitcnt - number of bits to send
486 ********************************************************************* */
488 static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt)
492 unsigned int curmask;
495 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
497 bits = M_MAC_MDIO_DIR_OUTPUT;
498 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
500 curmask = 1 << (bitcnt - 1);
502 for (i = 0; i < bitcnt; i++) {
504 bits |= M_MAC_MDIO_OUT;
505 else bits &= ~M_MAC_MDIO_OUT;
506 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
507 __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
508 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
515 /**********************************************************************
516 * SBMAC_MII_READ(s,phyaddr,regidx)
518 * Read a PHY register.
521 * s - sbmac structure
522 * phyaddr - PHY's address
 *  	   regidx - index of register to read
526 * value read, or 0 if an error occurred.
527 ********************************************************************* */
529 static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
537 * Synchronize ourselves so that the PHY knows the next
538 * thing coming down is a command
544 * Send the data to the PHY. The sequence is
545 * a "start" command (2 bits)
546 * a "read" command (2 bits)
547 * the PHY addr (5 bits)
548 * the register index (5 bits)
551 sbmac_mii_senddata(s,MII_COMMAND_START, 2);
552 sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
553 sbmac_mii_senddata(s,phyaddr, 5);
554 sbmac_mii_senddata(s,regidx, 5);
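	/*
	 * The read frame sent so far is: <sync> 01 (start) 10 (read)
	 * <phyaddr:5> <regidx:5>.  The bus is now turned around and the
	 * 16 data bits are clocked in below.
	 */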
556 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
559 * Switch the port around without a clock transition.
561 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
564 * Send out a clock pulse to signal we want the status
567 __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
568 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
571 * If an error occurred, the PHY will signal '1' back
573 error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;
576 * Issue an 'idle' clock pulse, but keep the direction
579 __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
580 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
584 for (idx = 0; idx < 16; idx++) {
588 if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
592 __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
593 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
596 /* Switch back to output */
597 __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
605 /**********************************************************************
606 * SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
608 * Write a value to a PHY register.
611 * s - sbmac structure
612 * phyaddr - PHY to use
613 * regidx - register within the PHY
614 * regval - data to write to register
618 ********************************************************************* */
620 static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
627 sbmac_mii_senddata(s,MII_COMMAND_START,2);
628 sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
629 sbmac_mii_senddata(s,phyaddr, 5);
630 sbmac_mii_senddata(s,regidx, 5);
631 sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
632 sbmac_mii_senddata(s,regval,16);
634 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
636 __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
641 /**********************************************************************
642 * SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
644 * Initialize a DMA channel context. Since there are potentially
645 * eight DMA channels per MAC, it's nice to do this in a standard
649 * d - sbmacdma_t structure (DMA channel context)
650 * s - sbmac_softc structure (pointer to a MAC)
651 * chan - channel number (0..1 right now)
652 * txrx - Identifies DMA_TX or DMA_RX for channel direction
653 * maxdescr - number of descriptors
657 ********************************************************************* */
659 static void sbdma_initctx(sbmacdma_t *d,
660 struct sbmac_softc *s,
666 * Save away interesting stuff in the structure
670 d->sbdma_channel = chan;
671 d->sbdma_txdir = txrx;
675 s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
678 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
679 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
680 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
681 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
682 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
683 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
684 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
685 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
686 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
687 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
688 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
689 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
690 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
691 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
692 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
693 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
694 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
695 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
696 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
697 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
698 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));
701 * initialize register pointers
705 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
707 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
709 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
711 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
713 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
716 * Allocate memory for the ring
719 d->sbdma_maxdescr = maxdescr;
721 d->sbdma_dscrtable = (sbdmadscr_t *)
722 kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
725 * The descriptor table must be aligned to at least 16 bytes or the
726 * MAC will corrupt it.
728 d->sbdma_dscrtable = (sbdmadscr_t *)
729 ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));
731 memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
733 d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
735 d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
741 d->sbdma_ctxtable = (struct sk_buff **)
742 kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
744 memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
746 #ifdef CONFIG_SBMAC_COALESCE
748 * Setup Rx/Tx DMA coalescing defaults
752 d->sbdma_int_pktcnt = int_pktcnt;
754 d->sbdma_int_pktcnt = 1;
758 d->sbdma_int_timeout = int_timeout;
760 d->sbdma_int_timeout = 0;
766 /**********************************************************************
767 * SBDMA_CHANNEL_START(d)
769 * Initialize the hardware registers for a DMA channel.
772 * d - DMA channel to init (context must be previously init'd
773 * rxtx - DMA_RX or DMA_TX depending on what type of channel
777 ********************************************************************* */
779 static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
782 * Turn on the DMA channel
785 #ifdef CONFIG_SBMAC_COALESCE
786 __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
787 0, d->sbdma_config1);
788 __raw_writeq(M_DMA_EOP_INT_EN |
789 V_DMA_RINGSZ(d->sbdma_maxdescr) |
790 V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
791 0, d->sbdma_config0);
793 __raw_writeq(0, d->sbdma_config1);
794 __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
795 0, d->sbdma_config0);
798 __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
801 * Initialize ring pointers
804 d->sbdma_addptr = d->sbdma_dscrtable;
805 d->sbdma_remptr = d->sbdma_dscrtable;
808 /**********************************************************************
809 * SBDMA_CHANNEL_STOP(d)
 *  Shut down the hardware registers for a DMA channel.
 *  	   d - DMA channel to stop (the context must have been previously initialized)
818 ********************************************************************* */
820 static void sbdma_channel_stop(sbmacdma_t *d)
823 * Turn off the DMA channel
826 __raw_writeq(0, d->sbdma_config1);
828 __raw_writeq(0, d->sbdma_dscrbase);
830 __raw_writeq(0, d->sbdma_config0);
836 d->sbdma_addptr = NULL;
837 d->sbdma_remptr = NULL;
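/*
 * Adjust a freshly allocated sk_buff so that its data area begins on a
 * 'power2'-byte boundary plus 'offset' bytes.  Used below to cache-align
 * receive buffers while preserving the 2-byte IP header alignment.
 */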
840 static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
843 unsigned long newaddr;
845 addr = (unsigned long) skb->data;
847 newaddr = (addr + power2 - 1) & ~(power2 - 1);
849 skb_reserve(skb,newaddr-addr+offset);
853 /**********************************************************************
854 * SBDMA_ADD_RCVBUFFER(d,sb)
856 * Add a buffer to the specified DMA channel. For receive channels,
857 * this queues a buffer for inbound packets.
860 * d - DMA channel descriptor
861 * sb - sk_buff to add, or NULL if we should allocate one
 *  	   0 if buffer added successfully
 *  	   negative errno if the ring is full or an sk_buff could not be allocated
866 ********************************************************************* */
869 static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
872 sbdmadscr_t *nextdsc;
873 struct sk_buff *sb_new = NULL;
874 int pktsize = ENET_PACKET_SIZE;
876 /* get pointer to our current place in the ring */
878 dsc = d->sbdma_addptr;
879 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
882 * figure out if the ring is full - if the next descriptor
883 * is the same as the one that we're going to remove from
884 * the ring, the ring is full
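	/*
	 * In other words, the ring deliberately keeps one slot empty:
	 * addptr == remptr means "empty", while the descriptor after
	 * addptr being remptr means "full".
	 */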
887 if (nextdsc == d->sbdma_remptr) {
892 * Allocate a sk_buff if we don't already have one.
893 * If we do have an sk_buff, reset it so that it's empty.
895 * Note: sk_buffs don't seem to be guaranteed to have any sort
896 * of alignment when they are allocated. Therefore, allocate enough
897 * extra space to make sure that:
899 * 1. the data does not start in the middle of a cache line.
900 * 2. The data does not end in the middle of a cache line
901 * 3. The buffer can be aligned such that the IP addresses are
	 * Remember, the SOC's MAC writes whole cache lines at a time,
905 * without reading the old contents first. So, if the sk_buff's
906 * data portion starts in the middle of a cache line, the SOC
907 * DMA will trash the beginning (and ending) portions.
911 sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
912 if (sb_new == NULL) {
913 printk(KERN_INFO "%s: sk_buff allocation failed\n",
914 d->sbdma_eth->sbm_dev->name);
918 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
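		/*
		 * Worked example (illustrative, assuming 32-byte cache
		 * lines): if the allocated data pointer ends in ...0x14,
		 * the call above reserves (0x20 - 0x14) + ETHER_ALIGN = 14
		 * bytes, so the payload starts at ...0x22: cache-line
		 * aligned plus the 2-byte offset that aligns the IP header.
		 */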
920 /* mark skbuff owned by our device */
921 sb_new->dev = d->sbdma_eth->sbm_dev;
926 * nothing special to reinit buffer, it's already aligned
927 * and sb->data already points to a good place.
932 * fill in the descriptor
935 #ifdef CONFIG_SBMAC_COALESCE
937 * Do not interrupt per DMA transfer.
939 dsc->dscr_a = virt_to_phys(sb_new->data) |
940 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
942 dsc->dscr_a = virt_to_phys(sb_new->data) |
943 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
944 M_DMA_DSCRA_INTERRUPT;
947 /* receiving: no options */
951 * fill in the context
954 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
957 * point at next packet
960 d->sbdma_addptr = nextdsc;
963 * Give the buffer to the DMA engine.
966 __raw_writeq(1, d->sbdma_dscrcnt);
968 return 0; /* we did it */
971 /**********************************************************************
972 * SBDMA_ADD_TXBUFFER(d,sb)
974 * Add a transmit buffer to the specified DMA channel, causing a
978 * d - DMA channel descriptor
979 * sb - sk_buff to add
982 * 0 transmit queued successfully
983 * otherwise error code
984 ********************************************************************* */
987 static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
990 sbdmadscr_t *nextdsc;
995 /* get pointer to our current place in the ring */
997 dsc = d->sbdma_addptr;
998 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
1001 * figure out if the ring is full - if the next descriptor
1002 * is the same as the one that we're going to remove from
1003 * the ring, the ring is full
1006 if (nextdsc == d->sbdma_remptr) {
1011 * Under Linux, it's not necessary to copy/coalesce buffers
1012 * like it is on NetBSD. We think they're all contiguous,
1013 * but that may not be true for GBE.
1019 * fill in the descriptor. Note that the number of cache
1020 * blocks in the descriptor is the number of blocks
1021 * *spanned*, so we need to add in the offset (if any)
1022 * while doing the calculation.
1025 phys = virt_to_phys(sb->data);
1026 ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
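	/*
	 * Example (assuming 32-byte cache lines): a 60-byte frame whose
	 * data begins 20 bytes into a cache line spans
	 * NUMCACHEBLKS(60 + 20) = 3 lines, whereas NUMCACHEBLKS(60)
	 * alone would claim only 2.
	 */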
1028 dsc->dscr_a = phys |
1029 V_DMA_DSCRA_A_SIZE(ncb) |
1030 #ifndef CONFIG_SBMAC_COALESCE
1031 M_DMA_DSCRA_INTERRUPT |
1035 /* transmitting: set outbound options and length */
1037 dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
1038 V_DMA_DSCRB_PKT_SIZE(length);
1041 * fill in the context
1044 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
1047 * point at next packet
1050 d->sbdma_addptr = nextdsc;
1053 * Give the buffer to the DMA engine.
1056 __raw_writeq(1, d->sbdma_dscrcnt);
1058 return 0; /* we did it */
1064 /**********************************************************************
1065 * SBDMA_EMPTYRING(d)
1067 * Free all allocated sk_buffs on the specified DMA channel;
1074 ********************************************************************* */
1076 static void sbdma_emptyring(sbmacdma_t *d)
1081 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
1082 sb = d->sbdma_ctxtable[idx];
1085 d->sbdma_ctxtable[idx] = NULL;
1091 /**********************************************************************
1094 * Fill the specified DMA channel (must be receive channel)
1102 ********************************************************************* */
1104 static void sbdma_fillring(sbmacdma_t *d)
1108 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
1109 if (sbdma_add_rcvbuffer(d,NULL) != 0)
1115 /**********************************************************************
1116 * SBDMA_RX_PROCESS(sc,d)
1118 * Process "completed" receive buffers on the specified DMA channel.
1119 * Note that this isn't really ideal for priority channels, since
1120 * it processes all of the packets on a given channel before
1124 * sc - softc structure
1125 * d - DMA channel context
1129 ********************************************************************* */
1131 static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1141 * figure out where we are (as an index) and where
1142 * the hardware is (also as an index)
1144 * This could be done faster if (for example) the
1145 * descriptor table was page-aligned and contiguous in
1146 * both virtual and physical memory -- you could then
1147 * just compare the low-order bits of the virtual address
1148 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
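	/*
	 * A sketch of that faster variant (hypothetical; it assumes the
	 * descriptor table is page-aligned and physically contiguous,
	 * which this driver does not guarantee):
	 *
	 *	curidx = ((unsigned long)d->sbdma_remptr & (PAGE_SIZE - 1))
	 *		 / sizeof(sbdmadscr_t);
	 *	hwidx  = ((unsigned long)(__raw_readq(d->sbdma_curdscr)
	 *		  & M_DMA_CURDSCR_ADDR) & (PAGE_SIZE - 1))
	 *		 / sizeof(sbdmadscr_t);
	 */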
1151 curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1152 hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1153 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1156 * If they're the same, that means we've processed all
1157 * of the descriptors up to (but not including) the one that
1158 * the hardware is working on right now.
1161 if (curidx == hwidx)
1165 * Otherwise, get the packet's sk_buff ptr back
1168 dsc = &(d->sbdma_dscrtable[curidx]);
1169 sb = d->sbdma_ctxtable[curidx];
1170 d->sbdma_ctxtable[curidx] = NULL;
1172 len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
1175 * Check packet status. If good, process it.
1176 * If not, silently drop it and put it back on the
1180 if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
1183 * Add a new buffer to replace the old one. If we fail
1184 * to allocate a buffer, we're going to drop this
1185 * packet and put it right back on the receive ring.
1188 if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
1189 sc->sbm_stats.rx_dropped++;
1190 sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
1193 * Set length into the packet
1198 * Buffer has been replaced on the
1199 * receive ring. Pass the buffer to
1202 sc->sbm_stats.rx_bytes += len;
1203 sc->sbm_stats.rx_packets++;
1204 sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
1205 /* Check hw IPv4/TCP checksum if supported */
1206 if (sc->rx_hw_checksum == ENABLE) {
1207 if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
1208 !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
1209 sb->ip_summed = CHECKSUM_UNNECESSARY;
1210 /* don't need to set sb->csum */
1212 sb->ip_summed = CHECKSUM_NONE;
1220 * Packet was mangled somehow. Just drop it and
1221 * put it back on the receive ring.
1223 sc->sbm_stats.rx_errors++;
1224 sbdma_add_rcvbuffer(d,sb);
1229 * .. and advance to the next buffer.
1232 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1239 /**********************************************************************
1240 * SBDMA_TX_PROCESS(sc,d)
1242 * Process "completed" transmit buffers on the specified DMA channel.
1243 * This is normally called within the interrupt service routine.
1244 * Note that this isn't really ideal for priority channels, since
1245 * it processes all of the packets on a given channel before
1249 * sc - softc structure
1250 * d - DMA channel context
1254 ********************************************************************* */
1256 static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1262 unsigned long flags;
1264 spin_lock_irqsave(&(sc->sbm_lock), flags);
1268 * figure out where we are (as an index) and where
1269 * the hardware is (also as an index)
1271 * This could be done faster if (for example) the
1272 * descriptor table was page-aligned and contiguous in
1273 * both virtual and physical memory -- you could then
1274 * just compare the low-order bits of the virtual address
1275 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1278 curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1279 hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1280 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1283 * If they're the same, that means we've processed all
1284 * of the descriptors up to (but not including) the one that
1285 * the hardware is working on right now.
1288 if (curidx == hwidx)
1292 * Otherwise, get the packet's sk_buff ptr back
1295 dsc = &(d->sbdma_dscrtable[curidx]);
1296 sb = d->sbdma_ctxtable[curidx];
1297 d->sbdma_ctxtable[curidx] = NULL;
1303 sc->sbm_stats.tx_bytes += sb->len;
1304 sc->sbm_stats.tx_packets++;
1307 * for transmits, we just free buffers.
1310 dev_kfree_skb_irq(sb);
1313 * .. and advance to the next buffer.
1316 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1321 * Decide if we should wake up the protocol or not.
1322 * Other drivers seem to do this when we reach a low
1323 * watermark on the transmit queue.
1326 netif_wake_queue(d->sbdma_eth->sbm_dev);
1328 spin_unlock_irqrestore(&(sc->sbm_lock), flags);
1334 /**********************************************************************
1337 * Initialize an Ethernet context structure - this is called
1338 * once per MAC on the 1250. Memory is allocated here, so don't
1339 * call it again from inside the ioctl routines that bring the
1343 * s - sbmac context structure
1347 ********************************************************************* */
1349 static int sbmac_initctx(struct sbmac_softc *s)
1353 * figure out the addresses of some ports
1356 s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
1357 s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
1358 s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
1359 s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG;
1360 s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG;
1361 s->sbm_isr = s->sbm_base + R_MAC_STATUS;
1362 s->sbm_imr = s->sbm_base + R_MAC_INT_MASK;
1363 s->sbm_mdio = s->sbm_base + R_MAC_MDIO;
1368 s->sbm_phy_oldbmsr = 0;
1369 s->sbm_phy_oldanlpar = 0;
1370 s->sbm_phy_oldk1stsr = 0;
1371 s->sbm_phy_oldlinkstat = 0;
1374 * Initialize the DMA channels. Right now, only one per MAC is used
1375 * Note: Only do this _once_, as it allocates memory from the kernel!
1378 sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
1379 sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
1382 * initial state is OFF
1385 s->sbm_state = sbmac_state_off;
1388 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1391 s->sbm_speed = sbmac_speed_10;
1392 s->sbm_duplex = sbmac_duplex_half;
1393 s->sbm_fc = sbmac_fc_disabled;
1399 static void sbdma_uninitctx(struct sbmacdma_s *d)
1401 if (d->sbdma_dscrtable) {
1402 kfree(d->sbdma_dscrtable);
1403 d->sbdma_dscrtable = NULL;
1406 if (d->sbdma_ctxtable) {
1407 kfree(d->sbdma_ctxtable);
1408 d->sbdma_ctxtable = NULL;
1413 static void sbmac_uninitctx(struct sbmac_softc *sc)
1415 sbdma_uninitctx(&(sc->sbm_txdma));
1416 sbdma_uninitctx(&(sc->sbm_rxdma));
1420 /**********************************************************************
1421 * SBMAC_CHANNEL_START(s)
1423 * Start packet processing on this MAC.
1426 * s - sbmac structure
1430 ********************************************************************* */
1432 static void sbmac_channel_start(struct sbmac_softc *s)
1435 volatile void __iomem *port;
1436 uint64_t cfg,fifo,framecfg;
1440 * Don't do this if running
1443 if (s->sbm_state == sbmac_state_on)
1447 * Bring the controller out of reset, but leave it off.
1450 __raw_writeq(0, s->sbm_macenable);
1453 * Ignore all received packets
1456 __raw_writeq(0, s->sbm_rxfilter);
1459 * Calculate values for various control registers.
1462 cfg = M_MAC_RETRY_EN |
1463 M_MAC_TX_HOLD_SOP_EN |
1464 V_MAC_TX_PAUSE_CNT_16K |
	 * Be sure that RD_THRSH + WR_THRSH <= 32 for pass1 parts,
	 * and make sure that RD_THRSH + WR_THRSH <= 128 for pass2 and above.
1473 * Use a larger RD_THRSH for gigabit
1475 if (periph_rev >= 2)
1480 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1481 ((s->sbm_speed == sbmac_speed_1000)
1482 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
1483 V_MAC_TX_RL_THRSH(4) |
1484 V_MAC_RX_PL_THRSH(4) |
1485 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1486 V_MAC_RX_PL_THRSH(4) |
1487 V_MAC_RX_RL_THRSH(8) |
1490 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1491 V_MAC_MAX_FRAMESZ_DEFAULT |
1492 V_MAC_BACKOFF_SEL(1);
1495 * Clear out the hash address map
1498 port = s->sbm_base + R_MAC_HASH_BASE;
1499 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1500 __raw_writeq(0, port);
1501 port += sizeof(uint64_t);
1505 * Clear out the exact-match table
1508 port = s->sbm_base + R_MAC_ADDR_BASE;
1509 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1510 __raw_writeq(0, port);
1511 port += sizeof(uint64_t);
1515 * Clear out the DMA Channel mapping table registers
1518 port = s->sbm_base + R_MAC_CHUP0_BASE;
1519 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1520 __raw_writeq(0, port);
1521 port += sizeof(uint64_t);
1525 port = s->sbm_base + R_MAC_CHLO0_BASE;
1526 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1527 __raw_writeq(0, port);
1528 port += sizeof(uint64_t);
1532 * Program the hardware address. It goes into the hardware-address
1533 * register as well as the first filter register.
1536 reg = sbmac_addr2reg(s->sbm_hwaddr);
1538 port = s->sbm_base + R_MAC_ADDR_BASE;
1539 __raw_writeq(reg, port);
1540 port = s->sbm_base + R_MAC_ETHERNET_ADDR;
1542 #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
1544 * Pass1 SOCs do not receive packets addressed to the
1545 * destination address in the R_MAC_ETHERNET_ADDR register.
1546 * Set the value to zero.
1548 __raw_writeq(0, port);
1550 __raw_writeq(reg, port);
1554 * Set the receive filter for no packets, and write values
1555 * to the various config registers
1558 __raw_writeq(0, s->sbm_rxfilter);
1559 __raw_writeq(0, s->sbm_imr);
1560 __raw_writeq(framecfg, s->sbm_framecfg);
1561 __raw_writeq(fifo, s->sbm_fifocfg);
1562 __raw_writeq(cfg, s->sbm_maccfg);
1565 * Initialize DMA channels (rings should be ok now)
1568 sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
1569 sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
1572 * Configure the speed, duplex, and flow control
1575 sbmac_set_speed(s,s->sbm_speed);
1576 sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
1579 * Fill the receive ring
1582 sbdma_fillring(&(s->sbm_rxdma));
1585 * Turn on the rest of the bits in the enable register
1588 __raw_writeq(M_MAC_RXDMA_EN0 |
1591 M_MAC_TX_ENABLE, s->sbm_macenable);
1596 #ifdef CONFIG_SBMAC_COALESCE
1598 * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
1600 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
1601 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
1604 * Accept any kind of interrupt on TX and RX DMA channel 0
1606 __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1607 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
1611 * Enable receiving unicasts and broadcasts
1614 __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
1617 * we're running now.
1620 s->sbm_state = sbmac_state_on;
1623 * Program multicast addresses
1629 * If channel was in promiscuous mode before, turn that on
1632 if (s->sbm_devflags & IFF_PROMISC) {
1633 sbmac_promiscuous_mode(s,1);
1639 /**********************************************************************
1640 * SBMAC_CHANNEL_STOP(s)
1642 * Stop packet processing on this MAC.
1645 * s - sbmac structure
1649 ********************************************************************* */
1651 static void sbmac_channel_stop(struct sbmac_softc *s)
1653 /* don't do this if already stopped */
1655 if (s->sbm_state == sbmac_state_off)
1658 /* don't accept any packets, disable all interrupts */
1660 __raw_writeq(0, s->sbm_rxfilter);
1661 __raw_writeq(0, s->sbm_imr);
1663 /* Turn off ticker */
1667 /* turn off receiver and transmitter */
1669 __raw_writeq(0, s->sbm_macenable);
1671 /* We're stopped now. */
1673 s->sbm_state = sbmac_state_off;
1676 * Stop DMA channels (rings should be ok now)
1679 sbdma_channel_stop(&(s->sbm_rxdma));
1680 sbdma_channel_stop(&(s->sbm_txdma));
1682 /* Empty the receive and transmit rings */
1684 sbdma_emptyring(&(s->sbm_rxdma));
1685 sbdma_emptyring(&(s->sbm_txdma));
1689 /**********************************************************************
1690 * SBMAC_SET_CHANNEL_STATE(state)
1692 * Set the channel's state ON or OFF
1699 ********************************************************************* */
1700 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
1701 sbmac_state_t state)
1703 sbmac_state_t oldstate = sc->sbm_state;
1706 * If same as previous state, return
1709 if (state == oldstate) {
1714 * If new state is ON, turn channel on
1717 if (state == sbmac_state_on) {
1718 sbmac_channel_start(sc);
1721 sbmac_channel_stop(sc);
1725 * Return previous state
1732 /**********************************************************************
1733 * SBMAC_PROMISCUOUS_MODE(sc,onoff)
1735 * Turn on or off promiscuous mode
1739 * onoff - 1 to turn on, 0 to turn off
1743 ********************************************************************* */
1745 static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
1749 if (sc->sbm_state != sbmac_state_on)
1753 reg = __raw_readq(sc->sbm_rxfilter);
1754 reg |= M_MAC_ALLPKT_EN;
1755 __raw_writeq(reg, sc->sbm_rxfilter);
1758 reg = __raw_readq(sc->sbm_rxfilter);
1759 reg &= ~M_MAC_ALLPKT_EN;
1760 __raw_writeq(reg, sc->sbm_rxfilter);
1764 /**********************************************************************
 *  SBMAC_SET_IPHDR_OFFSET(sc)
 *  Set the IP header offset to 15, assuming Ethernet encapsulation
1774 ********************************************************************* */
1776 static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
	/* Hard code the offset to 15 for now */
	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~M_MAC_IPHDR_OFFSET;		/* clear the old offset field */
	reg |= V_MAC_IPHDR_OFFSET(15);		/* then program the new offset */
	__raw_writeq(reg, sc->sbm_rxfilter);
1785 /* read system identification to determine revision */
1786 if (periph_rev >= 2) {
1787 sc->rx_hw_checksum = ENABLE;
1789 sc->rx_hw_checksum = DISABLE;
1794 /**********************************************************************
1795 * SBMAC_ADDR2REG(ptr)
1797 * Convert six bytes into the 64-bit register value that
1798 * we typically write into the SBMAC's address/mcast registers
1801 * ptr - pointer to 6 bytes
1805 ********************************************************************* */
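/*
 * Worked example (illustrative): address byte 0 ends up in register
 * bits 7:0, byte 1 in bits 15:8, ... byte 5 in bits 47:40, so the
 * address 40:00:00:00:01:00 becomes 0x0000000100000040.
 */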
1807 static uint64_t sbmac_addr2reg(unsigned char *ptr)
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
1829 /**********************************************************************
1830 * SBMAC_SET_SPEED(s,speed)
1832 * Configure LAN speed for the specified MAC.
1833 * Warning: must be called when MAC is off!
1836 * s - sbmac structure
1837 * speed - speed to set MAC to (see sbmac_speed_t enum)
1841 * 0 indicates invalid parameters
1842 ********************************************************************* */
1844 static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1850 * Save new current values
1853 s->sbm_speed = speed;
1855 if (s->sbm_state == sbmac_state_on)
1856 return 0; /* save for next restart */
1859 * Read current register values
1862 cfg = __raw_readq(s->sbm_maccfg);
1863 framecfg = __raw_readq(s->sbm_framecfg);
1866 * Mask out the stuff we want to change
1869 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1870 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1874 * Now add in the new bits
1878 case sbmac_speed_10:
1879 framecfg |= V_MAC_IFG_RX_10 |
1881 K_MAC_IFG_THRSH_10 |
1883 cfg |= V_MAC_SPEED_SEL_10MBPS;
1886 case sbmac_speed_100:
1887 framecfg |= V_MAC_IFG_RX_100 |
1889 V_MAC_IFG_THRSH_100 |
1890 V_MAC_SLOT_SIZE_100;
1891 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1894 case sbmac_speed_1000:
1895 framecfg |= V_MAC_IFG_RX_1000 |
1897 V_MAC_IFG_THRSH_1000 |
1898 V_MAC_SLOT_SIZE_1000;
1899 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1902 case sbmac_speed_auto: /* XXX not implemented */
1909 * Send the bits back to the hardware
1912 __raw_writeq(framecfg, s->sbm_framecfg);
1913 __raw_writeq(cfg, s->sbm_maccfg);
1918 /**********************************************************************
1919 * SBMAC_SET_DUPLEX(s,duplex,fc)
1921 * Set Ethernet duplex and flow control options for this MAC
1922 * Warning: must be called when MAC is off!
1925 * s - sbmac structure
1926 * duplex - duplex setting (see sbmac_duplex_t)
1927 * fc - flow control setting (see sbmac_fc_t)
1931 * 0 if an invalid parameter combination was specified
1932 ********************************************************************* */
1934 static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
1939 * Save new current values
1942 s->sbm_duplex = duplex;
1945 if (s->sbm_state == sbmac_state_on)
1946 return 0; /* save for next restart */
1949 * Read current register values
1952 cfg = __raw_readq(s->sbm_maccfg);
1955 * Mask off the stuff we're about to change
1958 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1962 case sbmac_duplex_half:
1964 case sbmac_fc_disabled:
1965 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1968 case sbmac_fc_collision:
1969 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1972 case sbmac_fc_carrier:
1973 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1976 case sbmac_fc_auto: /* XXX not implemented */
1978 case sbmac_fc_frame: /* not valid in half duplex */
1979 default: /* invalid selection */
1984 case sbmac_duplex_full:
1986 case sbmac_fc_disabled:
1987 cfg |= V_MAC_FC_CMD_DISABLED;
1990 case sbmac_fc_frame:
1991 cfg |= V_MAC_FC_CMD_ENABLED;
1994 case sbmac_fc_collision: /* not valid in full duplex */
1995 case sbmac_fc_carrier: /* not valid in full duplex */
1996 case sbmac_fc_auto: /* XXX not implemented */
2002 case sbmac_duplex_auto:
2003 /* XXX not implemented */
2008 * Send the bits back to the hardware
2011 __raw_writeq(cfg, s->sbm_maccfg);
2019 /**********************************************************************
2022 * Interrupt handler for MAC interrupts
2029 ********************************************************************* */
2030 static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
2032 struct net_device *dev = (struct net_device *) dev_instance;
2033 struct sbmac_softc *sc = netdev_priv(dev);
2040 * Read the ISR (this clears the bits in the real
2041 * register, except for counter addr)
2044 isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
2052 * Transmits on channel 0
2055 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
2056 sbdma_tx_process(sc,&(sc->sbm_txdma));
2060 * Receives on channel 0
2064 * It's important to test all the bits (or at least the
2065 * EOP_SEEN bit) when deciding to do the RX process
2066 * particularly when coalescing, to make sure we
2067 * take care of the following:
2069 * If you have some packets waiting (have been received
2070 * but no interrupt) and get a TX interrupt before
2071 * the RX timer or counter expires, reading the ISR
2072 * above will clear the timer and counter, and you
2073 * won't get another interrupt until a packet shows
2074 * up to start the timer again. Testing
2075 * EOP_SEEN here takes care of this case.
2076 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
2080 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2081 sbdma_rx_process(sc,&(sc->sbm_rxdma));
2084 return IRQ_RETVAL(handled);
2088 /**********************************************************************
2089 * SBMAC_START_TX(skb,dev)
2091 * Start output on the specified interface. Basically, we
2092 * queue as many buffers as we can until the ring fills up, or
2093 * we run off the end of the queue, whichever comes first.
2100 ********************************************************************* */
2101 static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2103 struct sbmac_softc *sc = netdev_priv(dev);
2106 spin_lock_irq (&sc->sbm_lock);
2109 * Put the buffer on the transmit ring. If we
2110 * don't have room, stop the queue.
2113 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
2114 /* XXX save skb that we could not send */
2115 netif_stop_queue(dev);
2116 spin_unlock_irq(&sc->sbm_lock);
2121 dev->trans_start = jiffies;
2123 spin_unlock_irq (&sc->sbm_lock);
2128 /**********************************************************************
2129 * SBMAC_SETMULTI(sc)
2131 * Reprogram the multicast table into the hardware, given
2132 * the list of multicasts associated with the interface
2140 ********************************************************************* */
2142 static void sbmac_setmulti(struct sbmac_softc *sc)
2145 volatile void __iomem *port;
2147 struct dev_mc_list *mclist;
2148 struct net_device *dev = sc->sbm_dev;
2151 * Clear out entire multicast table. We do this by nuking
2152 * the entire hash table and all the direct matches except
2153 * the first one, which is used for our station address
2156 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
2157 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
2158 __raw_writeq(0, port);
2161 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
2162 port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
2163 __raw_writeq(0, port);
2167 * Clear the filter to say we don't want any multicasts.
2170 reg = __raw_readq(sc->sbm_rxfilter);
2171 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2172 __raw_writeq(reg, sc->sbm_rxfilter);
2174 if (dev->flags & IFF_ALLMULTI) {
2176 * Enable ALL multicasts. Do this by inverting the
2177 * multicast enable bit.
2179 reg = __raw_readq(sc->sbm_rxfilter);
2180 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2181 __raw_writeq(reg, sc->sbm_rxfilter);
	 * Program new multicast entries.  For now, only use the
2188 * perfect filter. In the future we'll need to use the
2189 * hash filter if the perfect filter overflows
2192 /* XXX only using perfect filter for now, need to use hash
2193 * XXX if the table overflows */
2195 idx = 1; /* skip station address */
2196 mclist = dev->mc_list;
2197 while (mclist && (idx < MAC_ADDR_COUNT)) {
2198 reg = sbmac_addr2reg(mclist->dmi_addr);
2199 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2200 __raw_writeq(reg, port);
2202 mclist = mclist->next;
2206 * Enable the "accept multicast bits" if we programmed at least one
2211 reg = __raw_readq(sc->sbm_rxfilter);
2212 reg |= M_MAC_MCAST_EN;
2213 __raw_writeq(reg, sc->sbm_rxfilter);
2219 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
2220 /**********************************************************************
2221 * SBMAC_PARSE_XDIGIT(str)
2223 * Parse a hex digit, returning its value
2229 * hex value, or -1 if invalid
2230 ********************************************************************* */
2232 static int sbmac_parse_xdigit(char str)
	if ((str >= '0') && (str <= '9'))
		digit = str - '0';
	else if ((str >= 'a') && (str <= 'f'))
2239 digit = str - 'a' + 10;
2240 else if ((str >= 'A') && (str <= 'F'))
2241 digit = str - 'A' + 10;
2248 /**********************************************************************
2249 * SBMAC_PARSE_HWADDR(str,hwaddr)
2251 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2256 * hwaddr - pointer to hardware address
2260 ********************************************************************* */
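/*
 * Usage (illustrative):
 *	unsigned char ea[6];
 *	sbmac_parse_hwaddr("40:00:00:00:01:00", ea);
 *	=> ea[] = { 0x40, 0x00, 0x00, 0x00, 0x01, 0x00 }
 */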
2262 static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2267 while (*str && (idx > 0)) {
2268 digit1 = sbmac_parse_xdigit(*str);
2275 if ((*str == ':') || (*str == '-')) {
2280 digit2 = sbmac_parse_xdigit(*str);
2286 *hwaddr++ = (digit1 << 4) | digit2;
2298 static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2300 if (new_mtu > ENET_PACKET_SIZE)
2302 _dev->mtu = new_mtu;
2303 printk(KERN_INFO "changing the mtu to %d\n", new_mtu);
2307 /**********************************************************************
2310 * Attach routine - init hardware and hook ourselves into linux
2313 * dev - net_device structure
2317 ********************************************************************* */
2319 static int sbmac_init(struct net_device *dev, int idx)
2321 struct sbmac_softc *sc;
2322 unsigned char *eaddr;
2327 sc = netdev_priv(dev);
2329 /* Determine controller base address */
2331 sc->sbm_base = IOADDR(dev->base_addr);
2335 eaddr = sc->sbm_hwaddr;
	 * Read the ethernet address.  The firmware left this programmed
2339 * for us in the ethernet address register for each mac.
2342 ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
2343 __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
2344 for (i = 0; i < 6; i++) {
2345 eaddr[i] = (uint8_t) (ea_reg & 0xFF);
2349 for (i = 0; i < 6; i++) {
2350 dev->dev_addr[i] = eaddr[i];
2358 sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
2361 * Initialize context (get pointers to registers and stuff), then
2362 * allocate the memory for the descriptor tables.
2368 * Set up Linux device callins
2371 spin_lock_init(&(sc->sbm_lock));
2373 dev->open = sbmac_open;
2374 dev->hard_start_xmit = sbmac_start_tx;
2375 dev->stop = sbmac_close;
2376 dev->get_stats = sbmac_get_stats;
2377 dev->set_multicast_list = sbmac_set_rx_mode;
2378 dev->do_ioctl = sbmac_mii_ioctl;
2379 dev->tx_timeout = sbmac_tx_timeout;
2380 dev->watchdog_timeo = TX_TIMEOUT;
2382 dev->change_mtu = sb1250_change_mtu;
2384 /* This is needed for PASS2 for Rx H/W checksum feature */
2385 sbmac_set_iphdr_offset(sc);
2387 err = register_netdev(dev);
2391 if (sc->rx_hw_checksum == ENABLE) {
2392 printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
2397 * Display Ethernet address (this is called during the config
2398 * process so we need to finish off the config message that
2399 * was being displayed)
2402 "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
2403 dev->name, dev->base_addr,
2404 eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
2410 sbmac_uninitctx(sc);
2416 static int sbmac_open(struct net_device *dev)
2418 struct sbmac_softc *sc = netdev_priv(dev);
2421 printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
2425 * map/route interrupt (clear status first, in case something
2426 * weird is pending; we haven't initialized the mac registers
2430 __raw_readq(sc->sbm_isr);
2431 if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
2435 * Configure default speed
2438 sbmac_mii_poll(sc,noisy_mii);
2441 * Turn on the channel
2444 sbmac_set_channel_state(sc,sbmac_state_on);
2447 * XXX Station address is in dev->dev_addr
2450 if (dev->if_port == 0)
2453 netif_start_queue(dev);
2455 sbmac_set_rx_mode(dev);
2457 /* Set the timer to check for link beat. */
2458 init_timer(&sc->sbm_timer);
2459 sc->sbm_timer.expires = jiffies + 2 * HZ/100;
2460 sc->sbm_timer.data = (unsigned long)dev;
2461 sc->sbm_timer.function = &sbmac_timer;
2462 add_timer(&sc->sbm_timer);
static int sbmac_mii_poll(struct sbmac_softc *s, int noisy)
{
	int bmsr, bmcr, k1stsr, anlpar;
	char buffer[100];
	char *p = buffer;

	/* Read the mode status and mode control registers. */
	bmsr = sbmac_mii_read(s, s->sbm_phys[0], MII_BMSR);
	bmcr = sbmac_mii_read(s, s->sbm_phys[0], MII_BMCR);

	/* get the link partner status */
	anlpar = sbmac_mii_read(s, s->sbm_phys[0], MII_ANLPAR);

	/* if supported, read the 1000baseT register */
	if (bmsr & BMSR_1000BT_XSR) {
		k1stsr = sbmac_mii_read(s, s->sbm_phys[0], MII_K1STSR);
	} else {
		k1stsr = 0;
	}
	if ((bmsr & BMSR_LINKSTAT) == 0) {
		/*
		 * If link status is down, clear out old info so that when
		 * it comes back up it will force us to reconfigure speed
		 */
		s->sbm_phy_oldbmsr = 0;
		s->sbm_phy_oldanlpar = 0;
		s->sbm_phy_oldk1stsr = 0;
		return 0;
	}
	if ((s->sbm_phy_oldbmsr != bmsr) ||
	    (s->sbm_phy_oldanlpar != anlpar) ||
	    (s->sbm_phy_oldk1stsr != k1stsr)) {
		printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x k1stsr:%x/%x\n",
		       s->sbm_dev->name,
		       s->sbm_phy_oldbmsr, bmsr,
		       s->sbm_phy_oldanlpar, anlpar,
		       s->sbm_phy_oldk1stsr, k1stsr);
		s->sbm_phy_oldbmsr = bmsr;
		s->sbm_phy_oldanlpar = anlpar;
		s->sbm_phy_oldk1stsr = k1stsr;
	}
	p += sprintf(p, "Link speed: ");
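
	/*
	 * Pick the best mode both ends advertise, checking 1000BASE-T
	 * full duplex first and falling back through 100 and 10 Mb/s.
	 */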
	if (k1stsr & K1STSR_LP1KFD) {
		s->sbm_speed = sbmac_speed_1000;
		s->sbm_duplex = sbmac_duplex_full;
		s->sbm_fc = sbmac_fc_frame;
		p += sprintf(p, "1000BaseT FDX");
	}
	else if (k1stsr & K1STSR_LP1KHD) {
		s->sbm_speed = sbmac_speed_1000;
		s->sbm_duplex = sbmac_duplex_half;
		s->sbm_fc = sbmac_fc_disabled;
		p += sprintf(p, "1000BaseT HDX");
	}
	else if (anlpar & ANLPAR_TXFD) {
		s->sbm_speed = sbmac_speed_100;
		s->sbm_duplex = sbmac_duplex_full;
		s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? sbmac_fc_frame : sbmac_fc_disabled;
		p += sprintf(p, "100BaseT FDX");
	}
	else if (anlpar & ANLPAR_TXHD) {
		s->sbm_speed = sbmac_speed_100;
		s->sbm_duplex = sbmac_duplex_half;
		s->sbm_fc = sbmac_fc_disabled;
		p += sprintf(p, "100BaseT HDX");
	}
	else if (anlpar & ANLPAR_10FD) {
		s->sbm_speed = sbmac_speed_10;
		s->sbm_duplex = sbmac_duplex_full;
		s->sbm_fc = sbmac_fc_frame;
		p += sprintf(p, "10BaseT FDX");
	}
	else if (anlpar & ANLPAR_10HD) {
		s->sbm_speed = sbmac_speed_10;
		s->sbm_duplex = sbmac_duplex_half;
		s->sbm_fc = sbmac_fc_collision;
		p += sprintf(p, "10BaseT HDX");
	}
	else {
		p += sprintf(p, "Unknown");
	}
	if (noisy)
		printk(KERN_INFO "%s: %s\n", s->sbm_dev->name, buffer);

	return 1;
}
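
/**********************************************************************
 *  SBMAC_TIMER(data)
 *
 *  Periodic link-beat poll: mirror the PHY link status into the
 *  carrier state and restart the channel if the negotiated speed
 *  changed underneath us.
 ********************************************************************* */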
static void sbmac_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sbmac_softc *sc = netdev_priv(dev);
	int next_tick = HZ;
	int mii_status;

	spin_lock_irq(&sc->sbm_lock);

	/* make IFF_RUNNING follow the MII status bit "Link established" */
	mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
	if ((mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat)) {
		sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
		if (mii_status & BMSR_LINKSTAT) {
			netif_carrier_on(dev);
		}
		else {
			netif_carrier_off(dev);
		}
	}
	/*
	 * Poll the PHY to see what speed we should be running at
	 */
	if (sbmac_mii_poll(sc, noisy_mii)) {
		if (sc->sbm_state != sbmac_state_off) {
			/*
			 * something changed, restart the channel
			 */
			printk("%s: restarting channel because speed changed\n",
			       sc->sbm_dev->name);
			sbmac_channel_stop(sc);
			sbmac_channel_start(sc);
		}
	}
	spin_unlock_irq(&sc->sbm_lock);

	sc->sbm_timer.expires = jiffies + next_tick;
	add_timer(&sc->sbm_timer);
}
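
/**********************************************************************
 *  SBMAC_TX_TIMEOUT(dev)
 *
 *  Called by the network core when the watchdog decides the
 *  transmitter has been idle too long (TX_TIMEOUT jiffies).
 ********************************************************************* */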
static void sbmac_tx_timeout(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irq(&sc->sbm_lock);

	dev->trans_start = jiffies;
	sc->sbm_stats.tx_errors++;

	spin_unlock_irq(&sc->sbm_lock);

	printk(KERN_WARNING "%s: Transmit timed out\n", dev->name);
}
static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&sc->sbm_lock, flags);

	/* XXX update other stats here */

	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	return &sc->sbm_stats;
}
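
/**********************************************************************
 *  SBMAC_SET_RX_MODE(dev)
 *
 *  Switch promiscuous mode on or off when IFF_PROMISC toggles and
 *  reprogram the multicast filter on every call.
 ********************************************************************* */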
static void sbmac_set_rx_mode(struct net_device *dev)
{
	unsigned long flags;
	int msg_flag = 0;
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irqsave(&sc->sbm_lock, flags);
	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
		/* Promiscuous changed. */
		if (dev->flags & IFF_PROMISC) {
			/* Unconditionally log net taps. */
			msg_flag = 1;
			sbmac_promiscuous_mode(sc, 1);
		}
		else {
			msg_flag = 2;
			sbmac_promiscuous_mode(sc, 0);
		}
	}
	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	if (msg_flag) {
		printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
		       dev->name, (msg_flag == 1) ? "en" : "dis");
	}

	/* Program the multicasts.  Do this every time. */
	sbmac_setmulti(sc);
}
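
/**********************************************************************
 *  SBMAC_MII_IOCTL(dev,rq,cmd)
 *
 *  Private MII ioctls: report the PHY address, read a PHY register,
 *  or (with CAP_NET_ADMIN) write one.
 ********************************************************************* */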
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	u16 *data = (u16 *)&rq->ifr_ifru;
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&sc->sbm_lock, flags);

	switch (cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = sc->sbm_phys[0] & 0x1f;
		/* fall through */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f);
		break;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
			break;
		}
		printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n", dev->name,
		       data[0], data[1], data[2]);
		sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		break;
	default:
		retval = -EOPNOTSUPP;
	}

	spin_unlock_irqrestore(&sc->sbm_lock, flags);
	return retval;
}
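
/**********************************************************************
 *  SBMAC_CLOSE(dev)
 *
 *  ifdown path: turn the channel off, kill the link timer, stop the
 *  queue, release the interrupt and drain both DMA rings.
 ********************************************************************* */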
static int sbmac_close(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;
	int irq;

	sbmac_set_channel_state(sc, sbmac_state_off);
	del_timer_sync(&sc->sbm_timer);

	spin_lock_irqsave(&sc->sbm_lock, flags);
	netif_stop_queue(dev);
	printk(KERN_DEBUG "%s: Shutting down ethercard\n", dev->name);
	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	irq = dev->irq;
	synchronize_irq(irq);
	free_irq(irq, dev);

	sbdma_emptyring(&(sc->sbm_txdma));
	sbdma_emptyring(&(sc->sbm_rxdma));

	return 0;
}
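
/**********************************************************************
 *  SBMAC_SETUP_HWADDR(chan,addr)
 *
 *  Only compiled in when one of the hard-coded SBMAC_ETHx_HWADDR
 *  strings is defined: parse a "xx:xx:xx:xx:xx:xx" style address and
 *  program it into the given channel's Ethernet address register.
 ********************************************************************* */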
#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
static void
sbmac_setup_hwaddr(int chan, char *addr)
{
	uint8_t eaddr[6];
	uint64_t val;
	unsigned long port;

	port = A_MAC_CHANNEL_BASE(chan);
	sbmac_parse_hwaddr(addr, eaddr);
	val = sbmac_addr2reg(eaddr);
	__raw_writeq(val, IOADDR(port + R_MAC_ETHERNET_ADDR));
	val = __raw_readq(IOADDR(port + R_MAC_ETHERNET_ADDR));
}
#endif
static struct net_device *dev_sbmac[MAX_UNITS];
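
/**********************************************************************
 *  SBMAC_INIT_MODULE()
 *
 *  Module entry point: optionally pre-program MAC addresses, work out
 *  how many MAC channels this SOC has, and attach each channel whose
 *  address register the firmware has filled in.
 ********************************************************************* */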
static int __init sbmac_init_module(void)
{
	int idx, chip_max_units = 0;
	unsigned long port;
	struct net_device *dev;

	/*
	 * For bringup when not using the firmware, we can pre-fill
	 * the MAC addresses using the environment variables
	 * specified in this file (or maybe from the config file?)
	 */
#ifdef SBMAC_ETH0_HWADDR
	sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
#endif
#ifdef SBMAC_ETH1_HWADDR
	sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
#endif
#ifdef SBMAC_ETH2_HWADDR
	sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
#endif
	/* Walk through the Ethernet controllers and find
	   those who have their MAC addresses set. */
	switch (soc_type) {
	case K_SYS_SOC_TYPE_BCM1250:
	case K_SYS_SOC_TYPE_BCM1250_ALT:
		chip_max_units = 3;
		break;
	case K_SYS_SOC_TYPE_BCM1120:
	case K_SYS_SOC_TYPE_BCM1125:
	case K_SYS_SOC_TYPE_BCM1125H:
	case K_SYS_SOC_TYPE_BCM1250_ALT2:	/* Hybrid */
		chip_max_units = 2;
		break;
	default:
		chip_max_units = 0;
		break;
	}
	if (chip_max_units > MAX_UNITS)
		chip_max_units = MAX_UNITS;
	for (idx = 0; idx < chip_max_units; idx++) {

		/* This is the base address of the MAC. */
		port = A_MAC_CHANNEL_BASE(idx);

		/*
		 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
		 * value for us by the firmware if we're going to use this MAC.
		 * If we find a zero, skip this MAC.
		 */
		sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
		if (sbmac_orig_hwaddr[idx] == 0) {
			printk(KERN_DEBUG "sbmac: not configuring MAC at "
			       "%lx\n", port);
			continue;
		}

		/*
		 * Okay, cool.  Initialize this MAC.
		 */
		dev = alloc_etherdev(sizeof(struct sbmac_softc));
		if (!dev)
			return -ENOMEM;	/* return ENOMEM */

		printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
		dev->irq = K_INT_MAC_0 + idx;
		dev->base_addr = port;

		if (sbmac_init(dev, idx)) {
			port = A_MAC_CHANNEL_BASE(idx);
			__raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR));
			free_netdev(dev);
			continue;
		}
		dev_sbmac[idx] = dev;
	}

	return 0;
}
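
/**********************************************************************
 *  SBMAC_CLEANUP_MODULE()
 *
 *  Module exit: unregister and tear down every device the driver
 *  attached in sbmac_init_module().
 ********************************************************************* */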
static void __exit sbmac_cleanup_module(void)
{
	struct net_device *dev;
	int idx;

	for (idx = 0; idx < MAX_UNITS; idx++) {
		struct sbmac_softc *sc;
		dev = dev_sbmac[idx];
		if (!dev)
			continue;
		sc = netdev_priv(dev);
		unregister_netdev(dev);
		sbmac_uninitctx(sc);
		free_netdev(dev);
	}
}
module_init(sbmac_init_module);
module_exit(sbmac_cleanup_module);