/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.45"
#define DRV_MODULE_RELDATE      "Dec 13, 2005"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)                                              \
        ((TP)->tx_pending -                                             \
         (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
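/* Since TG3_TX_RING_SIZE is a power of two, the in-flight count and the
 * next-index computation above reduce to a single AND with the ring mask.
 * For example, with tx_prod == 3 and tx_cons == 510 the number of
 * descriptors in flight is (3 - 510) & 511 == 5, which TX_BUFFS_AVAIL
 * subtracts from tx_pending, and NEXT_TX(511) wraps back to 0.
 */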

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

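/* Register access helpers.  Depending on the chip and bus workarounds in
 * effect, tp->write32/tp->read32 point either at the direct MMIO accessors
 * further below or at these indirect helpers, which go through the PCI
 * configuration-space register window (TG3PCI_REG_BASE_ADDR selects the
 * target register, TG3PCI_REG_DATA carries the data) under indirect_lock.
 */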
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

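/* Write a register and, unless one of the hardware write bugs or the ICH
 * workaround forbids it, read it back so the posted PCI write is flushed
 * to the chip before we continue.
 */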
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
            !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32(tp, off);    /* flush */
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
#define tr32(reg)               tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
{
        /* If no workaround is needed, write to mem space directly */
        if (tp->write32 != tg3_write_indirect_reg32)
                tw32(NIC_SRAM_WIN_BASE + off, val);
        else
                tg3_write_mem(tp, off, val);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

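/* Mask the chip's PCI interrupt output via MISC_HOST_CTRL and write a
 * non-zero value to interrupt mailbox 0, which keeps further interrupts
 * off until tg3_enable_ints() rewrites the mailbox.
 */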
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_f(TG3PCI_CLOCK_CTRL,
                               clock_ctrl | CLOCK_CTRL_625_CORE);
                        udelay(40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl |
                     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
                udelay(40);
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl | (CLOCK_CTRL_ALTCLK));
                udelay(40);
        }
        tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
        udelay(40);
}

#define PHY_BUSY_LOOPS  5000

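/* MII/MDIO access: each PHY read or write builds a management frame in
 * MAC_MI_COM (PHY address, register number, command) and then polls
 * MI_COM_BUSY, up to PHY_BUSY_LOOPS iterations, until the serial
 * transaction completes.  Hardware auto-polling is temporarily turned off
 * around the access so it does not collide with ours.
 */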
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

774 {
775         static const u32 test_pat[4][6] = {
776         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
777         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
778         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
779         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
780         };
781         int chan;
782
783         for (chan = 0; chan < 4; chan++) {
784                 int i;
785
786                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
787                              (chan * 0x2000) | 0x0200);
788                 tg3_writephy(tp, 0x16, 0x0002);
789
790                 for (i = 0; i < 6; i++)
791                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
792                                      test_pat[chan][i]);
793
794                 tg3_writephy(tp, 0x16, 0x0202);
795                 if (tg3_wait_macro_done(tp)) {
796                         *resetp = 1;
797                         return -EBUSY;
798                 }
799
800                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
801                              (chan * 0x2000) | 0x0200);
802                 tg3_writephy(tp, 0x16, 0x0082);
803                 if (tg3_wait_macro_done(tp)) {
804                         *resetp = 1;
805                         return -EBUSY;
806                 }
807
808                 tg3_writephy(tp, 0x16, 0x0802);
809                 if (tg3_wait_macro_done(tp)) {
810                         *resetp = 1;
811                         return -EBUSY;
812                 }
813
814                 for (i = 0; i < 6; i += 2) {
815                         u32 low, high;
816
817                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
818                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
819                             tg3_wait_macro_done(tp)) {
820                                 *resetp = 1;
821                                 return -EBUSY;
822                         }
823                         low &= 0x7fff;
824                         high &= 0x000f;
825                         if (low != test_pat[chan][i] ||
826                             high != test_pat[chan][i+1]) {
827                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
828                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
829                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
830
831                                 return -EBUSY;
832                         }
833                 }
834         }
835
836         return 0;
837 }
838
839 static int tg3_phy_reset_chanpat(struct tg3 *tp)
840 {
841         int chan;
842
843         for (chan = 0; chan < 4; chan++) {
844                 int i;
845
846                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
847                              (chan * 0x2000) | 0x0200);
848                 tg3_writephy(tp, 0x16, 0x0002);
849                 for (i = 0; i < 6; i++)
850                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
851                 tg3_writephy(tp, 0x16, 0x0202);
852                 if (tg3_wait_macro_done(tp))
853                         return -EBUSY;
854         }
855
856         return 0;
857 }
858
859 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
860 {
861         u32 reg32, phy9_orig;
862         int retries, do_phy_reset, err;
863
864         retries = 10;
865         do_phy_reset = 1;
866         do {
867                 if (do_phy_reset) {
868                         err = tg3_bmcr_reset(tp);
869                         if (err)
870                                 return err;
871                         do_phy_reset = 0;
872                 }
873
874                 /* Disable transmitter and interrupt.  */
875                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
876                         continue;
877
878                 reg32 |= 0x3000;
879                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
880
881                 /* Set full-duplex, 1000 mbps.  */
882                 tg3_writephy(tp, MII_BMCR,
883                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
884
885                 /* Set to master mode.  */
886                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
887                         continue;
888
889                 tg3_writephy(tp, MII_TG3_CTRL,
890                              (MII_TG3_CTRL_AS_MASTER |
891                               MII_TG3_CTRL_ENABLE_AS_MASTER));
892
893                 /* Enable SM_DSP_CLOCK and 6dB.  */
894                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
895
896                 /* Block the PHY control access.  */
897                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
898                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
899
900                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
901                 if (!err)
902                         break;
903         } while (--retries);
904
905         err = tg3_phy_reset_chanpat(tp);
906         if (err)
907                 return err;
908
909         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
910         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
911
912         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
913         tg3_writephy(tp, 0x16, 0x0000);
914
915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
917                 /* Set Extended packet length bit for jumbo frames */
918                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
919         }
920         else {
921                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
922         }
923
924         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
925
926         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
927                 reg32 &= ~0x3000;
928                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
929         } else if (!err)
930                 err = -EBUSY;
931
932         return err;
933 }
934
935 /* This will reset the tigon3 PHY if there is no valid
936  * link unless the FORCE argument is non-zero.
937  */
938 static int tg3_phy_reset(struct tg3 *tp)
939 {
940         u32 phy_status;
941         int err;
942
943         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
944         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
945         if (err != 0)
946                 return -EBUSY;
947
948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
951                 err = tg3_phy_reset_5703_4_5(tp);
952                 if (err)
953                         return err;
954                 goto out;
955         }
956
957         err = tg3_bmcr_reset(tp);
958         if (err)
959                 return err;
960
961 out:
962         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
964                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
965                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
966                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
967                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
968                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
969         }
970         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
971                 tg3_writephy(tp, 0x1c, 0x8d68);
972                 tg3_writephy(tp, 0x1c, 0x8d68);
973         }
974         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
975                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
976                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
977                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
978                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
979                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
982                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
983         }
984         /* Set Extended packet length bit (bit 14) on all chips that */
985         /* support jumbo frames */
986         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
987                 /* Cannot do read-modify-write on 5401 */
988                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
989         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
990                 u32 phy_reg;
991
992                 /* Set bit 14 with read-modify-write to preserve other bits */
993                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
994                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
995                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
996         }
997
998         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
999          * jumbo frames transmission.
1000          */
1001         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1002                 u32 phy_reg;
1003
1004                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1005                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1006                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1007         }
1008
1009         tg3_phy_set_wirespeed(tp);
1010         return 0;
1011 }
1012
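/* Drive the GRC GPIO outputs that control auxiliary (Vaux) power.  Whether
 * aux power must stay up depends on the WOL/ASF configuration of this
 * device and, on dual-port 5704/5714 parts, of its peer as well.
 */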
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                if (!dev_peer)
                        BUG();
                tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                       grc_local_ctrl);
                                udelay(100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                       grc_local_ctrl);
                                udelay(100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        }
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

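/* Move the device into the requested PCI power state (0..3 select D0..D3hot
 * via PCI_PM_CTRL).  For anything other than full power this also drops the
 * copper PHY to 10/half autoneg, arms Wake-on-LAN if enabled, slows or gates
 * the core clocks, and finally lets tg3_frob_aux_power() sort out the aux
 * power GPIOs before the new power state is written.
 */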
static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is not a LOM */
                if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                        udelay(100);
                }

                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                int i;
                u32 val;

                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                             WOL_DRV_STATE_SHUTDOWN |
                                             WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                                         tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                /* Turn off the PHY */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
                                tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
                }
        }

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                        tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
                        tg3_nvram_unlock(tp);
                }
        }

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       tp->dev->name,
                       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
                       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

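/* Resolve 802.3x flow control from the local and remote autoneg
 * advertisements (PAUSE/ASYM_PAUSE bits, with 1000BASE-X bits first
 * translated to their 1000BASE-T equivalents) and program
 * MAC_RX_MODE/MAC_TX_MODE accordingly.
 */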
1362 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1363 {
1364         u32 new_tg3_flags = 0;
1365         u32 old_rx_mode = tp->rx_mode;
1366         u32 old_tx_mode = tp->tx_mode;
1367
1368         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1369
1370                 /* Convert 1000BaseX flow control bits to 1000BaseT
1371                  * bits before resolving flow control.
1372                  */
1373                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1374                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1375                                        ADVERTISE_PAUSE_ASYM);
1376                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1377
1378                         if (local_adv & ADVERTISE_1000XPAUSE)
1379                                 local_adv |= ADVERTISE_PAUSE_CAP;
1380                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1381                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1382                         if (remote_adv & LPA_1000XPAUSE)
1383                                 remote_adv |= LPA_PAUSE_CAP;
1384                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1385                                 remote_adv |= LPA_PAUSE_ASYM;
1386                 }
1387
1388                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1389                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1390                                 if (remote_adv & LPA_PAUSE_CAP)
1391                                         new_tg3_flags |=
1392                                                 (TG3_FLAG_RX_PAUSE |
1393                                                 TG3_FLAG_TX_PAUSE);
1394                                 else if (remote_adv & LPA_PAUSE_ASYM)
1395                                         new_tg3_flags |=
1396                                                 (TG3_FLAG_RX_PAUSE);
1397                         } else {
1398                                 if (remote_adv & LPA_PAUSE_CAP)
1399                                         new_tg3_flags |=
1400                                                 (TG3_FLAG_RX_PAUSE |
1401                                                 TG3_FLAG_TX_PAUSE);
1402                         }
1403                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1404                         if ((remote_adv & LPA_PAUSE_CAP) &&
1405                         (remote_adv & LPA_PAUSE_ASYM))
1406                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1407                 }
1408
1409                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1410                 tp->tg3_flags |= new_tg3_flags;
1411         } else {
1412                 new_tg3_flags = tp->tg3_flags;
1413         }
1414
1415         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1416                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1417         else
1418                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1419
1420         if (old_rx_mode != tp->rx_mode) {
1421                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1422         }
1423         
1424         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1425                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1426         else
1427                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1428
1429         if (old_tx_mode != tp->tx_mode) {
1430                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1431         }
1432 }
1433
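/* Decode the speed/duplex field of the Broadcom auxiliary status
 * register into SPEED_xxx / DUPLEX_xxx values; unrecognized encodings
 * map to SPEED_INVALID / DUPLEX_INVALID.
 */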
1434 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1435 {
1436         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1437         case MII_TG3_AUX_STAT_10HALF:
1438                 *speed = SPEED_10;
1439                 *duplex = DUPLEX_HALF;
1440                 break;
1441
1442         case MII_TG3_AUX_STAT_10FULL:
1443                 *speed = SPEED_10;
1444                 *duplex = DUPLEX_FULL;
1445                 break;
1446
1447         case MII_TG3_AUX_STAT_100HALF:
1448                 *speed = SPEED_100;
1449                 *duplex = DUPLEX_HALF;
1450                 break;
1451
1452         case MII_TG3_AUX_STAT_100FULL:
1453                 *speed = SPEED_100;
1454                 *duplex = DUPLEX_FULL;
1455                 break;
1456
1457         case MII_TG3_AUX_STAT_1000HALF:
1458                 *speed = SPEED_1000;
1459                 *duplex = DUPLEX_HALF;
1460                 break;
1461
1462         case MII_TG3_AUX_STAT_1000FULL:
1463                 *speed = SPEED_1000;
1464                 *duplex = DUPLEX_FULL;
1465                 break;
1466
1467         default:
1468                 *speed = SPEED_INVALID;
1469                 *duplex = DUPLEX_INVALID;
1470                 break;
1471         }
1472 }
1473
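/* Program the copper PHY advertisement and control registers.  Three
 * cases: entering low power (advertise 10Mb only, plus 100Mb when WoL
 * at 100Mb is allowed), autoneg with no speed forced (advertise every
 * mode the board supports), or a forced speed/duplex.  For a forced
 * link the PHY is briefly put in loopback until the old link drops
 * before BMCR is written; otherwise autonegotiation is (re)started.
 */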
1474 static void tg3_phy_copper_begin(struct tg3 *tp)
1475 {
1476         u32 new_adv;
1477         int i;
1478
1479         if (tp->link_config.phy_is_low_power) {
1480                 /* Entering low power mode.  Disable gigabit and
1481                  * 100baseT advertisements.
1482                  */
1483                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1484
1485                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1486                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1487                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1488                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1489
1490                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1491         } else if (tp->link_config.speed == SPEED_INVALID) {
1492                 tp->link_config.advertising =
1493                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1494                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1495                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1496                          ADVERTISED_Autoneg | ADVERTISED_MII);
1497
1498                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1499                         tp->link_config.advertising &=
1500                                 ~(ADVERTISED_1000baseT_Half |
1501                                   ADVERTISED_1000baseT_Full);
1502
1503                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1504                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1505                         new_adv |= ADVERTISE_10HALF;
1506                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1507                         new_adv |= ADVERTISE_10FULL;
1508                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1509                         new_adv |= ADVERTISE_100HALF;
1510                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1511                         new_adv |= ADVERTISE_100FULL;
1512                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1513
1514                 if (tp->link_config.advertising &
1515                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1516                         new_adv = 0;
1517                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1518                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1519                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1520                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1521                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1522                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1523                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1524                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1525                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1526                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1527                 } else {
1528                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1529                 }
1530         } else {
1531                 /* Asking for a specific link mode. */
1532                 if (tp->link_config.speed == SPEED_1000) {
1533                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1534                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1535
1536                         if (tp->link_config.duplex == DUPLEX_FULL)
1537                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1538                         else
1539                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1540                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1541                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1542                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1543                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1544                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1545                 } else {
1546                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1547
1548                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1549                         if (tp->link_config.speed == SPEED_100) {
1550                                 if (tp->link_config.duplex == DUPLEX_FULL)
1551                                         new_adv |= ADVERTISE_100FULL;
1552                                 else
1553                                         new_adv |= ADVERTISE_100HALF;
1554                         } else {
1555                                 if (tp->link_config.duplex == DUPLEX_FULL)
1556                                         new_adv |= ADVERTISE_10FULL;
1557                                 else
1558                                         new_adv |= ADVERTISE_10HALF;
1559                         }
1560                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1561                 }
1562         }
1563
1564         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1565             tp->link_config.speed != SPEED_INVALID) {
1566                 u32 bmcr, orig_bmcr;
1567
1568                 tp->link_config.active_speed = tp->link_config.speed;
1569                 tp->link_config.active_duplex = tp->link_config.duplex;
1570
1571                 bmcr = 0;
1572                 switch (tp->link_config.speed) {
1573                 default:
1574                 case SPEED_10:
1575                         break;
1576
1577                 case SPEED_100:
1578                         bmcr |= BMCR_SPEED100;
1579                         break;
1580
1581                 case SPEED_1000:
1582                         bmcr |= TG3_BMCR_SPEED1000;
1583                         break;
1584                 }
1585
1586                 if (tp->link_config.duplex == DUPLEX_FULL)
1587                         bmcr |= BMCR_FULLDPLX;
1588
1589                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1590                     (bmcr != orig_bmcr)) {
1591                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1592                         for (i = 0; i < 1500; i++) {
1593                                 u32 tmp;
1594
1595                                 udelay(10);
1596                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1597                                     tg3_readphy(tp, MII_BMSR, &tmp))
1598                                         continue;
1599                                 if (!(tmp & BMSR_LSTATUS)) {
1600                                         udelay(40);
1601                                         break;
1602                                 }
1603                         }
1604                         tg3_writephy(tp, MII_BMCR, bmcr);
1605                         udelay(40);
1606                 }
1607         } else {
1608                 tg3_writephy(tp, MII_BMCR,
1609                              BMCR_ANENABLE | BMCR_ANRESTART);
1610         }
1611 }
1612
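/* Load DSP fixups for the BCM5401 PHY through the DSP address/data
 * register pair (tap power management off, extended packet length bit
 * set).  The remaining constants appear to be undocumented
 * vendor-supplied magic values.
 */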
1613 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1614 {
1615         int err;
1616
1617         /* Turn off tap power management. */
1618         /* Set Extended packet length bit */
1619         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1620
1621         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1622         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1623
1624         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1625         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1626
1627         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1628         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1629
1630         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1631         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1632
1633         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1634         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1635
1636         udelay(40);
1637
1638         return err;
1639 }
1640
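/* Return 1 only if the PHY is currently advertising every 10/100 mode
 * (and both gigabit modes unless the board is 10/100-only).  Used to
 * detect an advertisement that was trimmed down for low power mode,
 * in which case autonegotiation must be restarted.
 */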
1641 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1642 {
1643         u32 adv_reg, all_mask;
1644
1645         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1646                 return 0;
1647
1648         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1649                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1650         if ((adv_reg & all_mask) != all_mask)
1651                 return 0;
1652         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1653                 u32 tg3_ctrl;
1654
1655                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1656                         return 0;
1657
1658                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1659                             MII_TG3_CTRL_ADV_1000_FULL);
1660                 if ((tg3_ctrl & all_mask) != all_mask)
1661                         return 0;
1662         }
1663         return 1;
1664 }
1665
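/* Bring up, or re-validate, the link on a copper PHY: clear stale MAC
 * status, apply per-chip PHY workarounds, poll BMSR for link, derive
 * speed/duplex from the aux status register, resolve flow control for
 * full-duplex autonegotiated links, then program MAC_MODE and report
 * any carrier change.
 */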
1666 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1667 {
1668         int current_link_up;
1669         u32 bmsr, dummy;
1670         u16 current_speed;
1671         u8 current_duplex;
1672         int i, err;
1673
1674         tw32(MAC_EVENT, 0);
1675
1676         tw32_f(MAC_STATUS,
1677              (MAC_STATUS_SYNC_CHANGED |
1678               MAC_STATUS_CFG_CHANGED |
1679               MAC_STATUS_MI_COMPLETION |
1680               MAC_STATUS_LNKSTATE_CHANGED));
1681         udelay(40);
1682
1683         tp->mi_mode = MAC_MI_MODE_BASE;
1684         tw32_f(MAC_MI_MODE, tp->mi_mode);
1685         udelay(80);
1686
1687         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1688
1689         /* Some third-party PHYs need to be reset on link going
1690          * down.
1691          */
1692         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1693              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1694              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1695             netif_carrier_ok(tp->dev)) {
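                /* BMSR latches link-down events: read it twice so the
                 * second read reflects the current link state.
                 */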
1696                 tg3_readphy(tp, MII_BMSR, &bmsr);
1697                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1698                     !(bmsr & BMSR_LSTATUS))
1699                         force_reset = 1;
1700         }
1701         if (force_reset)
1702                 tg3_phy_reset(tp);
1703
1704         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1705                 tg3_readphy(tp, MII_BMSR, &bmsr);
1706                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1707                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1708                         bmsr = 0;
1709
1710                 if (!(bmsr & BMSR_LSTATUS)) {
1711                         err = tg3_init_5401phy_dsp(tp);
1712                         if (err)
1713                                 return err;
1714
1715                         tg3_readphy(tp, MII_BMSR, &bmsr);
1716                         for (i = 0; i < 1000; i++) {
1717                                 udelay(10);
1718                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1719                                     (bmsr & BMSR_LSTATUS)) {
1720                                         udelay(40);
1721                                         break;
1722                                 }
1723                         }
1724
1725                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1726                             !(bmsr & BMSR_LSTATUS) &&
1727                             tp->link_config.active_speed == SPEED_1000) {
1728                                 err = tg3_phy_reset(tp);
1729                                 if (!err)
1730                                         err = tg3_init_5401phy_dsp(tp);
1731                                 if (err)
1732                                         return err;
1733                         }
1734                 }
1735         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1736                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1737                 /* 5701 {A0,B0} CRC bug workaround */
1738                 tg3_writephy(tp, 0x15, 0x0a75);
1739                 tg3_writephy(tp, 0x1c, 0x8c68);
1740                 tg3_writephy(tp, 0x1c, 0x8d68);
1741                 tg3_writephy(tp, 0x1c, 0x8c68);
1742         }
1743
1744         /* Clear pending interrupts... */
1745         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1746         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1747
1748         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1749                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1750         else
1751                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1752
1753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1754             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1755                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1756                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1757                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1758                 else
1759                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1760         }
1761
1762         current_link_up = 0;
1763         current_speed = SPEED_INVALID;
1764         current_duplex = DUPLEX_INVALID;
1765
1766         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1767                 u32 val;
1768
1769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1770                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1771                 if (!(val & (1 << 10))) {
1772                         val |= (1 << 10);
1773                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1774                         goto relink;
1775                 }
1776         }
1777
1778         bmsr = 0;
1779         for (i = 0; i < 100; i++) {
1780                 tg3_readphy(tp, MII_BMSR, &bmsr);
1781                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1782                     (bmsr & BMSR_LSTATUS))
1783                         break;
1784                 udelay(40);
1785         }
1786
1787         if (bmsr & BMSR_LSTATUS) {
1788                 u32 aux_stat, bmcr;
1789
1790                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1791                 for (i = 0; i < 2000; i++) {
1792                         udelay(10);
1793                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1794                             aux_stat)
1795                                 break;
1796                 }
1797
1798                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1799                                              &current_speed,
1800                                              &current_duplex);
1801
1802                 bmcr = 0;
1803                 for (i = 0; i < 200; i++) {
1804                         tg3_readphy(tp, MII_BMCR, &bmcr);
1805                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1806                                 continue;
1807                         if (bmcr && bmcr != 0x7fff)
1808                                 break;
1809                         udelay(10);
1810                 }
1811
1812                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1813                         if (bmcr & BMCR_ANENABLE) {
1814                                 current_link_up = 1;
1815
1816                                 /* Force autoneg restart if we are exiting
1817                                  * low power mode.
1818                                  */
1819                                 if (!tg3_copper_is_advertising_all(tp))
1820                                         current_link_up = 0;
1821                         } else {
1822                                 current_link_up = 0;
1823                         }
1824                 } else {
1825                         if (!(bmcr & BMCR_ANENABLE) &&
1826                             tp->link_config.speed == current_speed &&
1827                             tp->link_config.duplex == current_duplex) {
1828                                 current_link_up = 1;
1829                         } else {
1830                                 current_link_up = 0;
1831                         }
1832                 }
1833
1834                 tp->link_config.active_speed = current_speed;
1835                 tp->link_config.active_duplex = current_duplex;
1836         }
1837
1838         if (current_link_up == 1 &&
1839             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1840             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1841                 u32 local_adv, remote_adv;
1842
1843                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1844                         local_adv = 0;
1845                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1846
1847                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1848                         remote_adv = 0;
1849
1850                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1851
1852                 /* If we are not advertising full pause capability,
1853                  * something is wrong.  Bring the link down and reconfigure.
1854                  */
1855                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1856                         current_link_up = 0;
1857                 } else {
1858                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1859                 }
1860         }
1861 relink:
1862         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1863                 u32 tmp;
1864
1865                 tg3_phy_copper_begin(tp);
1866
1867                 tg3_readphy(tp, MII_BMSR, &tmp);
1868                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1869                     (tmp & BMSR_LSTATUS))
1870                         current_link_up = 1;
1871         }
1872
1873         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1874         if (current_link_up == 1) {
1875                 if (tp->link_config.active_speed == SPEED_100 ||
1876                     tp->link_config.active_speed == SPEED_10)
1877                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1878                 else
1879                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1880         } else
1881                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1882
1883         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1884         if (tp->link_config.active_duplex == DUPLEX_HALF)
1885                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1886
1887         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1888         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1889                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1890                     (current_link_up == 1 &&
1891                      tp->link_config.active_speed == SPEED_10))
1892                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1893         } else {
1894                 if (current_link_up == 1)
1895                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1896         }
1897
1898         /* ??? Without this setting Netgear GA302T PHY does not
1899          * ??? send/receive packets...
1900          */
1901         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1902             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1903                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1904                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1905                 udelay(80);
1906         }
1907
1908         tw32_f(MAC_MODE, tp->mac_mode);
1909         udelay(40);
1910
1911         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1912                 /* Polled via timer. */
1913                 tw32_f(MAC_EVENT, 0);
1914         } else {
1915                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1916         }
1917         udelay(40);
1918
1919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1920             current_link_up == 1 &&
1921             tp->link_config.active_speed == SPEED_1000 &&
1922             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1923              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1924                 udelay(120);
1925                 tw32_f(MAC_STATUS,
1926                      (MAC_STATUS_SYNC_CHANGED |
1927                       MAC_STATUS_CFG_CHANGED));
1928                 udelay(40);
1929                 tg3_write_mem(tp,
1930                               NIC_SRAM_FIRMWARE_MBOX,
1931                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1932         }
1933
1934         if (current_link_up != netif_carrier_ok(tp->dev)) {
1935                 if (current_link_up)
1936                         netif_carrier_on(tp->dev);
1937                 else
1938                         netif_carrier_off(tp->dev);
1939                 tg3_link_report(tp);
1940         }
1941
1942         return 0;
1943 }
1944
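/* Software 1000BASE-X (IEEE 802.3 clause 37) auto-negotiation, used on
 * fiber boards whose MAC cannot negotiate in hardware.  The states
 * below follow the standard's arbitration state diagram and the MR_*
 * flags mirror the management variables it defines.
 */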
1945 struct tg3_fiber_aneginfo {
1946         int state;
1947 #define ANEG_STATE_UNKNOWN              0
1948 #define ANEG_STATE_AN_ENABLE            1
1949 #define ANEG_STATE_RESTART_INIT         2
1950 #define ANEG_STATE_RESTART              3
1951 #define ANEG_STATE_DISABLE_LINK_OK      4
1952 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1953 #define ANEG_STATE_ABILITY_DETECT       6
1954 #define ANEG_STATE_ACK_DETECT_INIT      7
1955 #define ANEG_STATE_ACK_DETECT           8
1956 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1957 #define ANEG_STATE_COMPLETE_ACK         10
1958 #define ANEG_STATE_IDLE_DETECT_INIT     11
1959 #define ANEG_STATE_IDLE_DETECT          12
1960 #define ANEG_STATE_LINK_OK              13
1961 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1962 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1963
1964         u32 flags;
1965 #define MR_AN_ENABLE            0x00000001
1966 #define MR_RESTART_AN           0x00000002
1967 #define MR_AN_COMPLETE          0x00000004
1968 #define MR_PAGE_RX              0x00000008
1969 #define MR_NP_LOADED            0x00000010
1970 #define MR_TOGGLE_TX            0x00000020
1971 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1972 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1973 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1974 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1975 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1976 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1977 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1978 #define MR_TOGGLE_RX            0x00002000
1979 #define MR_NP_RX                0x00004000
1980
1981 #define MR_LINK_OK              0x80000000
1982
1983         unsigned long link_time, cur_time;
1984
1985         u32 ability_match_cfg;
1986         int ability_match_count;
1987
1988         char ability_match, idle_match, ack_match;
1989
1990         u32 txconfig, rxconfig;
1991 #define ANEG_CFG_NP             0x00000080
1992 #define ANEG_CFG_ACK            0x00000040
1993 #define ANEG_CFG_RF2            0x00000020
1994 #define ANEG_CFG_RF1            0x00000010
1995 #define ANEG_CFG_PS2            0x00000001
1996 #define ANEG_CFG_PS1            0x00008000
1997 #define ANEG_CFG_HD             0x00004000
1998 #define ANEG_CFG_FD             0x00002000
1999 #define ANEG_CFG_INVAL          0x00001f06
2000
2001 };
2002 #define ANEG_OK         0
2003 #define ANEG_DONE       1
2004 #define ANEG_TIMER_ENAB 2
2005 #define ANEG_FAILED     -1
2006
2007 #define ANEG_STATE_SETTLE_TIME  10000
2008
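/* Advance the fiber autoneg state machine by one step.  The caller
 * invokes this roughly once per microsecond; each call samples the
 * received config word from MAC_RX_AUTO_NEG, updates the ability/ack
 * match tracking, and returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or
 * ANEG_FAILED.
 */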
2009 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2010                                    struct tg3_fiber_aneginfo *ap)
2011 {
2012         unsigned long delta;
2013         u32 rx_cfg_reg;
2014         int ret;
2015
2016         if (ap->state == ANEG_STATE_UNKNOWN) {
2017                 ap->rxconfig = 0;
2018                 ap->link_time = 0;
2019                 ap->cur_time = 0;
2020                 ap->ability_match_cfg = 0;
2021                 ap->ability_match_count = 0;
2022                 ap->ability_match = 0;
2023                 ap->idle_match = 0;
2024                 ap->ack_match = 0;
2025         }
2026         ap->cur_time++;
2027
2028         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2029                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2030
2031                 if (rx_cfg_reg != ap->ability_match_cfg) {
2032                         ap->ability_match_cfg = rx_cfg_reg;
2033                         ap->ability_match = 0;
2034                         ap->ability_match_count = 0;
2035                 } else {
2036                         if (++ap->ability_match_count > 1) {
2037                                 ap->ability_match = 1;
2038                                 ap->ability_match_cfg = rx_cfg_reg;
2039                         }
2040                 }
2041                 if (rx_cfg_reg & ANEG_CFG_ACK)
2042                         ap->ack_match = 1;
2043                 else
2044                         ap->ack_match = 0;
2045
2046                 ap->idle_match = 0;
2047         } else {
2048                 ap->idle_match = 1;
2049                 ap->ability_match_cfg = 0;
2050                 ap->ability_match_count = 0;
2051                 ap->ability_match = 0;
2052                 ap->ack_match = 0;
2053
2054                 rx_cfg_reg = 0;
2055         }
2056
2057         ap->rxconfig = rx_cfg_reg;
2058         ret = ANEG_OK;
2059
2060         switch(ap->state) {
2061         case ANEG_STATE_UNKNOWN:
2062                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2063                         ap->state = ANEG_STATE_AN_ENABLE;
2064
2065                 /* fallthru */
2066         case ANEG_STATE_AN_ENABLE:
2067                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2068                 if (ap->flags & MR_AN_ENABLE) {
2069                         ap->link_time = 0;
2070                         ap->cur_time = 0;
2071                         ap->ability_match_cfg = 0;
2072                         ap->ability_match_count = 0;
2073                         ap->ability_match = 0;
2074                         ap->idle_match = 0;
2075                         ap->ack_match = 0;
2076
2077                         ap->state = ANEG_STATE_RESTART_INIT;
2078                 } else {
2079                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2080                 }
2081                 break;
2082
2083         case ANEG_STATE_RESTART_INIT:
2084                 ap->link_time = ap->cur_time;
2085                 ap->flags &= ~(MR_NP_LOADED);
2086                 ap->txconfig = 0;
2087                 tw32(MAC_TX_AUTO_NEG, 0);
2088                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2089                 tw32_f(MAC_MODE, tp->mac_mode);
2090                 udelay(40);
2091
2092                 ret = ANEG_TIMER_ENAB;
2093                 ap->state = ANEG_STATE_RESTART;
2094
2095                 /* fallthru */
2096         case ANEG_STATE_RESTART:
2097                 delta = ap->cur_time - ap->link_time;
2098                 if (delta > ANEG_STATE_SETTLE_TIME) {
2099                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2100                 } else {
2101                         ret = ANEG_TIMER_ENAB;
2102                 }
2103                 break;
2104
2105         case ANEG_STATE_DISABLE_LINK_OK:
2106                 ret = ANEG_DONE;
2107                 break;
2108
2109         case ANEG_STATE_ABILITY_DETECT_INIT:
2110                 ap->flags &= ~(MR_TOGGLE_TX);
2111                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2112                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2113                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2114                 tw32_f(MAC_MODE, tp->mac_mode);
2115                 udelay(40);
2116
2117                 ap->state = ANEG_STATE_ABILITY_DETECT;
2118                 break;
2119
2120         case ANEG_STATE_ABILITY_DETECT:
2121                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2122                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2123                 }
2124                 break;
2125
2126         case ANEG_STATE_ACK_DETECT_INIT:
2127                 ap->txconfig |= ANEG_CFG_ACK;
2128                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2129                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2130                 tw32_f(MAC_MODE, tp->mac_mode);
2131                 udelay(40);
2132
2133                 ap->state = ANEG_STATE_ACK_DETECT;
2134
2135                 /* fallthru */
2136         case ANEG_STATE_ACK_DETECT:
2137                 if (ap->ack_match != 0) {
2138                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2139                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2140                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2141                         } else {
2142                                 ap->state = ANEG_STATE_AN_ENABLE;
2143                         }
2144                 } else if (ap->ability_match != 0 &&
2145                            ap->rxconfig == 0) {
2146                         ap->state = ANEG_STATE_AN_ENABLE;
2147                 }
2148                 break;
2149
2150         case ANEG_STATE_COMPLETE_ACK_INIT:
2151                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2152                         ret = ANEG_FAILED;
2153                         break;
2154                 }
2155                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2156                                MR_LP_ADV_HALF_DUPLEX |
2157                                MR_LP_ADV_SYM_PAUSE |
2158                                MR_LP_ADV_ASYM_PAUSE |
2159                                MR_LP_ADV_REMOTE_FAULT1 |
2160                                MR_LP_ADV_REMOTE_FAULT2 |
2161                                MR_LP_ADV_NEXT_PAGE |
2162                                MR_TOGGLE_RX |
2163                                MR_NP_RX);
2164                 if (ap->rxconfig & ANEG_CFG_FD)
2165                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2166                 if (ap->rxconfig & ANEG_CFG_HD)
2167                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2168                 if (ap->rxconfig & ANEG_CFG_PS1)
2169                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2170                 if (ap->rxconfig & ANEG_CFG_PS2)
2171                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2172                 if (ap->rxconfig & ANEG_CFG_RF1)
2173                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2174                 if (ap->rxconfig & ANEG_CFG_RF2)
2175                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2176                 if (ap->rxconfig & ANEG_CFG_NP)
2177                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2178
2179                 ap->link_time = ap->cur_time;
2180
2181                 ap->flags ^= (MR_TOGGLE_TX);
2182                 if (ap->rxconfig & 0x0008)
2183                         ap->flags |= MR_TOGGLE_RX;
2184                 if (ap->rxconfig & ANEG_CFG_NP)
2185                         ap->flags |= MR_NP_RX;
2186                 ap->flags |= MR_PAGE_RX;
2187
2188                 ap->state = ANEG_STATE_COMPLETE_ACK;
2189                 ret = ANEG_TIMER_ENAB;
2190                 break;
2191
2192         case ANEG_STATE_COMPLETE_ACK:
2193                 if (ap->ability_match != 0 &&
2194                     ap->rxconfig == 0) {
2195                         ap->state = ANEG_STATE_AN_ENABLE;
2196                         break;
2197                 }
2198                 delta = ap->cur_time - ap->link_time;
2199                 if (delta > ANEG_STATE_SETTLE_TIME) {
2200                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2201                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2202                         } else {
2203                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2204                                     !(ap->flags & MR_NP_RX)) {
2205                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2206                                 } else {
2207                                         ret = ANEG_FAILED;
2208                                 }
2209                         }
2210                 }
2211                 break;
2212
2213         case ANEG_STATE_IDLE_DETECT_INIT:
2214                 ap->link_time = ap->cur_time;
2215                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2216                 tw32_f(MAC_MODE, tp->mac_mode);
2217                 udelay(40);
2218
2219                 ap->state = ANEG_STATE_IDLE_DETECT;
2220                 ret = ANEG_TIMER_ENAB;
2221                 break;
2222
2223         case ANEG_STATE_IDLE_DETECT:
2224                 if (ap->ability_match != 0 &&
2225                     ap->rxconfig == 0) {
2226                         ap->state = ANEG_STATE_AN_ENABLE;
2227                         break;
2228                 }
2229                 delta = ap->cur_time - ap->link_time;
2230                 if (delta > ANEG_STATE_SETTLE_TIME) {
2231                         /* XXX another gem from the Broadcom driver :( */
2232                         ap->state = ANEG_STATE_LINK_OK;
2233                 }
2234                 break;
2235
2236         case ANEG_STATE_LINK_OK:
2237                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2238                 ret = ANEG_DONE;
2239                 break;
2240
2241         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2242                 /* ??? unimplemented */
2243                 break;
2244
2245         case ANEG_STATE_NEXT_PAGE_WAIT:
2246                 /* ??? unimplemented */
2247                 break;
2248
2249         default:
2250                 ret = ANEG_FAILED;
2251                 break;
2252         }
2253
2254         return ret;
2255 }
2256
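/* Run the software autoneg state machine to completion, bounded at
 * roughly 195 ms of 1 us steps.  Fills *flags with the resulting MR_*
 * bits and returns 1 when negotiation finished successfully.
 */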
2257 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2258 {
2259         int res = 0;
2260         struct tg3_fiber_aneginfo aninfo;
2261         int status = ANEG_FAILED;
2262         unsigned int tick;
2263         u32 tmp;
2264
2265         tw32_f(MAC_TX_AUTO_NEG, 0);
2266
2267         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2268         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2269         udelay(40);
2270
2271         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2272         udelay(40);
2273
2274         memset(&aninfo, 0, sizeof(aninfo));
2275         aninfo.flags |= MR_AN_ENABLE;
2276         aninfo.state = ANEG_STATE_UNKNOWN;
2277         aninfo.cur_time = 0;
2278         tick = 0;
2279         while (++tick < 195000) {
2280                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2281                 if (status == ANEG_DONE || status == ANEG_FAILED)
2282                         break;
2283
2284                 udelay(1);
2285         }
2286
2287         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2288         tw32_f(MAC_MODE, tp->mac_mode);
2289         udelay(40);
2290
2291         *flags = aninfo.flags;
2292
2293         if (status == ANEG_DONE &&
2294             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2295                              MR_LP_ADV_FULL_DUPLEX)))
2296                 res = 1;
2297
2298         return res;
2299 }
2300
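/* One-time init sequence for the external BCM8002 SerDes PHY, skipped
 * when we are already initialized and have no link.  The register
 * numbers and values are undocumented vendor magic; the inline
 * comments describe what each write is believed to do.
 */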
2301 static void tg3_init_bcm8002(struct tg3 *tp)
2302 {
2303         u32 mac_status = tr32(MAC_STATUS);
2304         int i;
2305
2306         /* Reset on first-time init or when we have a link. */
2307         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2308             !(mac_status & MAC_STATUS_PCS_SYNCED))
2309                 return;
2310
2311         /* Set PLL lock range. */
2312         tg3_writephy(tp, 0x16, 0x8007);
2313
2314         /* SW reset */
2315         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2316
2317         /* Wait for reset to complete. */
2318         /* XXX schedule_timeout() ... */
2319         for (i = 0; i < 500; i++)
2320                 udelay(10);
2321
2322         /* Config mode; select PMA/Ch 1 regs. */
2323         tg3_writephy(tp, 0x10, 0x8411);
2324
2325         /* Enable auto-lock and comdet, select txclk for tx. */
2326         tg3_writephy(tp, 0x11, 0x0a10);
2327
2328         tg3_writephy(tp, 0x18, 0x00a0);
2329         tg3_writephy(tp, 0x16, 0x41ff);
2330
2331         /* Assert and deassert POR. */
2332         tg3_writephy(tp, 0x13, 0x0400);
2333         udelay(40);
2334         tg3_writephy(tp, 0x13, 0x0000);
2335
2336         tg3_writephy(tp, 0x11, 0x0a50);
2337         udelay(40);
2338         tg3_writephy(tp, 0x11, 0x0a10);
2339
2340         /* Wait for signal to stabilize */
2341         /* XXX schedule_timeout() ... */
2342         for (i = 0; i < 15000; i++)
2343                 udelay(10);
2344
2345         /* Deselect the channel register so we can read the PHYID
2346          * later.
2347          */
2348         tg3_writephy(tp, 0x10, 0x8011);
2349 }
2350
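/* Fiber link setup using the on-chip SG_DIG block (hardware 1000BASE-X
 * autoneg).  Programs SG_DIG_CTRL with our pause abilities, gives the
 * partner ~200 ms to negotiate, and falls back to parallel detection
 * (PCS sync with no config words received) when it does not.  The
 * MAC_SERDES_CFG fixups are applied on every revision except 5704
 * A0/A1, with the value depending on which port this is.
 */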
2351 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2352 {
2353         u32 sg_dig_ctrl, sg_dig_status;
2354         u32 serdes_cfg, expected_sg_dig_ctrl;
2355         int workaround, port_a;
2356         int current_link_up;
2357
2358         serdes_cfg = 0;
2359         expected_sg_dig_ctrl = 0;
2360         workaround = 0;
2361         port_a = 1;
2362         current_link_up = 0;
2363
2364         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2365             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2366                 workaround = 1;
2367                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2368                         port_a = 0;
2369
2370                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2371                 /* preserve bits 20-23 for voltage regulator */
2372                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2373         }
2374
2375         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2376
2377         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2378                 if (sg_dig_ctrl & (1 << 31)) {
2379                         if (workaround) {
2380                                 u32 val = serdes_cfg;
2381
2382                                 if (port_a)
2383                                         val |= 0xc010000;
2384                                 else
2385                                         val |= 0x4010000;
2386                                 tw32_f(MAC_SERDES_CFG, val);
2387                         }
2388                         tw32_f(SG_DIG_CTRL, 0x01388400);
2389                 }
2390                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2391                         tg3_setup_flow_control(tp, 0, 0);
2392                         current_link_up = 1;
2393                 }
2394                 goto out;
2395         }
2396
2397         /* Want auto-negotiation.  */
2398         expected_sg_dig_ctrl = 0x81388400;
2399
2400         /* Pause capability */
2401         expected_sg_dig_ctrl |= (1 << 11);
2402
2403                 /* Asymmetric pause */
2404         expected_sg_dig_ctrl |= (1 << 12);
2405
2406         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2407                 if (workaround)
2408                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2409                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2410                 udelay(5);
2411                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2412
2413                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2414         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2415                                  MAC_STATUS_SIGNAL_DET)) {
2416                 int i;
2417
2418                 /* Give it time to negotiate (~200ms) */
2419                 for (i = 0; i < 40000; i++) {
2420                         sg_dig_status = tr32(SG_DIG_STATUS);
2421                         if (sg_dig_status & (0x3))
2422                                 break;
2423                         udelay(5);
2424                 }
2425                 mac_status = tr32(MAC_STATUS);
2426
2427                 if ((sg_dig_status & (1 << 1)) &&
2428                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2429                         u32 local_adv, remote_adv;
2430
2431                         local_adv = ADVERTISE_PAUSE_CAP;
2432                         remote_adv = 0;
2433                         if (sg_dig_status & (1 << 19))
2434                                 remote_adv |= LPA_PAUSE_CAP;
2435                         if (sg_dig_status & (1 << 20))
2436                                 remote_adv |= LPA_PAUSE_ASYM;
2437
2438                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2439                         current_link_up = 1;
2440                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2441                 } else if (!(sg_dig_status & (1 << 1))) {
2442                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2443                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2444                         else {
2445                                 if (workaround) {
2446                                         u32 val = serdes_cfg;
2447
2448                                         if (port_a)
2449                                                 val |= 0xc010000;
2450                                         else
2451                                                 val |= 0x4010000;
2452
2453                                         tw32_f(MAC_SERDES_CFG, val);
2454                                 }
2455
2456                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2457                                 udelay(40);
2458
2459                                 /* Link parallel detection - link is up
2460                                  * only if we have PCS_SYNC and not
2461                                  * receiving config code words. */
2462                                 mac_status = tr32(MAC_STATUS);
2463                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2464                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2465                                         tg3_setup_flow_control(tp, 0, 0);
2466                                         current_link_up = 1;
2467                                 }
2468                         }
2469                 }
2470         }
2471
2472 out:
2473         return current_link_up;
2474 }
2475
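/* Fiber link setup without the SG_DIG block: with autoneg enabled, run
 * the software state machine and resolve flow control from the
 * partner's pause bits; with autoneg disabled, simply force a 1000FD
 * link up.
 */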
2476 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2477 {
2478         int current_link_up = 0;
2479
2480         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2481                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2482                 goto out;
2483         }
2484
2485         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2486                 u32 flags;
2487                 int i;
2488   
2489                 if (fiber_autoneg(tp, &flags)) {
2490                         u32 local_adv, remote_adv;
2491
2492                         local_adv = ADVERTISE_PAUSE_CAP;
2493                         remote_adv = 0;
2494                         if (flags & MR_LP_ADV_SYM_PAUSE)
2495                                 remote_adv |= LPA_PAUSE_CAP;
2496                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2497                                 remote_adv |= LPA_PAUSE_ASYM;
2498
2499                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2500
2501                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2502                         current_link_up = 1;
2503                 }
2504                 for (i = 0; i < 30; i++) {
2505                         udelay(20);
2506                         tw32_f(MAC_STATUS,
2507                                (MAC_STATUS_SYNC_CHANGED |
2508                                 MAC_STATUS_CFG_CHANGED));
2509                         udelay(40);
2510                         if ((tr32(MAC_STATUS) &
2511                              (MAC_STATUS_SYNC_CHANGED |
2512                               MAC_STATUS_CFG_CHANGED)) == 0)
2513                                 break;
2514                 }
2515
2516                 mac_status = tr32(MAC_STATUS);
2517                 if (current_link_up == 0 &&
2518                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2519                     !(mac_status & MAC_STATUS_RCVD_CFG))
2520                         current_link_up = 1;
2521         } else {
2522                 /* Forcing 1000FD link up. */
2523                 current_link_up = 1;
2524                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2525
2526                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2527                 udelay(40);
2528         }
2529
2530 out:
2531         return current_link_up;
2532 }
2533
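/* Top-level link setup for TBI/fiber ports.  Returns early when the
 * link is already up and nothing changed; otherwise (re)initializes
 * the PHY, runs hardware or software autoneg as appropriate, drives
 * the link LED, and reports carrier or pause-configuration changes.
 */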
2534 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2535 {
2536         u32 orig_pause_cfg;
2537         u16 orig_active_speed;
2538         u8 orig_active_duplex;
2539         u32 mac_status;
2540         int current_link_up;
2541         int i;
2542
2543         orig_pause_cfg =
2544                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2545                                   TG3_FLAG_TX_PAUSE));
2546         orig_active_speed = tp->link_config.active_speed;
2547         orig_active_duplex = tp->link_config.active_duplex;
2548
2549         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2550             netif_carrier_ok(tp->dev) &&
2551             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2552                 mac_status = tr32(MAC_STATUS);
2553                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2554                                MAC_STATUS_SIGNAL_DET |
2555                                MAC_STATUS_CFG_CHANGED |
2556                                MAC_STATUS_RCVD_CFG);
2557                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2558                                    MAC_STATUS_SIGNAL_DET)) {
2559                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2560                                             MAC_STATUS_CFG_CHANGED));
2561                         return 0;
2562                 }
2563         }
2564
2565         tw32_f(MAC_TX_AUTO_NEG, 0);
2566
2567         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2568         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2569         tw32_f(MAC_MODE, tp->mac_mode);
2570         udelay(40);
2571
2572         if (tp->phy_id == PHY_ID_BCM8002)
2573                 tg3_init_bcm8002(tp);
2574
2575         /* Enable link change events even when polling the serdes. */
2576         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2577         udelay(40);
2578
2579         current_link_up = 0;
2580         mac_status = tr32(MAC_STATUS);
2581
2582         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2583                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2584         else
2585                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2586
2587         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2588         tw32_f(MAC_MODE, tp->mac_mode);
2589         udelay(40);
2590
2591         tp->hw_status->status =
2592                 (SD_STATUS_UPDATED |
2593                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2594
2595         for (i = 0; i < 100; i++) {
2596                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2597                                     MAC_STATUS_CFG_CHANGED));
2598                 udelay(5);
2599                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2600                                          MAC_STATUS_CFG_CHANGED)) == 0)
2601                         break;
2602         }
2603
2604         mac_status = tr32(MAC_STATUS);
2605         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2606                 current_link_up = 0;
2607                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2608                         tw32_f(MAC_MODE, (tp->mac_mode |
2609                                           MAC_MODE_SEND_CONFIGS));
2610                         udelay(1);
2611                         tw32_f(MAC_MODE, tp->mac_mode);
2612                 }
2613         }
2614
2615         if (current_link_up == 1) {
2616                 tp->link_config.active_speed = SPEED_1000;
2617                 tp->link_config.active_duplex = DUPLEX_FULL;
2618                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2619                                     LED_CTRL_LNKLED_OVERRIDE |
2620                                     LED_CTRL_1000MBPS_ON));
2621         } else {
2622                 tp->link_config.active_speed = SPEED_INVALID;
2623                 tp->link_config.active_duplex = DUPLEX_INVALID;
2624                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2625                                     LED_CTRL_LNKLED_OVERRIDE |
2626                                     LED_CTRL_TRAFFIC_OVERRIDE));
2627         }
2628
2629         if (current_link_up != netif_carrier_ok(tp->dev)) {
2630                 if (current_link_up)
2631                         netif_carrier_on(tp->dev);
2632                 else
2633                         netif_carrier_off(tp->dev);
2634                 tg3_link_report(tp);
2635         } else {
2636                 u32 now_pause_cfg =
2637                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2638                                          TG3_FLAG_TX_PAUSE);
2639                 if (orig_pause_cfg != now_pause_cfg ||
2640                     orig_active_speed != tp->link_config.active_speed ||
2641                     orig_active_duplex != tp->link_config.active_duplex)
2642                         tg3_link_report(tp);
2643         }
2644
2645         return 0;
2646 }
2647
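/* Link setup for serdes devices that are managed through standard MII
 * registers.  Uses the 1000BASE-X advertisement bits in MII_ADVERTISE,
 * supports forced 1000 Mbps full/half duplex, and leaves the link to
 * parallel detection when that flag is already set.
 */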
2648 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2649 {
2650         int current_link_up, err = 0;
2651         u32 bmsr, bmcr;
2652         u16 current_speed;
2653         u8 current_duplex;
2654
2655         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2656         tw32_f(MAC_MODE, tp->mac_mode);
2657         udelay(40);
2658
2659         tw32(MAC_EVENT, 0);
2660
2661         tw32_f(MAC_STATUS,
2662              (MAC_STATUS_SYNC_CHANGED |
2663               MAC_STATUS_CFG_CHANGED |
2664               MAC_STATUS_MI_COMPLETION |
2665               MAC_STATUS_LNKSTATE_CHANGED));
2666         udelay(40);
2667
2668         if (force_reset)
2669                 tg3_phy_reset(tp);
2670
2671         current_link_up = 0;
2672         current_speed = SPEED_INVALID;
2673         current_duplex = DUPLEX_INVALID;
2674
2675         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2676         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2677
2678         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2679
2680         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2681             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2682                 /* do nothing, just check for link up at the end */
2683         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2684                 u32 adv, new_adv;
2685
2686                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2687                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2688                                   ADVERTISE_1000XPAUSE |
2689                                   ADVERTISE_1000XPSE_ASYM |
2690                                   ADVERTISE_SLCT);
2691
2692                 /* Always advertise symmetric PAUSE just like copper */
2693                 new_adv |= ADVERTISE_1000XPAUSE;
2694
2695                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2696                         new_adv |= ADVERTISE_1000XHALF;
2697                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2698                         new_adv |= ADVERTISE_1000XFULL;
2699
2700                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2701                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2702                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2703                         tg3_writephy(tp, MII_BMCR, bmcr);
2704
2705                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2706                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2707                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2708
2709                         return err;
2710                 }
2711         } else {
2712                 u32 new_bmcr;
2713
2714                 bmcr &= ~BMCR_SPEED1000;
2715                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2716
2717                 if (tp->link_config.duplex == DUPLEX_FULL)
2718                         new_bmcr |= BMCR_FULLDPLX;
2719
2720                 if (new_bmcr != bmcr) {
2721                         /* BMCR_SPEED1000 is a reserved bit that needs
2722                          * to be set on write.
2723                          */
2724                         new_bmcr |= BMCR_SPEED1000;
2725
2726                         /* Force a linkdown */
2727                         if (netif_carrier_ok(tp->dev)) {
2728                                 u32 adv;
2729
2730                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2731                                 adv &= ~(ADVERTISE_1000XFULL |
2732                                          ADVERTISE_1000XHALF |
2733                                          ADVERTISE_SLCT);
2734                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2735                                 tg3_writephy(tp, MII_BMCR, bmcr |
2736                                                            BMCR_ANRESTART |
2737                                                            BMCR_ANENABLE);
2738                                 udelay(10);
2739                                 netif_carrier_off(tp->dev);
2740                         }
2741                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2742                         bmcr = new_bmcr;
2743                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2744                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2745                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2746                 }
2747         }
2748
2749         if (bmsr & BMSR_LSTATUS) {
2750                 current_speed = SPEED_1000;
2751                 current_link_up = 1;
2752                 if (bmcr & BMCR_FULLDPLX)
2753                         current_duplex = DUPLEX_FULL;
2754                 else
2755                         current_duplex = DUPLEX_HALF;
2756
2757                 if (bmcr & BMCR_ANENABLE) {
2758                         u32 local_adv, remote_adv, common;
2759
2760                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2761                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2762                         common = local_adv & remote_adv;
2763                         if (common & (ADVERTISE_1000XHALF |
2764                                       ADVERTISE_1000XFULL)) {
2765                                 if (common & ADVERTISE_1000XFULL)
2766                                         current_duplex = DUPLEX_FULL;
2767                                 else
2768                                         current_duplex = DUPLEX_HALF;
2769
2770                                 tg3_setup_flow_control(tp, local_adv,
2771                                                        remote_adv);
2772                         }
2773                         else
2774                                 current_link_up = 0;
2775                 }
2776         }
2777
2778         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2779         if (tp->link_config.active_duplex == DUPLEX_HALF)
2780                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2781
2782         tw32_f(MAC_MODE, tp->mac_mode);
2783         udelay(40);
2784
2785         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2786
2787         tp->link_config.active_speed = current_speed;
2788         tp->link_config.active_duplex = current_duplex;
2789
2790         if (current_link_up != netif_carrier_ok(tp->dev)) {
2791                 if (current_link_up)
2792                         netif_carrier_on(tp->dev);
2793                 else {
2794                         netif_carrier_off(tp->dev);
2795                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2796                 }
2797                 tg3_link_report(tp);
2798         }
2799         return err;
2800 }
2801
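/* Parallel detection (summarizing the logic below): if autoneg is enabled
 * but the link partner never sends config code words while we do have
 * signal detect, force the link up at 1000/full and remember it via
 * TG3_FLG2_PARALLEL_DETECT; if code words appear later, hand the link
 * back to autonegotiation.
 */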
2802 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2803 {
2804         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2805                 /* Give autoneg time to complete. */
2806                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2807                 return;
2808         }
2809         if (!netif_carrier_ok(tp->dev) &&
2810             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2811                 u32 bmcr;
2812
2813                 tg3_readphy(tp, MII_BMCR, &bmcr);
2814                 if (bmcr & BMCR_ANENABLE) {
2815                         u32 phy1, phy2;
2816
2817                         /* Select shadow register 0x1f */
2818                         tg3_writephy(tp, 0x1c, 0x7c00);
2819                         tg3_readphy(tp, 0x1c, &phy1);
2820
2821                         /* Select expansion interrupt status register */
2822                         tg3_writephy(tp, 0x17, 0x0f01);
2823                         tg3_readphy(tp, 0x15, &phy2);
2824                         tg3_readphy(tp, 0x15, &phy2);
2825
2826                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2827                                 /* We have signal detect and are not receiving
2828                                  * config code words; the link is up via
2829                                  * parallel detection.
2830                                  */
2831
2832                                 bmcr &= ~BMCR_ANENABLE;
2833                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2834                                 tg3_writephy(tp, MII_BMCR, bmcr);
2835                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2836                         }
2837                 }
2838         }
2839         else if (netif_carrier_ok(tp->dev) &&
2840                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2841                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2842                 u32 phy2;
2843
2844                 /* Select expansion interrupt status register */
2845                 tg3_writephy(tp, 0x17, 0x0f01);
2846                 tg3_readphy(tp, 0x15, &phy2);
2847                 if (phy2 & 0x20) {
2848                         u32 bmcr;
2849
2850                         /* Config code words received, turn on autoneg. */
2851                         tg3_readphy(tp, MII_BMCR, &bmcr);
2852                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2853
2854                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2855
2856                 }
2857         }
2858 }
2859
2860 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2861 {
2862         int err;
2863
2864         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2865                 err = tg3_setup_fiber_phy(tp, force_reset);
2866         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2867                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2868         } else {
2869                 err = tg3_setup_copper_phy(tp, force_reset);
2870         }
2871
2872         if (tp->link_config.active_speed == SPEED_1000 &&
2873             tp->link_config.active_duplex == DUPLEX_HALF)
2874                 tw32(MAC_TX_LENGTHS,
2875                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2876                       (6 << TX_LENGTHS_IPG_SHIFT) |
2877                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2878         else
2879                 tw32(MAC_TX_LENGTHS,
2880                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2881                       (6 << TX_LENGTHS_IPG_SHIFT) |
2882                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2883
2884         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2885                 if (netif_carrier_ok(tp->dev)) {
2886                         tw32(HOSTCC_STAT_COAL_TICKS,
2887                              tp->coal.stats_block_coalesce_usecs);
2888                 } else {
2889                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2890                 }
2891         }
2892
2893         return err;
2894 }
2895
2896 /* Tigon3 never reports partial packet sends.  So we do not
2897  * need special logic to handle SKBs that have not had all
2898  * of their frags sent yet, like SunGEM does.
2899  */
2900 static void tg3_tx(struct tg3 *tp)
2901 {
2902         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2903         u32 sw_idx = tp->tx_cons;
2904
2905         while (sw_idx != hw_idx) {
2906                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2907                 struct sk_buff *skb = ri->skb;
2908                 int i;
2909
2910                 if (unlikely(skb == NULL))
2911                         BUG();
2912
2913                 pci_unmap_single(tp->pdev,
2914                                  pci_unmap_addr(ri, mapping),
2915                                  skb_headlen(skb),
2916                                  PCI_DMA_TODEVICE);
2917
2918                 ri->skb = NULL;
2919
2920                 sw_idx = NEXT_TX(sw_idx);
2921
2922                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2923                         if (unlikely(sw_idx == hw_idx))
2924                                 BUG();
2925
2926                         ri = &tp->tx_buffers[sw_idx];
2927                         if (unlikely(ri->skb != NULL))
2928                                 BUG();
2929
2930                         pci_unmap_page(tp->pdev,
2931                                        pci_unmap_addr(ri, mapping),
2932                                        skb_shinfo(skb)->frags[i].size,
2933                                        PCI_DMA_TODEVICE);
2934
2935                         sw_idx = NEXT_TX(sw_idx);
2936                 }
2937
2938                 dev_kfree_skb(skb);
2939         }
2940
2941         tp->tx_cons = sw_idx;
2942
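        /* Waking the queue races with tg3_start_xmit(); do a cheap lockless
         * check first, then re-check under tx_lock before actually waking it,
         * so we never wake a queue that was just legitimately stopped.
         */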
2943         if (unlikely(netif_queue_stopped(tp->dev))) {
2944                 spin_lock(&tp->tx_lock);
2945                 if (netif_queue_stopped(tp->dev) &&
2946                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2947                         netif_wake_queue(tp->dev);
2948                 spin_unlock(&tp->tx_lock);
2949         }
2950 }
2951
2952 /* Returns size of skb allocated or < 0 on error.
2953  *
2954  * We only need to fill in the address because the other members
2955  * of the RX descriptor are invariant, see tg3_init_rings.
2956  *
2957  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2958  * posting buffers we only dirty the first cache line of the RX
2959  * descriptor (containing the address).  Whereas for the RX status
2960  * buffers the cpu only reads the last cacheline of the RX descriptor
2961  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2962  */
2963 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2964                             int src_idx, u32 dest_idx_unmasked)
2965 {
2966         struct tg3_rx_buffer_desc *desc;
2967         struct ring_info *map, *src_map;
2968         struct sk_buff *skb;
2969         dma_addr_t mapping;
2970         int skb_size, dest_idx;
2971
2972         src_map = NULL;
2973         switch (opaque_key) {
2974         case RXD_OPAQUE_RING_STD:
2975                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2976                 desc = &tp->rx_std[dest_idx];
2977                 map = &tp->rx_std_buffers[dest_idx];
2978                 if (src_idx >= 0)
2979                         src_map = &tp->rx_std_buffers[src_idx];
2980                 skb_size = tp->rx_pkt_buf_sz;
2981                 break;
2982
2983         case RXD_OPAQUE_RING_JUMBO:
2984                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2985                 desc = &tp->rx_jumbo[dest_idx];
2986                 map = &tp->rx_jumbo_buffers[dest_idx];
2987                 if (src_idx >= 0)
2988                         src_map = &tp->rx_jumbo_buffers[src_idx];
2989                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2990                 break;
2991
2992         default:
2993                 return -EINVAL;
2994         };
2995
2996         /* Do not overwrite any of the map or rp information
2997          * until we are sure we can commit to a new buffer.
2998          *
2999          * Callers depend upon this behavior and assume that
3000          * we leave everything unchanged if we fail.
3001          */
3002         skb = dev_alloc_skb(skb_size);
3003         if (skb == NULL)
3004                 return -ENOMEM;
3005
3006         skb->dev = tp->dev;
3007         skb_reserve(skb, tp->rx_offset);
3008
3009         mapping = pci_map_single(tp->pdev, skb->data,
3010                                  skb_size - tp->rx_offset,
3011                                  PCI_DMA_FROMDEVICE);
3012
3013         map->skb = skb;
3014         pci_unmap_addr_set(map, mapping, mapping);
3015
3016         if (src_map != NULL)
3017                 src_map->skb = NULL;
3018
3019         desc->addr_hi = ((u64)mapping >> 32);
3020         desc->addr_lo = ((u64)mapping & 0xffffffff);
3021
3022         return skb_size;
3023 }
3024
3025 /* We only need to move the address over because the other
3026  * members of the RX descriptor are invariant.  See notes above
3027  * tg3_alloc_rx_skb for full details.
3028  */
3029 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3030                            int src_idx, u32 dest_idx_unmasked)
3031 {
3032         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3033         struct ring_info *src_map, *dest_map;
3034         int dest_idx;
3035
3036         switch (opaque_key) {
3037         case RXD_OPAQUE_RING_STD:
3038                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3039                 dest_desc = &tp->rx_std[dest_idx];
3040                 dest_map = &tp->rx_std_buffers[dest_idx];
3041                 src_desc = &tp->rx_std[src_idx];
3042                 src_map = &tp->rx_std_buffers[src_idx];
3043                 break;
3044
3045         case RXD_OPAQUE_RING_JUMBO:
3046                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3047                 dest_desc = &tp->rx_jumbo[dest_idx];
3048                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3049                 src_desc = &tp->rx_jumbo[src_idx];
3050                 src_map = &tp->rx_jumbo_buffers[src_idx];
3051                 break;
3052
3053         default:
3054                 return;
3055         };
3056
3057         dest_map->skb = src_map->skb;
3058         pci_unmap_addr_set(dest_map, mapping,
3059                            pci_unmap_addr(src_map, mapping));
3060         dest_desc->addr_hi = src_desc->addr_hi;
3061         dest_desc->addr_lo = src_desc->addr_lo;
3062
3063         src_map->skb = NULL;
3064 }
3065
3066 #if TG3_VLAN_TAG_USED
3067 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3068 {
3069         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3070 }
3071 #endif
3072
3073 /* The RX ring scheme is composed of multiple rings which post fresh
3074  * buffers to the chip, and one special ring the chip uses to report
3075  * status back to the host.
3076  *
3077  * The special ring reports the status of received packets to the
3078  * host.  The chip does not write into the original descriptor the
3079  * RX buffer was obtained from.  The chip simply takes the original
3080  * descriptor as provided by the host, updates the status and length
3081  * field, then writes this into the next status ring entry.
3082  *
3083  * Each ring the host uses to post buffers to the chip is described
3084  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3085  * it is first placed into the on-chip ram.  When the packet's length
3086  * is known, it walks down the TG3_BDINFO entries to select the ring.
3087  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3088  * whose MAXLEN covers the new packet's length is chosen.
3089  *
3090  * The "separate ring for rx status" scheme may sound queer, but it makes
3091  * sense from a cache coherency perspective.  If only the host writes
3092  * to the buffer post rings, and only the chip writes to the rx status
3093  * rings, then cache lines never move beyond shared-modified state.
3094  * If both the host and chip were to write into the same ring, cache line
3095  * eviction could occur since both entities want it in an exclusive state.
3096  */
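/* Concretely: tg3_rx() below walks the status ring, hands completed packets
 * to the stack, and then bumps the std/jumbo producer mailboxes so the chip
 * sees the replacement buffers that were posted.
 */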
3097 static int tg3_rx(struct tg3 *tp, int budget)
3098 {
3099         u32 work_mask;
3100         u32 sw_idx = tp->rx_rcb_ptr;
3101         u16 hw_idx;
3102         int received;
3103
3104         hw_idx = tp->hw_status->idx[0].rx_producer;
3105         /*
3106          * We need to order the read of hw_idx and the read of
3107          * the opaque cookie.
3108          */
3109         rmb();
3110         work_mask = 0;
3111         received = 0;
3112         while (sw_idx != hw_idx && budget > 0) {
3113                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3114                 unsigned int len;
3115                 struct sk_buff *skb;
3116                 dma_addr_t dma_addr;
3117                 u32 opaque_key, desc_idx, *post_ptr;
3118
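                /* The opaque cookie written back by the chip encodes both
                 * which producer ring the buffer came from and its index in
                 * that ring; that is how we recover the matching skb, its DMA
                 * mapping, and the ring's post pointer.
                 */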
3119                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3120                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3121                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3122                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3123                                                   mapping);
3124                         skb = tp->rx_std_buffers[desc_idx].skb;
3125                         post_ptr = &tp->rx_std_ptr;
3126                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3127                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3128                                                   mapping);
3129                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3130                         post_ptr = &tp->rx_jumbo_ptr;
3131                 }
3132                 else {
3133                         goto next_pkt_nopost;
3134                 }
3135
3136                 work_mask |= opaque_key;
3137
3138                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3139                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3140                 drop_it:
3141                         tg3_recycle_rx(tp, opaque_key,
3142                                        desc_idx, *post_ptr);
3143                 drop_it_no_recycle:
3144                         /* Other statistics kept track of by card. */
3145                         tp->net_stats.rx_dropped++;
3146                         goto next_pkt;
3147                 }
3148
3149                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3150
3151                 /* rx_offset != 2 iff this is a 5701 card running
3152                  * in PCI-X mode [see tg3_get_invariants()]
3153                  */
3154                 if (len > RX_COPY_THRESHOLD &&
3155                     tp->rx_offset == 2) {
3156                         int skb_size;
3157
3158                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3159                                                     desc_idx, *post_ptr);
3160                         if (skb_size < 0)
3161                                 goto drop_it;
3162
3163                         pci_unmap_single(tp->pdev, dma_addr,
3164                                          skb_size - tp->rx_offset,
3165                                          PCI_DMA_FROMDEVICE);
3166
3167                         skb_put(skb, len);
3168                 } else {
3169                         struct sk_buff *copy_skb;
3170
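                        /* Small packet: recycle the original ring buffer in
                         * place and give the stack a fresh copy; the 2-byte
                         * reserve keeps the IP header 4-byte aligned after
                         * the 14-byte Ethernet header.
                         */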
3171                         tg3_recycle_rx(tp, opaque_key,
3172                                        desc_idx, *post_ptr);
3173
3174                         copy_skb = dev_alloc_skb(len + 2);
3175                         if (copy_skb == NULL)
3176                                 goto drop_it_no_recycle;
3177
3178                         copy_skb->dev = tp->dev;
3179                         skb_reserve(copy_skb, 2);
3180                         skb_put(copy_skb, len);
3181                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3182                         memcpy(copy_skb->data, skb->data, len);
3183                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3184
3185                         /* We'll reuse the original ring buffer. */
3186                         skb = copy_skb;
3187                 }
3188
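                /* The driver treats 0xffff in the descriptor's TCP/UDP
                 * checksum field as "hardware verified" and sets
                 * CHECKSUM_UNNECESSARY; anything else falls back to
                 * CHECKSUM_NONE and a software check.
                 */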
3189                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3190                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3191                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3192                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3193                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3194                 else
3195                         skb->ip_summed = CHECKSUM_NONE;
3196
3197                 skb->protocol = eth_type_trans(skb, tp->dev);
3198 #if TG3_VLAN_TAG_USED
3199                 if (tp->vlgrp != NULL &&
3200                     desc->type_flags & RXD_FLAG_VLAN) {
3201                         tg3_vlan_rx(tp, skb,
3202                                     desc->err_vlan & RXD_VLAN_MASK);
3203                 } else
3204 #endif
3205                         netif_receive_skb(skb);
3206
3207                 tp->dev->last_rx = jiffies;
3208                 received++;
3209                 budget--;
3210
3211 next_pkt:
3212                 (*post_ptr)++;
3213 next_pkt_nopost:
3214                 sw_idx++;
3215                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3216
3217                 /* Refresh hw_idx to see if there is new work */
3218                 if (sw_idx == hw_idx) {
3219                         hw_idx = tp->hw_status->idx[0].rx_producer;
3220                         rmb();
3221                 }
3222         }
3223
3224         /* ACK the status ring. */
3225         tp->rx_rcb_ptr = sw_idx;
3226         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3227
3228         /* Refill RX ring(s). */
3229         if (work_mask & RXD_OPAQUE_RING_STD) {
3230                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3231                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3232                              sw_idx);
3233         }
3234         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3235                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3236                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3237                              sw_idx);
3238         }
3239         mmiowb();
3240
3241         return received;
3242 }
3243
3244 static int tg3_poll(struct net_device *netdev, int *budget)
3245 {
3246         struct tg3 *tp = netdev_priv(netdev);
3247         struct tg3_hw_status *sblk = tp->hw_status;
3248         int done;
3249
3250         /* handle link change and other phy events */
3251         if (!(tp->tg3_flags &
3252               (TG3_FLAG_USE_LINKCHG_REG |
3253                TG3_FLAG_POLL_SERDES))) {
3254                 if (sblk->status & SD_STATUS_LINK_CHG) {
3255                         sblk->status = SD_STATUS_UPDATED |
3256                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3257                         spin_lock(&tp->lock);
3258                         tg3_setup_phy(tp, 0);
3259                         spin_unlock(&tp->lock);
3260                 }
3261         }
3262
3263         /* run TX completion thread */
3264         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3265                 tg3_tx(tp);
3266         }
3267
3268         /* run RX thread, within the bounds set by NAPI.
3269          * All RX "locking" is done by ensuring outside
3270          * code synchronizes with dev->poll()
3271          */
3272         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3273                 int orig_budget = *budget;
3274                 int work_done;
3275
3276                 if (orig_budget > netdev->quota)
3277                         orig_budget = netdev->quota;
3278
3279                 work_done = tg3_rx(tp, orig_budget);
3280
3281                 *budget -= work_done;
3282                 netdev->quota -= work_done;
3283         }
3284
3285         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3286                 tp->last_tag = sblk->status_tag;
3287                 rmb();
3288         } else
3289                 sblk->status &= ~SD_STATUS_UPDATED;
3290
3291         /* if no more work, tell net stack and NIC we're done */
3292         done = !tg3_has_work(tp);
3293         if (done) {
3294                 netif_rx_complete(netdev);
3295                 tg3_restart_ints(tp);
3296         }
3297
3298         return (done ? 0 : 1);
3299 }
3300
3301 static void tg3_irq_quiesce(struct tg3 *tp)
3302 {
3303         BUG_ON(tp->irq_sync);
3304
3305         tp->irq_sync = 1;
3306         smp_mb();
3307
3308         synchronize_irq(tp->pdev->irq);
3309 }
3310
3311 static inline int tg3_irq_sync(struct tg3 *tp)
3312 {
3313         return tp->irq_sync;
3314 }
3315
3316 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3317  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3318  * with as well.  Most of the time, this is not necessary except when
3319  * shutting down the device.
3320  */
3321 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3322 {
3323         if (irq_sync)
3324                 tg3_irq_quiesce(tp);
3325         spin_lock_bh(&tp->lock);
3326         spin_lock(&tp->tx_lock);
3327 }
3328
3329 static inline void tg3_full_unlock(struct tg3 *tp)
3330 {
3331         spin_unlock(&tp->tx_lock);
3332         spin_unlock_bh(&tp->lock);
3333 }
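/* Lock ordering note: tg3_full_lock() nests tp->lock outside tp->tx_lock and
 * tg3_full_unlock() releases them in the reverse order.  Paths that only
 * touch the TX ring (e.g. tg3_start_xmit()) take tx_lock alone, which stays
 * deadlock-free as long as tp->lock is never acquired while tx_lock is held.
 */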
3334
3335 /* MSI ISR - No need to check for interrupt sharing and no need to
3336  * flush status block and interrupt mailbox. PCI ordering rules
3337  * guarantee that MSI will arrive after the status block.
3338  */
3339 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3340 {
3341         struct net_device *dev = dev_id;
3342         struct tg3 *tp = netdev_priv(dev);
3343
3344         prefetch(tp->hw_status);
3345         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3346         /*
3347          * Writing any value to intr-mbox-0 clears PCI INTA# and
3348          * chip-internal interrupt pending events.
3349          * Writing non-zero to intr-mbox-0 additionally tells the
3350          * NIC to stop sending us irqs, engaging "in-intr-handler"
3351          * event coalescing.
3352          */
3353         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3354         if (likely(!tg3_irq_sync(tp)))
3355                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3356
3357         return IRQ_RETVAL(1);
3358 }
3359
3360 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3361 {
3362         struct net_device *dev = dev_id;
3363         struct tg3 *tp = netdev_priv(dev);
3364         struct tg3_hw_status *sblk = tp->hw_status;
3365         unsigned int handled = 1;
3366
3367         /* In INTx mode, it is possible for the interrupt to arrive at
3368          * the CPU before the status block posted for it is visible.
3369          * Reading the PCI State register will confirm whether the
3370          * interrupt is ours and will flush the status block.
3371          */
3372         if ((sblk->status & SD_STATUS_UPDATED) ||
3373             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3374                 /*
3375                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3376                  * chip-internal interrupt pending events.
3377                  * Writing non-zero to intr-mbox-0 additionally tells the
3378                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3379                  * event coalescing.
3380                  */
3381                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3382                              0x00000001);
3383                 if (tg3_irq_sync(tp))
3384                         goto out;
3385                 sblk->status &= ~SD_STATUS_UPDATED;
3386                 if (likely(tg3_has_work(tp))) {
3387                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3388                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3389                 } else {
3390                         /* No work, shared interrupt perhaps?  re-enable
3391                          * interrupts, and flush that PCI write
3392                          */
3393                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3394                                 0x00000000);
3395                 }
3396         } else {        /* shared interrupt */
3397                 handled = 0;
3398         }
3399 out:
3400         return IRQ_RETVAL(handled);
3401 }
3402
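/* Tagged-status variant of the ISR: instead of testing SD_STATUS_UPDATED,
 * compare the status block's tag against the last tag we acknowledged.  A
 * changed tag means the chip has posted new work since we last looked.
 */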
3403 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3404 {
3405         struct net_device *dev = dev_id;
3406         struct tg3 *tp = netdev_priv(dev);
3407         struct tg3_hw_status *sblk = tp->hw_status;
3408         unsigned int handled = 1;
3409
3410         /* In INTx mode, it is possible for the interrupt to arrive at
3411          * the CPU before the status block posted for it is visible.
3412          * Reading the PCI State register will confirm whether the
3413          * interrupt is ours and will flush the status block.
3414          */
3415         if ((sblk->status_tag != tp->last_tag) ||
3416             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3417                 /*
3418                  * writing any value to intr-mbox-0 clears PCI INTA# and
3419                  * chip-internal interrupt pending events.
3420                  * writing non-zero to intr-mbox-0 additionally tells the
3421                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3422                  * event coalescing.
3423                  */
3424                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3425                              0x00000001);
3426                 if (tg3_irq_sync(tp))
3427                         goto out;
3428                 if (netif_rx_schedule_prep(dev)) {
3429                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3430                         /* Update last_tag to mark that this status has been
3431                          * seen. Because interrupt may be shared, we may be
3432                          * racing with tg3_poll(), so only update last_tag
3433                          * if tg3_poll() is not scheduled.
3434                          */
3435                         tp->last_tag = sblk->status_tag;
3436                         __netif_rx_schedule(dev);
3437                 }
3438         } else {        /* shared interrupt */
3439                 handled = 0;
3440         }
3441 out:
3442         return IRQ_RETVAL(handled);
3443 }
3444
3445 /* ISR for interrupt test */
3446 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3447                 struct pt_regs *regs)
3448 {
3449         struct net_device *dev = dev_id;
3450         struct tg3 *tp = netdev_priv(dev);
3451         struct tg3_hw_status *sblk = tp->hw_status;
3452
3453         if ((sblk->status & SD_STATUS_UPDATED) ||
3454             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3455                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3456                              0x00000001);
3457                 return IRQ_RETVAL(1);
3458         }
3459         return IRQ_RETVAL(0);
3460 }
3461
3462 static int tg3_init_hw(struct tg3 *);
3463 static int tg3_halt(struct tg3 *, int, int);
3464
3465 #ifdef CONFIG_NET_POLL_CONTROLLER
3466 static void tg3_poll_controller(struct net_device *dev)
3467 {
3468         struct tg3 *tp = netdev_priv(dev);
3469
3470         tg3_interrupt(tp->pdev->irq, dev, NULL);
3471 }
3472 #endif
3473
3474 static void tg3_reset_task(void *_data)
3475 {
3476         struct tg3 *tp = _data;
3477         unsigned int restart_timer;
3478
3479         tg3_netif_stop(tp);
3480
3481         tg3_full_lock(tp, 1);
3482
3483         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3484         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3485
3486         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3487         tg3_init_hw(tp);
3488
3489         tg3_netif_start(tp);
3490
3491         tg3_full_unlock(tp);
3492
3493         if (restart_timer)
3494                 mod_timer(&tp->timer, jiffies + 1);
3495 }
3496
3497 static void tg3_tx_timeout(struct net_device *dev)
3498 {
3499         struct tg3 *tp = netdev_priv(dev);
3500
3501         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3502                dev->name);
3503
3504         schedule_work(&tp->reset_task);
3505 }
3506
3507 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
3508 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3509 {
3510         u32 base = (u32) mapping & 0xffffffff;
3511
3512         return ((base > 0xffffdcc0) &&
3513                 (base + len + 8 < base));
3514 }
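/* Worked example: a 100-byte buffer mapped at 0xffffffa0 has
 * base = 0xffffffa0 > 0xffffdcc0 and base + 100 + 8 wraps to 0x0000000c,
 * which is < base, so the buffer straddles a 4GB boundary and
 * tigon3_4gb_hwbug_workaround() below must relocate it.
 */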
3515
3516 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3517
3518 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3519                                        u32 last_plus_one, u32 *start,
3520                                        u32 base_flags, u32 mss)
3521 {
3522         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3523         dma_addr_t new_addr = 0;
3524         u32 entry = *start;
3525         int i, ret = 0;
3526
3527         if (!new_skb) {
3528                 ret = -1;
3529         } else {
3530                 /* New SKB is guaranteed to be linear. */
3531                 entry = *start;
3532                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3533                                           PCI_DMA_TODEVICE);
3534                 /* Make sure new skb does not cross any 4G boundaries.
3535                  * Drop the packet if it does.
3536                  */
3537                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3538                         ret = -1;
3539                         dev_kfree_skb(new_skb);
3540                         new_skb = NULL;
3541                 } else {
3542                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3543                                     base_flags, 1 | (mss << 1));
3544                         *start = NEXT_TX(entry);
3545                 }
3546         }
3547
3548         /* Now clean up the sw ring entries. */
3549         i = 0;
3550         while (entry != last_plus_one) {
3551                 int len;
3552
3553                 if (i == 0)
3554                         len = skb_headlen(skb);
3555                 else
3556                         len = skb_shinfo(skb)->frags[i-1].size;
3557                 pci_unmap_single(tp->pdev,
3558                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3559                                  len, PCI_DMA_TODEVICE);
3560                 if (i == 0) {
3561                         tp->tx_buffers[entry].skb = new_skb;
3562                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3563                 } else {
3564                         tp->tx_buffers[entry].skb = NULL;
3565                 }
3566                 entry = NEXT_TX(entry);
3567                 i++;
3568         }
3569
3570         dev_kfree_skb(skb);
3571
3572         return ret;
3573 }
3574
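/* mss_and_is_end packs two values: bit 0 marks the last descriptor of the
 * frame (it becomes TXD_FLAG_END) and bits 31:1 carry the TSO MSS, which is
 * folded into the vlan_tag word below via TXD_MSS_SHIFT.
 */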
3575 static void tg3_set_txd(struct tg3 *tp, int entry,
3576                         dma_addr_t mapping, int len, u32 flags,
3577                         u32 mss_and_is_end)
3578 {
3579         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3580         int is_end = (mss_and_is_end & 0x1);
3581         u32 mss = (mss_and_is_end >> 1);
3582         u32 vlan_tag = 0;
3583
3584         if (is_end)
3585                 flags |= TXD_FLAG_END;
3586         if (flags & TXD_FLAG_VLAN) {
3587                 vlan_tag = flags >> 16;
3588                 flags &= 0xffff;
3589         }
3590         vlan_tag |= (mss << TXD_MSS_SHIFT);
3591
3592         txd->addr_hi = ((u64) mapping >> 32);
3593         txd->addr_lo = ((u64) mapping & 0xffffffff);
3594         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3595         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3596 }
3597
3598 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3599 {
3600         struct tg3 *tp = netdev_priv(dev);
3601         dma_addr_t mapping;
3602         u32 len, entry, base_flags, mss;
3603         int would_hit_hwbug;
3604
3605         len = skb_headlen(skb);
3606
3607         /* No BH disabling for tx_lock here.  We are running in BH disabled
3608          * context and TX reclaim runs via tp->poll inside of a software
3609          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3610          * no IRQ context deadlocks to worry about either.  Rejoice!
3611          */
3612         if (!spin_trylock(&tp->tx_lock))
3613                 return NETDEV_TX_LOCKED; 
3614
3615         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3616                 if (!netif_queue_stopped(dev)) {
3617                         netif_stop_queue(dev);
3618
3619                         /* This is a hard error, log it. */
3620                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3621                                "queue awake!\n", dev->name);
3622                 }
3623                 spin_unlock(&tp->tx_lock);
3624                 return NETDEV_TX_BUSY;
3625         }
3626
3627         entry = tp->tx_prod;
3628         base_flags = 0;
3629         if (skb->ip_summed == CHECKSUM_HW)
3630                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3631 #if TG3_TSO_SUPPORT != 0
3632         mss = 0;
3633         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3634             (mss = skb_shinfo(skb)->tso_size) != 0) {
3635                 int tcp_opt_len, ip_tcp_len;
3636
3637                 if (skb_header_cloned(skb) &&
3638                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3639                         dev_kfree_skb(skb);
3640                         goto out_unlock;
3641                 }
3642
3643                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3644                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3645
3646                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3647                                TXD_FLAG_CPU_POST_DMA);
3648
3649                 skb->nh.iph->check = 0;
3650                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3651                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3652                         skb->h.th->check = 0;
3653                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3654                 }
3655                 else {
3656                         skb->h.th->check =
3657                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3658                                                    skb->nh.iph->daddr,
3659                                                    0, IPPROTO_TCP, 0);
3660                 }
3661
3662                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3663                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3664                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3665                                 int tsflags;
3666
3667                                 tsflags = ((skb->nh.iph->ihl - 5) +
3668                                            (tcp_opt_len >> 2));
3669                                 mss |= (tsflags << 11);
3670                         }
3671                 } else {
3672                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3673                                 int tsflags;
3674
3675                                 tsflags = ((skb->nh.iph->ihl - 5) +
3676                                            (tcp_opt_len >> 2));
3677                                 base_flags |= tsflags << 12;
3678                         }
3679                 }
3680         }
3681 #else
3682         mss = 0;
3683 #endif
3684 #if TG3_VLAN_TAG_USED
3685         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3686                 base_flags |= (TXD_FLAG_VLAN |
3687                                (vlan_tx_tag_get(skb) << 16));
3688 #endif
3689
3690         /* Queue skb data, a.k.a. the main skb fragment. */
3691         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3692
3693         tp->tx_buffers[entry].skb = skb;
3694         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3695
3696         would_hit_hwbug = 0;
3697
3698         if (tg3_4g_overflow_test(mapping, len))
3699                 would_hit_hwbug = 1;
3700
3701         tg3_set_txd(tp, entry, mapping, len, base_flags,
3702                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3703
3704         entry = NEXT_TX(entry);
3705
3706         /* Now loop through additional data fragments, and queue them. */
3707         if (skb_shinfo(skb)->nr_frags > 0) {
3708                 unsigned int i, last;
3709
3710                 last = skb_shinfo(skb)->nr_frags - 1;
3711                 for (i = 0; i <= last; i++) {
3712                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3713
3714                         len = frag->size;
3715                         mapping = pci_map_page(tp->pdev,
3716                                                frag->page,
3717                                                frag->page_offset,
3718                                                len, PCI_DMA_TODEVICE);
3719
3720                         tp->tx_buffers[entry].skb = NULL;
3721                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3722
3723                         if (tg3_4g_overflow_test(mapping, len))
3724                                 would_hit_hwbug = 1;
3725
3726                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3727                                 tg3_set_txd(tp, entry, mapping, len,
3728                                             base_flags, (i == last)|(mss << 1));
3729                         else
3730                                 tg3_set_txd(tp, entry, mapping, len,
3731                                             base_flags, (i == last));
3732
3733                         entry = NEXT_TX(entry);
3734                 }
3735         }
3736
3737         if (would_hit_hwbug) {
3738                 u32 last_plus_one = entry;
3739                 u32 start;
3740
3741                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3742                 start &= (TG3_TX_RING_SIZE - 1);
3743
3744                 /* If the workaround fails due to memory/mapping
3745                  * failure, silently drop this packet.
3746                  */
3747                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3748                                                 &start, base_flags, mss))
3749                         goto out_unlock;
3750
3751                 entry = start;
3752         }
3753
3754         /* Packets are ready, update Tx producer idx local and on card. */
3755         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3756
3757         tp->tx_prod = entry;
3758         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3759                 netif_stop_queue(dev);
3760                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3761                         netif_wake_queue(tp->dev);
3762         }
3763
3764 out_unlock:
3765         mmiowb();
3766         spin_unlock(&tp->tx_lock);
3767
3768         dev->trans_start = jiffies;
3769
3770         return NETDEV_TX_OK;
3771 }
3772
3773 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3774                                int new_mtu)
3775 {
3776         dev->mtu = new_mtu;
3777
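        /* On 5780-class chips a jumbo MTU and TSO are mutually exclusive
         * here: going above the standard Ethernet MTU turns TSO off instead
         * of enabling the jumbo ring that other chips use.
         */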
3778         if (new_mtu > ETH_DATA_LEN) {
3779                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3780                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3781                         ethtool_op_set_tso(dev, 0);
3782                 }
3783                 else
3784                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3785         } else {
3786                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3787                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3788                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3789         }
3790 }
3791
3792 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3793 {
3794         struct tg3 *tp = netdev_priv(dev);
3795
3796         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3797                 return -EINVAL;
3798
3799         if (!netif_running(dev)) {
3800                 /* We'll just catch it later when the
3801                  * device is brought up.
3802                  */
3803                 tg3_set_mtu(dev, tp, new_mtu);
3804                 return 0;
3805         }
3806
3807         tg3_netif_stop(tp);
3808
3809         tg3_full_lock(tp, 1);
3810
3811         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3812
3813         tg3_set_mtu(dev, tp, new_mtu);
3814
3815         tg3_init_hw(tp);
3816
3817         tg3_netif_start(tp);
3818
3819         tg3_full_unlock(tp);
3820
3821         return 0;
3822 }
3823
3824 /* Free up pending packets in all rx/tx rings.
3825  *
3826  * The chip has been shut down and the driver detached from
3827  * the networking layer, so no interrupts or new tx packets will
3828  * end up in the driver.  tp->{tx,}lock is not held and we are not
3829  * in an interrupt context and thus may sleep.
3830  */
3831 static void tg3_free_rings(struct tg3 *tp)
3832 {
3833         struct ring_info *rxp;
3834         int i;
3835
3836         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3837                 rxp = &tp->rx_std_buffers[i];
3838
3839                 if (rxp->skb == NULL)
3840                         continue;
3841                 pci_unmap_single(tp->pdev,
3842                                  pci_unmap_addr(rxp, mapping),
3843                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3844                                  PCI_DMA_FROMDEVICE);
3845                 dev_kfree_skb_any(rxp->skb);
3846                 rxp->skb = NULL;
3847         }
3848
3849         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3850                 rxp = &tp->rx_jumbo_buffers[i];
3851
3852                 if (rxp->skb == NULL)
3853                         continue;
3854                 pci_unmap_single(tp->pdev,
3855                                  pci_unmap_addr(rxp, mapping),
3856                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3857                                  PCI_DMA_FROMDEVICE);
3858                 dev_kfree_skb_any(rxp->skb);
3859                 rxp->skb = NULL;
3860         }
3861
3862         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3863                 struct tx_ring_info *txp;
3864                 struct sk_buff *skb;
3865                 int j;
3866
3867                 txp = &tp->tx_buffers[i];
3868                 skb = txp->skb;
3869
3870                 if (skb == NULL) {
3871                         i++;
3872                         continue;
3873                 }
3874
3875                 pci_unmap_single(tp->pdev,
3876                                  pci_unmap_addr(txp, mapping),
3877                                  skb_headlen(skb),
3878                                  PCI_DMA_TODEVICE);
3879                 txp->skb = NULL;
3880
3881                 i++;
3882
3883                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3884                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3885                         pci_unmap_page(tp->pdev,
3886                                        pci_unmap_addr(txp, mapping),
3887                                        skb_shinfo(skb)->frags[j].size,
3888                                        PCI_DMA_TODEVICE);
3889                         i++;
3890                 }
3891
3892                 dev_kfree_skb_any(skb);
3893         }
3894 }
3895
3896 /* Initialize tx/rx rings for packet processing.
3897  *
3898  * The chip has been shut down and the driver detached from
3899  * the networking layer, so no interrupts or new tx packets will
3900  * end up in the driver.  tp->{tx,}lock are held and thus
3901  * we may not sleep.
3902  */
3903 static void tg3_init_rings(struct tg3 *tp)
3904 {
3905         u32 i;
3906
3907         /* Free up all the SKBs. */
3908         tg3_free_rings(tp);
3909
3910         /* Zero out all descriptors. */
3911         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3912         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3913         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3914         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3915
3916         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3917         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3918             (tp->dev->mtu > ETH_DATA_LEN))
3919                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3920
3921         /* Initialize invariants of the rings; we only set this
3922          * stuff once.  This works because the card does not
3923          * write into the rx buffer posting rings.
3924          */
3925         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3926                 struct tg3_rx_buffer_desc *rxd;
3927
3928                 rxd = &tp->rx_std[i];
3929                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3930                         << RXD_LEN_SHIFT;
3931                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3932                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3933                                (i << RXD_OPAQUE_INDEX_SHIFT));
3934         }
3935
3936         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3937                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3938                         struct tg3_rx_buffer_desc *rxd;
3939
3940                         rxd = &tp->rx_jumbo[i];
3941                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3942                                 << RXD_LEN_SHIFT;
3943                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3944                                 RXD_FLAG_JUMBO;
3945                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3946                                (i << RXD_OPAQUE_INDEX_SHIFT));
3947                 }
3948         }
3949
3950         /* Now allocate fresh SKBs for each rx ring. */
3951         for (i = 0; i < tp->rx_pending; i++) {
3952                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3953                                      -1, i) < 0)
3954                         break;
3955         }
3956
3957         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3958                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3959                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3960                                              -1, i) < 0)
3961                                 break;
3962                 }
3963         }
3964 }
3965
3966 /*
3967  * Must not be invoked with interrupt sources disabled and
3968  * the hardware shutdown down.
3969  * the hardware shut down.
3970 static void tg3_free_consistent(struct tg3 *tp)
3971 {
3972         kfree(tp->rx_std_buffers);
3973         tp->rx_std_buffers = NULL;
3974         if (tp->rx_std) {
3975                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3976                                     tp->rx_std, tp->rx_std_mapping);
3977                 tp->rx_std = NULL;
3978         }
3979         if (tp->rx_jumbo) {
3980                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3981                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3982                 tp->rx_jumbo = NULL;
3983         }
3984         if (tp->rx_rcb) {
3985                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3986                                     tp->rx_rcb, tp->rx_rcb_mapping);
3987                 tp->rx_rcb = NULL;
3988         }
3989         if (tp->tx_ring) {
3990                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3991                         tp->tx_ring, tp->tx_desc_mapping);
3992                 tp->tx_ring = NULL;
3993         }
3994         if (tp->hw_status) {
3995                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3996                                     tp->hw_status, tp->status_mapping);
3997                 tp->hw_status = NULL;
3998         }
3999         if (tp->hw_stats) {
4000                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4001                                     tp->hw_stats, tp->stats_mapping);
4002                 tp->hw_stats = NULL;
4003         }
4004 }
4005
4006 /*
4007  * Must not be invoked with interrupt sources disabled and
4008  * the hardware shut down.  Can sleep.
4009  */
4010 static int tg3_alloc_consistent(struct tg3 *tp)
4011 {
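        /* A single allocation backs all three software ring-info arrays:
         * standard RX entries first, then jumbo RX entries, then TX entries,
         * carved out by the pointer arithmetic just below.
         */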
4012         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4013                                       (TG3_RX_RING_SIZE +
4014                                        TG3_RX_JUMBO_RING_SIZE)) +
4015                                      (sizeof(struct tx_ring_info) *
4016                                       TG3_TX_RING_SIZE),
4017                                      GFP_KERNEL);
4018         if (!tp->rx_std_buffers)
4019                 return -ENOMEM;
4020
4021         memset(tp->rx_std_buffers, 0,
4022                (sizeof(struct ring_info) *
4023                 (TG3_RX_RING_SIZE +
4024                  TG3_RX_JUMBO_RING_SIZE)) +
4025                (sizeof(struct tx_ring_info) *
4026                 TG3_TX_RING_SIZE));
4027
4028         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4029         tp->tx_buffers = (struct tx_ring_info *)
4030                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4031
4032         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4033                                           &tp->rx_std_mapping);
4034         if (!tp->rx_std)
4035                 goto err_out;
4036
4037         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4038                                             &tp->rx_jumbo_mapping);
4039
4040         if (!tp->rx_jumbo)
4041                 goto err_out;
4042
4043         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4044                                           &tp->rx_rcb_mapping);
4045         if (!tp->rx_rcb)
4046                 goto err_out;
4047
4048         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4049                                            &tp->tx_desc_mapping);
4050         if (!tp->tx_ring)
4051                 goto err_out;
4052
4053         tp->hw_status = pci_alloc_consistent(tp->pdev,
4054                                              TG3_HW_STATUS_SIZE,
4055                                              &tp->status_mapping);
4056         if (!tp->hw_status)
4057                 goto err_out;
4058
4059         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4060                                             sizeof(struct tg3_hw_stats),
4061                                             &tp->stats_mapping);
4062         if (!tp->hw_stats)
4063                 goto err_out;
4064
4065         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4066         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4067
4068         return 0;
4069
4070 err_out:
4071         tg3_free_consistent(tp);
4072         return -ENOMEM;
4073 }
4074
4075 #define MAX_WAIT_CNT 1000
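/* 1000 polls at 100 usec apiece: each block gets roughly 100 ms to stop. */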
4076
4077 /* To stop a block, clear the enable bit and poll till it
4078  * clears.  tp->lock is held.
4079  */
4080 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4081 {
4082         unsigned int i;
4083         u32 val;
4084
4085         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4086                 switch (ofs) {
4087                 case RCVLSC_MODE:
4088                 case DMAC_MODE:
4089                 case MBFREE_MODE:
4090                 case BUFMGR_MODE:
4091                 case MEMARB_MODE:
4092                         /* We can't enable/disable these bits of the
4093                          * 5705/5750, just say success.
4094                          */
4095                         return 0;
4096
4097                 default:
4098                         break;
4099                 };
4100         }
4101
4102         val = tr32(ofs);
4103         val &= ~enable_bit;
4104         tw32_f(ofs, val);
4105
4106         for (i = 0; i < MAX_WAIT_CNT; i++) {
4107                 udelay(100);
4108                 val = tr32(ofs);
4109                 if ((val & enable_bit) == 0)
4110                         break;
4111         }
4112
4113         if (i == MAX_WAIT_CNT && !silent) {
4114                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4115                        "ofs=%lx enable_bit=%x\n",
4116                        ofs, enable_bit);
4117                 return -ENODEV;
4118         }
4119
4120         return 0;
4121 }
4122
4123 /* tp->lock is held. */
4124 static int tg3_abort_hw(struct tg3 *tp, int silent)
4125 {
4126         int i, err;
4127
4128         tg3_disable_ints(tp);
4129
4130         tp->rx_mode &= ~RX_MODE_ENABLE;
4131         tw32_f(MAC_RX_MODE, tp->rx_mode);
4132         udelay(10);
4133
4134         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4135         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4136         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4137         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4138         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4139         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4140
4141         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4142         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4143         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4144         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4145         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4146         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4147         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4148
4149         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4150         tw32_f(MAC_MODE, tp->mac_mode);
4151         udelay(40);
4152
4153         tp->tx_mode &= ~TX_MODE_ENABLE;
4154         tw32_f(MAC_TX_MODE, tp->tx_mode);
4155
4156         for (i = 0; i < MAX_WAIT_CNT; i++) {
4157                 udelay(100);
4158                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4159                         break;
4160         }
4161         if (i >= MAX_WAIT_CNT) {
4162                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4163                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4164                        tp->dev->name, tr32(MAC_TX_MODE));
4165                 err |= -ENODEV;
4166         }
4167
4168         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4169         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4170         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4171
4172         tw32(FTQ_RESET, 0xffffffff);
4173         tw32(FTQ_RESET, 0x00000000);
4174
4175         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4176         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4177
4178         if (tp->hw_status)
4179                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4180         if (tp->hw_stats)
4181                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4182
4183         return err;
4184 }
4185
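     /* NVRAM is shared with the bootcode/ASF firmware, so access is
      * software-arbitrated: request grant 1 through NVRAM_SWARB and poll
      * for SWARB_GNT1.  8000 iterations of udelay(20) gives the other
      * agent roughly 160 ms to release the interface before we give up.
      */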
4186 /* tp->lock is held. */
4187 static int tg3_nvram_lock(struct tg3 *tp)
4188 {
4189         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4190                 int i;
4191
4192                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4193                 for (i = 0; i < 8000; i++) {
4194                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4195                                 break;
4196                         udelay(20);
4197                 }
4198                 if (i == 8000)
4199                         return -ENODEV;
4200         }
4201         return 0;
4202 }
4203
4204 /* tp->lock is held. */
4205 static void tg3_nvram_unlock(struct tg3 *tp)
4206 {
4207         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4208                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4209 }
4210
4211 /* tp->lock is held. */
4212 static void tg3_enable_nvram_access(struct tg3 *tp)
4213 {
4214         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4215             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4216                 u32 nvaccess = tr32(NVRAM_ACCESS);
4217
4218                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4219         }
4220 }
4221
4222 /* tp->lock is held. */
4223 static void tg3_disable_nvram_access(struct tg3 *tp)
4224 {
4225         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4226             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4227                 u32 nvaccess = tr32(NVRAM_ACCESS);
4228
4229                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4230         }
4231 }
4232
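     /* The next three helpers post driver-state signatures into NIC SRAM
      * mailboxes (a firmware magic value before the reset and DRV_STATE_*
      * words before and after it) so the ASF/management firmware can
      * follow whether the driver is starting, unloading or suspending.
      */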
4233 /* tp->lock is held. */
4234 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4235 {
4236         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4237                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4238                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4239
4240         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4241                 switch (kind) {
4242                 case RESET_KIND_INIT:
4243                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4244                                       DRV_STATE_START);
4245                         break;
4246
4247                 case RESET_KIND_SHUTDOWN:
4248                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4249                                       DRV_STATE_UNLOAD);
4250                         break;
4251
4252                 case RESET_KIND_SUSPEND:
4253                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4254                                       DRV_STATE_SUSPEND);
4255                         break;
4256
4257                 default:
4258                         break;
4259                 }
4260         }
4261 }
4262
4263 /* tp->lock is held. */
4264 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4265 {
4266         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4267                 switch (kind) {
4268                 case RESET_KIND_INIT:
4269                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4270                                       DRV_STATE_START_DONE);
4271                         break;
4272
4273                 case RESET_KIND_SHUTDOWN:
4274                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4275                                       DRV_STATE_UNLOAD_DONE);
4276                         break;
4277
4278                 default:
4279                         break;
4280                 }
4281         }
4282 }
4283
4284 /* tp->lock is held. */
4285 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4286 {
4287         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4288                 switch (kind) {
4289                 case RESET_KIND_INIT:
4290                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4291                                       DRV_STATE_START);
4292                         break;
4293
4294                 case RESET_KIND_SHUTDOWN:
4295                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4296                                       DRV_STATE_UNLOAD);
4297                         break;
4298
4299                 case RESET_KIND_SUSPEND:
4300                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4301                                       DRV_STATE_SUSPEND);
4302                         break;
4303
4304                 default:
4305                         break;
4306                 }
4307         }
4308 }
4309
4310 static void tg3_stop_fw(struct tg3 *);
4311
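     /* Core chip reset, roughly: take the NVRAM lock, switch to a write
      * method that does not read back (a read back while the core resets
      * can lock up the machine), hit GRC_MISC_CFG_CORECLK_RESET, delay and
      * flush the posted write with a PCI config read, restore the PCI /
      * PCI-X / MSI state the reset clobbered, and finally wait for the
      * bootcode to signal completion through the firmware mailbox.
      */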
4312 /* tp->lock is held. */
4313 static int tg3_chip_reset(struct tg3 *tp)
4314 {
4315         u32 val;
4316         void (*write_op)(struct tg3 *, u32, u32);
4317         int i;
4318
4319         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4320                 tg3_nvram_lock(tp);
4321
4322         /*
4323          * We must avoid the readl() that normally takes place.
4324          * It locks up machines, causes machine checks, and other
4325          * fun things.  So, temporarily disable the 5701
4326          * hardware workaround while we do the reset.
4327          */
4328         write_op = tp->write32;
4329         if (write_op == tg3_write_flush_reg32)
4330                 tp->write32 = tg3_write32;
4331
4332         /* do the reset */
4333         val = GRC_MISC_CFG_CORECLK_RESET;
4334
4335         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4336                 if (tr32(0x7e2c) == 0x60) {
4337                         tw32(0x7e2c, 0x20);
4338                 }
4339                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4340                         tw32(GRC_MISC_CFG, (1 << 29));
4341                         val |= (1 << 29);
4342                 }
4343         }
4344
4345         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4346                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4347         tw32(GRC_MISC_CFG, val);
4348
4349         /* restore 5701 hardware bug workaround write method */
4350         tp->write32 = write_op;
4351
4352         /* Unfortunately, we have to delay before the PCI read back.
4353          * Some 575X chips will not even respond to a PCI cfg access
4354          * when the reset command is given to the chip.
4355          *
4356          * How do these hardware designers expect things to work
4357          * properly if the PCI write is posted for a long period
4358          * of time?  It is always necessary to have some method by
4359          * which a register read back can occur to push out the
4360          * write that does the reset.
4361          *
4362          * For most tg3 variants the trick below works.
4363          * Ho hum...
4364          */
4365         udelay(120);
4366
4367         /* Flush PCI posted writes.  The normal MMIO registers
4368          * are inaccessible at this time so this is the only
4369          * way to do this reliably (actually, this is no longer
4370          * the case, see above).  I tried to use indirect
4371          * register read/write but this upset some 5701 variants.
4372          */
4373         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4374
4375         udelay(120);
4376
4377         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4378                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4379                         int i;
4380                         u32 cfg_val;
4381
4382                         /* Wait for link training to complete.  */
4383                         for (i = 0; i < 5000; i++)
4384                                 udelay(100);
4385
4386                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4387                         pci_write_config_dword(tp->pdev, 0xc4,
4388                                                cfg_val | (1 << 15));
4389                 }
4390                 /* Set PCIE max payload size and clear error status.  */
4391                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4392         }
4393
4394         /* Re-enable indirect register accesses. */
4395         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4396                                tp->misc_host_ctrl);
4397
4398         /* Set MAX PCI retry to zero. */
4399         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4400         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4401             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4402                 val |= PCISTATE_RETRY_SAME_DMA;
4403         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4404
4405         pci_restore_state(tp->pdev);
4406
4407         /* Make sure PCI-X relaxed ordering bit is clear. */
4408         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4409         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4410         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4411
4412         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4413                 u32 val;
4414
4415                 /* Chip reset on 5780 will reset the MSI enable bit,
4416                  * so we need to restore it.
4417                  */
4418                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4419                         u16 ctrl;
4420
4421                         pci_read_config_word(tp->pdev,
4422                                              tp->msi_cap + PCI_MSI_FLAGS,
4423                                              &ctrl);
4424                         pci_write_config_word(tp->pdev,
4425                                               tp->msi_cap + PCI_MSI_FLAGS,
4426                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4427                         val = tr32(MSGINT_MODE);
4428                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4429                 }
4430
4431                 val = tr32(MEMARB_MODE);
4432                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4433
4434         } else
4435                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4436
4437         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4438                 tg3_stop_fw(tp);
4439                 tw32(0x5000, 0x400);
4440         }
4441
4442         tw32(GRC_MODE, tp->grc_mode);
4443
4444         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4445                 u32 val = tr32(0xc4);
4446
4447                 tw32(0xc4, val | (1 << 15));
4448         }
4449
4450         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4451             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4452                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4453                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4454                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4455                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4456         }
4457
4458         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4459                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4460                 tw32_f(MAC_MODE, tp->mac_mode);
4461         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4462                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4463                 tw32_f(MAC_MODE, tp->mac_mode);
4464         } else
4465                 tw32_f(MAC_MODE, 0);
4466         udelay(40);
4467
4468         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4469                 /* Wait for firmware initialization to complete. */
4470                 for (i = 0; i < 100000; i++) {
4471                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4472                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4473                                 break;
4474                         udelay(10);
4475                 }
4476                 if (i >= 100000) {
4477                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4478                                "firmware will not restart magic=%08x\n",
4479                                tp->dev->name, val);
4480                         return -ENODEV;
4481                 }
4482         }
4483
4484         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4485             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4486                 u32 val = tr32(0x7c00);
4487
4488                 tw32(0x7c00, val | (1 << 25));
4489         }
4490
4491         /* Reprobe ASF enable state.  */
4492         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4493         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4494         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4495         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4496                 u32 nic_cfg;
4497
4498                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4499                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4500                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4501                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4502                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4503                 }
4504         }
4505
4506         return 0;
4507 }
4508
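     /* If ASF is enabled, ask the firmware to pause: write the
      * FWCMD_NICDRV_PAUSE_FW command into the firmware command mailbox,
      * ring the doorbell by setting bit 14 of GRC_RX_CPU_EVENT, and give
      * the firmware up to ~100 usec to acknowledge by clearing that bit.
      */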
4509 /* tp->lock is held. */
4510 static void tg3_stop_fw(struct tg3 *tp)
4511 {
4512         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4513                 u32 val;
4514                 int i;
4515
4516                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4517                 val = tr32(GRC_RX_CPU_EVENT);
4518                 val |= (1 << 14);
4519                 tw32(GRC_RX_CPU_EVENT, val);
4520
4521                 /* Wait for RX cpu to ACK the event.  */
4522                 for (i = 0; i < 100; i++) {
4523                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4524                                 break;
4525                         udelay(1);
4526                 }
4527         }
4528 }
4529
4530 /* tp->lock is held. */
4531 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4532 {
4533         int err;
4534
4535         tg3_stop_fw(tp);
4536
4537         tg3_write_sig_pre_reset(tp, kind);
4538
4539         tg3_abort_hw(tp, silent);
4540         err = tg3_chip_reset(tp);
4541
4542         tg3_write_sig_legacy(tp, kind);
4543         tg3_write_sig_post_reset(tp, kind);
4544
4545         if (err)
4546                 return err;
4547
4548         return 0;
4549 }
4550
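     /* The image below is the workaround firmware loaded by
      * tg3_load_5701_a0_firmware_fix() on 5701 A0 parts.  The arrays hold
      * raw MIPS instruction and data words; the _ADDR/_LEN defines give
      * the image's section layout (text, rodata, data, bss) in the
      * on-chip CPU's address space.
      */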
4551 #define TG3_FW_RELEASE_MAJOR    0x0
4552 #define TG3_FW_RELASE_MINOR     0x0
4553 #define TG3_FW_RELEASE_FIX      0x0
4554 #define TG3_FW_START_ADDR       0x08000000
4555 #define TG3_FW_TEXT_ADDR        0x08000000
4556 #define TG3_FW_TEXT_LEN         0x9c0
4557 #define TG3_FW_RODATA_ADDR      0x080009c0
4558 #define TG3_FW_RODATA_LEN       0x60
4559 #define TG3_FW_DATA_ADDR        0x08000a40
4560 #define TG3_FW_DATA_LEN         0x20
4561 #define TG3_FW_SBSS_ADDR        0x08000a60
4562 #define TG3_FW_SBSS_LEN         0xc
4563 #define TG3_FW_BSS_ADDR         0x08000a70
4564 #define TG3_FW_BSS_LEN          0x10
4565
4566 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4567         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4568         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4569         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4570         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4571         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4572         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4573         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4574         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4575         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4576         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4577         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4578         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4579         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4580         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4581         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4582         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4583         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4584         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4585         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4586         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4587         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4588         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4589         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4590         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4591         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4592         0, 0, 0, 0, 0, 0,
4593         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4594         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4595         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4596         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4597         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4598         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4599         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4600         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4601         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4602         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4603         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4604         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4605         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4606         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4607         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4608         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4609         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4610         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4611         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4612         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4613         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4614         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4615         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4616         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4617         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4618         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4619         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4620         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4621         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4622         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4623         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4624         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4625         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4626         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4627         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4628         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4629         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4630         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4631         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4632         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4633         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4634         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4635         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4636         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4637         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4638         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4639         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4640         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4641         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4642         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4643         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4644         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4645         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4646         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4647         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4648         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4649         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4650         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4651         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4652         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4653         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4654         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4655         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4656         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4657         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4658 };
4659
4660 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4661         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4662         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4663         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4664         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4665         0x00000000
4666 };
4667
4668 #if 0 /* All zeros, don't eat up space with it. */
4669 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4670         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4671         0x00000000, 0x00000000, 0x00000000, 0x00000000
4672 };
4673 #endif
4674
4675 #define RX_CPU_SCRATCH_BASE     0x30000
4676 #define RX_CPU_SCRATCH_SIZE     0x04000
4677 #define TX_CPU_SCRATCH_BASE     0x34000
4678 #define TX_CPU_SCRATCH_SIZE     0x04000
4679
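     /* Each on-chip CPU has 16K of scratch memory (RX at 0x30000, TX at
      * 0x34000) into which a firmware image is copied before the CPU is
      * released from halt.
      */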
4680 /* tp->lock is held. */
4681 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4682 {
4683         int i;
4684
4685         if (offset == TX_CPU_BASE &&
4686             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4687                 BUG();
4688
4689         if (offset == RX_CPU_BASE) {
4690                 for (i = 0; i < 10000; i++) {
4691                         tw32(offset + CPU_STATE, 0xffffffff);
4692                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4693                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4694                                 break;
4695                 }
4696
4697                 tw32(offset + CPU_STATE, 0xffffffff);
4698                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4699                 udelay(10);
4700         } else {
4701                 for (i = 0; i < 10000; i++) {
4702                         tw32(offset + CPU_STATE, 0xffffffff);
4703                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4704                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4705                                 break;
4706                 }
4707         }
4708
4709         if (i >= 10000) {
4710                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4711                        "%s CPU\n",
4712                        tp->dev->name,
4713                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4714                 return -ENODEV;
4715         }
4716         return 0;
4717 }
4718
4719 struct fw_info {
4720         unsigned int text_base;
4721         unsigned int text_len;
4722         u32 *text_data;
4723         unsigned int rodata_base;
4724         unsigned int rodata_len;
4725         u32 *rodata_data;
4726         unsigned int data_base;
4727         unsigned int data_len;
4728         u32 *data_data;
4729 };
4730
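     /* Copy one firmware image into a CPU's scratch memory: halt the CPU
      * (holding the NVRAM lock so we don't race with bootcode), zero the
      * whole scratch area, then write the text, rodata and data sections
      * word by word at their (section_base & 0xffff) offsets within the
      * scratch area.
      */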
4731 /* tp->lock is held. */
4732 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4733                                  int cpu_scratch_size, struct fw_info *info)
4734 {
4735         int err, i;
4736         void (*write_op)(struct tg3 *, u32, u32);
4737
4738         if (cpu_base == TX_CPU_BASE &&
4739             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4740                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4741                        "TX cpu firmware on %s which is 5705.\n",
4742                        tp->dev->name);
4743                 return -EINVAL;
4744         }
4745
4746         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4747                 write_op = tg3_write_mem;
4748         else
4749                 write_op = tg3_write_indirect_reg32;
4750
4751         /* It is possible that bootcode is still loading at this point.
4752          * Get the nvram lock before halting the cpu.
4753          */
4754         tg3_nvram_lock(tp);
4755         err = tg3_halt_cpu(tp, cpu_base);
4756         tg3_nvram_unlock(tp);
4757         if (err)
4758                 goto out;
4759
4760         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4761                 write_op(tp, cpu_scratch_base + i, 0);
4762         tw32(cpu_base + CPU_STATE, 0xffffffff);
4763         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4764         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4765                 write_op(tp, (cpu_scratch_base +
4766                               (info->text_base & 0xffff) +
4767                               (i * sizeof(u32))),
4768                          (info->text_data ?
4769                           info->text_data[i] : 0));
4770         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4771                 write_op(tp, (cpu_scratch_base +
4772                               (info->rodata_base & 0xffff) +
4773                               (i * sizeof(u32))),
4774                          (info->rodata_data ?
4775                           info->rodata_data[i] : 0));
4776         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4777                 write_op(tp, (cpu_scratch_base +
4778                               (info->data_base & 0xffff) +
4779                               (i * sizeof(u32))),
4780                          (info->data_data ?
4781                           info->data_data[i] : 0));
4782
4783         err = 0;
4784
4785 out:
4786         return err;
4787 }
4788
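     /* Load the 5701 A0 workaround image into both CPUs' scratch areas,
      * then start only the RX CPU: point its PC at TG3_FW_TEXT_ADDR,
      * retry a few times if the PC does not take, and clear the halt bit.
      */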
4789 /* tp->lock is held. */
4790 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4791 {
4792         struct fw_info info;
4793         int err, i;
4794
4795         info.text_base = TG3_FW_TEXT_ADDR;
4796         info.text_len = TG3_FW_TEXT_LEN;
4797         info.text_data = &tg3FwText[0];
4798         info.rodata_base = TG3_FW_RODATA_ADDR;
4799         info.rodata_len = TG3_FW_RODATA_LEN;
4800         info.rodata_data = &tg3FwRodata[0];
4801         info.data_base = TG3_FW_DATA_ADDR;
4802         info.data_len = TG3_FW_DATA_LEN;
4803         info.data_data = NULL;
4804
4805         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4806                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4807                                     &info);
4808         if (err)
4809                 return err;
4810
4811         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4812                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4813                                     &info);
4814         if (err)
4815                 return err;
4816
4817         /* Now startup only the RX cpu. */
4818         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4819         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4820
4821         for (i = 0; i < 5; i++) {
4822                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4823                         break;
4824                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4825                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4826                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4827                 udelay(1000);
4828         }
4829         if (i >= 5) {
4830                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX "
4831                        "CPU PC for %s: got %08x, expected %08x\n",
4832                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4833                        TG3_FW_TEXT_ADDR);
4834                 return -ENODEV;
4835         }
4836         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4837         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4838
4839         return 0;
4840 }
4841
4842 #if TG3_TSO_SUPPORT != 0
4843
4844 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4845 #define TG3_TSO_FW_RELASE_MINOR         0x6
4846 #define TG3_TSO_FW_RELEASE_FIX          0x0
4847 #define TG3_TSO_FW_START_ADDR           0x08000000
4848 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4849 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4850 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4851 #define TG3_TSO_FW_RODATA_LEN           0x60
4852 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4853 #define TG3_TSO_FW_DATA_LEN             0x30
4854 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4855 #define TG3_TSO_FW_SBSS_LEN             0x2c
4856 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4857 #define TG3_TSO_FW_BSS_LEN              0x894
4858
4859 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4860         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4861         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4862         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4863         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4864         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4865         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4866         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4867         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4868         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4869         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4870         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4871         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4872         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4873         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4874         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4875         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4876         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4877         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4878         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4879         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4880         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4881         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4882         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4883         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4884         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4885         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4886         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4887         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4888         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4889         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4890         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4891         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4892         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4893         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4894         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4895         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4896         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4897         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4898         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4899         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4900         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4901         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4902         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4903         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4904         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4905         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4906         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4907         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4908         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4909         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4910         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4911         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4912         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4913         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4914         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4915         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4916         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4917         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4918         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4919         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4920         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4921         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4922         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4923         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4924         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4925         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4926         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4927         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4928         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4929         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4930         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4931         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4932         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4933         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4934         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4935         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4936         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4937         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4938         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4939         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4940         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4941         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4942         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4943         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4944         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4945         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4946         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4947         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4948         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4949         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4950         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4951         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4952         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4953         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4954         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4955         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4956         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4957         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4958         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4959         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4960         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4961         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4962         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4963         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4964         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4965         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4966         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4967         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4968         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4969         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4970         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4971         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4972         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4973         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4974         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4975         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4976         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4977         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4978         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4979         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4980         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4981         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4982         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4983         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4984         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4985         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4986         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4987         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4988         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4989         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4990         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4991         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4992         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4993         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4994         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4995         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4996         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4997         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4998         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4999         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5000         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5001         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5002         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5003         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5004         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5005         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5006         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5007         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5008         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5009         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5010         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5011         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5012         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5013         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5014         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5015         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5016         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5017         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5018         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5019         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5020         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5021         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5022         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5023         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5024         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5025         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5026         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5027         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5028         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5029         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5030         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5031         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5032         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5033         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5034         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5035         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5036         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5037         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5038         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5039         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5040         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5041         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5042         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5043         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5044         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5045         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5046         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5047         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5048         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5049         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5050         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5051         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5052         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5053         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5054         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5055         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5056         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5057         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5058         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5059         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5060         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5061         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5062         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5063         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5064         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5065         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5066         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5067         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5068         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5069         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5070         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5071         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5072         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5073         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5074         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5075         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5076         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5077         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5078         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5079         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5080         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5081         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5082         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5083         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5084         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5085         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5086         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5087         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5088         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5089         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5090         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5091         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5092         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5093         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5094         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5095         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5096         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5097         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5098         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5099         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5100         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5101         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5102         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5103         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5104         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5105         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5106         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5107         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5108         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5109         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5110         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5111         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5112         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5113         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5114         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5115         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5116         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5117         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5118         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5119         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5120         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5121         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5122         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5123         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5124         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5125         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5126         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5127         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5128         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5129         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5130         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5131         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5132         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5133         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5134         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5135         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5136         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5137         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5138         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5139         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5140         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5141         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5142         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5143         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5144 };
5145
5146 static u32 tg3TsoFwRodata[] = {
5147         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5148         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5149         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5150         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5151         0x00000000,
5152 };
5153
5154 static u32 tg3TsoFwData[] = {
5155         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5156         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5157         0x00000000,
5158 };
5159
5160 /* 5705 needs a special version of the TSO firmware.  */
5161 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5162 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5163 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5164 #define TG3_TSO5_FW_START_ADDR          0x00010000
5165 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5166 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5167 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5168 #define TG3_TSO5_FW_RODATA_LEN          0x50
5169 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5170 #define TG3_TSO5_FW_DATA_LEN            0x20
5171 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5172 #define TG3_TSO5_FW_SBSS_LEN            0x28
5173 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5174 #define TG3_TSO5_FW_BSS_LEN             0x88
5175
5176 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5177         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5178         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5179         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5180         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5181         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5182         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5183         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5184         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5185         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5186         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5187         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5188         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5189         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5190         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5191         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5192         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5193         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5194         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5195         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5196         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5197         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5198         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5199         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5200         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5201         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5202         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5203         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5204         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5205         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5206         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5207         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5208         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5209         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5210         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5211         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5212         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5213         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5214         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5215         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5216         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5217         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5218         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5219         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5220         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5221         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5222         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5223         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5224         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5225         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5226         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5227         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5228         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5229         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5230         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5231         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5232         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5233         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5234         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5235         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5236         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5237         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5238         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5239         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5240         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5241         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5242         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5243         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5244         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5245         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5246         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5247         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5248         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5249         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5250         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5251         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5252         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5253         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5254         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5255         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5256         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5257         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5258         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5259         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5260         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5261         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5262         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5263         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5264         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5265         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5266         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5267         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5268         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5269         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5270         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5271         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5272         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5273         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5274         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5275         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5276         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5277         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5278         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5279         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5280         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5281         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5282         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5283         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5284         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5285         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5286         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5287         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5288         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5289         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5290         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5291         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5292         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5293         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5294         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5295         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5296         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5297         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5298         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5299         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5300         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5301         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5302         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5303         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5304         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5305         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5306         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5307         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5308         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5309         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5310         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5311         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5312         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5313         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5314         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5315         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5316         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5317         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5318         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5319         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5320         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5321         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5322         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5323         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5324         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5325         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5326         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5327         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5328         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5329         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5330         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5331         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5332         0x00000000, 0x00000000, 0x00000000,
5333 };
5334
5335 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5336         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5337         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5338         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5339         0x00000000, 0x00000000, 0x00000000,
5340 };
5341
5342 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5343         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5344         0x00000000, 0x00000000, 0x00000000,
5345 };
5346
5347 /* tp->lock is held. */
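/*
 * Pick the TSO firmware image appropriate for this chip, copy it into NIC
 * scratch memory with tg3_load_firmware_cpu(), then start the on-chip CPU
 * that runs it and verify its program counter.  5705-class parts use a
 * dedicated image run by the RX CPU; other chips load the standard image
 * onto the TX CPU.  Chips with hardware TSO need no firmware at all.
 */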
5348 static int tg3_load_tso_firmware(struct tg3 *tp)
5349 {
5350         struct fw_info info;
5351         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5352         int err, i;
5353
5354         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5355                 return 0;
5356
5357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5358                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5359                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5360                 info.text_data = &tg3Tso5FwText[0];
5361                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5362                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5363                 info.rodata_data = &tg3Tso5FwRodata[0];
5364                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5365                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5366                 info.data_data = &tg3Tso5FwData[0];
5367                 cpu_base = RX_CPU_BASE;
5368                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5369                 cpu_scratch_size = (info.text_len +
5370                                     info.rodata_len +
5371                                     info.data_len +
5372                                     TG3_TSO5_FW_SBSS_LEN +
5373                                     TG3_TSO5_FW_BSS_LEN);
5374         } else {
5375                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5376                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5377                 info.text_data = &tg3TsoFwText[0];
5378                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5379                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5380                 info.rodata_data = &tg3TsoFwRodata[0];
5381                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5382                 info.data_len = TG3_TSO_FW_DATA_LEN;
5383                 info.data_data = &tg3TsoFwData[0];
5384                 cpu_base = TX_CPU_BASE;
5385                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5386                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5387         }
5388
5389         err = tg3_load_firmware_cpu(tp, cpu_base,
5390                                     cpu_scratch_base, cpu_scratch_size,
5391                                     &info);
5392         if (err)
5393                 return err;
5394
5395         /* Now startup the cpu. */
5396         tw32(cpu_base + CPU_STATE, 0xffffffff);
5397         tw32_f(cpu_base + CPU_PC,    info.text_base);
5398
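        /*
         * Verify that the CPU latched the firmware entry point; retry up
         * to five times, halting the CPU and re-writing the PC between
         * attempts.
         */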
5399         for (i = 0; i < 5; i++) {
5400                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5401                         break;
5402                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5403                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5404                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5405                 udelay(1000);
5406         }
5407         if (i >= 5) {
5408                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5409                        "CPU PC for %s: is %08x, should be %08x\n",
5410                        tp->dev->name, tr32(cpu_base + CPU_PC),
5411                        info.text_base);
5412                 return -ENODEV;
5413         }
5414         tw32(cpu_base + CPU_STATE, 0xffffffff);
5415         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5416         return 0;
5417 }
5418
5419 #endif /* TG3_TSO_SUPPORT != 0 */
5420
5421 /* tp->lock is held. */
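/*
 * Program the station address into all four MAC address slots (and the
 * twelve extended slots on 5703/5704 chips), then derive the transmit
 * backoff seed from the sum of the address bytes.
 */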
5422 static void __tg3_set_mac_addr(struct tg3 *tp)
5423 {
5424         u32 addr_high, addr_low;
5425         int i;
5426
5427         addr_high = ((tp->dev->dev_addr[0] << 8) |
5428                      tp->dev->dev_addr[1]);
5429         addr_low = ((tp->dev->dev_addr[2] << 24) |
5430                     (tp->dev->dev_addr[3] << 16) |
5431                     (tp->dev->dev_addr[4] <<  8) |
5432                     (tp->dev->dev_addr[5] <<  0));
5433         for (i = 0; i < 4; i++) {
5434                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5435                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5436         }
5437
5438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5439             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5440                 for (i = 0; i < 12; i++) {
5441                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5442                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5443                 }
5444         }
5445
5446         addr_high = (tp->dev->dev_addr[0] +
5447                      tp->dev->dev_addr[1] +
5448                      tp->dev->dev_addr[2] +
5449                      tp->dev->dev_addr[3] +
5450                      tp->dev->dev_addr[4] +
5451                      tp->dev->dev_addr[5]) &
5452                 TX_BACKOFF_SEED_MASK;
5453         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5454 }
5455
5456 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5457 {
5458         struct tg3 *tp = netdev_priv(dev);
5459         struct sockaddr *addr = p;
5460
5461         if (!is_valid_ether_addr(addr->sa_data))
5462                 return -EINVAL;
5463
5464         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5465
5466         spin_lock_bh(&tp->lock);
5467         __tg3_set_mac_addr(tp);
5468         spin_unlock_bh(&tp->lock);
5469
5470         return 0;
5471 }
5472
5473 /* tp->lock is held. */
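/*
 * Fill in one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA address
 * of the ring (high and low words), the maxlen/flags word and, on chips
 * older than the 5705, the NIC-side address of the descriptors.
 */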
5474 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5475                            dma_addr_t mapping, u32 maxlen_flags,
5476                            u32 nic_addr)
5477 {
5478         tg3_write_mem(tp,
5479                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5480                       ((u64) mapping >> 32));
5481         tg3_write_mem(tp,
5482                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5483                       ((u64) mapping & 0xffffffff));
5484         tg3_write_mem(tp,
5485                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5486                        maxlen_flags);
5487
5488         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5489                 tg3_write_mem(tp,
5490                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5491                               nic_addr);
5492 }
5493
5494 static void __tg3_set_rx_mode(struct net_device *);
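/*
 * Program the host coalescing engine from an ethtool_coalesce structure.
 * The interrupt-time tick thresholds and the statistics block interval
 * are only programmed on pre-5705 chips, and the statistics interval is
 * forced to zero while the link is down.
 */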
5495 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5496 {
5497         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5498         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5499         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5500         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5501         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5502                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5503                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5504         }
5505         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5506         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5507         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5508                 u32 val = ec->stats_block_coalesce_usecs;
5509
5510                 if (!netif_carrier_ok(tp->dev))
5511                         val = 0;
5512
5513                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5514         }
5515 }
5516
5517 /* tp->lock is held. */
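/*
 * Full hardware (re)initialization: stop any running firmware, reset the
 * chip, rebuild the RX/TX rings, program the buffer manager watermarks,
 * the DMA engines, the MAC address, coalescing parameters and receive
 * rules, and finally bring up the PHY.
 */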
5518 static int tg3_reset_hw(struct tg3 *tp)
5519 {
5520         u32 val, rdmac_mode;
5521         int i, err, limit;
5522
5523         tg3_disable_ints(tp);
5524
5525         tg3_stop_fw(tp);
5526
5527         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5528
5529         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5530                 tg3_abort_hw(tp, 1);
5531         }
5532
5533         err = tg3_chip_reset(tp);
5534         if (err)
5535                 return err;
5536
5537         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5538
5539         /* This works around an issue with Athlon chipsets on
5540          * B3 tigon3 silicon.  This bit has no effect on any
5541          * other revision.  But do not set this on PCI Express
5542          * chips.
5543          */
5544         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5545                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5546         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5547
5548         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5549             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5550                 val = tr32(TG3PCI_PCISTATE);
5551                 val |= PCISTATE_RETRY_SAME_DMA;
5552                 tw32(TG3PCI_PCISTATE, val);
5553         }
5554
5555         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5556                 /* Enable some hw fixes.  */
5557                 val = tr32(TG3PCI_MSI_DATA);
5558                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5559                 tw32(TG3PCI_MSI_DATA, val);
5560         }
5561
5562         /* Descriptor ring init may make accesses to the
5563          * NIC SRAM area to setup the TX descriptors, so we
5564          * can only do this after the hardware has been
5565          * successfully reset.
5566          */
5567         tg3_init_rings(tp);
5568
5569         /* This value is determined during the probe time DMA
5570          * engine test, tg3_test_dma.
5571          */
5572         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5573
5574         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5575                           GRC_MODE_4X_NIC_SEND_RINGS |
5576                           GRC_MODE_NO_TX_PHDR_CSUM |
5577                           GRC_MODE_NO_RX_PHDR_CSUM);
5578         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5579         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5580                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5581         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5582                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5583
5584         tw32(GRC_MODE,
5585              tp->grc_mode |
5586              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5587
5588         /* Set up the timer prescaler register.  Clock is always 66MHz. */
5589         val = tr32(GRC_MISC_CFG);
5590         val &= ~0xff;
5591         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5592         tw32(GRC_MISC_CFG, val);
5593
5594         /* Initialize MBUF/DESC pool. */
5595         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5596                 /* Do nothing.  */
5597         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5598                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5600                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5601                 else
5602                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5603                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5604                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5605         }
5606 #if TG3_TSO_SUPPORT != 0
5607         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5608                 int fw_len;
5609
5610                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5611                           TG3_TSO5_FW_RODATA_LEN +
5612                           TG3_TSO5_FW_DATA_LEN +
5613                           TG3_TSO5_FW_SBSS_LEN +
5614                           TG3_TSO5_FW_BSS_LEN);
5615                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5616                 tw32(BUFMGR_MB_POOL_ADDR,
5617                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5618                 tw32(BUFMGR_MB_POOL_SIZE,
5619                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5620         }
5621 #endif
5622
5623         if (tp->dev->mtu <= ETH_DATA_LEN) {
5624                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5625                      tp->bufmgr_config.mbuf_read_dma_low_water);
5626                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5627                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5628                 tw32(BUFMGR_MB_HIGH_WATER,
5629                      tp->bufmgr_config.mbuf_high_water);
5630         } else {
5631                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5632                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5633                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5634                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5635                 tw32(BUFMGR_MB_HIGH_WATER,
5636                      tp->bufmgr_config.mbuf_high_water_jumbo);
5637         }
5638         tw32(BUFMGR_DMA_LOW_WATER,
5639              tp->bufmgr_config.dma_low_water);
5640         tw32(BUFMGR_DMA_HIGH_WATER,
5641              tp->bufmgr_config.dma_high_water);
5642
5643         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5644         for (i = 0; i < 2000; i++) {
5645                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5646                         break;
5647                 udelay(10);
5648         }
5649         if (i >= 2000) {
5650                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5651                        tp->dev->name);
5652                 return -ENODEV;
5653         }
5654
5655         /* Setup replenish threshold. */
5656         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5657
5658         /* Initialize TG3_BDINFO's at:
5659          *  RCVDBDI_STD_BD:     standard eth size rx ring
5660          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5661          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5662          *
5663          * like so:
5664          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5665          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5666          *                              ring attribute flags
5667          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5668          *
5669          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5670          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5671          *
5672          * The size of each ring is fixed in the firmware, but the location is
5673          * configurable.
5674          */
5675         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5676              ((u64) tp->rx_std_mapping >> 32));
5677         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5678              ((u64) tp->rx_std_mapping & 0xffffffff));
5679         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5680              NIC_SRAM_RX_BUFFER_DESC);
5681
5682         /* Don't even try to program the JUMBO/MINI buffer descriptor
5683          * configs on 5705.
5684          */
5685         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5686                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5687                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5688         } else {
5689                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5690                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5691
5692                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5693                      BDINFO_FLAGS_DISABLED);
5694
5695                 /* Setup replenish threshold. */
5696                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5697
5698                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5699                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5700                              ((u64) tp->rx_jumbo_mapping >> 32));
5701                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5702                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5703                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5704                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5705                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5706                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5707                 } else {
5708                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5709                              BDINFO_FLAGS_DISABLED);
5710                 }
5711
5712         }
5713
5714         /* There is only one send ring on 5705/5750, no need to explicitly
5715          * disable the others.
5716          */
5717         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5718                 /* Clear out send RCB ring in SRAM. */
5719                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5720                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5721                                       BDINFO_FLAGS_DISABLED);
5722         }
5723
5724         tp->tx_prod = 0;
5725         tp->tx_cons = 0;
5726         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5727         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5728
5729         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5730                        tp->tx_desc_mapping,
5731                        (TG3_TX_RING_SIZE <<
5732                         BDINFO_FLAGS_MAXLEN_SHIFT),
5733                        NIC_SRAM_TX_BUFFER_DESC);
5734
5735         /* There is only one receive return ring on 5705/5750, no need
5736          * to explicitly disable the others.
5737          */
5738         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5739                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5740                      i += TG3_BDINFO_SIZE) {
5741                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5742                                       BDINFO_FLAGS_DISABLED);
5743                 }
5744         }
5745
5746         tp->rx_rcb_ptr = 0;
5747         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5748
5749         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5750                        tp->rx_rcb_mapping,
5751                        (TG3_RX_RCB_RING_SIZE(tp) <<
5752                         BDINFO_FLAGS_MAXLEN_SHIFT),
5753                        0);
5754
5755         tp->rx_std_ptr = tp->rx_pending;
5756         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5757                      tp->rx_std_ptr);
5758
5759         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5760                                                 tp->rx_jumbo_pending : 0;
5761         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5762                      tp->rx_jumbo_ptr);
5763
5764         /* Initialize MAC address and backoff seed. */
5765         __tg3_set_mac_addr(tp);
5766
5767         /* MTU + ethernet header + FCS + optional VLAN tag */
5768         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5769
5770         /* The slot time is changed by tg3_setup_phy if we
5771          * run at gigabit with half duplex.
5772          */
5773         tw32(MAC_TX_LENGTHS,
5774              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5775              (6 << TX_LENGTHS_IPG_SHIFT) |
5776              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5777
5778         /* Receive rules. */
5779         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5780         tw32(RCVLPC_CONFIG, 0x0181);
5781
5782         /* Calculate RDMAC_MODE setting early, we need it to determine
5783          * the RCVLPC_STATE_ENABLE mask.
5784          */
5785         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5786                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5787                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5788                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5789                       RDMAC_MODE_LNGREAD_ENAB);
5790         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5791                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5792
5793         /* If statement applies to 5705 and 5750 PCI devices only */
5794         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5795              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5796             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5797                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5798                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5799                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5800                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5801                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5802                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5803                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5804                 }
5805         }
5806
5807         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5808                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5809
5810 #if TG3_TSO_SUPPORT != 0
5811         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5812                 rdmac_mode |= (1 << 27);
5813 #endif
5814
5815         /* Receive/send statistics. */
5816         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5817             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5818                 val = tr32(RCVLPC_STATS_ENABLE);
5819                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5820                 tw32(RCVLPC_STATS_ENABLE, val);
5821         } else {
5822                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5823         }
5824         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5825         tw32(SNDDATAI_STATSENAB, 0xffffff);
5826         tw32(SNDDATAI_STATSCTRL,
5827              (SNDDATAI_SCTRL_ENABLE |
5828               SNDDATAI_SCTRL_FASTUPD));
5829
5830         /* Setup host coalescing engine. */
5831         tw32(HOSTCC_MODE, 0);
5832         for (i = 0; i < 2000; i++) {
5833                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5834                         break;
5835                 udelay(10);
5836         }
5837
5838         __tg3_set_coalesce(tp, &tp->coal);
5839
5840         /* set status block DMA address */
5841         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5842              ((u64) tp->status_mapping >> 32));
5843         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5844              ((u64) tp->status_mapping & 0xffffffff));
5845
5846         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5847                 /* Status/statistics block address.  See tg3_timer,
5848                  * the tg3_periodic_fetch_stats call there, and
5849                  * tg3_get_stats to see how this works for 5705/5750 chips.
5850                  */
5851                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5852                      ((u64) tp->stats_mapping >> 32));
5853                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5854                      ((u64) tp->stats_mapping & 0xffffffff));
5855                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5856                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5857         }
5858
5859         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5860
5861         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5862         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5863         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5864                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5865
5866         /* Clear statistics/status block in chip, and status block in ram. */
5867         for (i = NIC_SRAM_STATS_BLK;
5868              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5869              i += sizeof(u32)) {
5870                 tg3_write_mem(tp, i, 0);
5871                 udelay(40);
5872         }
5873         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5874
5875         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5876                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5877                 /* reset to prevent losing 1st rx packet intermittently */
5878                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5879                 udelay(10);
5880         }
5881
5882         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5883                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5884         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5885         udelay(40);
5886
5887         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5888          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5889          * register to preserve the GPIO settings for LOMs. The GPIOs,
5890          * whether used as inputs or outputs, are set by boot code after
5891          * reset.
5892          */
5893         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5894                 u32 gpio_mask;
5895
5896                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5897                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5898
5899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5900                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5901                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5902
5903                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5904
5905                 /* GPIO1 must be driven high for eeprom write protect */
5906                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5907                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5908         }
5909         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5910         udelay(100);
5911
5912         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5913         tp->last_tag = 0;
5914
5915         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5916                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5917                 udelay(40);
5918         }
5919
5920         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5921                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5922                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5923                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5924                WDMAC_MODE_LNGREAD_ENAB);
5925
5926         /* If statement applies to 5705 and 5750 PCI devices only */
5927         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5928              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5930                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5931                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5932                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5933                         /* nothing */
5934                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5935                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5936                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5937                         val |= WDMAC_MODE_RX_ACCEL;
5938                 }
5939         }
5940
5941         tw32_f(WDMAC_MODE, val);
5942         udelay(40);
5943
5944         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5945                 val = tr32(TG3PCI_X_CAPS);
5946                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5947                         val &= ~PCIX_CAPS_BURST_MASK;
5948                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5949                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5950                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5951                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5952                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5953                                 val |= (tp->split_mode_max_reqs <<
5954                                         PCIX_CAPS_SPLIT_SHIFT);
5955                 }
5956                 tw32(TG3PCI_X_CAPS, val);
5957         }
5958
5959         tw32_f(RDMAC_MODE, rdmac_mode);
5960         udelay(40);
5961
5962         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5963         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5964                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5965         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5966         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5967         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5968         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5969         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5970 #if TG3_TSO_SUPPORT != 0
5971         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5972                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5973 #endif
5974         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5975         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5976
5977         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5978                 err = tg3_load_5701_a0_firmware_fix(tp);
5979                 if (err)
5980                         return err;
5981         }
5982
5983 #if TG3_TSO_SUPPORT != 0
5984         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5985                 err = tg3_load_tso_firmware(tp);
5986                 if (err)
5987                         return err;
5988         }
5989 #endif
5990
5991         tp->tx_mode = TX_MODE_ENABLE;
5992         tw32_f(MAC_TX_MODE, tp->tx_mode);
5993         udelay(100);
5994
5995         tp->rx_mode = RX_MODE_ENABLE;
5996         tw32_f(MAC_RX_MODE, tp->rx_mode);
5997         udelay(10);
5998
5999         if (tp->link_config.phy_is_low_power) {
6000                 tp->link_config.phy_is_low_power = 0;
6001                 tp->link_config.speed = tp->link_config.orig_speed;
6002                 tp->link_config.duplex = tp->link_config.orig_duplex;
6003                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6004         }
6005
6006         tp->mi_mode = MAC_MI_MODE_BASE;
6007         tw32_f(MAC_MI_MODE, tp->mi_mode);
6008         udelay(80);
6009
6010         tw32(MAC_LED_CTRL, tp->led_ctrl);
6011
6012         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6013         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6014                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6015                 udelay(10);
6016         }
6017         tw32_f(MAC_RX_MODE, tp->rx_mode);
6018         udelay(10);
6019
6020         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6021                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6022                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6023                         /* Set drive transmission level to 1.2V  */
6024                         /* only if the signal pre-emphasis bit is not set  */
6025                         val = tr32(MAC_SERDES_CFG);
6026                         val &= 0xfffff000;
6027                         val |= 0x880;
6028                         tw32(MAC_SERDES_CFG, val);
6029                 }
6030                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6031                         tw32(MAC_SERDES_CFG, 0x616000);
6032         }
6033
6034         /* Prevent chip from dropping frames when flow control
6035          * is enabled.
6036          */
6037         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6038
6039         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6040             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6041                 /* Use hardware link auto-negotiation */
6042                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6043         }
6044
6045         err = tg3_setup_phy(tp, 1);
6046         if (err)
6047                 return err;
6048
6049         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6050                 u32 tmp;
6051
6052                 /* Clear CRC stats. */
6053                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6054                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6055                         tg3_readphy(tp, 0x14, &tmp);
6056                 }
6057         }
6058
6059         __tg3_set_rx_mode(tp->dev);
6060
6061         /* Initialize receive rules. */
6062         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6063         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6064         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6065         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6066
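        /*
         * Only 8 receive rule slots are used on 5705-class chips outside
         * the 5780 family; everything else uses 16.  The top four slots
         * are left untouched when ASF firmware is enabled, and the switch
         * below clears whatever remains unused (the cases intentionally
         * fall through).
         */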
6067         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6068             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6069                 limit = 8;
6070         else
6071                 limit = 16;
6072         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6073                 limit -= 4;
6074         switch (limit) {
6075         case 16:
6076                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6077         case 15:
6078                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6079         case 14:
6080                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6081         case 13:
6082                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6083         case 12:
6084                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6085         case 11:
6086                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6087         case 10:
6088                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6089         case 9:
6090                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6091         case 8:
6092                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6093         case 7:
6094                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6095         case 6:
6096                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6097         case 5:
6098                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6099         case 4:
6100                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6101         case 3:
6102                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6103         case 2:
6104         case 1:
6105
6106         default:
6107                 break;
6108         }
6109
6110         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6111
6112         return 0;
6113 }
6114
6115 /* Called at device open time to get the chip ready for
6116  * packet processing.  Invoked with tp->lock held.
6117  */
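/*
 * Force the chip into the D0 power state, set up the core clocks, reset
 * the memory window base, and then perform the full reset/init sequence
 * in tg3_reset_hw().
 */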
6118 static int tg3_init_hw(struct tg3 *tp)
6119 {
6120         int err;
6121
6122         /* Force the chip into D0. */
6123         err = tg3_set_power_state(tp, 0);
6124         if (err)
6125                 goto out;
6126
6127         tg3_switch_clocks(tp);
6128
6129         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6130
6131         err = tg3_reset_hw(tp);
6132
6133 out:
6134         return err;
6135 }
6136
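/*
 * Accumulate a 32-bit hardware statistics register into a 64-bit
 * (high/low) software counter.  If the low word wraps during the add,
 * the result is smaller than the value just added, which is the cue to
 * carry into the high word.
 */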
6137 #define TG3_STAT_ADD32(PSTAT, REG) \
6138 do {    u32 __val = tr32(REG); \
6139         (PSTAT)->low += __val; \
6140         if ((PSTAT)->low < __val) \
6141                 (PSTAT)->high += 1; \
6142 } while (0)
6143
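/*
 * Fold the MAC's 32-bit statistics registers into the 64-bit counters of
 * the host statistics block.  Called once per second from tg3_timer() on
 * 5705 and newer chips, for which no statistics block DMA address is
 * programmed in tg3_reset_hw().
 */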
6144 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6145 {
6146         struct tg3_hw_stats *sp = tp->hw_stats;
6147
6148         if (!netif_carrier_ok(tp->dev))
6149                 return;
6150
6151         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6152         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6153         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6154         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6155         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6156         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6157         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6158         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6159         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6160         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6161         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6162         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6163         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6164
6165         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6166         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6167         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6168         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6169         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6170         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6171         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6172         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6173         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6174         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6175         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6176         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6177         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6178         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6179 }
6180
6181 static void tg3_timer(unsigned long __opaque)
6182 {
6183         struct tg3 *tp = (struct tg3 *) __opaque;
6184
6185         spin_lock(&tp->lock);
6186
6187         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6188                 /* All of this garbage is because when using non-tagged
6189                  * IRQ status the mailbox/status_block protocol the chip
6190                  * uses with the cpu is race prone.
6191                  */
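                /*
                 * If the status block already indicates an update, force
                 * the interrupt handler to run by asserting SETINT;
                 * otherwise kick the coalescing engine so a fresh status
                 * update (and interrupt) is generated now.
                 */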
6192                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6193                         tw32(GRC_LOCAL_CTRL,
6194                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6195                 } else {
6196                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6197                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6198                 }
6199
6200                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6201                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6202                         spin_unlock(&tp->lock);
6203                         schedule_work(&tp->reset_task);
6204                         return;
6205                 }
6206         }
6207
6208         /* This part only runs once per second. */
6209         if (!--tp->timer_counter) {
6210                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6211                         tg3_periodic_fetch_stats(tp);
6212
6213                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6214                         u32 mac_stat;
6215                         int phy_event;
6216
6217                         mac_stat = tr32(MAC_STATUS);
6218
6219                         phy_event = 0;
6220                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6221                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6222                                         phy_event = 1;
6223                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6224                                 phy_event = 1;
6225
6226                         if (phy_event)
6227                                 tg3_setup_phy(tp, 0);
6228                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6229                         u32 mac_stat = tr32(MAC_STATUS);
6230                         int need_setup = 0;
6231
6232                         if (netif_carrier_ok(tp->dev) &&
6233                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6234                                 need_setup = 1;
6235                         }
6236                         if (!netif_carrier_ok(tp->dev) &&
6237                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6238                                          MAC_STATUS_SIGNAL_DET))) {
6239                                 need_setup = 1;
6240                         }
6241                         if (need_setup) {
6242                                 tw32_f(MAC_MODE,
6243                                      (tp->mac_mode &
6244                                       ~MAC_MODE_PORT_MODE_MASK));
6245                                 udelay(40);
6246                                 tw32_f(MAC_MODE, tp->mac_mode);
6247                                 udelay(40);
6248                                 tg3_setup_phy(tp, 0);
6249                         }
6250                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6251                         tg3_serdes_parallel_detect(tp);
6252
6253                 tp->timer_counter = tp->timer_multiplier;
6254         }
6255
6256         /* Heartbeat is only sent once every 2 seconds.  */
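        /*
         * With ASF management firmware enabled, post a NICDRV_ALIVE2
         * command (with a 5 second timeout) in the firmware mailbox and
         * raise an RX CPU event so the firmware knows the driver is
         * still running.
         */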
6257         if (!--tp->asf_counter) {
6258                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6259                         u32 val;
6260
6261                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6262                                            FWCMD_NICDRV_ALIVE2);
6263                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6264                         /* 5 seconds timeout */
6265                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6266                         val = tr32(GRC_RX_CPU_EVENT);
6267                         val |= (1 << 14);
6268                         tw32(GRC_RX_CPU_EVENT, val);
6269                 }
6270                 tp->asf_counter = tp->asf_multiplier;
6271         }
6272
6273         spin_unlock(&tp->lock);
6274
6275         tp->timer.expires = jiffies + tp->timer_offset;
6276         add_timer(&tp->timer);
6277 }
6278
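/* Verify that the board can actually deliver an interrupt: temporarily
 * install tg3_test_isr, force an interrupt through the host coalescing
 * engine, and poll the interrupt mailbox before restoring the normal
 * handler.
 */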
6279 static int tg3_test_interrupt(struct tg3 *tp)
6280 {
6281         struct net_device *dev = tp->dev;
6282         int err, i;
6283         u32 int_mbox = 0;
6284
6285         if (!netif_running(dev))
6286                 return -ENODEV;
6287
6288         tg3_disable_ints(tp);
6289
6290         free_irq(tp->pdev->irq, dev);
6291
6292         err = request_irq(tp->pdev->irq, tg3_test_isr,
6293                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6294         if (err)
6295                 return err;
6296
6297         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6298         tg3_enable_ints(tp);
6299
6300         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6301                HOSTCC_MODE_NOW);
6302
6303         for (i = 0; i < 5; i++) {
6304                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6305                                         TG3_64BIT_REG_LOW);
6306                 if (int_mbox != 0)
6307                         break;
6308                 msleep(10);
6309         }
6310
6311         tg3_disable_ints(tp);
6312
6313         free_irq(tp->pdev->irq, dev);
6314         
6315         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6316                 err = request_irq(tp->pdev->irq, tg3_msi,
6317                                   SA_SAMPLE_RANDOM, dev->name, dev);
6318         else {
6319                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6320                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6321                         fn = tg3_interrupt_tagged;
6322                 err = request_irq(tp->pdev->irq, fn,
6323                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6324         }
6325
6326         if (err)
6327                 return err;
6328
6329         if (int_mbox != 0)
6330                 return 0;
6331
6332         return -EIO;
6333 }
6334
6335 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6336  * successfully restored.
6337  */
6338 static int tg3_test_msi(struct tg3 *tp)
6339 {
6340         struct net_device *dev = tp->dev;
6341         int err;
6342         u16 pci_cmd;
6343
6344         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6345                 return 0;
6346
6347         /* Turn off SERR reporting in case MSI terminates with Master
6348          * Abort.
6349          */
6350         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6351         pci_write_config_word(tp->pdev, PCI_COMMAND,
6352                               pci_cmd & ~PCI_COMMAND_SERR);
6353
6354         err = tg3_test_interrupt(tp);
6355
6356         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6357
6358         if (!err)
6359                 return 0;
6360
6361         /* other failures */
6362         if (err != -EIO)
6363                 return err;
6364
6365         /* MSI test failed, go back to INTx mode */
6366         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6367                "switching to INTx mode. Please report this failure to "
6368                "the PCI maintainer and include system chipset information.\n",
6369                        tp->dev->name);
6370
6371         free_irq(tp->pdev->irq, dev);
6372         pci_disable_msi(tp->pdev);
6373
6374         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6375
6376         {
6377                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6378                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6379                         fn = tg3_interrupt_tagged;
6380
6381                 err = request_irq(tp->pdev->irq, fn,
6382                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6383         }
6384         if (err)
6385                 return err;
6386
6387         /* Need to reset the chip because the MSI cycle may have terminated
6388          * with Master Abort.
6389          */
6390         tg3_full_lock(tp, 1);
6391
6392         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6393         err = tg3_init_hw(tp);
6394
6395         tg3_full_unlock(tp);
6396
6397         if (err)
6398                 free_irq(tp->pdev->irq, dev);
6399
6400         return err;
6401 }
6402
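/* net_device open() hook: allocate the DMA descriptor rings, request the
 * IRQ (trying MSI on newer 5750+ parts that support tagged status),
 * initialize the hardware, and arm the periodic timer before enabling
 * interrupts.
 */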
6403 static int tg3_open(struct net_device *dev)
6404 {
6405         struct tg3 *tp = netdev_priv(dev);
6406         int err;
6407
6408         tg3_full_lock(tp, 0);
6409
6410         tg3_disable_ints(tp);
6411         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6412
6413         tg3_full_unlock(tp);
6414
6415         /* The placement of this call is tied
6416          * to the setup and use of Host TX descriptors.
6417          */
6418         err = tg3_alloc_consistent(tp);
6419         if (err)
6420                 return err;
6421
6422         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6423             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6424             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6425                 /* All MSI supporting chips should support tagged
6426                  * status.  Assert that this is the case.
6427                  */
6428                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6429                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6430                                "Not using MSI.\n", tp->dev->name);
6431                 } else if (pci_enable_msi(tp->pdev) == 0) {
6432                         u32 msi_mode;
6433
6434                         msi_mode = tr32(MSGINT_MODE);
6435                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6436                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6437                 }
6438         }
6439         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6440                 err = request_irq(tp->pdev->irq, tg3_msi,
6441                                   SA_SAMPLE_RANDOM, dev->name, dev);
6442         else {
6443                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6444                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6445                         fn = tg3_interrupt_tagged;
6446
6447                 err = request_irq(tp->pdev->irq, fn,
6448                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6449         }
6450
6451         if (err) {
6452                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6453                         pci_disable_msi(tp->pdev);
6454                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6455                 }
6456                 tg3_free_consistent(tp);
6457                 return err;
6458         }
6459
6460         tg3_full_lock(tp, 0);
6461
6462         err = tg3_init_hw(tp);
6463         if (err) {
6464                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6465                 tg3_free_rings(tp);
6466         } else {
6467                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6468                         tp->timer_offset = HZ;
6469                 else
6470                         tp->timer_offset = HZ / 10;
6471
6472                 BUG_ON(tp->timer_offset > HZ);
6473                 tp->timer_counter = tp->timer_multiplier =
6474                         (HZ / tp->timer_offset);
6475                 tp->asf_counter = tp->asf_multiplier =
6476                         ((HZ / tp->timer_offset) * 2);
6477
6478                 init_timer(&tp->timer);
6479                 tp->timer.expires = jiffies + tp->timer_offset;
6480                 tp->timer.data = (unsigned long) tp;
6481                 tp->timer.function = tg3_timer;
6482         }
6483
6484         tg3_full_unlock(tp);
6485
6486         if (err) {
6487                 free_irq(tp->pdev->irq, dev);
6488                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6489                         pci_disable_msi(tp->pdev);
6490                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6491                 }
6492                 tg3_free_consistent(tp);
6493                 return err;
6494         }
6495
6496         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6497                 err = tg3_test_msi(tp);
6498
6499                 if (err) {
6500                         tg3_full_lock(tp, 0);
6501
6502                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6503                                 pci_disable_msi(tp->pdev);
6504                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6505                         }
6506                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6507                         tg3_free_rings(tp);
6508                         tg3_free_consistent(tp);
6509
6510                         tg3_full_unlock(tp);
6511
6512                         return err;
6513                 }
6514         }
6515
6516         tg3_full_lock(tp, 0);
6517
6518         add_timer(&tp->timer);
6519         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6520         tg3_enable_ints(tp);
6521
6522         tg3_full_unlock(tp);
6523
6524         netif_start_queue(dev);
6525
6526         return 0;
6527 }
6528
6529 #if 0
6530 /*static*/ void tg3_dump_state(struct tg3 *tp)
6531 {
6532         u32 val32, val32_2, val32_3, val32_4, val32_5;
6533         u16 val16;
6534         int i;
6535
6536         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6537         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6538         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6539                val16, val32);
6540
6541         /* MAC block */
6542         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6543                tr32(MAC_MODE), tr32(MAC_STATUS));
6544         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6545                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6546         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6547                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6548         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6549                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6550
6551         /* Send data initiator control block */
6552         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6553                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6554         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6555                tr32(SNDDATAI_STATSCTRL));
6556
6557         /* Send data completion control block */
6558         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6559
6560         /* Send BD ring selector block */
6561         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6562                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6563
6564         /* Send BD initiator control block */
6565         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6566                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6567
6568         /* Send BD completion control block */
6569         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6570
6571         /* Receive list placement control block */
6572         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6573                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6574         printk("       RCVLPC_STATSCTRL[%08x]\n",
6575                tr32(RCVLPC_STATSCTRL));
6576
6577         /* Receive data and receive BD initiator control block */
6578         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6579                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6580
6581         /* Receive data completion control block */
6582         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6583                tr32(RCVDCC_MODE));
6584
6585         /* Receive BD initiator control block */
6586         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6587                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6588
6589         /* Receive BD completion control block */
6590         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6591                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6592
6593         /* Receive list selector control block */
6594         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6595                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6596
6597         /* Mbuf cluster free block */
6598         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6599                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6600
6601         /* Host coalescing control block */
6602         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6603                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6604         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6605                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6606                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6607         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6608                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6609                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6610         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6611                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6612         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6613                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6614
6615         /* Memory arbiter control block */
6616         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6617                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6618
6619         /* Buffer manager control block */
6620         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6621                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6622         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6623                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6624         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6625                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6626                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6627                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6628
6629         /* Read DMA control block */
6630         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6631                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6632
6633         /* Write DMA control block */
6634         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6635                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6636
6637         /* DMA completion block */
6638         printk("DEBUG: DMAC_MODE[%08x]\n",
6639                tr32(DMAC_MODE));
6640
6641         /* GRC block */
6642         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6643                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6644         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6645                tr32(GRC_LOCAL_CTRL));
6646
6647         /* TG3_BDINFOs */
6648         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6649                tr32(RCVDBDI_JUMBO_BD + 0x0),
6650                tr32(RCVDBDI_JUMBO_BD + 0x4),
6651                tr32(RCVDBDI_JUMBO_BD + 0x8),
6652                tr32(RCVDBDI_JUMBO_BD + 0xc));
6653         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6654                tr32(RCVDBDI_STD_BD + 0x0),
6655                tr32(RCVDBDI_STD_BD + 0x4),
6656                tr32(RCVDBDI_STD_BD + 0x8),
6657                tr32(RCVDBDI_STD_BD + 0xc));
6658         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6659                tr32(RCVDBDI_MINI_BD + 0x0),
6660                tr32(RCVDBDI_MINI_BD + 0x4),
6661                tr32(RCVDBDI_MINI_BD + 0x8),
6662                tr32(RCVDBDI_MINI_BD + 0xc));
6663
6664         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6665         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6666         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6667         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6668         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6669                val32, val32_2, val32_3, val32_4);
6670
6671         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6672         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6673         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6674         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6675         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6676                val32, val32_2, val32_3, val32_4);
6677
6678         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6679         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6680         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6681         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6682         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6683         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6684                val32, val32_2, val32_3, val32_4, val32_5);
6685
6686         /* SW status block */
6687         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6688                tp->hw_status->status,
6689                tp->hw_status->status_tag,
6690                tp->hw_status->rx_jumbo_consumer,
6691                tp->hw_status->rx_consumer,
6692                tp->hw_status->rx_mini_consumer,
6693                tp->hw_status->idx[0].rx_producer,
6694                tp->hw_status->idx[0].tx_consumer);
6695
6696         /* SW statistics block */
6697         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6698                ((u32 *)tp->hw_stats)[0],
6699                ((u32 *)tp->hw_stats)[1],
6700                ((u32 *)tp->hw_stats)[2],
6701                ((u32 *)tp->hw_stats)[3]);
6702
6703         /* Mailboxes */
6704         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6705                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6706                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6707                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6708                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6709
6710         /* NIC side send descriptors. */
6711         for (i = 0; i < 6; i++) {
6712                 unsigned long txd;
6713
6714                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6715                         + (i * sizeof(struct tg3_tx_buffer_desc));
6716                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6717                        i,
6718                        readl(txd + 0x0), readl(txd + 0x4),
6719                        readl(txd + 0x8), readl(txd + 0xc));
6720         }
6721
6722         /* NIC side RX descriptors. */
6723         for (i = 0; i < 6; i++) {
6724                 unsigned long rxd;
6725
6726                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6727                         + (i * sizeof(struct tg3_rx_buffer_desc));
6728                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6729                        i,
6730                        readl(rxd + 0x0), readl(rxd + 0x4),
6731                        readl(rxd + 0x8), readl(rxd + 0xc));
6732                 rxd += (4 * sizeof(u32));
6733                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6734                        i,
6735                        readl(rxd + 0x0), readl(rxd + 0x4),
6736                        readl(rxd + 0x8), readl(rxd + 0xc));
6737         }
6738
6739         for (i = 0; i < 6; i++) {
6740                 unsigned long rxd;
6741
6742                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6743                         + (i * sizeof(struct tg3_rx_buffer_desc));
6744                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6745                        i,
6746                        readl(rxd + 0x0), readl(rxd + 0x4),
6747                        readl(rxd + 0x8), readl(rxd + 0xc));
6748                 rxd += (4 * sizeof(u32));
6749                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6750                        i,
6751                        readl(rxd + 0x0), readl(rxd + 0x4),
6752                        readl(rxd + 0x8), readl(rxd + 0xc));
6753         }
6754 }
6755 #endif
6756
6757 static struct net_device_stats *tg3_get_stats(struct net_device *);
6758 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6759
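/* net_device stop() hook: quiesce the queue and timer, halt the chip,
 * release the IRQ (and MSI vector), snapshot the final statistics, and
 * free the DMA rings.
 */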
6760 static int tg3_close(struct net_device *dev)
6761 {
6762         struct tg3 *tp = netdev_priv(dev);
6763
6764         netif_stop_queue(dev);
6765
6766         del_timer_sync(&tp->timer);
6767
6768         tg3_full_lock(tp, 1);
6769 #if 0
6770         tg3_dump_state(tp);
6771 #endif
6772
6773         tg3_disable_ints(tp);
6774
6775         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6776         tg3_free_rings(tp);
6777         tp->tg3_flags &=
6778                 ~(TG3_FLAG_INIT_COMPLETE |
6779                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6780         netif_carrier_off(tp->dev);
6781
6782         tg3_full_unlock(tp);
6783
6784         free_irq(tp->pdev->irq, dev);
6785         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6786                 pci_disable_msi(tp->pdev);
6787                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6788         }
6789
6790         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6791                sizeof(tp->net_stats_prev));
6792         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6793                sizeof(tp->estats_prev));
6794
6795         tg3_free_consistent(tp);
6796
6797         return 0;
6798 }
6799
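/* Hardware statistics counters are 64 bits wide; fold one into an
 * unsigned long, which truncates to the low 32 bits on 32-bit hosts.
 */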
6800 static inline unsigned long get_stat64(tg3_stat64_t *val)
6801 {
6802         unsigned long ret;
6803
6804 #if (BITS_PER_LONG == 32)
6805         ret = val->low;
6806 #else
6807         ret = ((u64)val->high << 32) | ((u64)val->low);
6808 #endif
6809         return ret;
6810 }
6811
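/* On 5700/5701 copper boards the frame check sequence error count has to
 * be read back from the PHY; everything else reports it through the
 * rx_fcs_errors hardware statistic.
 */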
6812 static unsigned long calc_crc_errors(struct tg3 *tp)
6813 {
6814         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6815
6816         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6817             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6818              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6819                 u32 val;
6820
6821                 spin_lock_bh(&tp->lock);
6822                 if (!tg3_readphy(tp, 0x1e, &val)) {
6823                         tg3_writephy(tp, 0x1e, val | 0x8000);
6824                         tg3_readphy(tp, 0x14, &val);
6825                 } else
6826                         val = 0;
6827                 spin_unlock_bh(&tp->lock);
6828
6829                 tp->phy_crc_errors += val;
6830
6831                 return tp->phy_crc_errors;
6832         }
6833
6834         return get_stat64(&hw_stats->rx_fcs_errors);
6835 }
6836
6837 #define ESTAT_ADD(member) \
6838         estats->member =        old_estats->member + \
6839                                 get_stat64(&hw_stats->member)
6840
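/* Fold the live hardware statistics block into the cumulative ethtool
 * counters, adding in the totals saved across the last close (estats_prev).
 */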
6841 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6842 {
6843         struct tg3_ethtool_stats *estats = &tp->estats;
6844         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6845         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6846
6847         if (!hw_stats)
6848                 return old_estats;
6849
6850         ESTAT_ADD(rx_octets);
6851         ESTAT_ADD(rx_fragments);
6852         ESTAT_ADD(rx_ucast_packets);
6853         ESTAT_ADD(rx_mcast_packets);
6854         ESTAT_ADD(rx_bcast_packets);
6855         ESTAT_ADD(rx_fcs_errors);
6856         ESTAT_ADD(rx_align_errors);
6857         ESTAT_ADD(rx_xon_pause_rcvd);
6858         ESTAT_ADD(rx_xoff_pause_rcvd);
6859         ESTAT_ADD(rx_mac_ctrl_rcvd);
6860         ESTAT_ADD(rx_xoff_entered);
6861         ESTAT_ADD(rx_frame_too_long_errors);
6862         ESTAT_ADD(rx_jabbers);
6863         ESTAT_ADD(rx_undersize_packets);
6864         ESTAT_ADD(rx_in_length_errors);
6865         ESTAT_ADD(rx_out_length_errors);
6866         ESTAT_ADD(rx_64_or_less_octet_packets);
6867         ESTAT_ADD(rx_65_to_127_octet_packets);
6868         ESTAT_ADD(rx_128_to_255_octet_packets);
6869         ESTAT_ADD(rx_256_to_511_octet_packets);
6870         ESTAT_ADD(rx_512_to_1023_octet_packets);
6871         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6872         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6873         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6874         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6875         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6876
6877         ESTAT_ADD(tx_octets);
6878         ESTAT_ADD(tx_collisions);
6879         ESTAT_ADD(tx_xon_sent);
6880         ESTAT_ADD(tx_xoff_sent);
6881         ESTAT_ADD(tx_flow_control);
6882         ESTAT_ADD(tx_mac_errors);
6883         ESTAT_ADD(tx_single_collisions);
6884         ESTAT_ADD(tx_mult_collisions);
6885         ESTAT_ADD(tx_deferred);
6886         ESTAT_ADD(tx_excessive_collisions);
6887         ESTAT_ADD(tx_late_collisions);
6888         ESTAT_ADD(tx_collide_2times);
6889         ESTAT_ADD(tx_collide_3times);
6890         ESTAT_ADD(tx_collide_4times);
6891         ESTAT_ADD(tx_collide_5times);
6892         ESTAT_ADD(tx_collide_6times);
6893         ESTAT_ADD(tx_collide_7times);
6894         ESTAT_ADD(tx_collide_8times);
6895         ESTAT_ADD(tx_collide_9times);
6896         ESTAT_ADD(tx_collide_10times);
6897         ESTAT_ADD(tx_collide_11times);
6898         ESTAT_ADD(tx_collide_12times);
6899         ESTAT_ADD(tx_collide_13times);
6900         ESTAT_ADD(tx_collide_14times);
6901         ESTAT_ADD(tx_collide_15times);
6902         ESTAT_ADD(tx_ucast_packets);
6903         ESTAT_ADD(tx_mcast_packets);
6904         ESTAT_ADD(tx_bcast_packets);
6905         ESTAT_ADD(tx_carrier_sense_errors);
6906         ESTAT_ADD(tx_discards);
6907         ESTAT_ADD(tx_errors);
6908
6909         ESTAT_ADD(dma_writeq_full);
6910         ESTAT_ADD(dma_write_prioq_full);
6911         ESTAT_ADD(rxbds_empty);
6912         ESTAT_ADD(rx_discards);
6913         ESTAT_ADD(rx_errors);
6914         ESTAT_ADD(rx_threshold_hit);
6915
6916         ESTAT_ADD(dma_readq_full);
6917         ESTAT_ADD(dma_read_prioq_full);
6918         ESTAT_ADD(tx_comp_queue_full);
6919
6920         ESTAT_ADD(ring_set_send_prod_index);
6921         ESTAT_ADD(ring_status_update);
6922         ESTAT_ADD(nic_irqs);
6923         ESTAT_ADD(nic_avoided_irqs);
6924         ESTAT_ADD(nic_tx_threshold_hit);
6925
6926         return estats;
6927 }
6928
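/* Build struct net_device_stats from the hardware statistics block,
 * likewise carrying over the counts accumulated before the last close.
 */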
6929 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6930 {
6931         struct tg3 *tp = netdev_priv(dev);
6932         struct net_device_stats *stats = &tp->net_stats;
6933         struct net_device_stats *old_stats = &tp->net_stats_prev;
6934         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6935
6936         if (!hw_stats)
6937                 return old_stats;
6938
6939         stats->rx_packets = old_stats->rx_packets +
6940                 get_stat64(&hw_stats->rx_ucast_packets) +
6941                 get_stat64(&hw_stats->rx_mcast_packets) +
6942                 get_stat64(&hw_stats->rx_bcast_packets);
6943                 
6944         stats->tx_packets = old_stats->tx_packets +
6945                 get_stat64(&hw_stats->tx_ucast_packets) +
6946                 get_stat64(&hw_stats->tx_mcast_packets) +
6947                 get_stat64(&hw_stats->tx_bcast_packets);
6948
6949         stats->rx_bytes = old_stats->rx_bytes +
6950                 get_stat64(&hw_stats->rx_octets);
6951         stats->tx_bytes = old_stats->tx_bytes +
6952                 get_stat64(&hw_stats->tx_octets);
6953
6954         stats->rx_errors = old_stats->rx_errors +
6955                 get_stat64(&hw_stats->rx_errors);
6956         stats->tx_errors = old_stats->tx_errors +
6957                 get_stat64(&hw_stats->tx_errors) +
6958                 get_stat64(&hw_stats->tx_mac_errors) +
6959                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6960                 get_stat64(&hw_stats->tx_discards);
6961
6962         stats->multicast = old_stats->multicast +
6963                 get_stat64(&hw_stats->rx_mcast_packets);
6964         stats->collisions = old_stats->collisions +
6965                 get_stat64(&hw_stats->tx_collisions);
6966
6967         stats->rx_length_errors = old_stats->rx_length_errors +
6968                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6969                 get_stat64(&hw_stats->rx_undersize_packets);
6970
6971         stats->rx_over_errors = old_stats->rx_over_errors +
6972                 get_stat64(&hw_stats->rxbds_empty);
6973         stats->rx_frame_errors = old_stats->rx_frame_errors +
6974                 get_stat64(&hw_stats->rx_align_errors);
6975         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6976                 get_stat64(&hw_stats->tx_discards);
6977         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6978                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6979
6980         stats->rx_crc_errors = old_stats->rx_crc_errors +
6981                 calc_crc_errors(tp);
6982
6983         stats->rx_missed_errors = old_stats->rx_missed_errors +
6984                 get_stat64(&hw_stats->rx_discards);
6985
6986         return stats;
6987 }
6988
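/* Standard bit-reversed CRC-32 (polynomial 0xedb88320), used for the
 * multicast hash filter below and for the NVRAM self-test checksums.
 */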
6989 static inline u32 calc_crc(unsigned char *buf, int len)
6990 {
6991         u32 reg;
6992         u32 tmp;
6993         int j, k;
6994
6995         reg = 0xffffffff;
6996
6997         for (j = 0; j < len; j++) {
6998                 reg ^= buf[j];
6999
7000                 for (k = 0; k < 8; k++) {
7001                         tmp = reg & 0x01;
7002
7003                         reg >>= 1;
7004
7005                         if (tmp) {
7006                                 reg ^= 0xedb88320;
7007                         }
7008                 }
7009         }
7010
7011         return ~reg;
7012 }
7013
7014 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7015 {
7016         /* accept or reject all multicast frames */
7017         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7018         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7019         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7020         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7021 }
7022
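/* Program promiscuous, all-multicast, or hashed multicast RX filtering;
 * tg3_set_rx_mode() below wraps this with the driver lock.
 */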
7023 static void __tg3_set_rx_mode(struct net_device *dev)
7024 {
7025         struct tg3 *tp = netdev_priv(dev);
7026         u32 rx_mode;
7027
7028         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7029                                   RX_MODE_KEEP_VLAN_TAG);
7030
7031         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7032          * flag clear.
7033          */
7034 #if TG3_VLAN_TAG_USED
7035         if (!tp->vlgrp &&
7036             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7037                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7038 #else
7039         /* By definition, VLAN is always disabled in this
7040          * case.
7041          */
7042         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7043                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7044 #endif
7045
7046         if (dev->flags & IFF_PROMISC) {
7047                 /* Promiscuous mode. */
7048                 rx_mode |= RX_MODE_PROMISC;
7049         } else if (dev->flags & IFF_ALLMULTI) {
7050                 /* Accept all multicast. */
7051                 tg3_set_multi(tp, 1);
7052         } else if (dev->mc_count < 1) {
7053                 /* Reject all multicast. */
7054                 tg3_set_multi(tp, 0);
7055         } else {
7056                 /* Accept one or more multicast(s). */
7057                 struct dev_mc_list *mclist;
7058                 unsigned int i;
7059                 u32 mc_filter[4] = { 0, };
7060                 u32 regidx;
7061                 u32 bit;
7062                 u32 crc;
7063
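                /* The inverted low 7 bits of each address CRC pick one of
                 * 128 filter bits spread across the four 32-bit hash
                 * registers.
                 */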
7064                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7065                      i++, mclist = mclist->next) {
7066
7067                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7068                         bit = ~crc & 0x7f;
7069                         regidx = (bit & 0x60) >> 5;
7070                         bit &= 0x1f;
7071                         mc_filter[regidx] |= (1 << bit);
7072                 }
7073
7074                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7075                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7076                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7077                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7078         }
7079
7080         if (rx_mode != tp->rx_mode) {
7081                 tp->rx_mode = rx_mode;
7082                 tw32_f(MAC_RX_MODE, rx_mode);
7083                 udelay(10);
7084         }
7085 }
7086
7087 static void tg3_set_rx_mode(struct net_device *dev)
7088 {
7089         struct tg3 *tp = netdev_priv(dev);
7090
7091         tg3_full_lock(tp, 0);
7092         __tg3_set_rx_mode(dev);
7093         tg3_full_unlock(tp);
7094 }
7095
7096 #define TG3_REGDUMP_LEN         (32 * 1024)
7097
7098 static int tg3_get_regs_len(struct net_device *dev)
7099 {
7100         return TG3_REGDUMP_LEN;
7101 }
7102
7103 static void tg3_get_regs(struct net_device *dev,
7104                 struct ethtool_regs *regs, void *_p)
7105 {
7106         u32 *p = _p;
7107         struct tg3 *tp = netdev_priv(dev);
7108         u8 *orig_p = _p;
7109         int i;
7110
7111         regs->version = 0;
7112
7113         memset(p, 0, TG3_REGDUMP_LEN);
7114
7115         tg3_full_lock(tp, 0);
7116
7117 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7118 #define GET_REG32_LOOP(base,len)                \
7119 do {    p = (u32 *)(orig_p + (base));           \
7120         for (i = 0; i < len; i += 4)            \
7121                 __GET_REG32((base) + i);        \
7122 } while (0)
7123 #define GET_REG32_1(reg)                        \
7124 do {    p = (u32 *)(orig_p + (reg));            \
7125         __GET_REG32((reg));                     \
7126 } while (0)
7127
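        /* Each block is copied at its natural offset within the dump
         * buffer, so the layout mirrors the chip's register map.
         */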
7128         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7129         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7130         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7131         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7132         GET_REG32_1(SNDDATAC_MODE);
7133         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7134         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7135         GET_REG32_1(SNDBDC_MODE);
7136         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7137         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7138         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7139         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7140         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7141         GET_REG32_1(RCVDCC_MODE);
7142         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7143         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7144         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7145         GET_REG32_1(MBFREE_MODE);
7146         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7147         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7148         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7149         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7150         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7151         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7152         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7153         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7154         GET_REG32_LOOP(FTQ_RESET, 0x120);
7155         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7156         GET_REG32_1(DMAC_MODE);
7157         GET_REG32_LOOP(GRC_MODE, 0x4c);
7158         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7159                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7160
7161 #undef __GET_REG32
7162 #undef GET_REG32_LOOP
7163 #undef GET_REG32_1
7164
7165         tg3_full_unlock(tp);
7166 }
7167
7168 static int tg3_get_eeprom_len(struct net_device *dev)
7169 {
7170         struct tg3 *tp = netdev_priv(dev);
7171
7172         return tp->nvram_size;
7173 }
7174
7175 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7176
7177 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7178 {
7179         struct tg3 *tp = netdev_priv(dev);
7180         int ret;
7181         u8  *pd;
7182         u32 i, offset, len, val, b_offset, b_count;
7183
7184         offset = eeprom->offset;
7185         len = eeprom->len;
7186         eeprom->len = 0;
7187
7188         eeprom->magic = TG3_EEPROM_MAGIC;
7189
7190         if (offset & 3) {
7191                 /* adjustments to start on required 4 byte boundary */
7192                 b_offset = offset & 3;
7193                 b_count = 4 - b_offset;
7194                 if (b_count > len) {
7195                         /* i.e. offset=1 len=2 */
7196                         b_count = len;
7197                 }
7198                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7199                 if (ret)
7200                         return ret;
7201                 val = cpu_to_le32(val);
7202                 memcpy(data, ((char*)&val) + b_offset, b_count);
7203                 len -= b_count;
7204                 offset += b_count;
7205                 eeprom->len += b_count;
7206         }
7207
7208         /* read bytes up to the last 4-byte boundary */
7209         pd = &data[eeprom->len];
7210         for (i = 0; i < (len - (len & 3)); i += 4) {
7211                 ret = tg3_nvram_read(tp, offset + i, &val);
7212                 if (ret) {
7213                         eeprom->len += i;
7214                         return ret;
7215                 }
7216                 val = cpu_to_le32(val);
7217                 memcpy(pd + i, &val, 4);
7218         }
7219         eeprom->len += i;
7220
7221         if (len & 3) {
7222                 /* read last bytes not ending on 4 byte boundary */
7223                 pd = &data[eeprom->len];
7224                 b_count = len & 3;
7225                 b_offset = offset + len - b_count;
7226                 ret = tg3_nvram_read(tp, b_offset, &val);
7227                 if (ret)
7228                         return ret;
7229                 val = cpu_to_le32(val);
7230                 memcpy(pd, ((char*)&val), b_count);
7231                 eeprom->len += b_count;
7232         }
7233         return 0;
7234 }
7235
7236 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7237
7238 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7239 {
7240         struct tg3 *tp = netdev_priv(dev);
7241         int ret;
7242         u32 offset, len, b_offset, odd_len, start, end;
7243         u8 *buf;
7244
7245         if (eeprom->magic != TG3_EEPROM_MAGIC)
7246                 return -EINVAL;
7247
7248         offset = eeprom->offset;
7249         len = eeprom->len;
7250
7251         if ((b_offset = (offset & 3))) {
7252                 /* adjustments to start on required 4 byte boundary */
7253                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7254                 if (ret)
7255                         return ret;
7256                 start = cpu_to_le32(start);
7257                 len += b_offset;
7258                 offset &= ~3;
7259                 if (len < 4)
7260                         len = 4;
7261         }
7262
7263         odd_len = 0;
7264         if (len & 3) {
7265                 /* adjustments to end on required 4 byte boundary */
7266                 odd_len = 1;
7267                 len = (len + 3) & ~3;
7268                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7269                 if (ret)
7270                         return ret;
7271                 end = cpu_to_le32(end);
7272         }
7273
7274         buf = data;
7275         if (b_offset || odd_len) {
7276                 buf = kmalloc(len, GFP_KERNEL);
7277                 if (!buf)
7278                         return -ENOMEM;
7279                 if (b_offset)
7280                         memcpy(buf, &start, 4);
7281                 if (odd_len)
7282                         memcpy(buf+len-4, &end, 4);
7283                 memcpy(buf + b_offset, data, eeprom->len);
7284         }
7285
7286         ret = tg3_nvram_write_block(tp, offset, len, buf);
7287
7288         if (buf != data)
7289                 kfree(buf);
7290
7291         return ret;
7292 }
7293
7294 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7295 {
7296         struct tg3 *tp = netdev_priv(dev);
7297   
7298         cmd->supported = (SUPPORTED_Autoneg);
7299
7300         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7301                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7302                                    SUPPORTED_1000baseT_Full);
7303
7304         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7305                 cmd->supported |= (SUPPORTED_100baseT_Half |
7306                                   SUPPORTED_100baseT_Full |
7307                                   SUPPORTED_10baseT_Half |
7308                                   SUPPORTED_10baseT_Full |
7309                                   SUPPORTED_MII);
7310         else
7311                 cmd->supported |= SUPPORTED_FIBRE;
7312   
7313         cmd->advertising = tp->link_config.advertising;
7314         if (netif_running(dev)) {
7315                 cmd->speed = tp->link_config.active_speed;
7316                 cmd->duplex = tp->link_config.active_duplex;
7317         }
7318         cmd->port = 0;
7319         cmd->phy_address = PHY_ADDR;
7320         cmd->transceiver = 0;
7321         cmd->autoneg = tp->link_config.autoneg;
7322         cmd->maxtxpkt = 0;
7323         cmd->maxrxpkt = 0;
7324         return 0;
7325 }
7326   
7327 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7328 {
7329         struct tg3 *tp = netdev_priv(dev);
7330   
7331         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7332                 /* These are the only valid advertisement bits allowed.  */
7333                 if (cmd->autoneg == AUTONEG_ENABLE &&
7334                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7335                                           ADVERTISED_1000baseT_Full |
7336                                           ADVERTISED_Autoneg |
7337                                           ADVERTISED_FIBRE)))
7338                         return -EINVAL;
7339                 /* Fiber can only do SPEED_1000.  */
7340                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7341                          (cmd->speed != SPEED_1000))
7342                         return -EINVAL;
7343         /* Copper cannot force SPEED_1000.  */
7344         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7345                    (cmd->speed == SPEED_1000))
7346                 return -EINVAL;
7347         else if ((cmd->speed == SPEED_1000) &&
7348                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7349                 return -EINVAL;
7350
7351         tg3_full_lock(tp, 0);
7352
7353         tp->link_config.autoneg = cmd->autoneg;
7354         if (cmd->autoneg == AUTONEG_ENABLE) {
7355                 tp->link_config.advertising = cmd->advertising;
7356                 tp->link_config.speed = SPEED_INVALID;
7357                 tp->link_config.duplex = DUPLEX_INVALID;
7358         } else {
7359                 tp->link_config.advertising = 0;
7360                 tp->link_config.speed = cmd->speed;
7361                 tp->link_config.duplex = cmd->duplex;
7362         }
7363   
7364         if (netif_running(dev))
7365                 tg3_setup_phy(tp, 1);
7366
7367         tg3_full_unlock(tp);
7368   
7369         return 0;
7370 }
7371   
7372 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7373 {
7374         struct tg3 *tp = netdev_priv(dev);
7375   
7376         strcpy(info->driver, DRV_MODULE_NAME);
7377         strcpy(info->version, DRV_MODULE_VERSION);
7378         strcpy(info->bus_info, pci_name(tp->pdev));
7379 }
7380   
7381 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7382 {
7383         struct tg3 *tp = netdev_priv(dev);
7384   
7385         wol->supported = WAKE_MAGIC;
7386         wol->wolopts = 0;
7387         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7388                 wol->wolopts = WAKE_MAGIC;
7389         memset(&wol->sopass, 0, sizeof(wol->sopass));
7390 }
7391   
7392 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7393 {
7394         struct tg3 *tp = netdev_priv(dev);
7395   
7396         if (wol->wolopts & ~WAKE_MAGIC)
7397                 return -EINVAL;
7398         if ((wol->wolopts & WAKE_MAGIC) &&
7399             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7400             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7401                 return -EINVAL;
7402   
7403         spin_lock_bh(&tp->lock);
7404         if (wol->wolopts & WAKE_MAGIC)
7405                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7406         else
7407                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7408         spin_unlock_bh(&tp->lock);
7409   
7410         return 0;
7411 }
7412   
7413 static u32 tg3_get_msglevel(struct net_device *dev)
7414 {
7415         struct tg3 *tp = netdev_priv(dev);
7416         return tp->msg_enable;
7417 }
7418   
7419 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7420 {
7421         struct tg3 *tp = netdev_priv(dev);
7422         tp->msg_enable = value;
7423 }
7424   
7425 #if TG3_TSO_SUPPORT != 0
7426 static int tg3_set_tso(struct net_device *dev, u32 value)
7427 {
7428         struct tg3 *tp = netdev_priv(dev);
7429
7430         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7431                 if (value)
7432                         return -EINVAL;
7433                 return 0;
7434         }
7435         return ethtool_op_set_tso(dev, value);
7436 }
7437 #endif
7438   
7439 static int tg3_nway_reset(struct net_device *dev)
7440 {
7441         struct tg3 *tp = netdev_priv(dev);
7442         u32 bmcr;
7443         int r;
7444   
7445         if (!netif_running(dev))
7446                 return -EAGAIN;
7447
7448         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7449                 return -EINVAL;
7450
7451         spin_lock_bh(&tp->lock);
7452         r = -EINVAL;
7453         tg3_readphy(tp, MII_BMCR, &bmcr);
7454         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7455             ((bmcr & BMCR_ANENABLE) ||
7456              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7457                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7458                                            BMCR_ANENABLE);
7459                 r = 0;
7460         }
7461         spin_unlock_bh(&tp->lock);
7462   
7463         return r;
7464 }
7465   
7466 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7467 {
7468         struct tg3 *tp = netdev_priv(dev);
7469   
7470         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7471         ering->rx_mini_max_pending = 0;
7472         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7473
7474         ering->rx_pending = tp->rx_pending;
7475         ering->rx_mini_pending = 0;
7476         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7477         ering->tx_pending = tp->tx_pending;
7478 }
7479   
7480 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7481 {
7482         struct tg3 *tp = netdev_priv(dev);
7483         int irq_sync = 0;
7484   
7485         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7486             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7487             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7488                 return -EINVAL;
7489   
7490         if (netif_running(dev)) {
7491                 tg3_netif_stop(tp);
7492                 irq_sync = 1;
7493         }
7494
7495         tg3_full_lock(tp, irq_sync);
7496   
7497         tp->rx_pending = ering->rx_pending;
7498
7499         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7500             tp->rx_pending > 63)
7501                 tp->rx_pending = 63;
7502         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7503         tp->tx_pending = ering->tx_pending;
7504
7505         if (netif_running(dev)) {
7506                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7507                 tg3_init_hw(tp);
7508                 tg3_netif_start(tp);
7509         }
7510
7511         tg3_full_unlock(tp);
7512   
7513         return 0;
7514 }
7515   
7516 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7517 {
7518         struct tg3 *tp = netdev_priv(dev);
7519   
7520         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7521         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7522         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7523 }
7524   
7525 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7526 {
7527         struct tg3 *tp = netdev_priv(dev);
7528         int irq_sync = 0;
7529   
7530         if (netif_running(dev)) {
7531                 tg3_netif_stop(tp);
7532                 irq_sync = 1;
7533         }
7534
7535         tg3_full_lock(tp, irq_sync);
7536
7537         if (epause->autoneg)
7538                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7539         else
7540                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7541         if (epause->rx_pause)
7542                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7543         else
7544                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7545         if (epause->tx_pause)
7546                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7547         else
7548                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7549
7550         if (netif_running(dev)) {
7551                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7552                 tg3_init_hw(tp);
7553                 tg3_netif_start(tp);
7554         }
7555
7556         tg3_full_unlock(tp);
7557   
7558         return 0;
7559 }
7560   
7561 static u32 tg3_get_rx_csum(struct net_device *dev)
7562 {
7563         struct tg3 *tp = netdev_priv(dev);
7564         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7565 }
7566   
7567 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7568 {
7569         struct tg3 *tp = netdev_priv(dev);
7570   
7571         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7572                 if (data != 0)
7573                         return -EINVAL;
7574                 return 0;
7575         }
7576   
7577         spin_lock_bh(&tp->lock);
7578         if (data)
7579                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7580         else
7581                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7582         spin_unlock_bh(&tp->lock);
7583   
7584         return 0;
7585 }
7586   
7587 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7588 {
7589         struct tg3 *tp = netdev_priv(dev);
7590   
7591         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7592                 if (data != 0)
7593                         return -EINVAL;
7594                 return 0;
7595         }
7596   
7597         if (data)
7598                 dev->features |= NETIF_F_IP_CSUM;
7599         else
7600                 dev->features &= ~NETIF_F_IP_CSUM;
7601
7602         return 0;
7603 }
7604
7605 static int tg3_get_stats_count(struct net_device *dev)
7606 {
7607         return TG3_NUM_STATS;
7608 }
7609
7610 static int tg3_get_test_count(struct net_device *dev)
7611 {
7612         return TG3_NUM_TEST;
7613 }
7614
7615 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7616 {
7617         switch (stringset) {
7618         case ETH_SS_STATS:
7619                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7620                 break;
7621         case ETH_SS_TEST:
7622                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7623                 break;
7624         default:
7625                 WARN_ON(1);     /* we need a WARN() */
7626                 break;
7627         }
7628 }
7629
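/* ethtool PHYS_ID: blink the LEDs for 'data' seconds (default 2) by
 * toggling the LED override bits every 500ms, then restore led_ctrl.
 */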
7630 static int tg3_phys_id(struct net_device *dev, u32 data)
7631 {
7632         struct tg3 *tp = netdev_priv(dev);
7633         int i;
7634
7635         if (!netif_running(tp->dev))
7636                 return -EAGAIN;
7637
7638         if (data == 0)
7639                 data = 2;
7640
7641         for (i = 0; i < (data * 2); i++) {
7642                 if ((i % 2) == 0)
7643                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7644                                            LED_CTRL_1000MBPS_ON |
7645                                            LED_CTRL_100MBPS_ON |
7646                                            LED_CTRL_10MBPS_ON |
7647                                            LED_CTRL_TRAFFIC_OVERRIDE |
7648                                            LED_CTRL_TRAFFIC_BLINK |
7649                                            LED_CTRL_TRAFFIC_LED);
7650         
7651                 else
7652                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7653                                            LED_CTRL_TRAFFIC_OVERRIDE);
7654
7655                 if (msleep_interruptible(500))
7656                         break;
7657         }
7658         tw32(MAC_LED_CTRL, tp->led_ctrl);
7659         return 0;
7660 }
7661
7662 static void tg3_get_ethtool_stats(struct net_device *dev,
7663                                    struct ethtool_stats *estats, u64 *tmp_stats)
7664 {
7665         struct tg3 *tp = netdev_priv(dev);
7666         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7667 }
7668
7669 #define NVRAM_TEST_SIZE 0x100
7670
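/* ethtool self-test helper: read the first 256 bytes of NVRAM and verify
 * the magic word plus the bootstrap and manufacturing-block checksums.
 */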
7671 static int tg3_test_nvram(struct tg3 *tp)
7672 {
7673         u32 *buf, csum;
7674         int i, j, err = 0;
7675
7676         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7677         if (buf == NULL)
7678                 return -ENOMEM;
7679
7680         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7681                 u32 val;
7682
7683                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7684                         break;
7685                 buf[j] = cpu_to_le32(val);
7686         }
7687         if (i < NVRAM_TEST_SIZE)
7688                 goto out;
7689
7690         err = -EIO;
7691         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7692                 goto out;
7693
7694         /* Bootstrap checksum at offset 0x10 */
7695         csum = calc_crc((unsigned char *) buf, 0x10);
7696         if (csum != cpu_to_le32(buf[0x10/4]))
7697                 goto out;
7698
7699         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7700         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7701         if (csum != cpu_to_le32(buf[0xfc/4]))
7702                 goto out;
7703
7704         err = 0;
7705
7706 out:
7707         kfree(buf);
7708         return err;
7709 }
7710
7711 #define TG3_SERDES_TIMEOUT_SEC  2
7712 #define TG3_COPPER_TIMEOUT_SEC  6
7713
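/* ethtool self-test helper: wait up to the per-media timeout for carrier. */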
7714 static int tg3_test_link(struct tg3 *tp)
7715 {
7716         int i, max;
7717
7718         if (!netif_running(tp->dev))
7719                 return -ENODEV;
7720
7721         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7722                 max = TG3_SERDES_TIMEOUT_SEC;
7723         else
7724                 max = TG3_COPPER_TIMEOUT_SEC;
7725
7726         for (i = 0; i < max; i++) {
7727                 if (netif_carrier_ok(tp->dev))
7728                         return 0;
7729
7730                 if (msleep_interruptible(1000))
7731                         break;
7732         }
7733
7734         return -EIO;
7735 }
7736
7737 /* Only test the commonly used registers */
7738 static int tg3_test_registers(struct tg3 *tp)
7739 {
7740         int i, is_5705;
7741         u32 offset, read_mask, write_mask, val, save_val, read_val;
7742         static struct {
7743                 u16 offset;
7744                 u16 flags;
7745 #define TG3_FL_5705     0x1
7746 #define TG3_FL_NOT_5705 0x2
7747 #define TG3_FL_NOT_5788 0x4
7748                 u32 read_mask;
7749                 u32 write_mask;
7750         } reg_tbl[] = {
7751                 /* MAC Control Registers */
7752                 { MAC_MODE, TG3_FL_NOT_5705,
7753                         0x00000000, 0x00ef6f8c },
7754                 { MAC_MODE, TG3_FL_5705,
7755                         0x00000000, 0x01ef6b8c },
7756                 { MAC_STATUS, TG3_FL_NOT_5705,
7757                         0x03800107, 0x00000000 },
7758                 { MAC_STATUS, TG3_FL_5705,
7759                         0x03800100, 0x00000000 },
7760                 { MAC_ADDR_0_HIGH, 0x0000,
7761                         0x00000000, 0x0000ffff },
7762                 { MAC_ADDR_0_LOW, 0x0000,
7763                         0x00000000, 0xffffffff },
7764                 { MAC_RX_MTU_SIZE, 0x0000,
7765                         0x00000000, 0x0000ffff },
7766                 { MAC_TX_MODE, 0x0000,
7767                         0x00000000, 0x00000070 },
7768                 { MAC_TX_LENGTHS, 0x0000,
7769                         0x00000000, 0x00003fff },
7770                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7771                         0x00000000, 0x000007fc },
7772                 { MAC_RX_MODE, TG3_FL_5705,
7773                         0x00000000, 0x000007dc },
7774                 { MAC_HASH_REG_0, 0x0000,
7775                         0x00000000, 0xffffffff },
7776                 { MAC_HASH_REG_1, 0x0000,
7777                         0x00000000, 0xffffffff },
7778                 { MAC_HASH_REG_2, 0x0000,
7779                         0x00000000, 0xffffffff },
7780                 { MAC_HASH_REG_3, 0x0000,
7781                         0x00000000, 0xffffffff },
7782
7783                 /* Receive Data and Receive BD Initiator Control Registers. */
7784                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7785                         0x00000000, 0xffffffff },
7786                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7787                         0x00000000, 0xffffffff },
7788                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7789                         0x00000000, 0x00000003 },
7790                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7791                         0x00000000, 0xffffffff },
7792                 { RCVDBDI_STD_BD+0, 0x0000,
7793                         0x00000000, 0xffffffff },
7794                 { RCVDBDI_STD_BD+4, 0x0000,
7795                         0x00000000, 0xffffffff },
7796                 { RCVDBDI_STD_BD+8, 0x0000,
7797                         0x00000000, 0xffff0002 },
7798                 { RCVDBDI_STD_BD+0xc, 0x0000,
7799                         0x00000000, 0xffffffff },
7800
7801                 /* Receive BD Initiator Control Registers. */
7802                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7803                         0x00000000, 0xffffffff },
7804                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7805                         0x00000000, 0x000003ff },
7806                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7807                         0x00000000, 0xffffffff },
7808
7809                 /* Host Coalescing Control Registers. */
7810                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7811                         0x00000000, 0x00000004 },
7812                 { HOSTCC_MODE, TG3_FL_5705,
7813                         0x00000000, 0x000000f6 },
7814                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7815                         0x00000000, 0xffffffff },
7816                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7817                         0x00000000, 0x000003ff },
7818                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7819                         0x00000000, 0xffffffff },
7820                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7821                         0x00000000, 0x000003ff },
7822                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7823                         0x00000000, 0xffffffff },
7824                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7825                         0x00000000, 0x000000ff },
7826                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7827                         0x00000000, 0xffffffff },
7828                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7829                         0x00000000, 0x000000ff },
7830                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7831                         0x00000000, 0xffffffff },
7832                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7833                         0x00000000, 0xffffffff },
7834                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7835                         0x00000000, 0xffffffff },
7836                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7837                         0x00000000, 0x000000ff },
7838                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7839                         0x00000000, 0xffffffff },
7840                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7841                         0x00000000, 0x000000ff },
7842                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7843                         0x00000000, 0xffffffff },
7844                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7845                         0x00000000, 0xffffffff },
7846                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7847                         0x00000000, 0xffffffff },
7848                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7849                         0x00000000, 0xffffffff },
7850                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7851                         0x00000000, 0xffffffff },
7852                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7853                         0xffffffff, 0x00000000 },
7854                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7855                         0xffffffff, 0x00000000 },
7856
7857                 /* Buffer Manager Control Registers. */
7858                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7859                         0x00000000, 0x007fff80 },
7860                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7861                         0x00000000, 0x007fffff },
7862                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7863                         0x00000000, 0x0000003f },
7864                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7865                         0x00000000, 0x000001ff },
7866                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7867                         0x00000000, 0x000001ff },
7868                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7869                         0xffffffff, 0x00000000 },
7870                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7871                         0xffffffff, 0x00000000 },
7872
7873                 /* Mailbox Registers */
7874                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7875                         0x00000000, 0x000001ff },
7876                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7877                         0x00000000, 0x000001ff },
7878                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7879                         0x00000000, 0x000007ff },
7880                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7881                         0x00000000, 0x000001ff },
7882
7883                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7884         };
7885
7886         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7887                 is_5705 = 1;
7888         else
7889                 is_5705 = 0;
7890
7891         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7892                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7893                         continue;
7894
7895                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7896                         continue;
7897
7898                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7899                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7900                         continue;
7901
7902                 offset = (u32) reg_tbl[i].offset;
7903                 read_mask = reg_tbl[i].read_mask;
7904                 write_mask = reg_tbl[i].write_mask;
7905
7906                 /* Save the original register content */
7907                 save_val = tr32(offset);
7908
7909                 /* Determine the read-only value. */
7910                 read_val = save_val & read_mask;
7911
7912                 /* Write zero to the register, then make sure the read-only bits
7913                  * are not changed and the read/write bits are all zeros.
7914                  */
7915                 tw32(offset, 0);
7916
7917                 val = tr32(offset);
7918
7919                 /* Test the read-only and read/write bits. */
7920                 if (((val & read_mask) != read_val) || (val & write_mask))
7921                         goto out;
7922
7923                 /* Write ones to all the bits defined by RdMask and WrMask, then
7924                  * make sure the read-only bits are not changed and the
7925                  * read/write bits are all ones.
7926                  */
7927                 tw32(offset, read_mask | write_mask);
7928
7929                 val = tr32(offset);
7930
7931                 /* Test the read-only bits. */
7932                 if ((val & read_mask) != read_val)
7933                         goto out;
7934
7935                 /* Test the read/write bits. */
7936                 if ((val & write_mask) != write_mask)
7937                         goto out;
7938
7939                 tw32(offset, save_val);
7940         }
7941
7942         return 0;
7943
7944 out:
7945         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7946         tw32(offset, save_val);
7947         return -EIO;
7948 }
7949
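/* Write each test pattern to every word of the given window of NIC-internal
 * memory and read it back, failing on the first mismatch.
 */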
7950 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7951 {
7952         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7953         int i;
7954         u32 j;
7955
7956         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7957                 for (j = 0; j < len; j += 4) {
7958                         u32 val;
7959
7960                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7961                         tg3_read_mem(tp, offset + j, &val);
7962                         if (val != test_pattern[i])
7963                                 return -EIO;
7964                 }
7965         }
7966         return 0;
7967 }
7968
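/* Memory self-test: run tg3_do_mem_test() over a chip-specific table of
 * internal SRAM regions (offset/length pairs terminated by 0xffffffff).
 */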
7969 static int tg3_test_memory(struct tg3 *tp)
7970 {
7971         static struct mem_entry {
7972                 u32 offset;
7973                 u32 len;
7974         } mem_tbl_570x[] = {
7975                 { 0x00000000, 0x01000},
7976                 { 0x00002000, 0x1c000},
7977                 { 0xffffffff, 0x00000}
7978         }, mem_tbl_5705[] = {
7979                 { 0x00000100, 0x0000c},
7980                 { 0x00000200, 0x00008},
7981                 { 0x00000b50, 0x00400},
7982                 { 0x00004000, 0x00800},
7983                 { 0x00006000, 0x01000},
7984                 { 0x00008000, 0x02000},
7985                 { 0x00010000, 0x0e000},
7986                 { 0xffffffff, 0x00000}
7987         };
7988         struct mem_entry *mem_tbl;
7989         int err = 0;
7990         int i;
7991
7992         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7993                 mem_tbl = mem_tbl_5705;
7994         else
7995                 mem_tbl = mem_tbl_570x;
7996
7997         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7998                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7999                     mem_tbl[i].len)) != 0)
8000                         break;
8001         }
8002
8003         return err;
8004 }
8005
8006 #define TG3_MAC_LOOPBACK        0
8007 #define TG3_PHY_LOOPBACK        1
8008
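/* Run one loopback test: put the MAC (or PHY) into loopback, build a test
 * frame addressed to ourselves, post a single TX descriptor, kick the send
 * mailbox, then poll the status block and check that the frame came back
 * on the standard RX ring with its payload intact.
 */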
8009 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8010 {
8011         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8012         u32 desc_idx;
8013         struct sk_buff *skb, *rx_skb;
8014         u8 *tx_data;
8015         dma_addr_t map;
8016         int num_pkts, tx_len, rx_len, i, err;
8017         struct tg3_rx_buffer_desc *desc;
8018
8019         if (loopback_mode == TG3_MAC_LOOPBACK) {
8020                 /* HW errata - mac loopback fails in some cases on 5780.
8021                  * Normal traffic and PHY loopback are not affected by
8022                  * errata.
8023                  */
8024                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8025                         return 0;
8026
8027                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8028                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8029                            MAC_MODE_PORT_MODE_GMII;
8030                 tw32(MAC_MODE, mac_mode);
8031         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8032                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8033                                            BMCR_SPEED1000);
8034                 udelay(40);
8035                 /* reset to prevent losing 1st rx packet intermittently */
8036                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8037                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8038                         udelay(10);
8039                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8040                 }
8041                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8042                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8043                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8044                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8045                 tw32(MAC_MODE, mac_mode);
8046         }
8047         else
8048                 return -EINVAL;
8049
8050         err = -EIO;
8051
8052         tx_len = 1514;
8053         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8054         tx_data = skb_put(skb, tx_len);
8055         memcpy(tx_data, tp->dev->dev_addr, 6);
8056         memset(tx_data + 6, 0x0, 8);
8057
8058         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8059
8060         for (i = 14; i < tx_len; i++)
8061                 tx_data[i] = (u8) (i & 0xff);
8062
8063         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8064
8065         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8066              HOSTCC_MODE_NOW);
8067
8068         udelay(10);
8069
8070         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8071
8072         num_pkts = 0;
8073
8074         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8075
8076         tp->tx_prod++;
8077         num_pkts++;
8078
8079         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8080                      tp->tx_prod);
8081         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8082
8083         udelay(10);
8084
8085         for (i = 0; i < 10; i++) {
8086                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8087                        HOSTCC_MODE_NOW);
8088
8089                 udelay(10);
8090
8091                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8092                 rx_idx = tp->hw_status->idx[0].rx_producer;
8093                 if ((tx_idx == tp->tx_prod) &&
8094                     (rx_idx == (rx_start_idx + num_pkts)))
8095                         break;
8096         }
8097
8098         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8099         dev_kfree_skb(skb);
8100
8101         if (tx_idx != tp->tx_prod)
8102                 goto out;
8103
8104         if (rx_idx != rx_start_idx + num_pkts)
8105                 goto out;
8106
8107         desc = &tp->rx_rcb[rx_start_idx];
8108         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8109         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8110         if (opaque_key != RXD_OPAQUE_RING_STD)
8111                 goto out;
8112
8113         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8114             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8115                 goto out;
8116
8117         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8118         if (rx_len != tx_len)
8119                 goto out;
8120
8121         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8122
8123         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8124         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8125
8126         for (i = 14; i < tx_len; i++) {
8127                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8128                         goto out;
8129         }
8130         err = 0;
8131
8132         /* tg3_free_rings will unmap and free the rx_skb */
8133 out:
8134         return err;
8135 }
8136
8137 #define TG3_MAC_LOOPBACK_FAILED         1
8138 #define TG3_PHY_LOOPBACK_FAILED         2
8139 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8140                                          TG3_PHY_LOOPBACK_FAILED)
8141
8142 static int tg3_test_loopback(struct tg3 *tp)
8143 {
8144         int err = 0;
8145
8146         if (!netif_running(tp->dev))
8147                 return TG3_LOOPBACK_FAILED;
8148
8149         tg3_reset_hw(tp);
8150
8151         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8152                 err |= TG3_MAC_LOOPBACK_FAILED;
8153         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8154                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8155                         err |= TG3_PHY_LOOPBACK_FAILED;
8156         }
8157
8158         return err;
8159 }
8160
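/* ethtool self-test entry point.  data[] slots: 0 = NVRAM, 1 = link,
 * 2 = registers, 3 = memory, 4 = loopback (MAC/PHY failure bitmask),
 * 5 = interrupt; a non-zero value marks the corresponding test as failed.
 */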
8161 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8162                           u64 *data)
8163 {
8164         struct tg3 *tp = netdev_priv(dev);
8165
8166         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8167
8168         if (tg3_test_nvram(tp) != 0) {
8169                 etest->flags |= ETH_TEST_FL_FAILED;
8170                 data[0] = 1;
8171         }
8172         if (tg3_test_link(tp) != 0) {
8173                 etest->flags |= ETH_TEST_FL_FAILED;
8174                 data[1] = 1;
8175         }
8176         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8177                 int irq_sync = 0;
8178
8179                 if (netif_running(dev)) {
8180                         tg3_netif_stop(tp);
8181                         irq_sync = 1;
8182                 }
8183
8184                 tg3_full_lock(tp, irq_sync);
8185
8186                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8187                 tg3_nvram_lock(tp);
8188                 tg3_halt_cpu(tp, RX_CPU_BASE);
8189                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8190                         tg3_halt_cpu(tp, TX_CPU_BASE);
8191                 tg3_nvram_unlock(tp);
8192
8193                 if (tg3_test_registers(tp) != 0) {
8194                         etest->flags |= ETH_TEST_FL_FAILED;
8195                         data[2] = 1;
8196                 }
8197                 if (tg3_test_memory(tp) != 0) {
8198                         etest->flags |= ETH_TEST_FL_FAILED;
8199                         data[3] = 1;
8200                 }
8201                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8202                         etest->flags |= ETH_TEST_FL_FAILED;
8203
8204                 tg3_full_unlock(tp);
8205
8206                 if (tg3_test_interrupt(tp) != 0) {
8207                         etest->flags |= ETH_TEST_FL_FAILED;
8208                         data[5] = 1;
8209                 }
8210
8211                 tg3_full_lock(tp, 0);
8212
8213                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8214                 if (netif_running(dev)) {
8215                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8216                         tg3_init_hw(tp);
8217                         tg3_netif_start(tp);
8218                 }
8219
8220                 tg3_full_unlock(tp);
8221         }
8222 }
8223
8224 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8225 {
8226         struct mii_ioctl_data *data = if_mii(ifr);
8227         struct tg3 *tp = netdev_priv(dev);
8228         int err;
8229
8230         switch (cmd) {
8231         case SIOCGMIIPHY:
8232                 data->phy_id = PHY_ADDR;
8233
8234                 /* fallthru */
8235         case SIOCGMIIREG: {
8236                 u32 mii_regval;
8237
8238                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8239                         break;                  /* We have no PHY */
8240
8241                 spin_lock_bh(&tp->lock);
8242                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8243                 spin_unlock_bh(&tp->lock);
8244
8245                 data->val_out = mii_regval;
8246
8247                 return err;
8248         }
8249
8250         case SIOCSMIIREG:
8251                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8252                         break;                  /* We have no PHY */
8253
8254                 if (!capable(CAP_NET_ADMIN))
8255                         return -EPERM;
8256
8257                 spin_lock_bh(&tp->lock);
8258                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8259                 spin_unlock_bh(&tp->lock);
8260
8261                 return err;
8262
8263         default:
8264                 /* do nothing */
8265                 break;
8266         }
8267         return -EOPNOTSUPP;
8268 }
8269
8270 #if TG3_VLAN_TAG_USED
8271 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8272 {
8273         struct tg3 *tp = netdev_priv(dev);
8274
8275         tg3_full_lock(tp, 0);
8276
8277         tp->vlgrp = grp;
8278
8279         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8280         __tg3_set_rx_mode(dev);
8281
8282         tg3_full_unlock(tp);
8283 }
8284
8285 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8286 {
8287         struct tg3 *tp = netdev_priv(dev);
8288
8289         tg3_full_lock(tp, 0);
8290         if (tp->vlgrp)
8291                 tp->vlgrp->vlan_devices[vid] = NULL;
8292         tg3_full_unlock(tp);
8293 }
8294 #endif
8295
8296 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8297 {
8298         struct tg3 *tp = netdev_priv(dev);
8299
8300         memcpy(ec, &tp->coal, sizeof(*ec));
8301         return 0;
8302 }
8303
8304 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8305 {
8306         struct tg3 *tp = netdev_priv(dev);
8307         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8308         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8309
8310         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8311                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8312                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8313                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8314                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8315         }
8316
8317         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8318             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8319             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8320             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8321             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8322             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8323             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8324             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8325             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8326             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8327                 return -EINVAL;
8328
8329         /* No rx interrupts will be generated if both are zero */
8330         if ((ec->rx_coalesce_usecs == 0) &&
8331             (ec->rx_max_coalesced_frames == 0))
8332                 return -EINVAL;
8333
8334         /* No tx interrupts will be generated if both are zero */
8335         if ((ec->tx_coalesce_usecs == 0) &&
8336             (ec->tx_max_coalesced_frames == 0))
8337                 return -EINVAL;
8338
8339         /* Only copy relevant parameters, ignore all others. */
8340         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8341         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8342         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8343         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8344         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8345         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8346         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8347         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8348         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8349
8350         if (netif_running(dev)) {
8351                 tg3_full_lock(tp, 0);
8352                 __tg3_set_coalesce(tp, &tp->coal);
8353                 tg3_full_unlock(tp);
8354         }
8355         return 0;
8356 }
8357
8358 static struct ethtool_ops tg3_ethtool_ops = {
8359         .get_settings           = tg3_get_settings,
8360         .set_settings           = tg3_set_settings,
8361         .get_drvinfo            = tg3_get_drvinfo,
8362         .get_regs_len           = tg3_get_regs_len,
8363         .get_regs               = tg3_get_regs,
8364         .get_wol                = tg3_get_wol,
8365         .set_wol                = tg3_set_wol,
8366         .get_msglevel           = tg3_get_msglevel,
8367         .set_msglevel           = tg3_set_msglevel,
8368         .nway_reset             = tg3_nway_reset,
8369         .get_link               = ethtool_op_get_link,
8370         .get_eeprom_len         = tg3_get_eeprom_len,
8371         .get_eeprom             = tg3_get_eeprom,
8372         .set_eeprom             = tg3_set_eeprom,
8373         .get_ringparam          = tg3_get_ringparam,
8374         .set_ringparam          = tg3_set_ringparam,
8375         .get_pauseparam         = tg3_get_pauseparam,
8376         .set_pauseparam         = tg3_set_pauseparam,
8377         .get_rx_csum            = tg3_get_rx_csum,
8378         .set_rx_csum            = tg3_set_rx_csum,
8379         .get_tx_csum            = ethtool_op_get_tx_csum,
8380         .set_tx_csum            = tg3_set_tx_csum,
8381         .get_sg                 = ethtool_op_get_sg,
8382         .set_sg                 = ethtool_op_set_sg,
8383 #if TG3_TSO_SUPPORT != 0
8384         .get_tso                = ethtool_op_get_tso,
8385         .set_tso                = tg3_set_tso,
8386 #endif
8387         .self_test_count        = tg3_get_test_count,
8388         .self_test              = tg3_self_test,
8389         .get_strings            = tg3_get_strings,
8390         .phys_id                = tg3_phys_id,
8391         .get_stats_count        = tg3_get_stats_count,
8392         .get_ethtool_stats      = tg3_get_ethtool_stats,
8393         .get_coalesce           = tg3_get_coalesce,
8394         .set_coalesce           = tg3_set_coalesce,
8395         .get_perm_addr          = ethtool_op_get_perm_addr,
8396 };
8397
8398 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8399 {
8400         u32 cursize, val;
8401
8402         tp->nvram_size = EEPROM_CHIP_SIZE;
8403
8404         if (tg3_nvram_read(tp, 0, &val) != 0)
8405                 return;
8406
8407         if (swab32(val) != TG3_EEPROM_MAGIC)
8408                 return;
8409
8410         /*
8411          * Size the chip by reading offsets at increasing powers of two.
8412          * When we encounter our validation signature, we know the addressing
8413          * has wrapped around, and thus have our chip size.
8414          */
8415         cursize = 0x800;
8416
8417         while (cursize < tp->nvram_size) {
8418                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8419                         return;
8420
8421                 if (swab32(val) == TG3_EEPROM_MAGIC)
8422                         break;
8423
8424                 cursize <<= 1;
8425         }
8426
8427         tp->nvram_size = cursize;
8428 }
8429                 
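/* The word at NVRAM offset 0xf0 carries the device size in KB in its upper
 * 16 bits; if it reads as zero (or the read fails), assume 128KB.
 */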
8430 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8431 {
8432         u32 val;
8433
8434         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8435                 if (val != 0) {
8436                         tp->nvram_size = (val >> 16) * 1024;
8437                         return;
8438                 }
8439         }
8440         tp->nvram_size = 0x20000;
8441 }
8442
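/* Decode the NVRAM_CFG1 strapping to determine the flash/EEPROM vendor,
 * the programming page size and whether the part is buffered.
 */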
8443 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8444 {
8445         u32 nvcfg1;
8446
8447         nvcfg1 = tr32(NVRAM_CFG1);
8448         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8449                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8450         }
8451         else {
8452                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8453                 tw32(NVRAM_CFG1, nvcfg1);
8454         }
8455
8456         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8457             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8458                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8459                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8460                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8461                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8462                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8463                                 break;
8464                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8465                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8466                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8467                                 break;
8468                         case FLASH_VENDOR_ATMEL_EEPROM:
8469                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8470                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8471                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8472                                 break;
8473                         case FLASH_VENDOR_ST:
8474                                 tp->nvram_jedecnum = JEDEC_ST;
8475                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8476                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8477                                 break;
8478                         case FLASH_VENDOR_SAIFUN:
8479                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8480                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8481                                 break;
8482                         case FLASH_VENDOR_SST_SMALL:
8483                         case FLASH_VENDOR_SST_LARGE:
8484                                 tp->nvram_jedecnum = JEDEC_SST;
8485                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8486                                 break;
8487                 }
8488         }
8489         else {
8490                 tp->nvram_jedecnum = JEDEC_ATMEL;
8491                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8492                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8493         }
8494 }
8495
8496 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8497 {
8498         u32 nvcfg1;
8499
8500         nvcfg1 = tr32(NVRAM_CFG1);
8501
8502         /* NVRAM protection for TPM */
8503         if (nvcfg1 & (1 << 27))
8504                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8505
8506         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8507                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8508                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8509                         tp->nvram_jedecnum = JEDEC_ATMEL;
8510                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8511                         break;
8512                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8513                         tp->nvram_jedecnum = JEDEC_ATMEL;
8514                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8515                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8516                         break;
8517                 case FLASH_5752VENDOR_ST_M45PE10:
8518                 case FLASH_5752VENDOR_ST_M45PE20:
8519                 case FLASH_5752VENDOR_ST_M45PE40:
8520                         tp->nvram_jedecnum = JEDEC_ST;
8521                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8522                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8523                         break;
8524         }
8525
8526         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8527                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8528                         case FLASH_5752PAGE_SIZE_256:
8529                                 tp->nvram_pagesize = 256;
8530                                 break;
8531                         case FLASH_5752PAGE_SIZE_512:
8532                                 tp->nvram_pagesize = 512;
8533                                 break;
8534                         case FLASH_5752PAGE_SIZE_1K:
8535                                 tp->nvram_pagesize = 1024;
8536                                 break;
8537                         case FLASH_5752PAGE_SIZE_2K:
8538                                 tp->nvram_pagesize = 2048;
8539                                 break;
8540                         case FLASH_5752PAGE_SIZE_4K:
8541                                 tp->nvram_pagesize = 4096;
8542                                 break;
8543                         case FLASH_5752PAGE_SIZE_264:
8544                                 tp->nvram_pagesize = 264;
8545                                 break;
8546                 }
8547         }
8548         else {
8549                 /* For eeprom, set pagesize to maximum eeprom size */
8550                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8551
8552                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8553                 tw32(NVRAM_CFG1, nvcfg1);
8554         }
8555 }
8556
8557 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8558 static void __devinit tg3_nvram_init(struct tg3 *tp)
8559 {
8560         int j;
8561
8562         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8563                 return;
8564
8565         tw32_f(GRC_EEPROM_ADDR,
8566              (EEPROM_ADDR_FSM_RESET |
8567               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8568                EEPROM_ADDR_CLKPERD_SHIFT)));
8569
8570         /* XXX schedule_timeout() ... */
8571         for (j = 0; j < 100; j++)
8572                 udelay(10);
8573
8574         /* Enable seeprom accesses. */
8575         tw32_f(GRC_LOCAL_CTRL,
8576              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8577         udelay(100);
8578
8579         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8580             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8581                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8582
8583                 tg3_nvram_lock(tp);
8584                 tg3_enable_nvram_access(tp);
8585
8586                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8587                         tg3_get_5752_nvram_info(tp);
8588                 else
8589                         tg3_get_nvram_info(tp);
8590
8591                 tg3_get_nvram_size(tp);
8592
8593                 tg3_disable_nvram_access(tp);
8594                 tg3_nvram_unlock(tp);
8595
8596         } else {
8597                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8598
8599                 tg3_get_eeprom_size(tp);
8600         }
8601 }
8602
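/* Read one 32-bit word over the legacy GRC EEPROM interface: program the
 * address with the READ and START bits set, poll for EEPROM_ADDR_COMPLETE
 * (up to ~1 second), then fetch the word from GRC_EEPROM_DATA.
 */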
8603 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8604                                         u32 offset, u32 *val)
8605 {
8606         u32 tmp;
8607         int i;
8608
8609         if (offset > EEPROM_ADDR_ADDR_MASK ||
8610             (offset % 4) != 0)
8611                 return -EINVAL;
8612
8613         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8614                                         EEPROM_ADDR_DEVID_MASK |
8615                                         EEPROM_ADDR_READ);
8616         tw32(GRC_EEPROM_ADDR,
8617              tmp |
8618              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8619              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8620               EEPROM_ADDR_ADDR_MASK) |
8621              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8622
8623         for (i = 0; i < 10000; i++) {
8624                 tmp = tr32(GRC_EEPROM_ADDR);
8625
8626                 if (tmp & EEPROM_ADDR_COMPLETE)
8627                         break;
8628                 udelay(100);
8629         }
8630         if (!(tmp & EEPROM_ADDR_COMPLETE))
8631                 return -EBUSY;
8632
8633         *val = tr32(GRC_EEPROM_DATA);
8634         return 0;
8635 }
8636
8637 #define NVRAM_CMD_TIMEOUT 10000
8638
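/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, giving the controller
 * up to NVRAM_CMD_TIMEOUT * 10us (100ms) to finish.
 */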
8639 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8640 {
8641         int i;
8642
8643         tw32(NVRAM_CMD, nvram_cmd);
8644         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8645                 udelay(10);
8646                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8647                         udelay(10);
8648                         break;
8649                 }
8650         }
8651         if (i == NVRAM_CMD_TIMEOUT)
8652                 return -EBUSY;
8654         return 0;
8655 }
8656
8657 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8658 {
8659         int ret;
8660
8661         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8662                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8663                 return -EINVAL;
8664         }
8665
8666         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8667                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8668
8669         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8670                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8671                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8672
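                /* Buffered Atmel DataFlash parts are not linearly addressed:
                 * the page number sits in the high address bits and the byte
                 * offset within the page in the low bits.  For example,
                 * assuming ATMEL_AT45DB0X1B_PAGE_POS is 9 (the usual value
                 * for 264-byte-page parts), byte offset 1000 maps to page 3,
                 * byte 208, i.e. (3 << 9) + 208 = 0x6d0.
                 */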
8673                 offset = ((offset / tp->nvram_pagesize) <<
8674                           ATMEL_AT45DB0X1B_PAGE_POS) +
8675                         (offset % tp->nvram_pagesize);
8676         }
8677
8678         if (offset > NVRAM_ADDR_MSK)
8679                 return -EINVAL;
8680
8681         tg3_nvram_lock(tp);
8682
8683         tg3_enable_nvram_access(tp);
8684
8685         tw32(NVRAM_ADDR, offset);
8686         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8687                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8688
8689         if (ret == 0)
8690                 *val = swab32(tr32(NVRAM_RDDATA));
8691
8692         tg3_disable_nvram_access(tp);
8693
8694         tg3_nvram_unlock(tp);
8695
8696         return ret;
8697 }
8698
8699 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8700                                     u32 offset, u32 len, u8 *buf)
8701 {
8702         int i, j, rc = 0;
8703         u32 val;
8704
8705         for (i = 0; i < len; i += 4) {
8706                 u32 addr, data;
8707
8708                 addr = offset + i;
8709
8710                 memcpy(&data, buf + i, 4);
8711
8712                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8713
8714                 val = tr32(GRC_EEPROM_ADDR);
8715                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8716
8717                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8718                         EEPROM_ADDR_READ);
8719                 tw32(GRC_EEPROM_ADDR, val |
8720                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8721                         (addr & EEPROM_ADDR_ADDR_MASK) |
8722                         EEPROM_ADDR_START |
8723                         EEPROM_ADDR_WRITE);
8724
8725                 for (j = 0; j < 10000; j++) {
8726                         val = tr32(GRC_EEPROM_ADDR);
8727
8728                         if (val & EEPROM_ADDR_COMPLETE)
8729                                 break;
8730                         udelay(100);
8731                 }
8732                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8733                         rc = -EBUSY;
8734                         break;
8735                 }
8736         }
8737
8738         return rc;
8739 }
8740
8741 /* offset and length are dword aligned */
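/* Unbuffered flash is programmed a full page at a time: read back the page
 * covering the target range, merge in the new data, issue a write-enable,
 * erase the page, write-enable again and reprogram it word by word, with
 * the FIRST/LAST command flags marking the page boundaries.
 */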
8742 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8743                 u8 *buf)
8744 {
8745         int ret = 0;
8746         u32 pagesize = tp->nvram_pagesize;
8747         u32 pagemask = pagesize - 1;
8748         u32 nvram_cmd;
8749         u8 *tmp;
8750
8751         tmp = kmalloc(pagesize, GFP_KERNEL);
8752         if (tmp == NULL)
8753                 return -ENOMEM;
8754
8755         while (len) {
8756                 int j;
8757                 u32 phy_addr, page_off, size;
8758
8759                 phy_addr = offset & ~pagemask;
8760
8761                 for (j = 0; j < pagesize; j += 4) {
8762                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8763                                                 (u32 *) (tmp + j))))
8764                                 break;
8765                 }
8766                 if (ret)
8767                         break;
8768
8769                 page_off = offset & pagemask;
8770                 size = pagesize;
8771                 if (len < size)
8772                         size = len;
8773
8774                 len -= size;
8775
8776                 memcpy(tmp + page_off, buf, size);
8777
8778                 offset = offset + (pagesize - page_off);
8779
8780                 /* Nvram lock released by tg3_nvram_read() above,
8781                  * so we need to get it again.
8782                  */
8783                 tg3_nvram_lock(tp);
8784                 tg3_enable_nvram_access(tp);
8785
8786                 /*
8787                  * Before we can erase the flash page, we need
8788                  * to issue a special "write enable" command.
8789                  */
8790                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8791
8792                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8793                         break;
8794
8795                 /* Erase the target page */
8796                 tw32(NVRAM_ADDR, phy_addr);
8797
8798                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8799                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8800
8801                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8802                         break;
8803
8804                 /* Issue another write enable to start the write. */
8805                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8806
8807                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8808                         break;
8809
8810                 for (j = 0; j < pagesize; j += 4) {
8811                         u32 data;
8812
8813                         data = *((u32 *) (tmp + j));
8814                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8815
8816                         tw32(NVRAM_ADDR, phy_addr + j);
8817
8818                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8819                                 NVRAM_CMD_WR;
8820
8821                         if (j == 0)
8822                                 nvram_cmd |= NVRAM_CMD_FIRST;
8823                         else if (j == (pagesize - 4))
8824                                 nvram_cmd |= NVRAM_CMD_LAST;
8825
8826                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8827                                 break;
8828                 }
8829                 if (ret)
8830                         break;
8831         }
8832
8833         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8834         tg3_nvram_exec_cmd(tp, nvram_cmd);
8835
8836         kfree(tmp);
8837
8838         return ret;
8839 }
8840
8841 /* offset and length are dword aligned */
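/* Buffered flash and EEPROM accept word-at-a-time writes.  NVRAM_CMD_FIRST
 * and NVRAM_CMD_LAST bracket each flash page (or each word for EEPROM), and,
 * except on 5752 chips, ST parts get a write-enable before each FIRST write.
 */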
8842 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8843                 u8 *buf)
8844 {
8845         int i, ret = 0;
8846
8847         for (i = 0; i < len; i += 4, offset += 4) {
8848                 u32 data, page_off, phy_addr, nvram_cmd;
8849
8850                 memcpy(&data, buf + i, 4);
8851                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8852
8853                 page_off = offset % tp->nvram_pagesize;
8854
8855                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8856                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8857
8858                         phy_addr = ((offset / tp->nvram_pagesize) <<
8859                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8860                 }
8861                 else {
8862                         phy_addr = offset;
8863                 }
8864
8865                 tw32(NVRAM_ADDR, phy_addr);
8866
8867                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8868
8869                 if ((page_off == 0) || (i == 0))
8870                         nvram_cmd |= NVRAM_CMD_FIRST;
8871                 else if (page_off == (tp->nvram_pagesize - 4))
8872                         nvram_cmd |= NVRAM_CMD_LAST;
8873
8874                 if (i == (len - 4))
8875                         nvram_cmd |= NVRAM_CMD_LAST;
8876
8877                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8878                     (tp->nvram_jedecnum == JEDEC_ST) &&
8879                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8880
8881                         if ((ret = tg3_nvram_exec_cmd(tp,
8882                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8883                                 NVRAM_CMD_DONE)))
8885                                 break;
8886                 }
8887                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8888                         /* We always do complete word writes to eeprom. */
8889                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8890                 }
8891
8892                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8893                         break;
8894         }
8895         return ret;
8896 }
8897
8898 /* offset and length are dword aligned */
8899 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8900 {
8901         int ret;
8902
8903         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8904                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8905                 return -EINVAL;
8906         }
8907
8908         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8909                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8910                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8911                 udelay(40);
8912         }
8913
8914         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8915                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8916         }
8917         else {
8918                 u32 grc_mode;
8919
8920                 tg3_nvram_lock(tp);
8921
8922                 tg3_enable_nvram_access(tp);
8923                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8924                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8925                         tw32(NVRAM_WRITE1, 0x406);
8926
8927                 grc_mode = tr32(GRC_MODE);
8928                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8929
8930                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8931                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8932
8933                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8934                                 buf);
8935                 }
8936                 else {
8937                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8938                                 buf);
8939                 }
8940
8941                 grc_mode = tr32(GRC_MODE);
8942                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8943
8944                 tg3_disable_nvram_access(tp);
8945                 tg3_nvram_unlock(tp);
8946         }
8947
8948         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8949                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8950                 udelay(40);
8951         }
8952
8953         return ret;
8954 }
8955
8956 struct subsys_tbl_ent {
8957         u16 subsys_vendor, subsys_devid;
8958         u32 phy_id;
8959 };
8960
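/* Boards whose eeprom does not provide a usable PHY id are matched against
 * this PCI subsystem-ID table; a phy_id of 0 marks a serdes board.
 */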
8961 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8962         /* Broadcom boards. */
8963         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8964         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8965         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8966         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8967         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8968         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8969         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8970         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8971         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8972         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8973         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8974
8975         /* 3com boards. */
8976         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8977         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8978         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8979         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8980         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8981
8982         /* DELL boards. */
8983         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8984         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8985         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8986         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8987
8988         /* Compaq boards. */
8989         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8990         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8991         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8992         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8993         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8994
8995         /* IBM boards. */
8996         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8997 };
8998
8999 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9000 {
9001         int i;
9002
9003         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9004                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9005                      tp->pdev->subsystem_vendor) &&
9006                     (subsys_id_to_phy_id[i].subsys_devid ==
9007                      tp->pdev->subsystem_device))
9008                         return &subsys_id_to_phy_id[i];
9009         }
9010         return NULL;
9011 }
9012
9013 /* Since this function may be called in D3-hot power state during
9014  * tg3_init_one(), only config cycles are allowed.
9015  */
9016 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9017 {
9018         u32 val;
9019
9020         /* Make sure register accesses (indirect or otherwise)
9021          * will function correctly.
9022          */
9023         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9024                                tp->misc_host_ctrl);
9025
9026         tp->phy_id = PHY_ID_INVALID;
9027         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9028
9029         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9030         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9031                 u32 nic_cfg, led_cfg;
9032                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9033                 int eeprom_phy_serdes = 0;
9034
9035                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9036                 tp->nic_sram_data_cfg = nic_cfg;
9037
9038                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9039                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9040                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9041                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9042                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9043                     (ver > 0) && (ver < 0x100))
9044                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9045
9046                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9047                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9048                         eeprom_phy_serdes = 1;
9049
9050                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9051                 if (nic_phy_id != 0) {
9052                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9053                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9054
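                        /* Repack the two SRAM halves into the driver's
                         * internal PHY id layout, the same layout that
                         * tg3_phy_probe() builds from MII_PHYSID1/2.
                         */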
9055                         eeprom_phy_id  = (id1 >> 16) << 10;
9056                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9057                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9058                 } else
9059                         eeprom_phy_id = 0;
9060
9061                 tp->phy_id = eeprom_phy_id;
9062                 if (eeprom_phy_serdes) {
9063                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9064                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9065                         else
9066                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9067                 }
9068
9069                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9070                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9071                                     SHASTA_EXT_LED_MODE_MASK);
9072                 else
9073                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9074
9075                 switch (led_cfg) {
9076                 default:
9077                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9078                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9079                         break;
9080
9081                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9082                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9083                         break;
9084
9085                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9086                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9087
9088                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9089                          * read on some older 5700/5701 bootcode.
9090                          */
9091                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9092                             ASIC_REV_5700 ||
9093                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9094                             ASIC_REV_5701)
9095                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9096
9097                         break;
9098
9099                 case SHASTA_EXT_LED_SHARED:
9100                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9101                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9102                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9103                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9104                                                  LED_CTRL_MODE_PHY_2);
9105                         break;
9106
9107                 case SHASTA_EXT_LED_MAC:
9108                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9109                         break;
9110
9111                 case SHASTA_EXT_LED_COMBO:
9112                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9113                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9114                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9115                                                  LED_CTRL_MODE_PHY_2);
9116                         break;
9117
9118                 }
9119
9120                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9121                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9122                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9123                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9124
9125                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9126                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9127                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9128                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9129
9130                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9131                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9132                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9133                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9134                 }
9135                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9136                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9137
9138                 if (cfg2 & (1 << 17))
9139                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9140
9141                 /* serdes signal pre-emphasis in register 0x590 is set
9142                  * by the bootcode if bit 18 is set */
9143                 if (cfg2 & (1 << 18))
9144                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9145         }
9146 }
9147
9148 static int __devinit tg3_phy_probe(struct tg3 *tp)
9149 {
9150         u32 hw_phy_id_1, hw_phy_id_2;
9151         u32 hw_phy_id, hw_phy_id_masked;
9152         int err;
9153
9154         /* Reading the PHY ID register can conflict with ASF
9155          * firmware access to the PHY hardware.
9156          */
9157         err = 0;
9158         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9159                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9160         } else {
9161                 /* Now read the physical PHY_ID from the chip and verify
9162                  * that it is sane.  If it doesn't look good, we fall back
9163                  * to the PHY_ID value found in the eeprom area and,
9164                  * failing that, to the hard-coded subsystem-ID table.
9165                  */
9166                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9167                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9168
9169                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9170                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9171                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9172
9173                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9174         }
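             /* Worked example of the ID packing done above, using assumed
              * (hypothetical) register values rather than anything read
              * from a real part: MII_PHYSID1 = 0x0020, MII_PHYSID2 = 0x5c62
              * would give
              *
              *     (0x0020 & 0xffff) << 10  =  0x00008000
              *     (0x5c62 & 0xfc00) << 16  =  0x5c000000
              *     (0x5c62 & 0x03ff) <<  0  =  0x00000062
              *                                 ----------
              *     hw_phy_id                =  0x5c008062
              */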
9175
9176         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9177                 tp->phy_id = hw_phy_id;
9178                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9179                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9180                 else
9181                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9182         } else {
9183                 if (tp->phy_id != PHY_ID_INVALID) {
9184                         /* Do nothing, phy ID already set up in
9185                          * tg3_get_eeprom_hw_cfg().
9186                          */
9187                 } else {
9188                         struct subsys_tbl_ent *p;
9189
9190                         /* No eeprom signature?  Try the hardcoded
9191                          * subsys device table.
9192                          */
9193                         p = lookup_by_subsys(tp);
9194                         if (!p)
9195                                 return -ENODEV;
9196
9197                         tp->phy_id = p->phy_id;
9198                         if (!tp->phy_id ||
9199                             tp->phy_id == PHY_ID_BCM8002)
9200                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9201                 }
9202         }
9203
9204         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9205             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9206                 u32 bmsr, adv_reg, tg3_ctrl;
9207
9208                 tg3_readphy(tp, MII_BMSR, &bmsr);
9209                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9210                     (bmsr & BMSR_LSTATUS))
9211                         goto skip_phy_reset;
9212
9213                 err = tg3_phy_reset(tp);
9214                 if (err)
9215                         return err;
9216
9217                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9218                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9219                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9220                 tg3_ctrl = 0;
9221                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9222                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9223                                     MII_TG3_CTRL_ADV_1000_FULL);
9224                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9225                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9226                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9227                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9228                 }
9229
9230                 if (!tg3_copper_is_advertising_all(tp)) {
9231                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9232
9233                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9234                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9235
9236                         tg3_writephy(tp, MII_BMCR,
9237                                      BMCR_ANENABLE | BMCR_ANRESTART);
9238                 }
9239                 tg3_phy_set_wirespeed(tp);
9240
9241                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9242                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9243                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9244         }
9245
9246 skip_phy_reset:
9247         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9248                 err = tg3_init_5401phy_dsp(tp);
9249                 if (err)
9250                         return err;
9251         }
9252
9253         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9254                 err = tg3_init_5401phy_dsp(tp);
9255         }
9256
9257         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9258                 tp->link_config.advertising =
9259                         (ADVERTISED_1000baseT_Half |
9260                          ADVERTISED_1000baseT_Full |
9261                          ADVERTISED_Autoneg |
9262                          ADVERTISED_FIBRE);
9263         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9264                 tp->link_config.advertising &=
9265                         ~(ADVERTISED_1000baseT_Half |
9266                           ADVERTISED_1000baseT_Full);
9267
9268         return err;
9269 }
9270
9271 static void __devinit tg3_read_partno(struct tg3 *tp)
9272 {
9273         unsigned char vpd_data[256];
9274         int i;
9275
9276         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9277                 /* Sun decided not to put the necessary bits in the
9278                  * NVRAM of their onboard tg3 parts :(
9279                  */
9280                 strcpy(tp->board_part_number, "Sun 570X");
9281                 return;
9282         }
9283
9284         for (i = 0; i < 256; i += 4) {
9285                 u32 tmp;
9286
9287                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9288                         goto out_not_found;
9289
9290                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9291                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9292                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9293                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9294         }
9295
9296         /* Now parse and find the part number. */
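             /* For reference, an assumed (made-up) VPD image of the shape
              * this loop walks; only the "PN" keyword inside the 0x90
              * read-only block is actually consumed:
              *
              *   0x82 len_lo len_hi <identifier string>     large resource tag
              *   0x90 len_lo len_hi                         VPD-R (read-only) data
              *        'P' 'N' len <part number bytes>       keyword entries
              *   0x78                                        end tag
              */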
9297         for (i = 0; i < 256; ) {
9298                 unsigned char val = vpd_data[i];
9299                 int block_end;
9300
9301                 if (val == 0x82 || val == 0x91) {
9302                         i = (i + 3 +
9303                              (vpd_data[i + 1] +
9304                               (vpd_data[i + 2] << 8)));
9305                         continue;
9306                 }
9307
9308                 if (val != 0x90)
9309                         goto out_not_found;
9310
9311                 block_end = (i + 3 +
9312                              (vpd_data[i + 1] +
9313                               (vpd_data[i + 2] << 8)));
9314                 i += 3;
9315                 while (i < block_end) {
9316                         if (vpd_data[i + 0] == 'P' &&
9317                             vpd_data[i + 1] == 'N') {
9318                                 int partno_len = vpd_data[i + 2];
9319
9320                                 if (partno_len > 24)
9321                                         goto out_not_found;
9322
9323                                 memcpy(tp->board_part_number,
9324                                        &vpd_data[i + 3],
9325                                        partno_len);
9326
9327                                 /* Success. */
9328                                 return;
9329                         }
                             /* Advance past this keyword (2-byte name plus a
                              * 1-byte length plus the data itself) so the
                              * scan cannot spin forever when the first entry
                              * in the block is not "PN".
                              */
                             i += 3 + vpd_data[i + 2];
9330                 }
9331
9332                 /* Part number not found. */
9333                 goto out_not_found;
9334         }
9335
9336 out_not_found:
9337         strcpy(tp->board_part_number, "none");
9338 }
9339
9340 #ifdef CONFIG_SPARC64
9341 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9342 {
9343         struct pci_dev *pdev = tp->pdev;
9344         struct pcidev_cookie *pcp = pdev->sysdata;
9345
9346         if (pcp != NULL) {
9347                 int node = pcp->prom_node;
9348                 u32 venid;
9349                 int err;
9350
9351                 err = prom_getproperty(node, "subsystem-vendor-id",
9352                                        (char *) &venid, sizeof(venid));
9353                 if (err == 0 || err == -1)
9354                         return 0;
9355                 if (venid == PCI_VENDOR_ID_SUN)
9356                         return 1;
9357         }
9358         return 0;
9359 }
9360 #endif
9361
9362 static int __devinit tg3_get_invariants(struct tg3 *tp)
9363 {
9364         static struct pci_device_id write_reorder_chipsets[] = {
9365                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9366                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9367                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9368                              PCI_DEVICE_ID_VIA_8385_0) },
9369                 { },
9370         };
9371         u32 misc_ctrl_reg;
9372         u32 cacheline_sz_reg;
9373         u32 pci_state_reg, grc_misc_cfg;
9374         u32 val;
9375         u16 pci_cmd;
9376         int err;
9377
9378 #ifdef CONFIG_SPARC64
9379         if (tg3_is_sun_570X(tp))
9380                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9381 #endif
9382
9383         /* Force memory write invalidate off.  If we leave it on,
9384          * then on 5700_BX chips we have to enable a workaround.
9385          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9386          * to match the cacheline size.  The Broadcom driver has this
9387          * workaround but turns MWI off all the time, so it never uses
9388          * it.  This seems to suggest that the workaround is insufficient.
9389          */
9390         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9391         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9392         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9393
9394         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9395          * has the register indirect write enable bit set before
9396          * we try to access any of the MMIO registers.  It is also
9397          * critical that the PCI-X hw workaround situation is decided
9398          * before that as well.
9399          */
9400         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9401                               &misc_ctrl_reg);
9402
9403         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9404                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9405
9406         /* Wrong chip ID in 5752 A0. This code can be removed later
9407          * as A0 is not in production.
9408          */
9409         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9410                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9411
9412         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9413          * we need to disable memory and use config cycles
9414          * only to access all registers. The 5702/03 chips
9415          * can mistakenly decode the special cycles from the
9416          * ICH chipsets as memory write cycles, causing corruption
9417          * of register and memory space. Only certain ICH bridges
9418          * will drive special cycles with non-zero data during the
9419          * address phase which can fall within the 5703's address
9420          * range. This is not an ICH bug as the PCI spec allows
9421          * non-zero address during special cycles. However, only
9422          * these ICH bridges are known to drive non-zero addresses
9423          * during special cycles.
9424          *
9425          * Since special cycles do not cross PCI bridges, we only
9426          * enable this workaround if the 5703 is on the secondary
9427          * bus of these ICH bridges.
9428          */
9429         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9430             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9431                 static struct tg3_dev_id {
9432                         u32     vendor;
9433                         u32     device;
9434                         u32     rev;
9435                 } ich_chipsets[] = {
9436                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9437                           PCI_ANY_ID },
9438                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9439                           PCI_ANY_ID },
9440                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9441                           0xa },
9442                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9443                           PCI_ANY_ID },
9444                         { },
9445                 };
9446                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9447                 struct pci_dev *bridge = NULL;
9448
9449                 while (pci_id->vendor != 0) {
9450                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9451                                                 bridge);
9452                         if (!bridge) {
9453                                 pci_id++;
9454                                 continue;
9455                         }
9456                         if (pci_id->rev != PCI_ANY_ID) {
9457                                 u8 rev;
9458
9459                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9460                                                      &rev);
9461                                 if (rev > pci_id->rev)
9462                                         continue;
9463                         }
9464                         if (bridge->subordinate &&
9465                             (bridge->subordinate->number ==
9466                              tp->pdev->bus->number)) {
9467
9468                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9469                                 pci_dev_put(bridge);
9470                                 break;
9471                         }
9472                 }
9473         }
9474
9475         /* Find msi capability. */
9476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9477             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9478                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9479                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9480         }
9481
9482         /* Initialize misc host control in PCI block. */
9483         tp->misc_host_ctrl |= (misc_ctrl_reg &
9484                                MISC_HOST_CTRL_CHIPREV);
9485         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9486                                tp->misc_host_ctrl);
9487
9488         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9489                               &cacheline_sz_reg);
9490
9491         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9492         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9493         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9494         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9495
9496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9498             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9499                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9500
9501         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9502             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9503                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9504
9505         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9506                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9507
9508         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9509             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9510             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9511                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9512
9513         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9514                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9515
9516         /* If we have an AMD 762 or VIA K8T800 chipset, write
9517          * reordering to the mailbox registers done by the host
9518          * controller can cause major troubles.  We read back from
9519          * every mailbox register write to force the writes to be
9520          * posted to the chip in order.
9521          */
9522         if (pci_dev_present(write_reorder_chipsets) &&
9523             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9524                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
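             /* A minimal sketch of the read-back style of mailbox write
              * this flag selects (illustrative only; the real helper,
              * tg3_write_flush_reg32(), is wired up further down and may
              * differ in detail).  Reading the register back keeps the
              * host bridge from reordering the posted write:
              *
              *     static void example_flush_write(struct tg3 *tp,
              *                                     u32 off, u32 val)
              *     {
              *             writel(val, tp->regs + off);
              *             readl(tp->regs + off);
              *     }
              */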
9525
9526         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9527             tp->pci_lat_timer < 64) {
9528                 tp->pci_lat_timer = 64;
9529
9530                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9531                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9532                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9533                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9534
9535                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9536                                        cacheline_sz_reg);
9537         }
9538
9539         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9540                               &pci_state_reg);
9541
9542         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9543                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9544
9545                 /* If this is a 5700 BX chipset, and we are in PCI-X
9546                  * mode, enable register write workaround.
9547                  *
9548                  * The workaround is to use indirect register accesses
9549                  * for all chip writes not to mailbox registers.
9550                  */
9551                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9552                         u32 pm_reg;
9553                         u16 pci_cmd;
9554
9555                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9556
9557                         /* The chip can have its power management PCI config
9558                          * space registers clobbered due to this bug.
9559                          * So explicitly force the chip into D0 here.
9560                          */
9561                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9562                                               &pm_reg);
9563                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9564                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9565                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9566                                                pm_reg);
9567
9568                         /* Also, force SERR#/PERR# in PCI command. */
9569                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9570                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9571                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9572                 }
9573         }
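             /* Sketch of what "indirect register access" means for the
              * PCIX_TARGET_HWBUG case above (illustrative only; see
              * tg3_write_indirect_reg32(), selected further down, for the
              * real version).  The offset and value go through the two
              * indirection registers in PCI config space instead of a
              * memory-mapped write; locking is omitted here:
              *
              *     pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
              *     pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
              */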
9574
9575         /* 5700 BX chips need to have their TX producer index mailboxes
9576          * written twice to workaround a bug.
9577          */
9578         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9579                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
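             /* Illustrative shape of that doubled write (the real helper,
              * tg3_write32_tx_mbox(), is selected below and may differ in
              * detail):
              *
              *     writel(val, tp->regs + off);
              *     writel(val, tp->regs + off);    <- repeated on 5700 BX
              */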
9580
9581         /* Back to back register writes can cause problems on this chip,
9582          * the workaround is to read back all reg writes except those to
9583          * mailbox regs.  See tg3_write_indirect_reg32().
9584          *
9585          * PCI Express 5750_A0 rev chips need this workaround too.
9586          */
9587         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9588             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9589              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9590                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9591
9592         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9593                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9594         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9595                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9596
9597         /* Chip-specific fixup from Broadcom driver */
9598         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9599             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9600                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9601                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9602         }
9603
9604         /* Default fast path register access methods */
9605         tp->read32 = tg3_read32;
9606         tp->write32 = tg3_write32;
9607         tp->read32_mbox = tg3_read32;
9608         tp->write32_mbox = tg3_write32;
9609         tp->write32_tx_mbox = tg3_write32;
9610         tp->write32_rx_mbox = tg3_write32;
9611
9612         /* Various workaround register access methods */
9613         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9614                 tp->write32 = tg3_write_indirect_reg32;
9615         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9616                 tp->write32 = tg3_write_flush_reg32;
9617
9618         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9619             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9620                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9621                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9622                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9623         }
9624
9625         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9626                 tp->read32 = tg3_read_indirect_reg32;
9627                 tp->write32 = tg3_write_indirect_reg32;
9628                 tp->read32_mbox = tg3_read_indirect_mbox;
9629                 tp->write32_mbox = tg3_write_indirect_mbox;
9630                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9631                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9632
9633                 iounmap(tp->regs);
9634                 tp->regs = NULL;
9635
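                     /* With the ICH workaround active everything goes
                      * through config space, so drop the MMIO mapping and
                      * turn off memory decoding; that way the 5703 can no
                      * longer claim (and mis-decode) cycles placed on the
                      * bus by the ICH bridge (see the ich_chipsets[]
                      * comment above).
                      */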
9636                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9637                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9638                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9639         }
9640
9641         /* Get eeprom hw config before calling tg3_set_power_state().
9642          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9643          * determined before calling tg3_set_power_state() so that
9644          * we know whether or not to switch out of Vaux power.
9645          * When the flag is set, it means that GPIO1 is used for eeprom
9646          * write protect and also implies that it is a LOM where GPIOs
9647          * are not used to switch power.
9648          */
9649         tg3_get_eeprom_hw_cfg(tp);
9650
9651         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9652          * GPIO1 driven high will bring 5700's external PHY out of reset.
9653          * It is also used as eeprom write protect on LOMs.
9654          */
9655         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9656         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9657             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9658                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9659                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9660         /* Unused GPIO3 must be driven as output on 5752 because there
9661          * are no pull-up resistors on unused GPIO pins.
9662          */
9663         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9664                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9665
9666         /* Force the chip into D0. */
9667         err = tg3_set_power_state(tp, 0);
9668         if (err) {
9669                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9670                        pci_name(tp->pdev));
9671                 return err;
9672         }
9673
9674         /* 5700 B0 chips do not support checksumming correctly due
9675          * to hardware bugs.
9676          */
9677         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9678                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9679
9680         /* Pseudo-header checksum is done by hardware logic and not
9681          * the offload processors, so make the chip do the pseudo-
9682          * header checksums on receive.  For transmit it is more
9683          * convenient to do the pseudo-header checksum in software
9684          * as Linux does that on transmit for us in all cases.
9685          */
9686         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9687         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
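             /* For reference, the pseudo-header in question is the standard
              * TCP/UDP one: IP source and destination addresses, protocol
              * number and TCP/UDP length.  A typical software version using
              * the generic helper (not driver code) looks like:
              *
              *     csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
              *                              tcplen, IPPROTO_TCP, 0);
              */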
9688
9689         /* Derive initial jumbo mode from MTU assigned in
9690          * ether_setup() via the alloc_etherdev() call
9691          */
9692         if (tp->dev->mtu > ETH_DATA_LEN &&
9693             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9694                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9695
9696         /* Determine WakeOnLan speed to use. */
9697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9698             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9699             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9700             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9701                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9702         } else {
9703                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9704         }
9705
9706         /* A few boards don't want the Ethernet@WireSpeed phy feature */
9707         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9708             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9709              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9710              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9711             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9712                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9713
9714         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9715             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9716                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9717         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9718                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9719
9720         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9721                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9722
9723         tp->coalesce_mode = 0;
9724         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9725             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9726                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9727
9728         /* Initialize MAC MI mode, polling disabled. */
9729         tw32_f(MAC_MI_MODE, tp->mi_mode);
9730         udelay(80);
9731
9732         /* Initialize data/descriptor byte/word swapping. */
9733         val = tr32(GRC_MODE);
9734         val &= GRC_MODE_HOST_STACKUP;
9735         tw32(GRC_MODE, val | tp->grc_mode);
9736
9737         tg3_switch_clocks(tp);
9738
9739         /* Clear this out for sanity. */
9740         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9741
9742         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9743                               &pci_state_reg);
9744         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9745             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9746                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9747
9748                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9749                     chiprevid == CHIPREV_ID_5701_B0 ||
9750                     chiprevid == CHIPREV_ID_5701_B2 ||
9751                     chiprevid == CHIPREV_ID_5701_B5) {
9752                         void __iomem *sram_base;
9753
9754                         /* Write some dummy words into the SRAM status block
9755                          * area, see if it reads back correctly.  If the return
9756                          * value is bad, force enable the PCIX workaround.
9757                          */
9758                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9759
9760                         writel(0x00000000, sram_base);
9761                         writel(0x00000000, sram_base + 4);
9762                         writel(0xffffffff, sram_base + 4);
9763                         if (readl(sram_base) != 0x00000000)
9764                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9765                 }
9766         }
9767
9768         udelay(50);
9769         tg3_nvram_init(tp);
9770
9771         grc_misc_cfg = tr32(GRC_MISC_CFG);
9772         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9773
9774         /* Broadcom's driver says that CIOBE multisplit has a bug */
9775 #if 0
9776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9777             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9778                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9779                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9780         }
9781 #endif
9782         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9783             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9784              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9785                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9786
9787         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9788             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9789                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9790         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9791                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9792                                       HOSTCC_MODE_CLRTICK_TXBD);
9793
9794                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9795                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9796                                        tp->misc_host_ctrl);
9797         }
9798
9799         /* these are limited to 10/100 only */
9800         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9801              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9802             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9803              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9804              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9805               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9806               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9807             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9808              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9809               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9810                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9811
9812         err = tg3_phy_probe(tp);
9813         if (err) {
9814                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9815                        pci_name(tp->pdev), err);
9816                 /* ... but do not return immediately ... */
9817         }
9818
9819         tg3_read_partno(tp);
9820
9821         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9822                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9823         } else {
9824                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9825                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9826                 else
9827                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9828         }
9829
9830         /* 5700 {AX,BX} chips have a broken status block link
9831          * change bit implementation, so we must use the
9832          * status register in those cases.
9833          */
9834         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9835                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9836         else
9837                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9838
9839         /* The led_ctrl is set during tg3_phy_probe; here we might
9840          * have to force the link status polling mechanism based
9841          * upon subsystem IDs.
9842          */
9843         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9844             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9845                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9846                                   TG3_FLAG_USE_LINKCHG_REG);
9847         }
9848
9849         /* For all SERDES we poll the MAC status register. */
9850         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9851                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9852         else
9853                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9854
9855         /* It seems all chips can get confused if TX buffers
9856          * straddle the 4GB address boundary in some cases.
9857          */
9858         tp->dev->hard_start_xmit = tg3_start_xmit;
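             /* Illustrative only (an assumed helper, not one defined in
              * this file as shown): a mapping of length 'len' straddles a
              * 4GB boundary when the upper 32 address bits of its first
              * and last byte differ:
              *
              *     static inline int crosses_4g(dma_addr_t mapping, int len)
              *     {
              *             return ((u64) mapping >> 32) !=
              *                    (((u64) mapping + len - 1) >> 32);
              *     }
              */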
9859
9860         tp->rx_offset = 2;
9861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9862             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9863                 tp->rx_offset = 0;
9864
9865         /* By default, disable wake-on-lan.  User can change this
9866          * using ETHTOOL_SWOL.
9867          */
9868         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9869
9870         return err;
9871 }
9872
9873 #ifdef CONFIG_SPARC64
9874 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9875 {
9876         struct net_device *dev = tp->dev;
9877         struct pci_dev *pdev = tp->pdev;
9878         struct pcidev_cookie *pcp = pdev->sysdata;
9879
9880         if (pcp != NULL) {
9881                 int node = pcp->prom_node;
9882
9883                 if (prom_getproplen(node, "local-mac-address") == 6) {
9884                         prom_getproperty(node, "local-mac-address",
9885                                          dev->dev_addr, 6);
9886                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9887                         return 0;
9888                 }
9889         }
9890         return -ENODEV;
9891 }
9892
9893 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9894 {
9895         struct net_device *dev = tp->dev;
9896
9897         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9898         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9899         return 0;
9900 }
9901 #endif
9902
9903 static int __devinit tg3_get_device_address(struct tg3 *tp)
9904 {
9905         struct net_device *dev = tp->dev;
9906         u32 hi, lo, mac_offset;
9907
9908 #ifdef CONFIG_SPARC64
9909         if (!tg3_get_macaddr_sparc(tp))
9910                 return 0;
9911 #endif
9912
9913         mac_offset = 0x7c;
9914         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9915              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9916             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9917                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9918                         mac_offset = 0xcc;
9919                 if (tg3_nvram_lock(tp))
9920                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9921                 else
9922                         tg3_nvram_unlock(tp);
9923         }
9924
9925         /* First try to get it from MAC address mailbox. */
9926         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
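             /* 0x484b is ASCII "HK"; it appears to act as a validity
              * signature written by the bootcode when the mailbox holds a
              * real MAC address (an assumption based on how the check
              * below is used).
              */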
9927         if ((hi >> 16) == 0x484b) {
9928                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9929                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9930
9931                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9932                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9933                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9934                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9935                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9936         }
9937         /* Next, try NVRAM. */
9938         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9939                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9940                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9941                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9942                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9943                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9944                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9945                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9946                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9947         }
9948         /* Finally just fetch it out of the MAC control regs. */
9949         else {
9950                 hi = tr32(MAC_ADDR_0_HIGH);
9951                 lo = tr32(MAC_ADDR_0_LOW);
9952
9953                 dev->dev_addr[5] = lo & 0xff;
9954                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9955                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9956                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9957                 dev->dev_addr[1] = hi & 0xff;
9958                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9959         }
9960
9961         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9962 #ifdef CONFIG_SPARC64
9963                 if (!tg3_get_default_macaddr_sparc(tp))
9964                         return 0;
9965 #endif
9966                 return -EINVAL;
9967         }
9968         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9969         return 0;
9970 }
9971
9972 #define BOUNDARY_SINGLE_CACHELINE       1
9973 #define BOUNDARY_MULTI_CACHELINE        2
9974
9975 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9976 {
9977         int cacheline_size;
9978         u8 byte;
9979         int goal;
9980
9981         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9982         if (byte == 0)
9983                 cacheline_size = 1024;
9984         else
9985                 cacheline_size = (int) byte * 4;
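             /* PCI_CACHE_LINE_SIZE is specified in 32-bit words, so a value
              * of 0x10 means a 64-byte cache line; a value of 0 means the
              * size was never programmed, and 1024 is used as the fallback.
              */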
9986
9987         /* On 5703 and later chips, the boundary bits have no
9988          * effect.
9989          */
9990         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9991             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9992             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9993                 goto out;
9994
9995 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9996         goal = BOUNDARY_MULTI_CACHELINE;
9997 #else
9998 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9999         goal = BOUNDARY_SINGLE_CACHELINE;
10000 #else
10001         goal = 0;
10002 #endif
10003 #endif
10004
10005         if (!goal)
10006                 goto out;
10007
10008         /* PCI controllers on most RISC systems tend to disconnect
10009          * when a device tries to burst across a cache-line boundary.
10010          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10011          *
10012          * Unfortunately, for PCI-E there are only limited
10013          * write-side controls for this, and thus for reads
10014          * we will still get the disconnects.  We'll also waste
10015          * these PCI cycles for both read and write for chips
10016          * other than 5700 and 5701 which do not implement the
10017          * boundary bits.
10018          */
10019         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10020             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10021                 switch (cacheline_size) {
10022                 case 16:
10023                 case 32:
10024                 case 64:
10025                 case 128:
10026                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10027                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10028                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10029                         } else {
10030                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10031                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10032                         }
10033                         break;
10034
10035                 case 256:
10036                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10037                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10038                         break;
10039
10040                 default:
10041                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10042                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10043                         break;
10044                 };
10045         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10046                 switch (cacheline_size) {
10047                 case 16:
10048                 case 32:
10049                 case 64:
10050                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10051                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10052                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10053                                 break;
10054                         }
10055                         /* fallthrough */
10056                 case 128:
10057                 default:
10058                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10059                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10060                         break;
10061                 };
10062         } else {
10063                 switch (cacheline_size) {
10064                 case 16:
10065                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10066                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10067                                         DMA_RWCTRL_WRITE_BNDRY_16);
10068                                 break;
10069                         }
10070                         /* fallthrough */
10071                 case 32:
10072                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10073                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10074                                         DMA_RWCTRL_WRITE_BNDRY_32);
10075                                 break;
10076                         }
10077                         /* fallthrough */
10078                 case 64:
10079                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10080                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10081                                         DMA_RWCTRL_WRITE_BNDRY_64);
10082                                 break;
10083                         }
10084                         /* fallthrough */
10085                 case 128:
10086                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10087                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10088                                         DMA_RWCTRL_WRITE_BNDRY_128);
10089                                 break;
10090                         }
10091                         /* fallthrough */
10092                 case 256:
10093                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10094                                 DMA_RWCTRL_WRITE_BNDRY_256);
10095                         break;
10096                 case 512:
10097                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10098                                 DMA_RWCTRL_WRITE_BNDRY_512);
10099                         break;
10100                 case 1024:
10101                 default:
10102                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10103                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10104                         break;
10105                 };
10106         }
10107
10108 out:
10109         return val;
10110 }
10111
10112 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10113 {
10114         struct tg3_internal_buffer_desc test_desc;
10115         u32 sram_dma_descs;
10116         int i, ret;
10117
10118         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10119
10120         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10121         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10122         tw32(RDMAC_STATUS, 0);
10123         tw32(WDMAC_STATUS, 0);
10124
10125         tw32(BUFMGR_MODE, 0);
10126         tw32(FTQ_RESET, 0);
10127
10128         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10129         test_desc.addr_lo = buf_dma & 0xffffffff;
10130         test_desc.nic_mbuf = 0x00002100;
10131         test_desc.len = size;
10132
10133         /*
10134          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10135          * the *second* time the tg3 driver was getting loaded after an
10136          * initial scan.
10137          *
10138          * Broadcom tells me:
10139          *   ...the DMA engine is connected to the GRC block and a DMA
10140          *   reset may affect the GRC block in some unpredictable way...
10141          *   The behavior of resets to individual blocks has not been tested.
10142          *
10143          * Broadcom noted the GRC reset will also reset all sub-components.
10144          */
10145         if (to_device) {
10146                 test_desc.cqid_sqid = (13 << 8) | 2;
10147
10148                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10149                 udelay(40);
10150         } else {
10151                 test_desc.cqid_sqid = (16 << 8) | 7;
10152
10153                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10154                 udelay(40);
10155         }
10156         test_desc.flags = 0x00000005;
10157
10158         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10159                 u32 val;
10160
10161                 val = *(((u32 *)&test_desc) + i);
10162                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10163                                        sram_dma_descs + (i * sizeof(u32)));
10164                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10165         }
10166         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10167
10168         if (to_device) {
10169                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10170         } else {
10171                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10172         }
10173
10174         ret = -ENODEV;
10175         for (i = 0; i < 40; i++) {
10176                 u32 val;
10177
10178                 if (to_device)
10179                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10180                 else
10181                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10182                 if ((val & 0xffff) == sram_dma_descs) {
10183                         ret = 0;
10184                         break;
10185                 }
10186
10187                 udelay(100);
10188         }
10189
10190         return ret;
10191 }
10192
10193 #define TEST_BUFFER_SIZE        0x2000
10194
10195 static int __devinit tg3_test_dma(struct tg3 *tp)
10196 {
10197         dma_addr_t buf_dma;
10198         u32 *buf, saved_dma_rwctrl;
10199         int ret;
10200
10201         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10202         if (!buf) {
10203                 ret = -ENOMEM;
10204                 goto out_nofree;
10205         }
10206
10207         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10208                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10209
10210         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10211
10212         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10213                 /* DMA read watermark not used on PCIE */
10214                 tp->dma_rwctrl |= 0x00180000;
10215         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10216                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10217                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10218                         tp->dma_rwctrl |= 0x003f0000;
10219                 else
10220                         tp->dma_rwctrl |= 0x003f000f;
10221         } else {
10222                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10223                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10224                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10225
10226                         if (ccval == 0x6 || ccval == 0x7)
10227                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10228
10229                         /* Set bit 23 to enable PCIX hw bug fix */
10230                         tp->dma_rwctrl |= 0x009f0000;
10231                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10232                         /* 5780 always in PCIX mode */
10233                         tp->dma_rwctrl |= 0x00144000;
10234                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10235                         /* 5714 always in PCIX mode */
10236                         tp->dma_rwctrl |= 0x00148000;
10237                 } else {
10238                         tp->dma_rwctrl |= 0x001b000f;
10239                 }
10240         }
10241
10242         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10243             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10244                 tp->dma_rwctrl &= 0xfffffff0;
10245
10246         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10247             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10248                 /* Remove this if it causes problems for some boards. */
10249                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10250
10251                 /* On 5700/5701 chips, we need to set this bit.
10252                  * Otherwise the chip will issue cacheline transactions
10253                  * to streamable DMA memory without all of the byte
10254                  * enables turned on.  This is an error on several
10255                  * RISC PCI controllers, in particular sparc64.
10256                  *
10257                  * On 5703/5704 chips, this bit has been reassigned
10258                  * a different meaning.  In particular, it is used
10259                  * on those chips to enable a PCI-X workaround.
10260                  */
10261                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10262         }
10263
10264         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10265
10266 #if 0
10267         /* Unneeded, already done by tg3_get_invariants.  */
10268         tg3_switch_clocks(tp);
10269 #endif
10270
10271         ret = 0;
10272         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10273             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10274                 goto out;
10275
10276         /* It is best to perform DMA test with maximum write burst size
10277          * to expose the 5700/5701 write DMA bug.
10278          */
10279         saved_dma_rwctrl = tp->dma_rwctrl;
10280         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10281         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10282
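              /* Retry loop: DMA a known pattern to the chip, DMA it back,
               * and verify it.  On corruption we drop to a 16-byte write
               * boundary once and rerun the test; corruption with the
               * 16-byte boundary already in place is fatal.
               */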
10283         while (1) {
10284                 u32 *p = buf, i;
10285
10286                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10287                         p[i] = i;
10288
10289                 /* Send the buffer to the chip. */
10290                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10291                 if (ret) {
10292                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10293                         break;
10294                 }
10295
10296 #if 0
10297                 /* validate data reached card RAM correctly. */
10298                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10299                         u32 val;
10300                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10301                         if (le32_to_cpu(val) != p[i]) {
10302                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10303                                 /* ret = -ENODEV here? */
10304                         }
10305                         p[i] = 0;
10306                 }
10307 #endif
10308                 /* Now read it back. */
10309                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10310                 if (ret) {
10311                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10312
10313                         break;
10314                 }
10315
10316                 /* Verify it. */
10317                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10318                         if (p[i] == i)
10319                                 continue;
10320
10321                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10322                             DMA_RWCTRL_WRITE_BNDRY_16) {
10323                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10324                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10325                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10326                                 break;
10327                         } else {
10328                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10329                                 ret = -ENODEV;
10330                                 goto out;
10331                         }
10332                 }
10333
10334                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10335                         /* Success. */
10336                         ret = 0;
10337                         break;
10338                 }
10339         }
10340         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10341             DMA_RWCTRL_WRITE_BNDRY_16) {
10342                 static struct pci_device_id dma_wait_state_chipsets[] = {
10343                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10344                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10345                         { },
10346                 };
10347
10348                 /* DMA test passed without adjusting DMA boundary,
10349                  * now look for chipsets that are known to expose the
10350                  * DMA bug without failing the test.
10351                  */
10352                 if (pci_dev_present(dma_wait_state_chipsets)) {
10353                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10354                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10355                 }
10356                 else
10357                         /* Safe to use the calculated DMA boundary. */
10358                         tp->dma_rwctrl = saved_dma_rwctrl;
10359
10360                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10361         }
10362
10363 out:
10364         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10365 out_nofree:
10366         return ret;
10367 }
10368
10369 static void __devinit tg3_init_link_config(struct tg3 *tp)
10370 {
10371         tp->link_config.advertising =
10372                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10373                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10374                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10375                  ADVERTISED_Autoneg | ADVERTISED_MII);
10376         tp->link_config.speed = SPEED_INVALID;
10377         tp->link_config.duplex = DUPLEX_INVALID;
10378         tp->link_config.autoneg = AUTONEG_ENABLE;
10379         netif_carrier_off(tp->dev);
10380         tp->link_config.active_speed = SPEED_INVALID;
10381         tp->link_config.active_duplex = DUPLEX_INVALID;
10382         tp->link_config.phy_is_low_power = 0;
10383         tp->link_config.orig_speed = SPEED_INVALID;
10384         tp->link_config.orig_duplex = DUPLEX_INVALID;
10385         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10386 }
10387
10388 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10389 {
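              /* 5705-class and newer chips use their own set of buffer
               * manager watermarks; their jumbo watermarks are the 5780
               * family defaults.
               */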
10390         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10391                 tp->bufmgr_config.mbuf_read_dma_low_water =
10392                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10393                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10394                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10395                 tp->bufmgr_config.mbuf_high_water =
10396                         DEFAULT_MB_HIGH_WATER_5705;
10397
10398                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10399                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10400                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10401                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10402                 tp->bufmgr_config.mbuf_high_water_jumbo =
10403                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10404         } else {
10405                 tp->bufmgr_config.mbuf_read_dma_low_water =
10406                         DEFAULT_MB_RDMA_LOW_WATER;
10407                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10408                         DEFAULT_MB_MACRX_LOW_WATER;
10409                 tp->bufmgr_config.mbuf_high_water =
10410                         DEFAULT_MB_HIGH_WATER;
10411
10412                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10413                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10414                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10415                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10416                 tp->bufmgr_config.mbuf_high_water_jumbo =
10417                         DEFAULT_MB_HIGH_WATER_JUMBO;
10418         }
10419
10420         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10421         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10422 }
10423
10424 static char * __devinit tg3_phy_string(struct tg3 *tp)
10425 {
10426         switch (tp->phy_id & PHY_ID_MASK) {
10427         case PHY_ID_BCM5400:    return "5400";
10428         case PHY_ID_BCM5401:    return "5401";
10429         case PHY_ID_BCM5411:    return "5411";
10430         case PHY_ID_BCM5701:    return "5701";
10431         case PHY_ID_BCM5703:    return "5703";
10432         case PHY_ID_BCM5704:    return "5704";
10433         case PHY_ID_BCM5705:    return "5705";
10434         case PHY_ID_BCM5750:    return "5750";
10435         case PHY_ID_BCM5752:    return "5752";
10436         case PHY_ID_BCM5714:    return "5714";
10437         case PHY_ID_BCM5780:    return "5780";
10438         case PHY_ID_BCM8002:    return "8002/serdes";
10439         case 0:                 return "serdes";
10440         default:                return "unknown";
10441         }
10442 }
10443
10444 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10445 {
10446         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10447                 strcpy(str, "PCI Express");
10448                 return str;
10449         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10450                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10451
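                      /* The low bits of CLOCK_CTRL encode the PCI-X bus speed. */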
10452                 strcpy(str, "PCIX:");
10453
10454                 if ((clock_ctrl == 7) ||
10455                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10456                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10457                         strcat(str, "133MHz");
10458                 else if (clock_ctrl == 0)
10459                         strcat(str, "33MHz");
10460                 else if (clock_ctrl == 2)
10461                         strcat(str, "50MHz");
10462                 else if (clock_ctrl == 4)
10463                         strcat(str, "66MHz");
10464                 else if (clock_ctrl == 6)
10465                         strcat(str, "100MHz");
10466                 else if (clock_ctrl == 7)
10467                         strcat(str, "133MHz");
10468         } else {
10469                 strcpy(str, "PCI:");
10470                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10471                         strcat(str, "66MHz");
10472                 else
10473                         strcat(str, "33MHz");
10474         }
10475         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10476                 strcat(str, ":32-bit");
10477         else
10478                 strcat(str, ":64-bit");
10479         return str;
10480 }
10481
10482 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10483 {
10484         struct pci_dev *peer;
10485         unsigned int func, devnr = tp->pdev->devfn & ~7;
10486
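              /* devfn & ~7 is function 0 of our own slot; probe all eight
               * functions looking for the other port of a dual-port board.
               */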
10487         for (func = 0; func < 8; func++) {
10488                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10489                 if (peer && peer != tp->pdev)
10490                         break;
10491                 pci_dev_put(peer);
10492         }
10493         /* The 5704 can be configured in single-port mode; set peer to
10494          * tp->pdev in that case.
10495          */
10496         if (!peer) {
10497                 peer = tp->pdev;
10498                 return peer;
10499         }
10500
10501         /*
10502          * We don't need to keep the refcount elevated; there's no way
10503          * to remove one half of this device without removing the other.
10504          */
10505         pci_dev_put(peer);
10506
10507         return peer;
10508 }
10509
10510 static void __devinit tg3_init_coal(struct tg3 *tp)
10511 {
10512         struct ethtool_coalesce *ec = &tp->coal;
10513
10514         memset(ec, 0, sizeof(*ec));
10515         ec->cmd = ETHTOOL_GCOALESCE;
10516         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10517         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10518         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10519         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10520         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10521         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10522         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10523         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10524         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10525
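              /* Chips using the clear-ticks-on-BD host coalescing modes need
               * the *_CLRTCKS variants of the default tick values.
               */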
10526         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10527                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10528                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10529                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10530                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10531                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10532         }
10533
10534         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10535                 ec->rx_coalesce_usecs_irq = 0;
10536                 ec->tx_coalesce_usecs_irq = 0;
10537                 ec->stats_block_coalesce_usecs = 0;
10538         }
10539 }
10540
10541 static int __devinit tg3_init_one(struct pci_dev *pdev,
10542                                   const struct pci_device_id *ent)
10543 {
10544         static int tg3_version_printed = 0;
10545         unsigned long tg3reg_base, tg3reg_len;
10546         struct net_device *dev;
10547         struct tg3 *tp;
10548         int i, err, pci_using_dac, pm_cap;
10549         char str[40];
10550
10551         if (tg3_version_printed++ == 0)
10552                 printk(KERN_INFO "%s", version);
10553
10554         err = pci_enable_device(pdev);
10555         if (err) {
10556                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10557                        "aborting.\n");
10558                 return err;
10559         }
10560
10561         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10562                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10563                        "base address, aborting.\n");
10564                 err = -ENODEV;
10565                 goto err_out_disable_pdev;
10566         }
10567
10568         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10569         if (err) {
10570                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10571                        "aborting.\n");
10572                 goto err_out_disable_pdev;
10573         }
10574
10575         pci_set_master(pdev);
10576
10577         /* Find power-management capability. */
10578         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10579         if (pm_cap == 0) {
10580                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
10581                        "aborting.\n");
10582                 err = -EIO;
10583                 goto err_out_free_res;
10584         }
10585
10586         /* Configure DMA attributes. */
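              /* Try a 64-bit DMA mask first; fall back to 32-bit if the
               * platform cannot provide it.
               */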
10587         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10588         if (!err) {
10589                 pci_using_dac = 1;
10590                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10591                 if (err < 0) {
10592                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10593                                "for consistent allocations\n");
10594                         goto err_out_free_res;
10595                 }
10596         } else {
10597                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10598                 if (err) {
10599                         printk(KERN_ERR PFX "No usable DMA configuration, "
10600                                "aborting.\n");
10601                         goto err_out_free_res;
10602                 }
10603                 pci_using_dac = 0;
10604         }
10605
10606         tg3reg_base = pci_resource_start(pdev, 0);
10607         tg3reg_len = pci_resource_len(pdev, 0);
10608
10609         dev = alloc_etherdev(sizeof(*tp));
10610         if (!dev) {
10611                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10612                 err = -ENOMEM;
10613                 goto err_out_free_res;
10614         }
10615
10616         SET_MODULE_OWNER(dev);
10617         SET_NETDEV_DEV(dev, &pdev->dev);
10618
10619         if (pci_using_dac)
10620                 dev->features |= NETIF_F_HIGHDMA;
10621         dev->features |= NETIF_F_LLTX;
10622 #if TG3_VLAN_TAG_USED
10623         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10624         dev->vlan_rx_register = tg3_vlan_rx_register;
10625         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10626 #endif
10627
10628         tp = netdev_priv(dev);
10629         tp->pdev = pdev;
10630         tp->dev = dev;
10631         tp->pm_cap = pm_cap;
10632         tp->mac_mode = TG3_DEF_MAC_MODE;
10633         tp->rx_mode = TG3_DEF_RX_MODE;
10634         tp->tx_mode = TG3_DEF_TX_MODE;
10635         tp->mi_mode = MAC_MI_MODE_BASE;
10636         if (tg3_debug > 0)
10637                 tp->msg_enable = tg3_debug;
10638         else
10639                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10640
10641         /* The word/byte swap controls here control register access byte
10642          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10643          * setting below.
10644          */
10645         tp->misc_host_ctrl =
10646                 MISC_HOST_CTRL_MASK_PCI_INT |
10647                 MISC_HOST_CTRL_WORD_SWAP |
10648                 MISC_HOST_CTRL_INDIR_ACCESS |
10649                 MISC_HOST_CTRL_PCISTATE_RW;
10650
10651         /* The NONFRM (non-frame) byte/word swap controls take effect
10652          * on descriptor entries, anything which isn't packet data.
10653          *
10654          * The StrongARM chips on the board (one for tx, one for rx)
10655          * are running in big-endian mode.
10656          */
10657         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10658                         GRC_MODE_WSWAP_NONFRM_DATA);
10659 #ifdef __BIG_ENDIAN
10660         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10661 #endif
10662         spin_lock_init(&tp->lock);
10663         spin_lock_init(&tp->tx_lock);
10664         spin_lock_init(&tp->indirect_lock);
10665         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10666
10667         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10668         if (!tp->regs) {
10669                 printk(KERN_ERR PFX "Cannot map device registers, "
10670                        "aborting.\n");
10671                 err = -ENOMEM;
10672                 goto err_out_free_dev;
10673         }
10674
10675         tg3_init_link_config(tp);
10676
10677         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10678         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10679         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10680
10681         dev->open = tg3_open;
10682         dev->stop = tg3_close;
10683         dev->get_stats = tg3_get_stats;
10684         dev->set_multicast_list = tg3_set_rx_mode;
10685         dev->set_mac_address = tg3_set_mac_addr;
10686         dev->do_ioctl = tg3_ioctl;
10687         dev->tx_timeout = tg3_tx_timeout;
10688         dev->poll = tg3_poll;
10689         dev->ethtool_ops = &tg3_ethtool_ops;
10690         dev->weight = 64;
10691         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10692         dev->change_mtu = tg3_change_mtu;
10693         dev->irq = pdev->irq;
10694 #ifdef CONFIG_NET_POLL_CONTROLLER
10695         dev->poll_controller = tg3_poll_controller;
10696 #endif
10697
10698         err = tg3_get_invariants(tp);
10699         if (err) {
10700                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10701                        "aborting.\n");
10702                 goto err_out_iounmap;
10703         }
10704
10705         tg3_init_bufmgr_config(tp);
10706
10707 #if TG3_TSO_SUPPORT != 0
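              /* Chips with hardware TSO are always TSO capable.  The 5700,
               * 5701, 5705 A0 and ASF-enabled boards cannot use firmware
               * TSO; all other chips can.
               */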
10708         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10709                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10710         }
10711         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10712             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10713             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10714             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10715                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10716         } else {
10717                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10718         }
10719
10720         /* TSO is off by default; the user can enable it using ethtool. */
10721 #if 0
10722         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10723                 dev->features |= NETIF_F_TSO;
10724 #endif
10725
10726 #endif
10727
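              /* A 5705 A1 without TSO, on a bus that is not running at high
               * speed, can only post 64 pending receive buffers.
               */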
10728         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10729             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10730             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10731                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10732                 tp->rx_pending = 63;
10733         }
10734
10735         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10736             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10737                 tp->pdev_peer = tg3_find_peer(tp);
10738
10739         err = tg3_get_device_address(tp);
10740         if (err) {
10741                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10742                        "aborting.\n");
10743                 goto err_out_iounmap;
10744         }
10745
10746         /*
10747          * Reset the chip in case the UNDI or EFI driver did not shut it
10748          * down; otherwise the DMA self test will enable WDMAC and we'll
10749          * see (spurious) pending DMA on the PCI bus at that point.
10750          */
10751         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10752             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10753                 pci_save_state(tp->pdev);
10754                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10755                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10756         }
10757
10758         err = tg3_test_dma(tp);
10759         if (err) {
10760                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10761                 goto err_out_iounmap;
10762         }
10763
10764         /* Tigon3 can do checksum offload for IPv4 only, and some chips
10765          * have buggy checksumming.
10766          */
10767         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10768                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10769                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10770         } else
10771                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10772
10773         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10774                 dev->features &= ~NETIF_F_HIGHDMA;
10775
10776         /* flow control autonegotiation is default behavior */
10777         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10778
10779         tg3_init_coal(tp);
10780
10781         /* Now that we have fully set up the chip, save away a snapshot
10782          * of the PCI config space.  We need to restore this after
10783          * GRC_MISC_CFG core clock resets and some resume events.
10784          */
10785         pci_save_state(tp->pdev);
10786
10787         err = register_netdev(dev);
10788         if (err) {
10789                 printk(KERN_ERR PFX "Cannot register net device, "
10790                        "aborting.\n");
10791                 goto err_out_iounmap;
10792         }
10793
10794         pci_set_drvdata(pdev, dev);
10795
10796         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10797                dev->name,
10798                tp->board_part_number,
10799                tp->pci_chip_rev_id,
10800                tg3_phy_string(tp),
10801                tg3_bus_string(tp, str),
10802                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10803
10804         for (i = 0; i < 6; i++)
10805                 printk("%2.2x%c", dev->dev_addr[i],
10806                        i == 5 ? '\n' : ':');
10807
10808         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10809                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10810                "TSOcap[%d]\n",
10811                dev->name,
10812                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10813                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10814                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10815                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10816                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10817                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10818                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10819         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10820                dev->name, tp->dma_rwctrl);
10821
10822         return 0;
10823
10824 err_out_iounmap:
10825         if (tp->regs) {
10826                 iounmap(tp->regs);
10827                 tp->regs = NULL;
10828         }
10829
10830 err_out_free_dev:
10831         free_netdev(dev);
10832
10833 err_out_free_res:
10834         pci_release_regions(pdev);
10835
10836 err_out_disable_pdev:
10837         pci_disable_device(pdev);
10838         pci_set_drvdata(pdev, NULL);
10839         return err;
10840 }
10841
10842 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10843 {
10844         struct net_device *dev = pci_get_drvdata(pdev);
10845
10846         if (dev) {
10847                 struct tg3 *tp = netdev_priv(dev);
10848
10849                 unregister_netdev(dev);
10850                 if (tp->regs) {
10851                         iounmap(tp->regs);
10852                         tp->regs = NULL;
10853                 }
10854                 free_netdev(dev);
10855                 pci_release_regions(pdev);
10856                 pci_disable_device(pdev);
10857                 pci_set_drvdata(pdev, NULL);
10858         }
10859 }
10860
10861 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10862 {
10863         struct net_device *dev = pci_get_drvdata(pdev);
10864         struct tg3 *tp = netdev_priv(dev);
10865         int err;
10866
10867         if (!netif_running(dev))
10868                 return 0;
10869
10870         tg3_netif_stop(tp);
10871
10872         del_timer_sync(&tp->timer);
10873
10874         tg3_full_lock(tp, 1);
10875         tg3_disable_ints(tp);
10876         tg3_full_unlock(tp);
10877
10878         netif_device_detach(dev);
10879
10880         tg3_full_lock(tp, 0);
10881         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10882         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
10883         tg3_full_unlock(tp);
10884
10885         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10886         if (err) {
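                      /* Could not enter the requested power state; bring the
                       * hardware and the interface back up so the device
                       * remains usable.
                       */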
10887                 tg3_full_lock(tp, 0);
10888
10889                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10890                 tg3_init_hw(tp);
10891
10892                 tp->timer.expires = jiffies + tp->timer_offset;
10893                 add_timer(&tp->timer);
10894
10895                 netif_device_attach(dev);
10896                 tg3_netif_start(tp);
10897
10898                 tg3_full_unlock(tp);
10899         }
10900
10901         return err;
10902 }
10903
10904 static int tg3_resume(struct pci_dev *pdev)
10905 {
10906         struct net_device *dev = pci_get_drvdata(pdev);
10907         struct tg3 *tp = netdev_priv(dev);
10908         int err;
10909
10910         if (!netif_running(dev))
10911                 return 0;
10912
10913         pci_restore_state(tp->pdev);
10914
10915         err = tg3_set_power_state(tp, 0);
10916         if (err)
10917                 return err;
10918
10919         netif_device_attach(dev);
10920
10921         tg3_full_lock(tp, 0);
10922
10923         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10924         tg3_init_hw(tp);
10925
10926         tp->timer.expires = jiffies + tp->timer_offset;
10927         add_timer(&tp->timer);
10928
10929         tg3_netif_start(tp);
10930
10931         tg3_full_unlock(tp);
10932
10933         return 0;
10934 }
10935
10936 static struct pci_driver tg3_driver = {
10937         .name           = DRV_MODULE_NAME,
10938         .id_table       = tg3_pci_tbl,
10939         .probe          = tg3_init_one,
10940         .remove         = __devexit_p(tg3_remove_one),
10941         .suspend        = tg3_suspend,
10942         .resume         = tg3_resume
10943 };
10944
10945 static int __init tg3_init(void)
10946 {
10947         return pci_module_init(&tg3_driver);
10948 }
10949
10950 static void __exit tg3_cleanup(void)
10951 {
10952         pci_unregister_driver(&tg3_driver);
10953 }
10954
10955 module_init(tg3_init);
10956 module_exit(tg3_cleanup);