1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
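/* Illustration: because TG3_TX_RING_SIZE is a power of two, the AND in
 * NEXT_TX() behaves exactly like a modulo, e.g. NEXT_TX(511) ==
 * (512 & 511) == 0, so producer/consumer indices wrap around the ring
 * without a divide.  By the same arithmetic TX_BUFFS_AVAIL() never
 * reports more than tx_pending free slots, i.e. TX_RING_GAP()
 * descriptors are always held back between producer and consumer.
 */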
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
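/* Note: TG3_NUM_STATS above is computed from
 * sizeof(struct tg3_ethtool_stats), so this string table has to stay
 * in the same order, and have the same number of entries, as the u64
 * counters in that structure; ethtool pairs names with values purely
 * by position.
 */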
316
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
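/* In short: on chips flagged TG3_FLAG_PCIX_TARGET_HWBUG a direct MMIO
 * store to the register window is not trusted, so the write is routed
 * through the TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA config-space
 * window instead, with indirect_lock held so the two config cycles
 * cannot interleave with another writer.  On chips flagged
 * TG3_FLAG_5701_REG_WRITE_BUG the direct write is read straight back,
 * so it is known to have reached the chip before we carry on.
 */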
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
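/* _tw32_flush() is the "write it and make sure it arrived" variant:
 * MMIO writes are posted, so without the trailing readl() the store
 * could still be sitting in a host bridge while the caller goes on to,
 * say, udelay() for a settling time.  Reading the same register back
 * forces every posted write ahead of it to complete first.
 */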
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
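/* The mailbox helpers exist because of two more errata flags:
 * TG3_FLAG_TXD_MBOX_HWBUG means a single write to the TX producer
 * mailbox may not take effect on affected chips, so the value is
 * simply written twice; TG3_FLAG_MBOX_WRITE_REORDER means mailbox
 * writes may be reordered on their way to the chip, so a readl()
 * flushes the write out before the hardware acts on stale state.
 */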
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
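/* Shorthand used by the rest of the driver, roughly:
 *
 *	tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);   write
 *	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);        write + flush
 *	val = tr32(MAC_STATUS);                            32-bit read
 *
 * All of these implicitly use a local variable named 'tp', so they
 * only work inside functions that have one in scope.
 */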
378
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
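/* tg3_write_mem()/tg3_read_mem() reach NIC-internal SRAM (shared
 * memory, firmware handshake words and the like) through a similar
 * indirect window: the SRAM offset goes into TG3PCI_MEM_WIN_BASE_ADDR
 * and the data moves through TG3PCI_MEM_WIN_DATA, all under
 * indirect_lock.  The window base is put back to zero before the lock
 * is dropped, per the "Always leave this as zero" rule above.
 */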
404
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which reenables interrupts
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
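/* The only difference from tg3_enable_ints() is the barrier: instead
 * of reading the interrupt mailbox back (a full PCI round trip),
 * mmiowb() merely orders the mailbox write ahead of writes done by
 * another CPU that takes the lock next, so this hot path can return
 * without waiting for the flush.
 */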
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511         
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
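/* MII register access goes through the MAC's MDIO shuttle: a frame is
 * built in MAC_MI_COM holding the PHY address, register number and
 * opcode (plus data for writes), the MAC clocks it onto the MDIO bus,
 * and MI_COM_BUSY clears when the transaction is done.  For a read,
 * the low 16 bits (MI_COM_DATA_MASK) then hold the register value.
 * Hardware autopolling is paused around the transaction so the poller
 * does not race for MAC_MI_COM, and is restored afterwards.
 */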
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559         
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
585
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit < 0)
625                 return -EBUSY;
626
627         return 0;
628 }
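/* This is the standard 802.3 PHY soft reset: set BMCR_RESET and poll
 * MII_BMCR until the PHY clears the bit on its own.  At 10us per poll
 * and a 5000-iteration budget the wait tops out around 50ms before
 * the routine gives up with -EBUSY.
 */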
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit < 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* Reset the tigon3 PHY.  Callers do this when there is no valid
811  * link, or when a PHY reset is being explicitly forced.
812  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips that
860          * support jumbo frames. */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frames transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1009                 udelay(100);
1010
1011                 return 0;
1012
1013         case 1:
1014                 power_control |= 1;
1015                 break;
1016
1017         case 2:
1018                 power_control |= 2;
1019                 break;
1020
1021         case 3:
1022                 power_control |= 3;
1023                 break;
1024
1025         default:
1026                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1027                        "requested.\n",
1028                        tp->dev->name, state);
1029                 return -EINVAL;
1030         };
1031
1032         power_control |= PCI_PM_CTRL_PME_ENABLE;
1033
1034         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1035         tw32(TG3PCI_MISC_HOST_CTRL,
1036              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1037
1038         if (tp->link_config.phy_is_low_power == 0) {
1039                 tp->link_config.phy_is_low_power = 1;
1040                 tp->link_config.orig_speed = tp->link_config.speed;
1041                 tp->link_config.orig_duplex = tp->link_config.duplex;
1042                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1043         }
1044
1045         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1046                 tp->link_config.speed = SPEED_10;
1047                 tp->link_config.duplex = DUPLEX_HALF;
1048                 tp->link_config.autoneg = AUTONEG_ENABLE;
1049                 tg3_setup_phy(tp, 0);
1050         }
1051
1052         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1053
1054         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1055                 u32 mac_mode;
1056
1057                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1058                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1059                         udelay(40);
1060
1061                         mac_mode = MAC_MODE_PORT_MODE_MII;
1062
1063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1064                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1065                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1066                 } else {
1067                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1068                 }
1069
1070                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1071                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1072
1073                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1074                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1075                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1076
1077                 tw32_f(MAC_MODE, mac_mode);
1078                 udelay(100);
1079
1080                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1081                 udelay(10);
1082         }
1083
1084         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1085             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1086              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1087                 u32 base_val;
1088
1089                 base_val = tp->pci_clock_ctrl;
1090                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1091                              CLOCK_CTRL_TXCLK_DISABLE);
1092
1093                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1094                      CLOCK_CTRL_ALTCLK |
1095                      CLOCK_CTRL_PWRDOWN_PLL133);
1096                 udelay(40);
1097         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1098                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1099                 u32 newbits1, newbits2;
1100
1101                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1102                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1103                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1104                                     CLOCK_CTRL_TXCLK_DISABLE |
1105                                     CLOCK_CTRL_ALTCLK);
1106                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1107                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1108                         newbits1 = CLOCK_CTRL_625_CORE;
1109                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1110                 } else {
1111                         newbits1 = CLOCK_CTRL_ALTCLK;
1112                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1113                 }
1114
1115                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1116                 udelay(40);
1117
1118                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1119                 udelay(40);
1120
1121                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1122                         u32 newbits3;
1123
1124                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1125                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1126                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1127                                             CLOCK_CTRL_TXCLK_DISABLE |
1128                                             CLOCK_CTRL_44MHZ_CORE);
1129                         } else {
1130                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1131                         }
1132
1133                         tw32_f(TG3PCI_CLOCK_CTRL,
1134                                          tp->pci_clock_ctrl | newbits3);
1135                         udelay(40);
1136                 }
1137         }
1138
1139         tg3_frob_aux_power(tp);
1140
1141         /* Workaround for unstable PLL clock */
1142         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1143             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1144                 u32 val = tr32(0x7d00);
1145
1146                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1147                 tw32(0x7d00, val);
1148                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1149                         tg3_halt_cpu(tp, RX_CPU_BASE);
1150         }
1151
1152         /* Finally, set the new power state. */
1153         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1154
1155         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1156
1157         return 0;
1158 }
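/* The 'state' argument above maps directly onto PCI power-management
 * D-states: 0 programs D0 into PCI_PM_CTRL and returns early, while
 * 1-3 select D1-D3hot.  For the low-power states the routine masks
 * PCI interrupts, drops a copper PHY to 10/half for Wake-on-LAN,
 * switches to the slow core clocks and aux power, and only then
 * writes the new D-state as the very last step.
 */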
1159
1160 static void tg3_link_report(struct tg3 *tp)
1161 {
1162         if (!netif_carrier_ok(tp->dev)) {
1163                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1164         } else {
1165                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1166                        tp->dev->name,
1167                        (tp->link_config.active_speed == SPEED_1000 ?
1168                         1000 :
1169                         (tp->link_config.active_speed == SPEED_100 ?
1170                          100 : 10)),
1171                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1172                         "full" : "half"));
1173
1174                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1175                        "%s for RX.\n",
1176                        tp->dev->name,
1177                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1178                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1179         }
1180 }
1181
1182 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1183 {
1184         u32 new_tg3_flags = 0;
1185         u32 old_rx_mode = tp->rx_mode;
1186         u32 old_tx_mode = tp->tx_mode;
1187
1188         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1189                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1190                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1191                                 if (remote_adv & LPA_PAUSE_CAP)
1192                                         new_tg3_flags |=
1193                                                 (TG3_FLAG_RX_PAUSE |
1194                                                 TG3_FLAG_TX_PAUSE);
1195                                 else if (remote_adv & LPA_PAUSE_ASYM)
1196                                         new_tg3_flags |=
1197                                                 (TG3_FLAG_RX_PAUSE);
1198                         } else {
1199                                 if (remote_adv & LPA_PAUSE_CAP)
1200                                         new_tg3_flags |=
1201                                                 (TG3_FLAG_RX_PAUSE |
1202                                                 TG3_FLAG_TX_PAUSE);
1203                         }
1204                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1205                         if ((remote_adv & LPA_PAUSE_CAP) &&
1206                         (remote_adv & LPA_PAUSE_ASYM))
1207                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1208                 }
1209
1210                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1211                 tp->tg3_flags |= new_tg3_flags;
1212         } else {
1213                 new_tg3_flags = tp->tg3_flags;
1214         }
1215
1216         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1217                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1218         else
1219                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1220
1221         if (old_rx_mode != tp->rx_mode) {
1222                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1223         }
1224         
1225         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1226                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1227         else
1228                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1229
1230         if (old_tx_mode != tp->tx_mode) {
1231                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1232         }
1233 }
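/* The logic above is the usual 802.3 annex 28B pause resolution,
 * driven by the PAUSE/ASYM bits each side advertised:
 *
 *	local PAUSE      + remote PAUSE            -> RX and TX pause
 *	local PAUSE+ASYM + remote ASYM only        -> RX pause only
 *	local ASYM only  + remote PAUSE+ASYM       -> TX pause only
 *	anything else                              -> no pause frames
 *
 * With TG3_FLAG_PAUSE_AUTONEG off, whatever RX/TX pause flags were set
 * elsewhere (e.g. from ethtool) are simply left as they are.
 */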
1234
1235 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1236 {
1237         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1238         case MII_TG3_AUX_STAT_10HALF:
1239                 *speed = SPEED_10;
1240                 *duplex = DUPLEX_HALF;
1241                 break;
1242
1243         case MII_TG3_AUX_STAT_10FULL:
1244                 *speed = SPEED_10;
1245                 *duplex = DUPLEX_FULL;
1246                 break;
1247
1248         case MII_TG3_AUX_STAT_100HALF:
1249                 *speed = SPEED_100;
1250                 *duplex = DUPLEX_HALF;
1251                 break;
1252
1253         case MII_TG3_AUX_STAT_100FULL:
1254                 *speed = SPEED_100;
1255                 *duplex = DUPLEX_FULL;
1256                 break;
1257
1258         case MII_TG3_AUX_STAT_1000HALF:
1259                 *speed = SPEED_1000;
1260                 *duplex = DUPLEX_HALF;
1261                 break;
1262
1263         case MII_TG3_AUX_STAT_1000FULL:
1264                 *speed = SPEED_1000;
1265                 *duplex = DUPLEX_FULL;
1266                 break;
1267
1268         default:
1269                 *speed = SPEED_INVALID;
1270                 *duplex = DUPLEX_INVALID;
1271                 break;
1272         };
1273 }
1274
1275 static void tg3_phy_copper_begin(struct tg3 *tp)
1276 {
1277         u32 new_adv;
1278         int i;
1279
1280         if (tp->link_config.phy_is_low_power) {
1281                 /* Entering low power mode.  Disable gigabit and
1282                  * 100baseT advertisements.
1283                  */
1284                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1285
1286                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1287                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1288                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1289                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1290
1291                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1292         } else if (tp->link_config.speed == SPEED_INVALID) {
1293                 tp->link_config.advertising =
1294                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1295                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1296                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1297                          ADVERTISED_Autoneg | ADVERTISED_MII);
1298
1299                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1300                         tp->link_config.advertising &=
1301                                 ~(ADVERTISED_1000baseT_Half |
1302                                   ADVERTISED_1000baseT_Full);
1303
1304                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1305                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1306                         new_adv |= ADVERTISE_10HALF;
1307                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1308                         new_adv |= ADVERTISE_10FULL;
1309                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1310                         new_adv |= ADVERTISE_100HALF;
1311                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1312                         new_adv |= ADVERTISE_100FULL;
1313                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1314
1315                 if (tp->link_config.advertising &
1316                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1317                         new_adv = 0;
1318                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1319                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1320                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1321                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1322                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1323                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1324                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1325                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1326                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1327                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1328                 } else {
1329                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1330                 }
1331         } else {
1332                 /* Asking for a specific link mode. */
1333                 if (tp->link_config.speed == SPEED_1000) {
1334                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1335                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1336
1337                         if (tp->link_config.duplex == DUPLEX_FULL)
1338                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1339                         else
1340                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1341                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1342                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1343                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1344                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1345                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1346                 } else {
1347                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1348
1349                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1350                         if (tp->link_config.speed == SPEED_100) {
1351                                 if (tp->link_config.duplex == DUPLEX_FULL)
1352                                         new_adv |= ADVERTISE_100FULL;
1353                                 else
1354                                         new_adv |= ADVERTISE_100HALF;
1355                         } else {
1356                                 if (tp->link_config.duplex == DUPLEX_FULL)
1357                                         new_adv |= ADVERTISE_10FULL;
1358                                 else
1359                                         new_adv |= ADVERTISE_10HALF;
1360                         }
1361                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1362                 }
1363         }
1364
1365         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1366             tp->link_config.speed != SPEED_INVALID) {
1367                 u32 bmcr, orig_bmcr;
1368
1369                 tp->link_config.active_speed = tp->link_config.speed;
1370                 tp->link_config.active_duplex = tp->link_config.duplex;
1371
1372                 bmcr = 0;
1373                 switch (tp->link_config.speed) {
1374                 default:
1375                 case SPEED_10:
1376                         break;
1377
1378                 case SPEED_100:
1379                         bmcr |= BMCR_SPEED100;
1380                         break;
1381
1382                 case SPEED_1000:
1383                         bmcr |= TG3_BMCR_SPEED1000;
1384                         break;
1385                 }
1386
1387                 if (tp->link_config.duplex == DUPLEX_FULL)
1388                         bmcr |= BMCR_FULLDPLX;
1389
1390                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1391                     (bmcr != orig_bmcr)) {
1392                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1393                         for (i = 0; i < 1500; i++) {
1394                                 u32 tmp;
1395
1396                                 udelay(10);
1397                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1398                                     tg3_readphy(tp, MII_BMSR, &tmp))
1399                                         continue;
1400                                 if (!(tmp & BMSR_LSTATUS)) {
1401                                         udelay(40);
1402                                         break;
1403                                 }
1404                         }
1405                         tg3_writephy(tp, MII_BMCR, bmcr);
1406                         udelay(40);
1407                 }
1408         } else {
1409                 tg3_writephy(tp, MII_BMCR,
1410                              BMCR_ANENABLE | BMCR_ANRESTART);
1411         }
1412 }
1413
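/* Apply the BCM5401 PHY DSP workaround: turn off tap power management,
 * set the extended packet length bit, and load the workaround values
 * through the DSP address/data register pair.
 */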
1414 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1415 {
1416         int err;
1417
1418         /* Turn off tap power management. */
1419         /* Set Extended packet length bit */
1420         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1421
1422         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1423         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1424
1425         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1426         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1427
1428         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1429         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1430
1431         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1432         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1433
1434         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1435         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1436
1437         udelay(40);
1438
1439         return err;
1440 }
1441
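/* Return 1 if the PHY is currently advertising all 10/100 modes (and,
 * unless the board is 10/100-only, both 1000BASE-T modes as well),
 * 0 otherwise or if a register read fails.
 */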
1442 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1443 {
1444         u32 adv_reg, all_mask;
1445
1446         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1447                 return 0;
1448
1449         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1450                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1451         if ((adv_reg & all_mask) != all_mask)
1452                 return 0;
1453         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1454                 u32 tg3_ctrl;
1455
1456                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1457                         return 0;
1458
1459                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1460                             MII_TG3_CTRL_ADV_1000_FULL);
1461                 if ((tg3_ctrl & all_mask) != all_mask)
1462                         return 0;
1463         }
1464         return 1;
1465 }
1466
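/* Bring up (or re-establish) the link on a copper PHY: quiesce MAC
 * events, apply chip-specific PHY workarounds, poll BMSR for link,
 * derive the active speed/duplex from the AUX status register,
 * negotiate flow control, then program MAC_MODE and report any
 * carrier change.
 */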
1467 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1468 {
1469         int current_link_up;
1470         u32 bmsr, dummy;
1471         u16 current_speed;
1472         u8 current_duplex;
1473         int i, err;
1474
1475         tw32(MAC_EVENT, 0);
1476
1477         tw32_f(MAC_STATUS,
1478              (MAC_STATUS_SYNC_CHANGED |
1479               MAC_STATUS_CFG_CHANGED |
1480               MAC_STATUS_MI_COMPLETION |
1481               MAC_STATUS_LNKSTATE_CHANGED));
1482         udelay(40);
1483
1484         tp->mi_mode = MAC_MI_MODE_BASE;
1485         tw32_f(MAC_MI_MODE, tp->mi_mode);
1486         udelay(80);
1487
1488         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1489
1490         /* Some third-party PHYs need to be reset on link going
1491          * down.
1492          */
1493         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1494              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1495              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1496             netif_carrier_ok(tp->dev)) {
1497                 tg3_readphy(tp, MII_BMSR, &bmsr);
1498                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1499                     !(bmsr & BMSR_LSTATUS))
1500                         force_reset = 1;
1501         }
1502         if (force_reset)
1503                 tg3_phy_reset(tp);
1504
1505         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1506                 tg3_readphy(tp, MII_BMSR, &bmsr);
1507                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1508                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1509                         bmsr = 0;
1510
1511                 if (!(bmsr & BMSR_LSTATUS)) {
1512                         err = tg3_init_5401phy_dsp(tp);
1513                         if (err)
1514                                 return err;
1515
1516                         tg3_readphy(tp, MII_BMSR, &bmsr);
1517                         for (i = 0; i < 1000; i++) {
1518                                 udelay(10);
1519                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1520                                     (bmsr & BMSR_LSTATUS)) {
1521                                         udelay(40);
1522                                         break;
1523                                 }
1524                         }
1525
1526                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1527                             !(bmsr & BMSR_LSTATUS) &&
1528                             tp->link_config.active_speed == SPEED_1000) {
1529                                 err = tg3_phy_reset(tp);
1530                                 if (!err)
1531                                         err = tg3_init_5401phy_dsp(tp);
1532                                 if (err)
1533                                         return err;
1534                         }
1535                 }
1536         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1537                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1538                 /* 5701 {A0,B0} CRC bug workaround */
1539                 tg3_writephy(tp, 0x15, 0x0a75);
1540                 tg3_writephy(tp, 0x1c, 0x8c68);
1541                 tg3_writephy(tp, 0x1c, 0x8d68);
1542                 tg3_writephy(tp, 0x1c, 0x8c68);
1543         }
1544
1545         /* Clear pending interrupts... */
1546         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1547         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1548
1549         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1550                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1551         else
1552                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1553
1554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1556                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1557                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1558                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1559                 else
1560                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1561         }
1562
1563         current_link_up = 0;
1564         current_speed = SPEED_INVALID;
1565         current_duplex = DUPLEX_INVALID;
1566
1567         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1568                 u32 val;
1569
1570                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1571                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1572                 if (!(val & (1 << 10))) {
1573                         val |= (1 << 10);
1574                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1575                         goto relink;
1576                 }
1577         }
1578
1579         bmsr = 0;
1580         for (i = 0; i < 100; i++) {
1581                 tg3_readphy(tp, MII_BMSR, &bmsr);
1582                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1583                     (bmsr & BMSR_LSTATUS))
1584                         break;
1585                 udelay(40);
1586         }
1587
1588         if (bmsr & BMSR_LSTATUS) {
1589                 u32 aux_stat, bmcr;
1590
1591                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1592                 for (i = 0; i < 2000; i++) {
1593                         udelay(10);
1594                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1595                             aux_stat)
1596                                 break;
1597                 }
1598
1599                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1600                                              &current_speed,
1601                                              &current_duplex);
1602
1603                 bmcr = 0;
1604                 for (i = 0; i < 200; i++) {
1605                         tg3_readphy(tp, MII_BMCR, &bmcr);
1606                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1607                                 continue;
1608                         if (bmcr && bmcr != 0x7fff)
1609                                 break;
1610                         udelay(10);
1611                 }
1612
1613                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1614                         if (bmcr & BMCR_ANENABLE) {
1615                                 current_link_up = 1;
1616
1617                                 /* Force autoneg restart if we are exiting
1618                                  * low power mode.
1619                                  */
1620                                 if (!tg3_copper_is_advertising_all(tp))
1621                                         current_link_up = 0;
1622                         } else {
1623                                 current_link_up = 0;
1624                         }
1625                 } else {
1626                         if (!(bmcr & BMCR_ANENABLE) &&
1627                             tp->link_config.speed == current_speed &&
1628                             tp->link_config.duplex == current_duplex) {
1629                                 current_link_up = 1;
1630                         } else {
1631                                 current_link_up = 0;
1632                         }
1633                 }
1634
1635                 tp->link_config.active_speed = current_speed;
1636                 tp->link_config.active_duplex = current_duplex;
1637         }
1638
1639         if (current_link_up == 1 &&
1640             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1641             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1642                 u32 local_adv, remote_adv;
1643
1644                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1645                         local_adv = 0;
1646                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1647
1648                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1649                         remote_adv = 0;
1650
1651                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1652
1653                 /* If we are not advertising full pause capability,
1654                  * something is wrong.  Bring the link down and reconfigure.
1655                  */
1656                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1657                         current_link_up = 0;
1658                 } else {
1659                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1660                 }
1661         }
1662 relink:
1663         if (current_link_up == 0) {
1664                 u32 tmp;
1665
1666                 tg3_phy_copper_begin(tp);
1667
1668                 tg3_readphy(tp, MII_BMSR, &tmp);
1669                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1670                     (tmp & BMSR_LSTATUS))
1671                         current_link_up = 1;
1672         }
1673
1674         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1675         if (current_link_up == 1) {
1676                 if (tp->link_config.active_speed == SPEED_100 ||
1677                     tp->link_config.active_speed == SPEED_10)
1678                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1679                 else
1680                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1681         } else
1682                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1683
1684         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1685         if (tp->link_config.active_duplex == DUPLEX_HALF)
1686                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1687
1688         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1690                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1691                     (current_link_up == 1 &&
1692                      tp->link_config.active_speed == SPEED_10))
1693                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1694         } else {
1695                 if (current_link_up == 1)
1696                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1697         }
1698
1699         /* ??? Without this setting Netgear GA302T PHY does not
1700          * ??? send/receive packets...
1701          */
1702         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1703             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1704                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1705                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1706                 udelay(80);
1707         }
1708
1709         tw32_f(MAC_MODE, tp->mac_mode);
1710         udelay(40);
1711
1712         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1713                 /* Polled via timer. */
1714                 tw32_f(MAC_EVENT, 0);
1715         } else {
1716                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1717         }
1718         udelay(40);
1719
1720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1721             current_link_up == 1 &&
1722             tp->link_config.active_speed == SPEED_1000 &&
1723             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1724              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1725                 udelay(120);
1726                 tw32_f(MAC_STATUS,
1727                      (MAC_STATUS_SYNC_CHANGED |
1728                       MAC_STATUS_CFG_CHANGED));
1729                 udelay(40);
1730                 tg3_write_mem(tp,
1731                               NIC_SRAM_FIRMWARE_MBOX,
1732                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1733         }
1734
1735         if (current_link_up != netif_carrier_ok(tp->dev)) {
1736                 if (current_link_up)
1737                         netif_carrier_on(tp->dev);
1738                 else
1739                         netif_carrier_off(tp->dev);
1740                 tg3_link_report(tp);
1741         }
1742
1743         return 0;
1744 }
1745
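/* Software autonegotiation state for fiber (1000BASE-X) links.  The
 * state machine below appears to follow the IEEE 802.3 clause 37
 * arbitration flow (ability detect, acknowledge detect, complete
 * acknowledge, idle detect) and is stepped from fiber_autoneg().
 */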
1746 struct tg3_fiber_aneginfo {
1747         int state;
1748 #define ANEG_STATE_UNKNOWN              0
1749 #define ANEG_STATE_AN_ENABLE            1
1750 #define ANEG_STATE_RESTART_INIT         2
1751 #define ANEG_STATE_RESTART              3
1752 #define ANEG_STATE_DISABLE_LINK_OK      4
1753 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1754 #define ANEG_STATE_ABILITY_DETECT       6
1755 #define ANEG_STATE_ACK_DETECT_INIT      7
1756 #define ANEG_STATE_ACK_DETECT           8
1757 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1758 #define ANEG_STATE_COMPLETE_ACK         10
1759 #define ANEG_STATE_IDLE_DETECT_INIT     11
1760 #define ANEG_STATE_IDLE_DETECT          12
1761 #define ANEG_STATE_LINK_OK              13
1762 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1763 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1764
1765         u32 flags;
1766 #define MR_AN_ENABLE            0x00000001
1767 #define MR_RESTART_AN           0x00000002
1768 #define MR_AN_COMPLETE          0x00000004
1769 #define MR_PAGE_RX              0x00000008
1770 #define MR_NP_LOADED            0x00000010
1771 #define MR_TOGGLE_TX            0x00000020
1772 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1773 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1774 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1775 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1776 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1777 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1778 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1779 #define MR_TOGGLE_RX            0x00002000
1780 #define MR_NP_RX                0x00004000
1781
1782 #define MR_LINK_OK              0x80000000
1783
1784         unsigned long link_time, cur_time;
1785
1786         u32 ability_match_cfg;
1787         int ability_match_count;
1788
1789         char ability_match, idle_match, ack_match;
1790
1791         u32 txconfig, rxconfig;
1792 #define ANEG_CFG_NP             0x00000080
1793 #define ANEG_CFG_ACK            0x00000040
1794 #define ANEG_CFG_RF2            0x00000020
1795 #define ANEG_CFG_RF1            0x00000010
1796 #define ANEG_CFG_PS2            0x00000001
1797 #define ANEG_CFG_PS1            0x00008000
1798 #define ANEG_CFG_HD             0x00004000
1799 #define ANEG_CFG_FD             0x00002000
1800 #define ANEG_CFG_INVAL          0x00001f06
1801
1802 };
1803 #define ANEG_OK         0
1804 #define ANEG_DONE       1
1805 #define ANEG_TIMER_ENAB 2
1806 #define ANEG_FAILED     -1
1807
1808 #define ANEG_STATE_SETTLE_TIME  10000
1809
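/* Advance the fiber autoneg state machine by one step.  Samples the
 * received config word from MAC_RX_AUTO_NEG, updates the ability/ack/
 * idle match tracking, and moves ap->state accordingly.  Returns
 * ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
 */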
1810 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1811                                    struct tg3_fiber_aneginfo *ap)
1812 {
1813         unsigned long delta;
1814         u32 rx_cfg_reg;
1815         int ret;
1816
1817         if (ap->state == ANEG_STATE_UNKNOWN) {
1818                 ap->rxconfig = 0;
1819                 ap->link_time = 0;
1820                 ap->cur_time = 0;
1821                 ap->ability_match_cfg = 0;
1822                 ap->ability_match_count = 0;
1823                 ap->ability_match = 0;
1824                 ap->idle_match = 0;
1825                 ap->ack_match = 0;
1826         }
1827         ap->cur_time++;
1828
1829         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1830                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1831
1832                 if (rx_cfg_reg != ap->ability_match_cfg) {
1833                         ap->ability_match_cfg = rx_cfg_reg;
1834                         ap->ability_match = 0;
1835                         ap->ability_match_count = 0;
1836                 } else {
1837                         if (++ap->ability_match_count > 1) {
1838                                 ap->ability_match = 1;
1839                                 ap->ability_match_cfg = rx_cfg_reg;
1840                         }
1841                 }
1842                 if (rx_cfg_reg & ANEG_CFG_ACK)
1843                         ap->ack_match = 1;
1844                 else
1845                         ap->ack_match = 0;
1846
1847                 ap->idle_match = 0;
1848         } else {
1849                 ap->idle_match = 1;
1850                 ap->ability_match_cfg = 0;
1851                 ap->ability_match_count = 0;
1852                 ap->ability_match = 0;
1853                 ap->ack_match = 0;
1854
1855                 rx_cfg_reg = 0;
1856         }
1857
1858         ap->rxconfig = rx_cfg_reg;
1859         ret = ANEG_OK;
1860
1861         switch (ap->state) {
1862         case ANEG_STATE_UNKNOWN:
1863                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1864                         ap->state = ANEG_STATE_AN_ENABLE;
1865
1866                 /* fallthru */
1867         case ANEG_STATE_AN_ENABLE:
1868                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1869                 if (ap->flags & MR_AN_ENABLE) {
1870                         ap->link_time = 0;
1871                         ap->cur_time = 0;
1872                         ap->ability_match_cfg = 0;
1873                         ap->ability_match_count = 0;
1874                         ap->ability_match = 0;
1875                         ap->idle_match = 0;
1876                         ap->ack_match = 0;
1877
1878                         ap->state = ANEG_STATE_RESTART_INIT;
1879                 } else {
1880                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1881                 }
1882                 break;
1883
1884         case ANEG_STATE_RESTART_INIT:
1885                 ap->link_time = ap->cur_time;
1886                 ap->flags &= ~(MR_NP_LOADED);
1887                 ap->txconfig = 0;
1888                 tw32(MAC_TX_AUTO_NEG, 0);
1889                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1890                 tw32_f(MAC_MODE, tp->mac_mode);
1891                 udelay(40);
1892
1893                 ret = ANEG_TIMER_ENAB;
1894                 ap->state = ANEG_STATE_RESTART;
1895
1896                 /* fallthru */
1897         case ANEG_STATE_RESTART:
1898                 delta = ap->cur_time - ap->link_time;
1899                 if (delta > ANEG_STATE_SETTLE_TIME) {
1900                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1901                 } else {
1902                         ret = ANEG_TIMER_ENAB;
1903                 }
1904                 break;
1905
1906         case ANEG_STATE_DISABLE_LINK_OK:
1907                 ret = ANEG_DONE;
1908                 break;
1909
1910         case ANEG_STATE_ABILITY_DETECT_INIT:
1911                 ap->flags &= ~(MR_TOGGLE_TX);
1912                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1913                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1914                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917
1918                 ap->state = ANEG_STATE_ABILITY_DETECT;
1919                 break;
1920
1921         case ANEG_STATE_ABILITY_DETECT:
1922                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1923                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1924                 }
1925                 break;
1926
1927         case ANEG_STATE_ACK_DETECT_INIT:
1928                 ap->txconfig |= ANEG_CFG_ACK;
1929                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1930                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1931                 tw32_f(MAC_MODE, tp->mac_mode);
1932                 udelay(40);
1933
1934                 ap->state = ANEG_STATE_ACK_DETECT;
1935
1936                 /* fallthru */
1937         case ANEG_STATE_ACK_DETECT:
1938                 if (ap->ack_match != 0) {
1939                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1940                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1941                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1942                         } else {
1943                                 ap->state = ANEG_STATE_AN_ENABLE;
1944                         }
1945                 } else if (ap->ability_match != 0 &&
1946                            ap->rxconfig == 0) {
1947                         ap->state = ANEG_STATE_AN_ENABLE;
1948                 }
1949                 break;
1950
1951         case ANEG_STATE_COMPLETE_ACK_INIT:
1952                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1953                         ret = ANEG_FAILED;
1954                         break;
1955                 }
1956                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1957                                MR_LP_ADV_HALF_DUPLEX |
1958                                MR_LP_ADV_SYM_PAUSE |
1959                                MR_LP_ADV_ASYM_PAUSE |
1960                                MR_LP_ADV_REMOTE_FAULT1 |
1961                                MR_LP_ADV_REMOTE_FAULT2 |
1962                                MR_LP_ADV_NEXT_PAGE |
1963                                MR_TOGGLE_RX |
1964                                MR_NP_RX);
1965                 if (ap->rxconfig & ANEG_CFG_FD)
1966                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1967                 if (ap->rxconfig & ANEG_CFG_HD)
1968                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1969                 if (ap->rxconfig & ANEG_CFG_PS1)
1970                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1971                 if (ap->rxconfig & ANEG_CFG_PS2)
1972                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1973                 if (ap->rxconfig & ANEG_CFG_RF1)
1974                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1975                 if (ap->rxconfig & ANEG_CFG_RF2)
1976                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1977                 if (ap->rxconfig & ANEG_CFG_NP)
1978                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1979
1980                 ap->link_time = ap->cur_time;
1981
1982                 ap->flags ^= (MR_TOGGLE_TX);
1983                 if (ap->rxconfig & 0x0008)
1984                         ap->flags |= MR_TOGGLE_RX;
1985                 if (ap->rxconfig & ANEG_CFG_NP)
1986                         ap->flags |= MR_NP_RX;
1987                 ap->flags |= MR_PAGE_RX;
1988
1989                 ap->state = ANEG_STATE_COMPLETE_ACK;
1990                 ret = ANEG_TIMER_ENAB;
1991                 break;
1992
1993         case ANEG_STATE_COMPLETE_ACK:
1994                 if (ap->ability_match != 0 &&
1995                     ap->rxconfig == 0) {
1996                         ap->state = ANEG_STATE_AN_ENABLE;
1997                         break;
1998                 }
1999                 delta = ap->cur_time - ap->link_time;
2000                 if (delta > ANEG_STATE_SETTLE_TIME) {
2001                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2002                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2003                         } else {
2004                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2005                                     !(ap->flags & MR_NP_RX)) {
2006                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2007                                 } else {
2008                                         ret = ANEG_FAILED;
2009                                 }
2010                         }
2011                 }
2012                 break;
2013
2014         case ANEG_STATE_IDLE_DETECT_INIT:
2015                 ap->link_time = ap->cur_time;
2016                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2017                 tw32_f(MAC_MODE, tp->mac_mode);
2018                 udelay(40);
2019
2020                 ap->state = ANEG_STATE_IDLE_DETECT;
2021                 ret = ANEG_TIMER_ENAB;
2022                 break;
2023
2024         case ANEG_STATE_IDLE_DETECT:
2025                 if (ap->ability_match != 0 &&
2026                     ap->rxconfig == 0) {
2027                         ap->state = ANEG_STATE_AN_ENABLE;
2028                         break;
2029                 }
2030                 delta = ap->cur_time - ap->link_time;
2031                 if (delta > ANEG_STATE_SETTLE_TIME) {
2032                         /* XXX another gem from the Broadcom driver :( */
2033                         ap->state = ANEG_STATE_LINK_OK;
2034                 }
2035                 break;
2036
2037         case ANEG_STATE_LINK_OK:
2038                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2039                 ret = ANEG_DONE;
2040                 break;
2041
2042         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2043                 /* ??? unimplemented */
2044                 break;
2045
2046         case ANEG_STATE_NEXT_PAGE_WAIT:
2047                 /* ??? unimplemented */
2048                 break;
2049
2050         default:
2051                 ret = ANEG_FAILED;
2052                 break;
2053         }
2054
2055         return ret;
2056 }
2057
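/* Drive the software fiber autoneg state machine, stepping it roughly
 * once per microsecond for up to ~195ms.  The negotiated MR_* flags are
 * passed back through *flags; the return value is nonzero when
 * negotiation completed successfully.
 */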
2058 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2059 {
2060         int res = 0;
2061         struct tg3_fiber_aneginfo aninfo;
2062         int status = ANEG_FAILED;
2063         unsigned int tick;
2064         u32 tmp;
2065
2066         tw32_f(MAC_TX_AUTO_NEG, 0);
2067
2068         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2069         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2070         udelay(40);
2071
2072         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2073         udelay(40);
2074
2075         memset(&aninfo, 0, sizeof(aninfo));
2076         aninfo.flags |= MR_AN_ENABLE;
2077         aninfo.state = ANEG_STATE_UNKNOWN;
2078         aninfo.cur_time = 0;
2079         tick = 0;
2080         while (++tick < 195000) {
2081                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2082                 if (status == ANEG_DONE || status == ANEG_FAILED)
2083                         break;
2084
2085                 udelay(1);
2086         }
2087
2088         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2089         tw32_f(MAC_MODE, tp->mac_mode);
2090         udelay(40);
2091
2092         *flags = aninfo.flags;
2093
2094         if (status == ANEG_DONE &&
2095             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2096                              MR_LP_ADV_FULL_DUPLEX)))
2097                 res = 1;
2098
2099         return res;
2100 }
2101
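/* Initialize the BCM8002 SerDes PHY: set the PLL lock range, soft-reset
 * the device, program the PMA/channel registers, pulse POR, and finally
 * reselect the default register page so the PHY ID can be read later.
 * Skipped unless this is the first initialization or the link is up.
 */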
2102 static void tg3_init_bcm8002(struct tg3 *tp)
2103 {
2104         u32 mac_status = tr32(MAC_STATUS);
2105         int i;
2106
2107         /* Reset when initializing for the first time or when we have a link. */
2108         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2109             !(mac_status & MAC_STATUS_PCS_SYNCED))
2110                 return;
2111
2112         /* Set PLL lock range. */
2113         tg3_writephy(tp, 0x16, 0x8007);
2114
2115         /* SW reset */
2116         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2117
2118         /* Wait for reset to complete. */
2119         /* XXX schedule_timeout() ... */
2120         for (i = 0; i < 500; i++)
2121                 udelay(10);
2122
2123         /* Config mode; select PMA/Ch 1 regs. */
2124         tg3_writephy(tp, 0x10, 0x8411);
2125
2126         /* Enable auto-lock and comdet, select txclk for tx. */
2127         tg3_writephy(tp, 0x11, 0x0a10);
2128
2129         tg3_writephy(tp, 0x18, 0x00a0);
2130         tg3_writephy(tp, 0x16, 0x41ff);
2131
2132         /* Assert and deassert POR. */
2133         tg3_writephy(tp, 0x13, 0x0400);
2134         udelay(40);
2135         tg3_writephy(tp, 0x13, 0x0000);
2136
2137         tg3_writephy(tp, 0x11, 0x0a50);
2138         udelay(40);
2139         tg3_writephy(tp, 0x11, 0x0a10);
2140
2141         /* Wait for signal to stabilize */
2142         /* XXX schedule_timeout() ... */
2143         for (i = 0; i < 15000; i++)
2144                 udelay(10);
2145
2146         /* Deselect the channel register so we can read the PHYID
2147          * later.
2148          */
2149         tg3_writephy(tp, 0x10, 0x8011);
2150 }
2151
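/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Programs SG_DIG_CTRL (with a serdes_cfg workaround on chips other
 * than 5704 A0/A1) for the requested mode, waits for negotiation or
 * parallel detection, and returns 1 if the link came up.
 */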
2152 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2153 {
2154         u32 sg_dig_ctrl, sg_dig_status;
2155         u32 serdes_cfg, expected_sg_dig_ctrl;
2156         int workaround, port_a;
2157         int current_link_up;
2158
2159         serdes_cfg = 0;
2160         expected_sg_dig_ctrl = 0;
2161         workaround = 0;
2162         port_a = 1;
2163         current_link_up = 0;
2164
2165         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2166             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2167                 workaround = 1;
2168                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2169                         port_a = 0;
2170
2171                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2172                 /* preserve bits 20-23 for voltage regulator */
2173                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2174         }
2175
2176         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2177
2178         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2179                 if (sg_dig_ctrl & (1 << 31)) {
2180                         if (workaround) {
2181                                 u32 val = serdes_cfg;
2182
2183                                 if (port_a)
2184                                         val |= 0xc010000;
2185                                 else
2186                                         val |= 0x4010000;
2187                                 tw32_f(MAC_SERDES_CFG, val);
2188                         }
2189                         tw32_f(SG_DIG_CTRL, 0x01388400);
2190                 }
2191                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2192                         tg3_setup_flow_control(tp, 0, 0);
2193                         current_link_up = 1;
2194                 }
2195                 goto out;
2196         }
2197
2198         /* Want auto-negotiation.  */
2199         expected_sg_dig_ctrl = 0x81388400;
2200
2201         /* Pause capability */
2202         expected_sg_dig_ctrl |= (1 << 11);
2203
2204         /* Asymmetric pause */
2205         expected_sg_dig_ctrl |= (1 << 12);
2206
2207         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2208                 if (workaround)
2209                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2210                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2211                 udelay(5);
2212                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2213
2214                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2215         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2216                                  MAC_STATUS_SIGNAL_DET)) {
2217                 int i;
2218
2219                 /* Give the link time to negotiate (~200ms) */
2220                 for (i = 0; i < 40000; i++) {
2221                         sg_dig_status = tr32(SG_DIG_STATUS);
2222                         if (sg_dig_status & (0x3))
2223                                 break;
2224                         udelay(5);
2225                 }
2226                 mac_status = tr32(MAC_STATUS);
2227
2228                 if ((sg_dig_status & (1 << 1)) &&
2229                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2230                         u32 local_adv, remote_adv;
2231
2232                         local_adv = ADVERTISE_PAUSE_CAP;
2233                         remote_adv = 0;
2234                         if (sg_dig_status & (1 << 19))
2235                                 remote_adv |= LPA_PAUSE_CAP;
2236                         if (sg_dig_status & (1 << 20))
2237                                 remote_adv |= LPA_PAUSE_ASYM;
2238
2239                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2240                         current_link_up = 1;
2241                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2242                 } else if (!(sg_dig_status & (1 << 1))) {
2243                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2244                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2245                         else {
2246                                 if (workaround) {
2247                                         u32 val = serdes_cfg;
2248
2249                                         if (port_a)
2250                                                 val |= 0xc010000;
2251                                         else
2252                                                 val |= 0x4010000;
2253
2254                                         tw32_f(MAC_SERDES_CFG, val);
2255                                 }
2256
2257                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2258                                 udelay(40);
2259
2260                                 /* Link parallel detection - link is up
2261                                  * only if we have PCS_SYNC and not
2262                                  * receiving config code words. */
2263                                 mac_status = tr32(MAC_STATUS);
2264                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2265                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2266                                         tg3_setup_flow_control(tp, 0, 0);
2267                                         current_link_up = 1;
2268                                 }
2269                         }
2270                 }
2271         }
2272
2273 out:
2274         return current_link_up;
2275 }
2276
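/* Fiber link setup without the hardware autoneg block: run the software
 * autoneg state machine and wait for the MAC status to settle, or simply
 * force a 1000FD link when autoneg is disabled.  Returns 1 if the link
 * is up.
 */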
2277 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2278 {
2279         int current_link_up = 0;
2280
2281         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2282                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2283                 goto out;
2284         }
2285
2286         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2287                 u32 flags;
2288                 int i;
2289   
2290                 if (fiber_autoneg(tp, &flags)) {
2291                         u32 local_adv, remote_adv;
2292
2293                         local_adv = ADVERTISE_PAUSE_CAP;
2294                         remote_adv = 0;
2295                         if (flags & MR_LP_ADV_SYM_PAUSE)
2296                                 remote_adv |= LPA_PAUSE_CAP;
2297                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2298                                 remote_adv |= LPA_PAUSE_ASYM;
2299
2300                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2301
2302                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2303                         current_link_up = 1;
2304                 }
2305                 for (i = 0; i < 30; i++) {
2306                         udelay(20);
2307                         tw32_f(MAC_STATUS,
2308                                (MAC_STATUS_SYNC_CHANGED |
2309                                 MAC_STATUS_CFG_CHANGED));
2310                         udelay(40);
2311                         if ((tr32(MAC_STATUS) &
2312                              (MAC_STATUS_SYNC_CHANGED |
2313                               MAC_STATUS_CFG_CHANGED)) == 0)
2314                                 break;
2315                 }
2316
2317                 mac_status = tr32(MAC_STATUS);
2318                 if (current_link_up == 0 &&
2319                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2320                     !(mac_status & MAC_STATUS_RCVD_CFG))
2321                         current_link_up = 1;
2322         } else {
2323                 /* Forcing 1000FD link up. */
2324                 current_link_up = 1;
2325                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2326
2327                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2328                 udelay(40);
2329         }
2330
2331 out:
2332         return current_link_up;
2333 }
2334
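/* Top-level link setup for fiber boards.  Returns early if an already
 * established link is still healthy, otherwise switches the MAC to TBI
 * mode, runs hardware or software autoneg as appropriate, updates the
 * link LEDs and reports carrier or flow-control changes.
 */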
2335 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2336 {
2337         u32 orig_pause_cfg;
2338         u16 orig_active_speed;
2339         u8 orig_active_duplex;
2340         u32 mac_status;
2341         int current_link_up;
2342         int i;
2343
2344         orig_pause_cfg =
2345                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2346                                   TG3_FLAG_TX_PAUSE));
2347         orig_active_speed = tp->link_config.active_speed;
2348         orig_active_duplex = tp->link_config.active_duplex;
2349
2350         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2351             netif_carrier_ok(tp->dev) &&
2352             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2353                 mac_status = tr32(MAC_STATUS);
2354                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2355                                MAC_STATUS_SIGNAL_DET |
2356                                MAC_STATUS_CFG_CHANGED |
2357                                MAC_STATUS_RCVD_CFG);
2358                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2359                                    MAC_STATUS_SIGNAL_DET)) {
2360                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2361                                             MAC_STATUS_CFG_CHANGED));
2362                         return 0;
2363                 }
2364         }
2365
2366         tw32_f(MAC_TX_AUTO_NEG, 0);
2367
2368         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2369         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2370         tw32_f(MAC_MODE, tp->mac_mode);
2371         udelay(40);
2372
2373         if (tp->phy_id == PHY_ID_BCM8002)
2374                 tg3_init_bcm8002(tp);
2375
2376         /* Enable link change event even when serdes polling.  */
2377         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2378         udelay(40);
2379
2380         current_link_up = 0;
2381         mac_status = tr32(MAC_STATUS);
2382
2383         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2384                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2385         else
2386                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2387
2388         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2389         tw32_f(MAC_MODE, tp->mac_mode);
2390         udelay(40);
2391
2392         tp->hw_status->status =
2393                 (SD_STATUS_UPDATED |
2394                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2395
2396         for (i = 0; i < 100; i++) {
2397                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2398                                     MAC_STATUS_CFG_CHANGED));
2399                 udelay(5);
2400                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2401                                          MAC_STATUS_CFG_CHANGED)) == 0)
2402                         break;
2403         }
2404
2405         mac_status = tr32(MAC_STATUS);
2406         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2407                 current_link_up = 0;
2408                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2409                         tw32_f(MAC_MODE, (tp->mac_mode |
2410                                           MAC_MODE_SEND_CONFIGS));
2411                         udelay(1);
2412                         tw32_f(MAC_MODE, tp->mac_mode);
2413                 }
2414         }
2415
2416         if (current_link_up == 1) {
2417                 tp->link_config.active_speed = SPEED_1000;
2418                 tp->link_config.active_duplex = DUPLEX_FULL;
2419                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2420                                     LED_CTRL_LNKLED_OVERRIDE |
2421                                     LED_CTRL_1000MBPS_ON));
2422         } else {
2423                 tp->link_config.active_speed = SPEED_INVALID;
2424                 tp->link_config.active_duplex = DUPLEX_INVALID;
2425                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2426                                     LED_CTRL_LNKLED_OVERRIDE |
2427                                     LED_CTRL_TRAFFIC_OVERRIDE));
2428         }
2429
2430         if (current_link_up != netif_carrier_ok(tp->dev)) {
2431                 if (current_link_up)
2432                         netif_carrier_on(tp->dev);
2433                 else
2434                         netif_carrier_off(tp->dev);
2435                 tg3_link_report(tp);
2436         } else {
2437                 u32 now_pause_cfg =
2438                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2439                                          TG3_FLAG_TX_PAUSE);
2440                 if (orig_pause_cfg != now_pause_cfg ||
2441                     orig_active_speed != tp->link_config.active_speed ||
2442                     orig_active_duplex != tp->link_config.active_duplex)
2443                         tg3_link_report(tp);
2444         }
2445
2446         return 0;
2447 }
2448
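/* Dispatch link setup to the fiber or copper handler, then program the
 * transmit IPG/slot-time for the resulting speed/duplex and, on chips
 * without the 5705_PLUS flag, enable or disable statistics coalescing
 * depending on whether a carrier is present.
 */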
2449 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2450 {
2451         int err;
2452
2453         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2454                 err = tg3_setup_fiber_phy(tp, force_reset);
2455         } else {
2456                 err = tg3_setup_copper_phy(tp, force_reset);
2457         }
2458
2459         if (tp->link_config.active_speed == SPEED_1000 &&
2460             tp->link_config.active_duplex == DUPLEX_HALF)
2461                 tw32(MAC_TX_LENGTHS,
2462                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2463                       (6 << TX_LENGTHS_IPG_SHIFT) |
2464                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2465         else
2466                 tw32(MAC_TX_LENGTHS,
2467                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2468                       (6 << TX_LENGTHS_IPG_SHIFT) |
2469                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2470
2471         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2472                 if (netif_carrier_ok(tp->dev)) {
2473                         tw32(HOSTCC_STAT_COAL_TICKS,
2474                              DEFAULT_STAT_COAL_TICKS);
2475                 } else {
2476                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2477                 }
2478         }
2479
2480         return err;
2481 }
2482
2483 /* Tigon3 never reports partial packet sends.  So we do not
2484  * need special logic to handle SKBs that have not had all
2485  * of their frags sent yet, like SunGEM does.
2486  */
2487 static void tg3_tx(struct tg3 *tp)
2488 {
2489         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2490         u32 sw_idx = tp->tx_cons;
2491
2492         while (sw_idx != hw_idx) {
2493                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2494                 struct sk_buff *skb = ri->skb;
2495                 int i;
2496
2497                 if (unlikely(skb == NULL))
2498                         BUG();
2499
2500                 pci_unmap_single(tp->pdev,
2501                                  pci_unmap_addr(ri, mapping),
2502                                  skb_headlen(skb),
2503                                  PCI_DMA_TODEVICE);
2504
2505                 ri->skb = NULL;
2506
2507                 sw_idx = NEXT_TX(sw_idx);
2508
2509                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2510                         if (unlikely(sw_idx == hw_idx))
2511                                 BUG();
2512
2513                         ri = &tp->tx_buffers[sw_idx];
2514                         if (unlikely(ri->skb != NULL))
2515                                 BUG();
2516
2517                         pci_unmap_page(tp->pdev,
2518                                        pci_unmap_addr(ri, mapping),
2519                                        skb_shinfo(skb)->frags[i].size,
2520                                        PCI_DMA_TODEVICE);
2521
2522                         sw_idx = NEXT_TX(sw_idx);
2523                 }
2524
2525                 dev_kfree_skb_irq(skb);
2526         }
2527
2528         tp->tx_cons = sw_idx;
2529
2530         if (netif_queue_stopped(tp->dev) &&
2531             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2532                 netif_wake_queue(tp->dev);
2533 }
2534
2535 /* Returns size of skb allocated or < 0 on error.
2536  *
2537  * We only need to fill in the address because the other members
2538  * of the RX descriptor are invariant, see tg3_init_rings.
2539  *
2540  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2541  * posting buffers we only dirty the first cache line of the RX
2542  * descriptor (containing the address).  Whereas for the RX status
2543  * buffers the cpu only reads the last cacheline of the RX descriptor
2544  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2545  */
2546 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2547                             int src_idx, u32 dest_idx_unmasked)
2548 {
2549         struct tg3_rx_buffer_desc *desc;
2550         struct ring_info *map, *src_map;
2551         struct sk_buff *skb;
2552         dma_addr_t mapping;
2553         int skb_size, dest_idx;
2554
2555         src_map = NULL;
2556         switch (opaque_key) {
2557         case RXD_OPAQUE_RING_STD:
2558                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2559                 desc = &tp->rx_std[dest_idx];
2560                 map = &tp->rx_std_buffers[dest_idx];
2561                 if (src_idx >= 0)
2562                         src_map = &tp->rx_std_buffers[src_idx];
2563                 skb_size = RX_PKT_BUF_SZ;
2564                 break;
2565
2566         case RXD_OPAQUE_RING_JUMBO:
2567                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2568                 desc = &tp->rx_jumbo[dest_idx];
2569                 map = &tp->rx_jumbo_buffers[dest_idx];
2570                 if (src_idx >= 0)
2571                         src_map = &tp->rx_jumbo_buffers[src_idx];
2572                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2573                 break;
2574
2575         default:
2576                 return -EINVAL;
2577         }
2578
2579         /* Do not overwrite any of the map or rp information
2580          * until we are sure we can commit to a new buffer.
2581          *
2582          * Callers depend upon this behavior and assume that
2583          * we leave everything unchanged if we fail.
2584          */
2585         skb = dev_alloc_skb(skb_size);
2586         if (skb == NULL)
2587                 return -ENOMEM;
2588
2589         skb->dev = tp->dev;
2590         skb_reserve(skb, tp->rx_offset);
2591
2592         mapping = pci_map_single(tp->pdev, skb->data,
2593                                  skb_size - tp->rx_offset,
2594                                  PCI_DMA_FROMDEVICE);
2595
2596         map->skb = skb;
2597         pci_unmap_addr_set(map, mapping, mapping);
2598
2599         if (src_map != NULL)
2600                 src_map->skb = NULL;
2601
2602         desc->addr_hi = ((u64)mapping >> 32);
2603         desc->addr_lo = ((u64)mapping & 0xffffffff);
2604
2605         return skb_size;
2606 }
2607
2608 /* We only need to move over in the address because the other
2609  * members of the RX descriptor are invariant.  See notes above
2610  * tg3_alloc_rx_skb for full details.
2611  */
2612 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2613                            int src_idx, u32 dest_idx_unmasked)
2614 {
2615         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2616         struct ring_info *src_map, *dest_map;
2617         int dest_idx;
2618
2619         switch (opaque_key) {
2620         case RXD_OPAQUE_RING_STD:
2621                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2622                 dest_desc = &tp->rx_std[dest_idx];
2623                 dest_map = &tp->rx_std_buffers[dest_idx];
2624                 src_desc = &tp->rx_std[src_idx];
2625                 src_map = &tp->rx_std_buffers[src_idx];
2626                 break;
2627
2628         case RXD_OPAQUE_RING_JUMBO:
2629                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2630                 dest_desc = &tp->rx_jumbo[dest_idx];
2631                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2632                 src_desc = &tp->rx_jumbo[src_idx];
2633                 src_map = &tp->rx_jumbo_buffers[src_idx];
2634                 break;
2635
2636         default:
2637                 return;
2638         }
2639
2640         dest_map->skb = src_map->skb;
2641         pci_unmap_addr_set(dest_map, mapping,
2642                            pci_unmap_addr(src_map, mapping));
2643         dest_desc->addr_hi = src_desc->addr_hi;
2644         dest_desc->addr_lo = src_desc->addr_lo;
2645
2646         src_map->skb = NULL;
2647 }
2648
2649 #if TG3_VLAN_TAG_USED
2650 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2651 {
2652         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2653 }
2654 #endif
2655
2656 /* The RX ring scheme is composed of multiple rings which post fresh
2657  * buffers to the chip, and one special ring the chip uses to report
2658  * status back to the host.
2659  *
2660  * The special ring reports the status of received packets to the
2661  * host.  The chip does not write into the original descriptor the
2662  * RX buffer was obtained from.  The chip simply takes the original
2663  * descriptor as provided by the host, updates the status and length
2664  * field, then writes this into the next status ring entry.
2665  *
2666  * Each ring the host uses to post buffers to the chip is described
2667  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
2668  * it is first placed into the on-chip ram.  When the packet's length
2669  * is known, it walks down the TG3_BDINFO entries to select the ring.
2670  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2671  * which is within the range of the new packet's length is chosen.
2672  *
2673  * The "separate ring for rx status" scheme may sound queer, but it makes
2674  * sense from a cache coherency perspective.  If only the host writes
2675  * to the buffer post rings, and only the chip writes to the rx status
2676  * rings, then cache lines never move beyond shared-modified state.
2677  * If both the host and chip were to write into the same ring, cache line
2678  * eviction could occur since both entities want it in an exclusive state.
2679  */
2680 static int tg3_rx(struct tg3 *tp, int budget)
2681 {
2682         u32 work_mask;
2683         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2684         u16 hw_idx, sw_idx;
2685         int received;
2686
2687         hw_idx = tp->hw_status->idx[0].rx_producer;
2688         /*
2689          * We need to order the read of hw_idx and the read of
2690          * the opaque cookie.
2691          */
2692         rmb();
2693         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2694         work_mask = 0;
2695         received = 0;
2696         while (sw_idx != hw_idx && budget > 0) {
2697                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2698                 unsigned int len;
2699                 struct sk_buff *skb;
2700                 dma_addr_t dma_addr;
2701                 u32 opaque_key, desc_idx, *post_ptr;
2702
2703                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2704                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2705                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2706                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2707                                                   mapping);
2708                         skb = tp->rx_std_buffers[desc_idx].skb;
2709                         post_ptr = &tp->rx_std_ptr;
2710                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2711                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2712                                                   mapping);
2713                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2714                         post_ptr = &tp->rx_jumbo_ptr;
2715                 }
2716                 else {
2717                         goto next_pkt_nopost;
2718                 }
2719
2720                 work_mask |= opaque_key;
2721
2722                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2723                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2724                 drop_it:
2725                         tg3_recycle_rx(tp, opaque_key,
2726                                        desc_idx, *post_ptr);
2727                 drop_it_no_recycle:
2728                         /* Other statistics kept track of by card. */
2729                         tp->net_stats.rx_dropped++;
2730                         goto next_pkt;
2731                 }
2732
2733                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2734
2735                 /* rx_offset != 2 iff this is a 5701 card running
2736                  * in PCI-X mode [see tg3_get_invariants()].
2737                  */
2738                 if (len > RX_COPY_THRESHOLD &&
2739                     tp->rx_offset == 2) {
2740                         int skb_size;
2741
2742                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2743                                                     desc_idx, *post_ptr);
2744                         if (skb_size < 0)
2745                                 goto drop_it;
2746
2747                         pci_unmap_single(tp->pdev, dma_addr,
2748                                          skb_size - tp->rx_offset,
2749                                          PCI_DMA_FROMDEVICE);
2750
2751                         skb_put(skb, len);
2752                 } else {
2753                         struct sk_buff *copy_skb;
2754
2755                         tg3_recycle_rx(tp, opaque_key,
2756                                        desc_idx, *post_ptr);
2757
2758                         copy_skb = dev_alloc_skb(len + 2);
2759                         if (copy_skb == NULL)
2760                                 goto drop_it_no_recycle;
2761
2762                         copy_skb->dev = tp->dev;
2763                         skb_reserve(copy_skb, 2);
2764                         skb_put(copy_skb, len);
2765                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2766                         memcpy(copy_skb->data, skb->data, len);
2767                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2768
2769                         /* We'll reuse the original ring buffer. */
2770                         skb = copy_skb;
2771                 }
2772
2773                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2774                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2775                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2776                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2777                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2778                 else
2779                         skb->ip_summed = CHECKSUM_NONE;
2780
2781                 skb->protocol = eth_type_trans(skb, tp->dev);
2782 #if TG3_VLAN_TAG_USED
2783                 if (tp->vlgrp != NULL &&
2784                     desc->type_flags & RXD_FLAG_VLAN) {
2785                         tg3_vlan_rx(tp, skb,
2786                                     desc->err_vlan & RXD_VLAN_MASK);
2787                 } else
2788 #endif
2789                         netif_receive_skb(skb);
2790
2791                 tp->dev->last_rx = jiffies;
2792                 received++;
2793                 budget--;
2794
2795 next_pkt:
2796                 (*post_ptr)++;
2797 next_pkt_nopost:
2798                 rx_rcb_ptr++;
2799                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2800         }
2801
2802         /* ACK the status ring. */
2803         tp->rx_rcb_ptr = rx_rcb_ptr;
2804         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2805                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2806
2807         /* Refill RX ring(s). */
2808         if (work_mask & RXD_OPAQUE_RING_STD) {
2809                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2810                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2811                              sw_idx);
2812         }
2813         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2814                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2815                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2816                              sw_idx);
2817         }
2818         mmiowb();
2819
2820         return received;
2821 }
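
/* Illustrative walk through the receive loop in tg3_rx() above, assuming a
 * 1024-entry return ring: if tp->rx_rcb_ptr is 1022 and the hardware
 * producer index is 2, the loop visits entries 1022, 1023, 0 and 1 and then
 * stops, since sw_idx is always rx_rcb_ptr modulo the ring size.  work_mask
 * records which posting rings (standard and/or jumbo) supplied buffers, so
 * only those producer mailboxes are refilled at the end.
 */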
2822
2823 static int tg3_poll(struct net_device *netdev, int *budget)
2824 {
2825         struct tg3 *tp = netdev_priv(netdev);
2826         struct tg3_hw_status *sblk = tp->hw_status;
2827         unsigned long flags;
2828         int done;
2829
2830         spin_lock_irqsave(&tp->lock, flags);
2831
2832         /* handle link change and other phy events */
2833         if (!(tp->tg3_flags &
2834               (TG3_FLAG_USE_LINKCHG_REG |
2835                TG3_FLAG_POLL_SERDES))) {
2836                 if (sblk->status & SD_STATUS_LINK_CHG) {
2837                         sblk->status = SD_STATUS_UPDATED |
2838                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2839                         tg3_setup_phy(tp, 0);
2840                 }
2841         }
2842
2843         /* run TX completion thread */
2844         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2845                 spin_lock(&tp->tx_lock);
2846                 tg3_tx(tp);
2847                 spin_unlock(&tp->tx_lock);
2848         }
2849
2850         spin_unlock_irqrestore(&tp->lock, flags);
2851
2852         /* run RX thread, within the bounds set by NAPI.
2853          * All RX "locking" is done by ensuring outside
2854          * code synchronizes with dev->poll()
2855          */
2856         done = 1;
2857         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2858                 int orig_budget = *budget;
2859                 int work_done;
2860
2861                 if (orig_budget > netdev->quota)
2862                         orig_budget = netdev->quota;
2863
2864                 work_done = tg3_rx(tp, orig_budget);
2865
2866                 *budget -= work_done;
2867                 netdev->quota -= work_done;
2868
2869                 if (work_done >= orig_budget)
2870                         done = 0;
2871         }
2872
2873         /* if no more work, tell net stack and NIC we're done */
2874         if (done) {
2875                 spin_lock_irqsave(&tp->lock, flags);
2876                 __netif_rx_complete(netdev);
2877                 tg3_restart_ints(tp);
2878                 spin_unlock_irqrestore(&tp->lock, flags);
2879         }
2880
2881         return (done ? 0 : 1);
2882 }
2883
2884 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2885 {
2886         struct tg3_hw_status *sblk = tp->hw_status;
2887         unsigned int work_exists = 0;
2888
2889         /* check for phy events */
2890         if (!(tp->tg3_flags &
2891               (TG3_FLAG_USE_LINKCHG_REG |
2892                TG3_FLAG_POLL_SERDES))) {
2893                 if (sblk->status & SD_STATUS_LINK_CHG)
2894                         work_exists = 1;
2895         }
2896         /* check for RX/TX work to do */
2897         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2898             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2899                 work_exists = 1;
2900
2901         return work_exists;
2902 }
2903
2904 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2905 {
2906         struct net_device *dev = dev_id;
2907         struct tg3 *tp = netdev_priv(dev);
2908         struct tg3_hw_status *sblk = tp->hw_status;
2909         unsigned long flags;
2910         unsigned int handled = 1;
2911
2912         spin_lock_irqsave(&tp->lock, flags);
2913
2914         /* In INTx mode, the interrupt may arrive at the CPU before the
2915          * status block posted prior to the interrupt has reached host memory.
2916          * Reading the PCI State register will confirm whether the
2917          * interrupt is ours and will flush the status block.
2918          */
2919         if ((sblk->status & SD_STATUS_UPDATED) ||
2920             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2921                 /*
2922                  * writing any value to intr-mbox-0 clears PCI INTA# and
2923                  * chip-internal interrupt pending events.
2924                  * writing non-zero to intr-mbox-0 additionally tells the
2925                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2926                  * event coalescing.
2927                  */
2928                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2929                              0x00000001);
2930                 /*
2931                  * Flush PCI write.  This also guarantees that our
2932                  * status block has been flushed to host memory.
2933                  */
2934                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2935                 sblk->status &= ~SD_STATUS_UPDATED;
2936
2937                 if (likely(tg3_has_work(dev, tp)))
2938                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2939                 else {
2940                         /* no work, shared interrupt perhaps?  re-enable
2941                          * interrupts, and flush that PCI write
2942                          */
2943                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2944                                 0x00000000);
2945                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2946                 }
2947         } else {        /* shared interrupt */
2948                 handled = 0;
2949         }
2950
2951         spin_unlock_irqrestore(&tp->lock, flags);
2952
2953         return IRQ_RETVAL(handled);
2954 }
2955
2956 static int tg3_init_hw(struct tg3 *);
2957 static int tg3_halt(struct tg3 *);
2958
2959 #ifdef CONFIG_NET_POLL_CONTROLLER
2960 static void tg3_poll_controller(struct net_device *dev)
2961 {
2962         tg3_interrupt(dev->irq, dev, NULL);
2963 }
2964 #endif
2965
2966 static void tg3_reset_task(void *_data)
2967 {
2968         struct tg3 *tp = _data;
2969         unsigned int restart_timer;
2970
2971         tg3_netif_stop(tp);
2972
2973         spin_lock_irq(&tp->lock);
2974         spin_lock(&tp->tx_lock);
2975
2976         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2977         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2978
2979         tg3_halt(tp);
2980         tg3_init_hw(tp);
2981
2982         tg3_netif_start(tp);
2983
2984         spin_unlock(&tp->tx_lock);
2985         spin_unlock_irq(&tp->lock);
2986
2987         if (restart_timer)
2988                 mod_timer(&tp->timer, jiffies + 1);
2989 }
2990
2991 static void tg3_tx_timeout(struct net_device *dev)
2992 {
2993         struct tg3 *tp = netdev_priv(dev);
2994
2995         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2996                dev->name);
2997
2998         schedule_work(&tp->reset_task);
2999 }
3000
3001 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3002
3003 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3004                                        u32 guilty_entry, int guilty_len,
3005                                        u32 last_plus_one, u32 *start, u32 mss)
3006 {
3007         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3008         dma_addr_t new_addr;
3009         u32 entry = *start;
3010         int i;
3011
3012         if (!new_skb) {
3013                 dev_kfree_skb(skb);
3014                 return -1;
3015         }
3016
3017         /* New SKB is guaranteed to be linear. */
3018         entry = *start;
3019         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3020                                   PCI_DMA_TODEVICE);
3021         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3022                     (skb->ip_summed == CHECKSUM_HW) ?
3023                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3024         *start = NEXT_TX(entry);
3025
3026         /* Now clean up the sw ring entries. */
3027         i = 0;
3028         while (entry != last_plus_one) {
3029                 int len;
3030
3031                 if (i == 0)
3032                         len = skb_headlen(skb);
3033                 else
3034                         len = skb_shinfo(skb)->frags[i-1].size;
3035                 pci_unmap_single(tp->pdev,
3036                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3037                                  len, PCI_DMA_TODEVICE);
3038                 if (i == 0) {
3039                         tp->tx_buffers[entry].skb = new_skb;
3040                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3041                 } else {
3042                         tp->tx_buffers[entry].skb = NULL;
3043                 }
3044                 entry = NEXT_TX(entry);
3045                 i++;
3046         }
3047
3048         dev_kfree_skb(skb);
3049
3050         return 0;
3051 }
3052
3053 static void tg3_set_txd(struct tg3 *tp, int entry,
3054                         dma_addr_t mapping, int len, u32 flags,
3055                         u32 mss_and_is_end)
3056 {
3057         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3058         int is_end = (mss_and_is_end & 0x1);
3059         u32 mss = (mss_and_is_end >> 1);
3060         u32 vlan_tag = 0;
3061
3062         if (is_end)
3063                 flags |= TXD_FLAG_END;
3064         if (flags & TXD_FLAG_VLAN) {
3065                 vlan_tag = flags >> 16;
3066                 flags &= 0xffff;
3067         }
3068         vlan_tag |= (mss << TXD_MSS_SHIFT);
3069
3070         txd->addr_hi = ((u64) mapping >> 32);
3071         txd->addr_lo = ((u64) mapping & 0xffffffff);
3072         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3073         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3074 }
3075
3076 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3077 {
3078         u32 base = (u32) mapping & 0xffffffff;
3079
3080         return ((base > 0xffffdcc0) &&
3081                 (base + len + 8 < base));
3082 }
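
/* Worked example, assuming a 1518-byte frame mapped at base = 0xffffff80:
 *
 *     base > 0xffffdcc0                                  -> true
 *     base + 1518 + 8 = 0x100000576, as u32: 0x576 < base -> true
 *
 * so the test above returns true and tg3_start_xmit() arranges for
 * tigon3_4gb_hwbug_workaround() to re-copy the packet into a freshly
 * mapped linear buffer.  A mapping at 0x7fffff80 fails the first check
 * and needs no workaround.
 */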
3083
3084 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3085 {
3086         struct tg3 *tp = netdev_priv(dev);
3087         dma_addr_t mapping;
3088         unsigned int i;
3089         u32 len, entry, base_flags, mss;
3090         int would_hit_hwbug;
3091         unsigned long flags;
3092
3093         len = skb_headlen(skb);
3094
3095         /* No BH disabling for tx_lock here.  We are running in BH disabled
3096          * context and TX reclaim runs via tp->poll inside of a software
3097          * interrupt.  Rejoice!
3098          *
3099          * Actually, things are not so simple.  If we are to take a hw
3100          * IRQ here, we can deadlock, consider:
3101          *
3102          *       CPU1           CPU2
3103          *   tg3_start_xmit
3104          *   take tp->tx_lock
3105          *                      tg3_timer
3106          *                      take tp->lock
3107          *   tg3_interrupt
3108          *   spin on tp->lock
3109          *                      spin on tp->tx_lock
3110          *
3111          * So we really do need to disable interrupts when taking
3112          * tx_lock here.
3113          */
3114         local_irq_save(flags);
3115         if (!spin_trylock(&tp->tx_lock)) { 
3116                 local_irq_restore(flags);
3117                 return NETDEV_TX_LOCKED; 
3118         } 
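        /* Note: NETDEV_TX_LOCKED tells the networking core that the driver's
         * private TX lock was contended and that the skb should be requeued
         * and retried; combined with disabling IRQs around tx_lock, this
         * avoids both spinning here and the lock-ordering deadlock described
         * in the comment above.
         */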
3119
3120         /* This is a hard error, log it. */
3121         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3122                 netif_stop_queue(dev);
3123                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3124                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3125                        dev->name);
3126                 return NETDEV_TX_BUSY;
3127         }
3128
3129         entry = tp->tx_prod;
3130         base_flags = 0;
3131         if (skb->ip_summed == CHECKSUM_HW)
3132                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3133 #if TG3_TSO_SUPPORT != 0
3134         mss = 0;
3135         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3136             (mss = skb_shinfo(skb)->tso_size) != 0) {
3137                 int tcp_opt_len, ip_tcp_len;
3138
3139                 if (skb_header_cloned(skb) &&
3140                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3141                         dev_kfree_skb(skb);
3142                         goto out_unlock;
3143                 }
3144
3145                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3146                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3147
3148                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3149                                TXD_FLAG_CPU_POST_DMA);
3150
3151                 skb->nh.iph->check = 0;
3152                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3153                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3154                         skb->h.th->check = 0;
3155                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3156                 }
3157                 else {
3158                         skb->h.th->check =
3159                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3160                                                    skb->nh.iph->daddr,
3161                                                    0, IPPROTO_TCP, 0);
3162                 }
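                /* Note: the csum_tcpudp_magic() call above seeds th->check
                 * with the pseudo-header checksum (saddr, daddr, IPPROTO_TCP,
                 * zero length) -- the usual TSO convention, so the hardware
                 * can fold the per-segment length and payload checksum into
                 * it.  On HW_TSO parts the branch above instead zeroes
                 * th->check and drops TXD_FLAG_TCPUDP_CSUM, since the chip is
                 * expected to compute the checksum entirely on its own.
                 */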
3163
3164                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3165                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3166                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3167                                 int tsflags;
3168
3169                                 tsflags = ((skb->nh.iph->ihl - 5) +
3170                                            (tcp_opt_len >> 2));
3171                                 mss |= (tsflags << 11);
3172                         }
3173                 } else {
3174                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3175                                 int tsflags;
3176
3177                                 tsflags = ((skb->nh.iph->ihl - 5) +
3178                                            (tcp_opt_len >> 2));
3179                                 base_flags |= tsflags << 12;
3180                         }
3181                 }
3182         }
3183 #else
3184         mss = 0;
3185 #endif
3186 #if TG3_VLAN_TAG_USED
3187         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3188                 base_flags |= (TXD_FLAG_VLAN |
3189                                (vlan_tx_tag_get(skb) << 16));
3190 #endif
3191
3192         /* Queue skb data, a.k.a. the main skb fragment. */
3193         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3194
3195         tp->tx_buffers[entry].skb = skb;
3196         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3197
3198         would_hit_hwbug = 0;
3199
3200         if (tg3_4g_overflow_test(mapping, len))
3201                 would_hit_hwbug = entry + 1;
3202
3203         tg3_set_txd(tp, entry, mapping, len, base_flags,
3204                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3205
3206         entry = NEXT_TX(entry);
3207
3208         /* Now loop through additional data fragments, and queue them. */
3209         if (skb_shinfo(skb)->nr_frags > 0) {
3210                 unsigned int i, last;
3211
3212                 last = skb_shinfo(skb)->nr_frags - 1;
3213                 for (i = 0; i <= last; i++) {
3214                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3215
3216                         len = frag->size;
3217                         mapping = pci_map_page(tp->pdev,
3218                                                frag->page,
3219                                                frag->page_offset,
3220                                                len, PCI_DMA_TODEVICE);
3221
3222                         tp->tx_buffers[entry].skb = NULL;
3223                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3224
3225                         if (tg3_4g_overflow_test(mapping, len)) {
3226                                 /* Only one should match. */
3227                                 if (would_hit_hwbug)
3228                                         BUG();
3229                                 would_hit_hwbug = entry + 1;
3230                         }
3231
3232                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3233                                 tg3_set_txd(tp, entry, mapping, len,
3234                                             base_flags, (i == last)|(mss << 1));
3235                         else
3236                                 tg3_set_txd(tp, entry, mapping, len,
3237                                             base_flags, (i == last));
3238
3239                         entry = NEXT_TX(entry);
3240                 }
3241         }
3242
3243         if (would_hit_hwbug) {
3244                 u32 last_plus_one = entry;
3245                 u32 start;
3246                 unsigned int len = 0;
3247
3248                 would_hit_hwbug -= 1;
3249                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3250                 entry &= (TG3_TX_RING_SIZE - 1);
3251                 start = entry;
3252                 i = 0;
3253                 while (entry != last_plus_one) {
3254                         if (i == 0)
3255                                 len = skb_headlen(skb);
3256                         else
3257                                 len = skb_shinfo(skb)->frags[i-1].size;
3258
3259                         if (entry == would_hit_hwbug)
3260                                 break;
3261
3262                         i++;
3263                         entry = NEXT_TX(entry);
3264
3265                 }
3266
3267                 /* If the workaround fails due to memory/mapping
3268                  * failure, silently drop this packet.
3269                  */
3270                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3271                                                 entry, len,
3272                                                 last_plus_one,
3273                                                 &start, mss))
3274                         goto out_unlock;
3275
3276                 entry = start;
3277         }
3278
3279         /* Packets are ready, update Tx producer idx, both locally and on the card. */
3280         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3281
3282         tp->tx_prod = entry;
3283         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3284                 netif_stop_queue(dev);
3285
3286 out_unlock:
3287         mmiowb();
3288         spin_unlock_irqrestore(&tp->tx_lock, flags);
3289
3290         dev->trans_start = jiffies;
3291
3292         return NETDEV_TX_OK;
3293 }
3294
3295 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3296                                int new_mtu)
3297 {
3298         dev->mtu = new_mtu;
3299
3300         if (new_mtu > ETH_DATA_LEN)
3301                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3302         else
3303                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3304 }
3305
3306 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3307 {
3308         struct tg3 *tp = netdev_priv(dev);
3309
3310         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3311                 return -EINVAL;
3312
3313         if (!netif_running(dev)) {
3314                 /* We'll just catch it later when the
3315                  * device is brought up.
3316                  */
3317                 tg3_set_mtu(dev, tp, new_mtu);
3318                 return 0;
3319         }
3320
3321         tg3_netif_stop(tp);
3322         spin_lock_irq(&tp->lock);
3323         spin_lock(&tp->tx_lock);
3324
3325         tg3_halt(tp);
3326
3327         tg3_set_mtu(dev, tp, new_mtu);
3328
3329         tg3_init_hw(tp);
3330
3331         tg3_netif_start(tp);
3332
3333         spin_unlock(&tp->tx_lock);
3334         spin_unlock_irq(&tp->lock);
3335
3336         return 0;
3337 }
3338
3339 /* Free up pending packets in all rx/tx rings.
3340  *
3341  * The chip has been shut down and the driver detached from
3342  * the networking core, so no interrupts or new tx packets will
3343  * end up in the driver.  tp->{tx,}lock is not held and we are not
3344  * in an interrupt context and thus may sleep.
3345  */
3346 static void tg3_free_rings(struct tg3 *tp)
3347 {
3348         struct ring_info *rxp;
3349         int i;
3350
3351         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3352                 rxp = &tp->rx_std_buffers[i];
3353
3354                 if (rxp->skb == NULL)
3355                         continue;
3356                 pci_unmap_single(tp->pdev,
3357                                  pci_unmap_addr(rxp, mapping),
3358                                  RX_PKT_BUF_SZ - tp->rx_offset,
3359                                  PCI_DMA_FROMDEVICE);
3360                 dev_kfree_skb_any(rxp->skb);
3361                 rxp->skb = NULL;
3362         }
3363
3364         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3365                 rxp = &tp->rx_jumbo_buffers[i];
3366
3367                 if (rxp->skb == NULL)
3368                         continue;
3369                 pci_unmap_single(tp->pdev,
3370                                  pci_unmap_addr(rxp, mapping),
3371                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3372                                  PCI_DMA_FROMDEVICE);
3373                 dev_kfree_skb_any(rxp->skb);
3374                 rxp->skb = NULL;
3375         }
3376
3377         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3378                 struct tx_ring_info *txp;
3379                 struct sk_buff *skb;
3380                 int j;
3381
3382                 txp = &tp->tx_buffers[i];
3383                 skb = txp->skb;
3384
3385                 if (skb == NULL) {
3386                         i++;
3387                         continue;
3388                 }
3389
3390                 pci_unmap_single(tp->pdev,
3391                                  pci_unmap_addr(txp, mapping),
3392                                  skb_headlen(skb),
3393                                  PCI_DMA_TODEVICE);
3394                 txp->skb = NULL;
3395
3396                 i++;
3397
3398                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3399                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3400                         pci_unmap_page(tp->pdev,
3401                                        pci_unmap_addr(txp, mapping),
3402                                        skb_shinfo(skb)->frags[j].size,
3403                                        PCI_DMA_TODEVICE);
3404                         i++;
3405                 }
3406
3407                 dev_kfree_skb_any(skb);
3408         }
3409 }
3410
3411 /* Initialize tx/rx rings for packet processing.
3412  *
3413  * The chip has been shut down and the driver detached from
3414  * the networking core, so no interrupts or new tx packets will
3415  * end up in the driver.  tp->{tx,}lock are held and thus
3416  * we may not sleep.
3417  */
3418 static void tg3_init_rings(struct tg3 *tp)
3419 {
3420         u32 i;
3421
3422         /* Free up all the SKBs. */
3423         tg3_free_rings(tp);
3424
3425         /* Zero out all descriptors. */
3426         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3427         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3428         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3429         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3430
3431         /* Initialize invariants of the rings; we only set this
3432          * stuff once.  This works because the card does not
3433          * write into the rx buffer posting rings.
3434          */
3435         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3436                 struct tg3_rx_buffer_desc *rxd;
3437
3438                 rxd = &tp->rx_std[i];
3439                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3440                         << RXD_LEN_SHIFT;
3441                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3442                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3443                                (i << RXD_OPAQUE_INDEX_SHIFT));
3444         }
3445
3446         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3447                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3448                         struct tg3_rx_buffer_desc *rxd;
3449
3450                         rxd = &tp->rx_jumbo[i];
3451                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3452                                 << RXD_LEN_SHIFT;
3453                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3454                                 RXD_FLAG_JUMBO;
3455                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3456                                (i << RXD_OPAQUE_INDEX_SHIFT));
3457                 }
3458         }
3459
3460         /* Now allocate fresh SKBs for each rx ring. */
3461         for (i = 0; i < tp->rx_pending; i++) {
3462                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3463                                      -1, i) < 0)
3464                         break;
3465         }
3466
3467         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3468                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3469                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3470                                              -1, i) < 0)
3471                                 break;
3472                 }
3473         }
3474 }
3475
3476 /*
3477  * Must not be invoked with interrupt sources disabled and
3478  * the hardware shut down.
3479  */
3480 static void tg3_free_consistent(struct tg3 *tp)
3481 {
3482         if (tp->rx_std_buffers) {
3483                 kfree(tp->rx_std_buffers);
3484                 tp->rx_std_buffers = NULL;
3485         }
3486         if (tp->rx_std) {
3487                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3488                                     tp->rx_std, tp->rx_std_mapping);
3489                 tp->rx_std = NULL;
3490         }
3491         if (tp->rx_jumbo) {
3492                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3493                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3494                 tp->rx_jumbo = NULL;
3495         }
3496         if (tp->rx_rcb) {
3497                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3498                                     tp->rx_rcb, tp->rx_rcb_mapping);
3499                 tp->rx_rcb = NULL;
3500         }
3501         if (tp->tx_ring) {
3502                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3503                         tp->tx_ring, tp->tx_desc_mapping);
3504                 tp->tx_ring = NULL;
3505         }
3506         if (tp->hw_status) {
3507                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3508                                     tp->hw_status, tp->status_mapping);
3509                 tp->hw_status = NULL;
3510         }
3511         if (tp->hw_stats) {
3512                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3513                                     tp->hw_stats, tp->stats_mapping);
3514                 tp->hw_stats = NULL;
3515         }
3516 }
3517
3518 /*
3519  * Must not be invoked with interrupt sources disabled and
3520  * the hardware shut down.  Can sleep.
3521  */
3522 static int tg3_alloc_consistent(struct tg3 *tp)
3523 {
3524         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3525                                       (TG3_RX_RING_SIZE +
3526                                        TG3_RX_JUMBO_RING_SIZE)) +
3527                                      (sizeof(struct tx_ring_info) *
3528                                       TG3_TX_RING_SIZE),
3529                                      GFP_KERNEL);
3530         if (!tp->rx_std_buffers)
3531                 return -ENOMEM;
3532
3533         memset(tp->rx_std_buffers, 0,
3534                (sizeof(struct ring_info) *
3535                 (TG3_RX_RING_SIZE +
3536                  TG3_RX_JUMBO_RING_SIZE)) +
3537                (sizeof(struct tx_ring_info) *
3538                 TG3_TX_RING_SIZE));
3539
3540         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3541         tp->tx_buffers = (struct tx_ring_info *)
3542                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3543
3544         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3545                                           &tp->rx_std_mapping);
3546         if (!tp->rx_std)
3547                 goto err_out;
3548
3549         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3550                                             &tp->rx_jumbo_mapping);
3551
3552         if (!tp->rx_jumbo)
3553                 goto err_out;
3554
3555         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3556                                           &tp->rx_rcb_mapping);
3557         if (!tp->rx_rcb)
3558                 goto err_out;
3559
3560         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3561                                            &tp->tx_desc_mapping);
3562         if (!tp->tx_ring)
3563                 goto err_out;
3564
3565         tp->hw_status = pci_alloc_consistent(tp->pdev,
3566                                              TG3_HW_STATUS_SIZE,
3567                                              &tp->status_mapping);
3568         if (!tp->hw_status)
3569                 goto err_out;
3570
3571         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3572                                             sizeof(struct tg3_hw_stats),
3573                                             &tp->stats_mapping);
3574         if (!tp->hw_stats)
3575                 goto err_out;
3576
3577         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3578         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3579
3580         return 0;
3581
3582 err_out:
3583         tg3_free_consistent(tp);
3584         return -ENOMEM;
3585 }
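
/* Rough layout of the single kmalloc() block carved up above:
 *
 *     tp->rx_std_buffers   -> struct ring_info[TG3_RX_RING_SIZE]
 *     tp->rx_jumbo_buffers -> struct ring_info[TG3_RX_JUMBO_RING_SIZE]
 *     tp->tx_buffers       -> struct tx_ring_info[TG3_TX_RING_SIZE]
 *
 * The three arrays are contiguous, so the single kfree(tp->rx_std_buffers)
 * in tg3_free_consistent() releases all of the per-buffer bookkeeping.
 */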
3586
3587 #define MAX_WAIT_CNT 1000
3588
3589 /* To stop a block, clear the enable bit and poll till it
3590  * clears.  tp->lock is held.
3591  */
3592 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3593 {
3594         unsigned int i;
3595         u32 val;
3596
3597         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3598                 switch (ofs) {
3599                 case RCVLSC_MODE:
3600                 case DMAC_MODE:
3601                 case MBFREE_MODE:
3602                 case BUFMGR_MODE:
3603                 case MEMARB_MODE:
3604                         /* We can't enable/disable these bits of the
3605                          * 5705/5750, just say success.
3606                          */
3607                         return 0;
3608
3609                 default:
3610                         break;
3611                 }
3612         }
3613
3614         val = tr32(ofs);
3615         val &= ~enable_bit;
3616         tw32_f(ofs, val);
3617
3618         for (i = 0; i < MAX_WAIT_CNT; i++) {
3619                 udelay(100);
3620                 val = tr32(ofs);
3621                 if ((val & enable_bit) == 0)
3622                         break;
3623         }
3624
3625         if (i == MAX_WAIT_CNT) {
3626                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3627                        "ofs=%lx enable_bit=%x\n",
3628                        ofs, enable_bit);
3629                 return -ENODEV;
3630         }
3631
3632         return 0;
3633 }
3634
3635 /* tp->lock is held. */
3636 static int tg3_abort_hw(struct tg3 *tp)
3637 {
3638         int i, err;
3639
3640         tg3_disable_ints(tp);
3641
3642         tp->rx_mode &= ~RX_MODE_ENABLE;
3643         tw32_f(MAC_RX_MODE, tp->rx_mode);
3644         udelay(10);
3645
3646         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3647         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3648         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3649         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3650         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3651         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3652
3653         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3654         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3655         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3656         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3657         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3658         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3659         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3660         if (err)
3661                 goto out;
3662
3663         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3664         tw32_f(MAC_MODE, tp->mac_mode);
3665         udelay(40);
3666
3667         tp->tx_mode &= ~TX_MODE_ENABLE;
3668         tw32_f(MAC_TX_MODE, tp->tx_mode);
3669
3670         for (i = 0; i < MAX_WAIT_CNT; i++) {
3671                 udelay(100);
3672                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3673                         break;
3674         }
3675         if (i >= MAX_WAIT_CNT) {
3676                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3677                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3678                        tp->dev->name, tr32(MAC_TX_MODE));
3679                 return -ENODEV;
3680         }
3681
3682         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3683         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3684         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3685
3686         tw32(FTQ_RESET, 0xffffffff);
3687         tw32(FTQ_RESET, 0x00000000);
3688
3689         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3690         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3691         if (err)
3692                 goto out;
3693
3694         if (tp->hw_status)
3695                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3696         if (tp->hw_stats)
3697                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3698
3699 out:
3700         return err;
3701 }
3702
3703 /* tp->lock is held. */
3704 static int tg3_nvram_lock(struct tg3 *tp)
3705 {
3706         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3707                 int i;
3708
3709                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3710                 for (i = 0; i < 8000; i++) {
3711                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3712                                 break;
3713                         udelay(20);
3714                 }
3715                 if (i == 8000)
3716                         return -ENODEV;
3717         }
3718         return 0;
3719 }
3720
3721 /* tp->lock is held. */
3722 static void tg3_nvram_unlock(struct tg3 *tp)
3723 {
3724         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3725                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3726 }
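
/* Expected pairing (a sketch, not lifted from any particular caller):
 *
 *     if (tg3_nvram_lock(tp) == 0) {
 *             ... access NVRAM through the NVRAM_* registers ...
 *             tg3_nvram_unlock(tp);
 *     }
 *
 * On parts without TG3_FLAG_NVRAM both helpers are no-ops, so callers need
 * not special-case them.
 */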
3727
3728 /* tp->lock is held. */
3729 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3730 {
3731         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3732                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3733                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3734
3735         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3736                 switch (kind) {
3737                 case RESET_KIND_INIT:
3738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3739                                       DRV_STATE_START);
3740                         break;
3741
3742                 case RESET_KIND_SHUTDOWN:
3743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3744                                       DRV_STATE_UNLOAD);
3745                         break;
3746
3747                 case RESET_KIND_SUSPEND:
3748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3749                                       DRV_STATE_SUSPEND);
3750                         break;
3751
3752                 default:
3753                         break;
3754                 }
3755         }
3756 }
3757
3758 /* tp->lock is held. */
3759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3760 {
3761         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3762                 switch (kind) {
3763                 case RESET_KIND_INIT:
3764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3765                                       DRV_STATE_START_DONE);
3766                         break;
3767
3768                 case RESET_KIND_SHUTDOWN:
3769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3770                                       DRV_STATE_UNLOAD_DONE);
3771                         break;
3772
3773                 default:
3774                         break;
3775                 }
3776         }
3777 }
3778
3779 /* tp->lock is held. */
3780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3781 {
3782         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3783                 switch (kind) {
3784                 case RESET_KIND_INIT:
3785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3786                                       DRV_STATE_START);
3787                         break;
3788
3789                 case RESET_KIND_SHUTDOWN:
3790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3791                                       DRV_STATE_UNLOAD);
3792                         break;
3793
3794                 case RESET_KIND_SUSPEND:
3795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3796                                       DRV_STATE_SUSPEND);
3797                         break;
3798
3799                 default:
3800                         break;
3801                 }
3802         }
3803 }
3804
3805 static void tg3_stop_fw(struct tg3 *);
3806
3807 /* tp->lock is held. */
3808 static int tg3_chip_reset(struct tg3 *tp)
3809 {
3810         u32 val;
3811         u32 flags_save;
3812         int i;
3813
3814         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3815                 tg3_nvram_lock(tp);
3816
3817         /*
3818          * We must avoid the readl() that normally takes place.
3819          * It locks machines, causes machine checks, and other
3820          * fun things.  So, temporarily disable the 5701
3821          * hardware workaround, while we do the reset.
3822          */
3823         flags_save = tp->tg3_flags;
3824         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3825
3826         /* do the reset */
3827         val = GRC_MISC_CFG_CORECLK_RESET;
3828
3829         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3830                 if (tr32(0x7e2c) == 0x60) {
3831                         tw32(0x7e2c, 0x20);
3832                 }
3833                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3834                         tw32(GRC_MISC_CFG, (1 << 29));
3835                         val |= (1 << 29);
3836                 }
3837         }
3838
3839         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3840                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3841         tw32(GRC_MISC_CFG, val);
3842
3843         /* restore 5701 hardware bug workaround flag */
3844         tp->tg3_flags = flags_save;
3845
3846         /* Unfortunately, we have to delay before the PCI read back.
3847          * Some 575X chips will not even respond to a PCI cfg access
3848          * when the reset command is given to the chip.
3849          *
3850          * How do these hardware designers expect things to work
3851          * properly if the PCI write is posted for a long period
3852          * of time?  It is always necessary to have some method by
3853          * which a register read back can occur to push out the
3854          * write that does the reset.
3855          *
3856          * For most tg3 variants the trick below worked.
3857          * Ho hum...
3858          */
3859         udelay(120);
3860
3861         /* Flush PCI posted writes.  The normal MMIO registers
3862          * are inaccessible at this time so this is the only
3863          * way to do this reliably (actually, this is no longer
3864          * the case, see above).  I tried to use indirect
3865          * register read/write but this upset some 5701 variants.
3866          */
3867         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3868
3869         udelay(120);
3870
3871         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3872                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3873                         int i;
3874                         u32 cfg_val;
3875
3876                         /* Wait for link training to complete.  */
3877                         for (i = 0; i < 5000; i++)
3878                                 udelay(100);
3879
3880                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3881                         pci_write_config_dword(tp->pdev, 0xc4,
3882                                                cfg_val | (1 << 15));
3883                 }
3884                 /* Set PCIE max payload size and clear error status.  */
3885                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3886         }
3887
3888         /* Re-enable indirect register accesses. */
3889         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3890                                tp->misc_host_ctrl);
3891
3892         /* Set MAX PCI retry to zero. */
3893         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3894         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3895             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3896                 val |= PCISTATE_RETRY_SAME_DMA;
3897         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3898
3899         pci_restore_state(tp->pdev);
3900
3901         /* Make sure PCI-X relaxed ordering bit is clear. */
3902         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3903         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3904         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3905
3906         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3907
3908         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3909                 tg3_stop_fw(tp);
3910                 tw32(0x5000, 0x400);
3911         }
3912
3913         tw32(GRC_MODE, tp->grc_mode);
3914
3915         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3916                 u32 val = tr32(0xc4);
3917
3918                 tw32(0xc4, val | (1 << 15));
3919         }
3920
3921         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3923                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3924                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3925                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3926                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3927         }
3928
3929         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3930                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3931                 tw32_f(MAC_MODE, tp->mac_mode);
3932         } else
3933                 tw32_f(MAC_MODE, 0);
3934         udelay(40);
3935
3936         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3937                 /* Wait for firmware initialization to complete. */
3938                 for (i = 0; i < 100000; i++) {
3939                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3940                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3941                                 break;
3942                         udelay(10);
3943                 }
3944                 if (i >= 100000) {
3945                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3946                                "firmware will not restart magic=%08x\n",
3947                                tp->dev->name, val);
3948                         return -ENODEV;
3949                 }
3950         }
3951
3952         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3953             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3954                 u32 val = tr32(0x7c00);
3955
3956                 tw32(0x7c00, val | (1 << 25));
3957         }
3958
3959         /* Reprobe ASF enable state.  */
3960         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3961         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3962         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3963         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3964                 u32 nic_cfg;
3965
3966                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3967                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3968                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3969                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
3970                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3971                 }
3972         }
3973
3974         return 0;
3975 }
3976
3977 /* tp->lock is held. */
3978 static void tg3_stop_fw(struct tg3 *tp)
3979 {
3980         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3981                 u32 val;
3982                 int i;
3983
3984                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3985                 val = tr32(GRC_RX_CPU_EVENT);
3986                 val |= (1 << 14);
3987                 tw32(GRC_RX_CPU_EVENT, val);
3988
3989                 /* Wait for RX cpu to ACK the event.  */
3990                 for (i = 0; i < 100; i++) {
3991                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3992                                 break;
3993                         udelay(1);
3994                 }
3995         }
3996 }
3997
3998 /* tp->lock is held. */
3999 static int tg3_halt(struct tg3 *tp)
4000 {
4001         int err;
4002
4003         tg3_stop_fw(tp);
4004
4005         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4006
4007         tg3_abort_hw(tp);
4008         err = tg3_chip_reset(tp);
4009
4010         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4011         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4012
4013         if (err)
4014                 return err;
4015
4016         return 0;
4017 }
4018
4019 #define TG3_FW_RELEASE_MAJOR    0x0
4020 #define TG3_FW_RELASE_MINOR     0x0
4021 #define TG3_FW_RELEASE_FIX      0x0
4022 #define TG3_FW_START_ADDR       0x08000000
4023 #define TG3_FW_TEXT_ADDR        0x08000000
4024 #define TG3_FW_TEXT_LEN         0x9c0
4025 #define TG3_FW_RODATA_ADDR      0x080009c0
4026 #define TG3_FW_RODATA_LEN       0x60
4027 #define TG3_FW_DATA_ADDR        0x08000a40
4028 #define TG3_FW_DATA_LEN         0x20
4029 #define TG3_FW_SBSS_ADDR        0x08000a60
4030 #define TG3_FW_SBSS_LEN         0xc
4031 #define TG3_FW_BSS_ADDR         0x08000a70
4032 #define TG3_FW_BSS_LEN          0x10
4033
4034 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4035         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4036         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4037         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4038         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4039         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4040         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4041         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4042         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4043         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4044         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4045         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4046         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4047         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4048         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4049         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4050         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4051         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4052         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4053         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4054         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4055         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4056         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4057         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4058         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4059         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4060         0, 0, 0, 0, 0, 0,
4061         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4062         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4063         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4064         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4065         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4066         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4067         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4068         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4069         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4071         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4072         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4073         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4074         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4075         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4076         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4077         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4078         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4079         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4080         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4081         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4082         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4083         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4084         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4085         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4086         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4087         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4088         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4089         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4090         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4091         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4092         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4093         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4094         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4095         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4096         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4097         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4098         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4099         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4100         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4101         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4102         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4103         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4104         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4105         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4106         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4107         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4108         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4109         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4110         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4111         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4112         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4113         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4114         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4115         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4116         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4117         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4118         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4119         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4120         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4121         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4122         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4123         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4124         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4125         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4126 };
4127
4128 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4129         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4130         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4131         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4132         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4133         0x00000000
4134 };
4135
4136 #if 0 /* All zeros, don't eat up space with it. */
4137 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4138         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4139         0x00000000, 0x00000000, 0x00000000, 0x00000000
4140 };
4141 #endif
4142
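     /* Offsets of the RX and TX CPU scratch memory inside the NIC, used as
      * the download target when loading firmware (see tg3_load_firmware_cpu()
      * below).
      */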
4143 #define RX_CPU_SCRATCH_BASE     0x30000
4144 #define RX_CPU_SCRATCH_SIZE     0x04000
4145 #define TX_CPU_SCRATCH_BASE     0x34000
4146 #define TX_CPU_SCRATCH_SIZE     0x04000
4147
4148 /* tp->lock is held. */
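     /* Halt the embedded CPU at the given register offset by requesting
      * CPU_MODE_HALT and polling until the halt bit reads back set.
      */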
4149 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4150 {
4151         int i;
4152
4153         if (offset == TX_CPU_BASE &&
4154             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4155                 BUG();
4156
4157         if (offset == RX_CPU_BASE) {
4158                 for (i = 0; i < 10000; i++) {
4159                         tw32(offset + CPU_STATE, 0xffffffff);
4160                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4161                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4162                                 break;
4163                 }
4164
4165                 tw32(offset + CPU_STATE, 0xffffffff);
4166                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4167                 udelay(10);
4168         } else {
4169                 for (i = 0; i < 10000; i++) {
4170                         tw32(offset + CPU_STATE, 0xffffffff);
4171                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4172                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4173                                 break;
4174                 }
4175         }
4176
4177         if (i >= 10000) {
4178                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4179                        "%s CPU\n",
4180                        tp->dev->name,
4181                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4182                 return -ENODEV;
4183         }
4184         return 0;
4185 }
4186
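     /* Describes one firmware image: the base address, length and host copy
      * of each of its .text, .rodata and .data sections.  A NULL *_data
      * pointer means that section is simply zero-filled.
      */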
4187 struct fw_info {
4188         unsigned int text_base;
4189         unsigned int text_len;
4190         u32 *text_data;
4191         unsigned int rodata_base;
4192         unsigned int rodata_len;
4193         u32 *rodata_data;
4194         unsigned int data_base;
4195         unsigned int data_len;
4196         u32 *data_data;
4197 };
4198
4199 /* tp->lock is held. */
4200 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4201                                  int cpu_scratch_size, struct fw_info *info)
4202 {
4203         int err, i;
4204         u32 orig_tg3_flags = tp->tg3_flags;
4205         void (*write_op)(struct tg3 *, u32, u32);
4206
4207         if (cpu_base == TX_CPU_BASE &&
4208             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4209                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4210                        "TX CPU firmware on %s, which is a 5705 or newer chip.\n",
4211                        tp->dev->name);
4212                 return -EINVAL;
4213         }
4214
4215         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4216                 write_op = tg3_write_mem;
4217         else
4218                 write_op = tg3_write_indirect_reg32;
4219
4220         /* Force use of PCI config space for indirect register
4221          * write calls.
4222          */
4223         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4224
4225         err = tg3_halt_cpu(tp, cpu_base);
4226         if (err)
4227                 goto out;
4228
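             /* Zero the CPU scratch area, re-assert the halt request, then
              * copy the .text, .rodata and .data sections into it.  Each
              * section base is masked to its low 16 bits to form an offset
              * within the scratch window.
              */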
4229         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4230                 write_op(tp, cpu_scratch_base + i, 0);
4231         tw32(cpu_base + CPU_STATE, 0xffffffff);
4232         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4233         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4234                 write_op(tp, (cpu_scratch_base +
4235                               (info->text_base & 0xffff) +
4236                               (i * sizeof(u32))),
4237                          (info->text_data ?
4238                           info->text_data[i] : 0));
4239         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4240                 write_op(tp, (cpu_scratch_base +
4241                               (info->rodata_base & 0xffff) +
4242                               (i * sizeof(u32))),
4243                          (info->rodata_data ?
4244                           info->rodata_data[i] : 0));
4245         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4246                 write_op(tp, (cpu_scratch_base +
4247                               (info->data_base & 0xffff) +
4248                               (i * sizeof(u32))),
4249                          (info->data_data ?
4250                           info->data_data[i] : 0));
4251
4252         err = 0;
4253
4254 out:
4255         tp->tg3_flags = orig_tg3_flags;
4256         return err;
4257 }
4258
4259 /* tp->lock is held. */
4260 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4261 {
4262         struct fw_info info;
4263         int err, i;
4264
4265         info.text_base = TG3_FW_TEXT_ADDR;
4266         info.text_len = TG3_FW_TEXT_LEN;
4267         info.text_data = &tg3FwText[0];
4268         info.rodata_base = TG3_FW_RODATA_ADDR;
4269         info.rodata_len = TG3_FW_RODATA_LEN;
4270         info.rodata_data = &tg3FwRodata[0];
4271         info.data_base = TG3_FW_DATA_ADDR;
4272         info.data_len = TG3_FW_DATA_LEN;
4273         info.data_data = NULL;
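             /* tg3FwData (the #if 0 block above) is all zeros, so data_data is
              * left NULL and tg3_load_firmware_cpu() zero-fills that section.
              */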
4274
4275         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4276                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4277                                     &info);
4278         if (err)
4279                 return err;
4280
4281         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4282                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4283                                     &info);
4284         if (err)
4285                 return err;
4286
4287         /* Now start up only the RX CPU. */
4288         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4289         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4290
4291         for (i = 0; i < 5; i++) {
4292                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4293                         break;
4294                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4295                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4296                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4297                 udelay(1000);
4298         }
4299         if (i >= 5) {
4300                 printk(KERN_ERR PFX "tg3_load_firmware: %s failed "
4301                        "to set RX CPU PC, is %08x, should be %08x\n",
4302                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4303                        TG3_FW_TEXT_ADDR);
4304                 return -ENODEV;
4305         }
4306         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4307         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4308
4309         return 0;
4310 }
4311
4312 #if TG3_TSO_SUPPORT != 0
4313
4314 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4315 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4316 #define TG3_TSO_FW_RELEASE_FIX          0x0
4317 #define TG3_TSO_FW_START_ADDR           0x08000000
4318 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4319 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4320 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4321 #define TG3_TSO_FW_RODATA_LEN           0x60
4322 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4323 #define TG3_TSO_FW_DATA_LEN             0x30
4324 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4325 #define TG3_TSO_FW_SBSS_LEN             0x2c
4326 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4327 #define TG3_TSO_FW_BSS_LEN              0x894
4328
4329 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4330         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4331         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4332         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4333         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4334         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4335         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4336         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4337         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4338         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4339         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4340         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4341         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4342         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4343         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4344         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4345         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4346         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4347         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4348         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4349         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4350         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4351         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4352         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4353         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4354         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4355         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4356         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4357         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4358         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4359         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4360         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4361         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4362         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4363         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4364         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4365         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4366         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4367         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4368         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4369         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4370         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4371         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4372         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4373         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4374         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4375         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4376         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4377         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4378         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4379         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4380         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4381         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4382         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4383         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4384         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4385         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4386         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4387         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4388         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4389         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4390         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4391         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4392         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4393         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4394         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4395         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4396         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4397         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4398         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4399         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4400         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4401         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4402         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4403         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4404         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4405         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4406         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4407         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4408         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4409         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4410         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4411         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4412         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4413         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4414         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4415         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4416         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4417         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4418         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4419         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4420         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4421         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4422         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4423         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4424         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4425         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4426         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4427         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4428         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4429         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4430         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4431         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4432         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4433         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4434         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4435         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4436         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4437         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4438         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4439         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4440         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4441         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4442         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4443         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4444         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4445         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4446         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4447         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4448         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4449         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4450         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4451         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4452         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4453         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4454         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4455         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4456         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4457         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4458         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4459         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4460         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4461         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4462         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4463         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4464         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4465         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4466         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4467         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4468         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4469         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4470         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4471         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4472         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4473         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4474         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4475         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4476         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4477         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4478         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4479         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4480         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4481         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4482         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4483         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4484         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4485         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4486         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4487         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4488         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4489         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4490         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4491         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4492         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4493         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4494         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4495         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4496         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4497         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4498         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4499         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4500         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4501         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4502         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4503         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4504         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4505         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4506         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4507         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4508         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4509         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4510         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4511         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4512         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4513         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4514         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4515         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4516         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4517         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4518         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4519         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4520         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4521         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4522         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4523         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4524         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4525         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4526         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4527         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4528         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4529         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4530         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4531         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4532         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4533         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4534         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4535         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4536         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4537         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4538         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4539         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4540         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4541         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4542         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4543         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4544         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4545         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4546         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4547         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4548         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4549         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4550         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4551         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4552         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4553         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4554         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4555         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4556         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4557         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4558         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4559         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4560         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4561         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4562         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4563         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4564         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4565         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4566         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4567         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4568         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4569         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4570         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4571         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4572         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4573         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4574         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4575         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4576         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4577         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4578         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4579         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4580         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4581         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4582         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4583         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4584         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4585         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4586         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4587         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4588         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4589         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4590         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4591         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4592         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4593         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4594         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4595         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4596         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4597         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4598         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4599         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4600         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4601         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4602         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4603         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4604         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4605         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4606         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4607         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4608         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4609         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4610         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4611         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4612         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4613         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4614 };
4615
4616 static u32 tg3TsoFwRodata[] = {
4617         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4618         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4619         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4620         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4621         0x00000000,
4622 };
4623
4624 static u32 tg3TsoFwData[] = {
4625         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4626         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4627         0x00000000,
4628 };
4629
4630 /* 5705 needs a special version of the TSO firmware.  */
4631 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4632 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4633 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4634 #define TG3_TSO5_FW_START_ADDR          0x00010000
4635 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4636 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4637 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4638 #define TG3_TSO5_FW_RODATA_LEN          0x50
4639 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4640 #define TG3_TSO5_FW_DATA_LEN            0x20
4641 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4642 #define TG3_TSO5_FW_SBSS_LEN            0x28
4643 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4644 #define TG3_TSO5_FW_BSS_LEN             0x88
4645
4646 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4647         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4648         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4649         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4650         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4651         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4652         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4653         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4654         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4655         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4656         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4657         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4658         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4659         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4660         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4661         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4662         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4663         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4664         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4665         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4666         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4667         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4668         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4669         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4670         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4671         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4672         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4673         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4674         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4675         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4676         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4677         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4678         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4679         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4680         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4681         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4682         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4683         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4684         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4685         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4686         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4687         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4688         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4689         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4690         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4691         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4692         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4693         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4694         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4695         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4696         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4697         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4698         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4699         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4700         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4701         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4702         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4703         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4704         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4705         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4706         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4707         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4708         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4709         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4710         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4711         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4712         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4713         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4714         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4715         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4716         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4717         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4718         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4719         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4720         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4721         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4722         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4723         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4724         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4725         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4726         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4727         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4728         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4729         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4730         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4731         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4732         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4733         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4734         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4735         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4736         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4737         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4738         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4739         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4740         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4741         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4742         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4743         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4744         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4745         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4746         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4747         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4748         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4749         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4750         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4751         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4752         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4753         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4754         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4755         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4756         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4757         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4758         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4759         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4760         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4761         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4762         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4763         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4764         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4765         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4766         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4767         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4768         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4769         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4770         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4771         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4772         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4773         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4774         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4775         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4776         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4777         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4778         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4779         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4780         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4781         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4782         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4783         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4784         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4785         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4786         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4787         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4788         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4789         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4790         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4791         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4792         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4793         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4794         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4795         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4796         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4797         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4798         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4799         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4800         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4801         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4802         0x00000000, 0x00000000, 0x00000000,
4803 };
4804
4805 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4806         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4807         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4808         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4809         0x00000000, 0x00000000, 0x00000000,
4810 };
4811
4812 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4813         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4814         0x00000000, 0x00000000, 0x00000000,
4815 };
4816
4817 /* tp->lock is held. */
4818 static int tg3_load_tso_firmware(struct tg3 *tp)
4819 {
4820         struct fw_info info;
4821         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4822         int err, i;
4823
4824         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4825                 return 0;
4826
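             /* On the 5705 the TSO firmware runs on the RX CPU and borrows the
              * start of the MBUF pool as its scratch area; all other chips load
              * the firmware into the TX CPU scratch RAM.
              */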
4827         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4828                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4829                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4830                 info.text_data = &tg3Tso5FwText[0];
4831                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4832                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4833                 info.rodata_data = &tg3Tso5FwRodata[0];
4834                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4835                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4836                 info.data_data = &tg3Tso5FwData[0];
4837                 cpu_base = RX_CPU_BASE;
4838                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4839                 cpu_scratch_size = (info.text_len +
4840                                     info.rodata_len +
4841                                     info.data_len +
4842                                     TG3_TSO5_FW_SBSS_LEN +
4843                                     TG3_TSO5_FW_BSS_LEN);
4844         } else {
4845                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4846                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4847                 info.text_data = &tg3TsoFwText[0];
4848                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4849                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4850                 info.rodata_data = &tg3TsoFwRodata[0];
4851                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4852                 info.data_len = TG3_TSO_FW_DATA_LEN;
4853                 info.data_data = &tg3TsoFwData[0];
4854                 cpu_base = TX_CPU_BASE;
4855                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4856                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4857         }
4858
4859         err = tg3_load_firmware_cpu(tp, cpu_base,
4860                                     cpu_scratch_base, cpu_scratch_size,
4861                                     &info);
4862         if (err)
4863                 return err;
4864
4865         /* Now start up the CPU. */
4866         tw32(cpu_base + CPU_STATE, 0xffffffff);
4867         tw32_f(cpu_base + CPU_PC,    info.text_base);
4868
4869         for (i = 0; i < 5; i++) {
4870                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4871                         break;
4872                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4873                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4874                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4875                 udelay(1000);
4876         }
4877         if (i >= 5) {
4878                 printk(KERN_ERR PFX "tg3_load_tso_firmware: %s failed "
4879                        "to set CPU PC, is %08x, should be %08x\n",
4880                        tp->dev->name, tr32(cpu_base + CPU_PC),
4881                        info.text_base);
4882                 return -ENODEV;
4883         }
4884         tw32(cpu_base + CPU_STATE, 0xffffffff);
4885         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4886         return 0;
4887 }
4888
4889 #endif /* TG3_TSO_SUPPORT != 0 */
4890
4891 /* tp->lock is held. */
4892 static void __tg3_set_mac_addr(struct tg3 *tp)
4893 {
4894         u32 addr_high, addr_low;
4895         int i;
4896
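             /* Each MAC address slot is a register pair: the top two bytes go
              * in the ..._HIGH register and the low four bytes in ..._LOW.  All
              * four slots (plus twelve extended slots on 5703/5704) are
              * programmed with the same address.
              */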
4897         addr_high = ((tp->dev->dev_addr[0] << 8) |
4898                      tp->dev->dev_addr[1]);
4899         addr_low = ((tp->dev->dev_addr[2] << 24) |
4900                     (tp->dev->dev_addr[3] << 16) |
4901                     (tp->dev->dev_addr[4] <<  8) |
4902                     (tp->dev->dev_addr[5] <<  0));
4903         for (i = 0; i < 4; i++) {
4904                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4905                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4906         }
4907
4908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4910                 for (i = 0; i < 12; i++) {
4911                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4912                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4913                 }
4914         }
4915
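             /* Seed the transmit backoff generator with a byte-wise sum of the
              * MAC address, masked to TX_BACKOFF_SEED_MASK.
              */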
4916         addr_high = (tp->dev->dev_addr[0] +
4917                      tp->dev->dev_addr[1] +
4918                      tp->dev->dev_addr[2] +
4919                      tp->dev->dev_addr[3] +
4920                      tp->dev->dev_addr[4] +
4921                      tp->dev->dev_addr[5]) &
4922                 TX_BACKOFF_SEED_MASK;
4923         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4924 }
4925
4926 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4927 {
4928         struct tg3 *tp = netdev_priv(dev);
4929         struct sockaddr *addr = p;
4930
4931         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4932
4933         spin_lock_irq(&tp->lock);
4934         __tg3_set_mac_addr(tp);
4935         spin_unlock_irq(&tp->lock);
4936
4937         return 0;
4938 }
4939
4940 /* tp->lock is held. */
4941 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4942                            dma_addr_t mapping, u32 maxlen_flags,
4943                            u32 nic_addr)
4944 {
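             /* A ring's bdinfo block in NIC SRAM holds the 64-bit host DMA
              * address, a maxlen/flags word and, on pre-5705 chips only, the
              * NIC-local ring address.
              */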
4945         tg3_write_mem(tp,
4946                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4947                       ((u64) mapping >> 32));
4948         tg3_write_mem(tp,
4949                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4950                       ((u64) mapping & 0xffffffff));
4951         tg3_write_mem(tp,
4952                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4953                        maxlen_flags);
4954
4955         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4956                 tg3_write_mem(tp,
4957                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4958                               nic_addr);
4959 }
4960
4961 static void __tg3_set_rx_mode(struct net_device *);
4962
4963 /* tp->lock is held. */
4964 static int tg3_reset_hw(struct tg3 *tp)
4965 {
4966         u32 val, rdmac_mode;
4967         int i, err, limit;
4968
4969         tg3_disable_ints(tp);
4970
4971         tg3_stop_fw(tp);
4972
4973         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4974
4975         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4976                 err = tg3_abort_hw(tp);
4977                 if (err)
4978                         return err;
4979         }
4980
4981         err = tg3_chip_reset(tp);
4982         if (err)
4983                 return err;
4984
4985         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4986
4987         /* This works around an issue with Athlon chipsets on
4988          * B3 Tigon3 silicon.  The bit has no effect on any
4989          * other revision, but it must not be set on PCI
4990          * Express chips.
4991          */
4992         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4993                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4994         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4995
4996         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4997             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4998                 val = tr32(TG3PCI_PCISTATE);
4999                 val |= PCISTATE_RETRY_SAME_DMA;
5000                 tw32(TG3PCI_PCISTATE, val);
5001         }
5002
5003         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5004                 /* Enable some hw fixes.  */
5005                 val = tr32(TG3PCI_MSI_DATA);
5006                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5007                 tw32(TG3PCI_MSI_DATA, val);
5008         }
5009
5010         /* Descriptor ring init may make accesses to the
5011          * NIC SRAM area to set up the TX descriptors, so we
5012          * can only do this after the hardware has been
5013          * successfully reset.
5014          */
5015         tg3_init_rings(tp);
5016
5017         /* This value is determined during the probe-time DMA
5018          * engine test, tg3_test_dma.
5019          */
5020         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5021
5022         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5023                           GRC_MODE_4X_NIC_SEND_RINGS |
5024                           GRC_MODE_NO_TX_PHDR_CSUM |
5025                           GRC_MODE_NO_RX_PHDR_CSUM);
5026         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5027         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5028                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5029         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5030                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5031
5032         tw32(GRC_MODE,
5033              tp->grc_mode |
5034              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5035
5036         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
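             /* A prescaler of 65 presumably divides the 66 MHz clock down
              * to a 1 MHz timer tick (divide by N + 1); assumption only.
              */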
5037         val = tr32(GRC_MISC_CFG);
5038         val &= ~0xff;
5039         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5040         tw32(GRC_MISC_CFG, val);
5041
5042         /* Initialize MBUF/DESC pool. */
5043         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5044                 /* Do nothing.  */
5045         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5046                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5047                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5048                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5049                 else
5050                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5051                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5052                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5053         }
5054 #if TG3_TSO_SUPPORT != 0
5055         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5056                 int fw_len;
5057
5058                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5059                           TG3_TSO5_FW_RODATA_LEN +
5060                           TG3_TSO5_FW_DATA_LEN +
5061                           TG3_TSO5_FW_SBSS_LEN +
5062                           TG3_TSO5_FW_BSS_LEN);
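                     /* Round the firmware footprint up to a 128-byte boundary. */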
5063                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5064                 tw32(BUFMGR_MB_POOL_ADDR,
5065                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5066                 tw32(BUFMGR_MB_POOL_SIZE,
5067                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5068         }
5069 #endif
5070
5071         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5072                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5073                      tp->bufmgr_config.mbuf_read_dma_low_water);
5074                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5075                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5076                 tw32(BUFMGR_MB_HIGH_WATER,
5077                      tp->bufmgr_config.mbuf_high_water);
5078         } else {
5079                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5080                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5081                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5082                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5083                 tw32(BUFMGR_MB_HIGH_WATER,
5084                      tp->bufmgr_config.mbuf_high_water_jumbo);
5085         }
5086         tw32(BUFMGR_DMA_LOW_WATER,
5087              tp->bufmgr_config.dma_low_water);
5088         tw32(BUFMGR_DMA_HIGH_WATER,
5089              tp->bufmgr_config.dma_high_water);
5090
5091         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
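             /* Poll for up to ~20 ms (2000 * 10 us) for the buffer manager
              * to report itself enabled.
              */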
5092         for (i = 0; i < 2000; i++) {
5093                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5094                         break;
5095                 udelay(10);
5096         }
5097         if (i >= 2000) {
5098                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5099                        tp->dev->name);
5100                 return -ENODEV;
5101         }
5102
5103         /* Setup replenish threshold. */
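             /* One eighth of the configured standard ring; presumably how
              * far the chip lets its supply of posted RX buffers drop before
              * fetching more descriptors from the host ring (assumption).
              */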
5104         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5105
5106         /* Initialize TG3_BDINFO's at:
5107          *  RCVDBDI_STD_BD:     standard eth size rx ring
5108          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5109          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5110          *
5111          * like so:
5112          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5113          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5114          *                              ring attribute flags
5115          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5116          *
5117          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5118          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5119          *
5120          * The size of each ring is fixed in the firmware, but the location is
5121          * configurable.
5122          */
5123         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5124              ((u64) tp->rx_std_mapping >> 32));
5125         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5126              ((u64) tp->rx_std_mapping & 0xffffffff));
5127         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5128              NIC_SRAM_RX_BUFFER_DESC);
5129
5130         /* Don't even try to program the JUMBO/MINI buffer descriptor
5131          * configs on 5705.
5132          */
5133         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5134                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5135                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5136         } else {
5137                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5138                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5139
5140                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5141                      BDINFO_FLAGS_DISABLED);
5142
5143                 /* Setup replenish threshold. */
5144                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5145
5146                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5147                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5148                              ((u64) tp->rx_jumbo_mapping >> 32));
5149                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5150                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5151                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5152                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5153                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5154                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5155                 } else {
5156                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5157                              BDINFO_FLAGS_DISABLED);
5158                 }
5159
5160         }
5161
5162         /* There is only one send ring on 5705/5750, no need to explicitly
5163          * disable the others.
5164          */
5165         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5166                 /* Clear out send RCB ring in SRAM. */
5167                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5168                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5169                                       BDINFO_FLAGS_DISABLED);
5170         }
5171
5172         tp->tx_prod = 0;
5173         tp->tx_cons = 0;
5174         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5175         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5176
5177         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5178                        tp->tx_desc_mapping,
5179                        (TG3_TX_RING_SIZE <<
5180                         BDINFO_FLAGS_MAXLEN_SHIFT),
5181                        NIC_SRAM_TX_BUFFER_DESC);
5182
5183         /* There is only one receive return ring on 5705/5750, no need
5184          * to explicitly disable the others.
5185          */
5186         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5187                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5188                      i += TG3_BDINFO_SIZE) {
5189                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5190                                       BDINFO_FLAGS_DISABLED);
5191                 }
5192         }
5193
5194         tp->rx_rcb_ptr = 0;
5195         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5196
5197         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5198                        tp->rx_rcb_mapping,
5199                        (TG3_RX_RCB_RING_SIZE(tp) <<
5200                         BDINFO_FLAGS_MAXLEN_SHIFT),
5201                        0);
5202
5203         tp->rx_std_ptr = tp->rx_pending;
5204         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5205                      tp->rx_std_ptr);
5206
5207         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5208                                                 tp->rx_jumbo_pending : 0;
5209         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5210                      tp->rx_jumbo_ptr);
5211
5212         /* Initialize MAC address and backoff seed. */
5213         __tg3_set_mac_addr(tp);
5214
5215         /* MTU + ethernet header + FCS + optional VLAN tag */
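             /* ETH_HLEN covers the 14-byte header; the extra 8 bytes allow
              * for the 4-byte FCS plus a 4-byte 802.1Q tag.
              */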
5216         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5217
5218         /* The slot time is changed by tg3_setup_phy if we
5219          * run at gigabit with half duplex.
5220          */
5221         tw32(MAC_TX_LENGTHS,
5222              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5223              (6 << TX_LENGTHS_IPG_SHIFT) |
5224              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5225
5226         /* Receive rules. */
5227         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5228         tw32(RCVLPC_CONFIG, 0x0181);
5229
5230         /* Calculate the RDMAC_MODE setting early, as we need it to
5231          * determine the RCVLPC_STATS_ENABLE mask.
5232          */
5233         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5234                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5235                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5236                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5237                       RDMAC_MODE_LNGREAD_ENAB);
5238         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5239                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5240
5241         /* If statement applies to 5705 and 5750 PCI devices only */
5242         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5243              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5244             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5245                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5246                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5247                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5248                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5249                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5250                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5251                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5252                 }
5253         }
5254
5255         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5256                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5257
5258 #if TG3_TSO_SUPPORT != 0
5259         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5260                 rdmac_mode |= (1 << 27);
5261 #endif
5262
5263         /* Receive/send statistics. */
5264         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5265             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5266                 val = tr32(RCVLPC_STATS_ENABLE);
5267                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5268                 tw32(RCVLPC_STATS_ENABLE, val);
5269         } else {
5270                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5271         }
5272         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5273         tw32(SNDDATAI_STATSENAB, 0xffffff);
5274         tw32(SNDDATAI_STATSCTRL,
5275              (SNDDATAI_SCTRL_ENABLE |
5276               SNDDATAI_SCTRL_FASTUPD));
5277
5278         /* Setup host coalescing engine. */
5279         tw32(HOSTCC_MODE, 0);
5280         for (i = 0; i < 2000; i++) {
5281                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5282                         break;
5283                 udelay(10);
5284         }
5285
5286         tw32(HOSTCC_RXCOL_TICKS, 0);
5287         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5288         tw32(HOSTCC_RXMAX_FRAMES, 1);
5289         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5290         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5291                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5292                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5293         }
5294         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5295         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5296
5297         /* set status block DMA address */
5298         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5299              ((u64) tp->status_mapping >> 32));
5300         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5301              ((u64) tp->status_mapping & 0xffffffff));
5302
5303         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5304                 /* Status/statistics block address.  See tg3_timer,
5305                  * the tg3_periodic_fetch_stats call there, and
5306                  * tg3_get_stats to see how this works for 5705/5750 chips.
5307                  */
5308                 tw32(HOSTCC_STAT_COAL_TICKS,
5309                      DEFAULT_STAT_COAL_TICKS);
5310                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5311                      ((u64) tp->stats_mapping >> 32));
5312                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5313                      ((u64) tp->stats_mapping & 0xffffffff));
5314                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5315                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5316         }
5317
5318         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5319
5320         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5321         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5322         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5323                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5324
5325         /* Clear statistics/status blocks in chip SRAM, and the status block in host RAM. */
5326         for (i = NIC_SRAM_STATS_BLK;
5327              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5328              i += sizeof(u32)) {
5329                 tg3_write_mem(tp, i, 0);
5330                 udelay(40);
5331         }
5332         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5333
5334         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5335                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5336         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5337         udelay(40);
5338
5339         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5340         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5341                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5342                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5343         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5344         udelay(100);
5345
5346         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5347         tr32(MAILBOX_INTERRUPT_0);
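             /* The read back flushes the posted mailbox write. */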
5348
5349         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5350                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5351                 udelay(40);
5352         }
5353
5354         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5355                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5356                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5357                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5358                WDMAC_MODE_LNGREAD_ENAB);
5359
5360         /* If statement applies to 5705 and 5750 PCI devices only */
5361         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5362              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5363             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5364                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5365                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5366                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5367                         /* nothing */
5368                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5369                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5370                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5371                         val |= WDMAC_MODE_RX_ACCEL;
5372                 }
5373         }
5374
5375         tw32_f(WDMAC_MODE, val);
5376         udelay(40);
5377
5378         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5379                 val = tr32(TG3PCI_X_CAPS);
5380                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5381                         val &= ~PCIX_CAPS_BURST_MASK;
5382                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5383                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5384                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5385                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5386                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5387                                 val |= (tp->split_mode_max_reqs <<
5388                                         PCIX_CAPS_SPLIT_SHIFT);
5389                 }
5390                 tw32(TG3PCI_X_CAPS, val);
5391         }
5392
5393         tw32_f(RDMAC_MODE, rdmac_mode);
5394         udelay(40);
5395
5396         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5397         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5398                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5399         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5400         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5401         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5402         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5403         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5404 #if TG3_TSO_SUPPORT != 0
5405         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5406                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5407 #endif
5408         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5409         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5410
5411         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5412                 err = tg3_load_5701_a0_firmware_fix(tp);
5413                 if (err)
5414                         return err;
5415         }
5416
5417 #if TG3_TSO_SUPPORT != 0
5418         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5419                 err = tg3_load_tso_firmware(tp);
5420                 if (err)
5421                         return err;
5422         }
5423 #endif
5424
5425         tp->tx_mode = TX_MODE_ENABLE;
5426         tw32_f(MAC_TX_MODE, tp->tx_mode);
5427         udelay(100);
5428
5429         tp->rx_mode = RX_MODE_ENABLE;
5430         tw32_f(MAC_RX_MODE, tp->rx_mode);
5431         udelay(10);
5432
5433         if (tp->link_config.phy_is_low_power) {
5434                 tp->link_config.phy_is_low_power = 0;
5435                 tp->link_config.speed = tp->link_config.orig_speed;
5436                 tp->link_config.duplex = tp->link_config.orig_duplex;
5437                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5438         }
5439
5440         tp->mi_mode = MAC_MI_MODE_BASE;
5441         tw32_f(MAC_MI_MODE, tp->mi_mode);
5442         udelay(80);
5443
5444         tw32(MAC_LED_CTRL, tp->led_ctrl);
5445
5446         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5447         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5448                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5449                 udelay(10);
5450         }
5451         tw32_f(MAC_RX_MODE, tp->rx_mode);
5452         udelay(10);
5453
5454         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5455                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5456                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5457                         /* Set drive transmission level to 1.2V  */
5458                         /* only if the signal pre-emphasis bit is not set  */
5459                         val = tr32(MAC_SERDES_CFG);
5460                         val &= 0xfffff000;
5461                         val |= 0x880;
5462                         tw32(MAC_SERDES_CFG, val);
5463                 }
5464                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5465                         tw32(MAC_SERDES_CFG, 0x616000);
5466         }
5467
5468         /* Prevent chip from dropping frames when flow control
5469          * is enabled.
5470          */
5471         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5472
5473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5474             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5475                 /* Use hardware link auto-negotiation */
5476                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5477         }
5478
5479         err = tg3_setup_phy(tp, 1);
5480         if (err)
5481                 return err;
5482
5483         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5484                 u32 tmp;
5485
5486                 /* Clear CRC stats. */
5487                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5488                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5489                         tg3_readphy(tp, 0x14, &tmp);
5490                 }
5491         }
5492
5493         __tg3_set_rx_mode(tp->dev);
5494
5495         /* Initialize receive rules. */
5496         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5497         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5498         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5499         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5500
5501         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5502                 limit = 8;
5503         else
5504                 limit = 16;
5505         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5506                 limit -= 4;
5507         switch (limit) {
5508         case 16:
5509                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5510         case 15:
5511                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5512         case 14:
5513                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5514         case 13:
5515                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5516         case 12:
5517                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5518         case 11:
5519                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5520         case 10:
5521                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5522         case 9:
5523                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5524         case 8:
5525                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5526         case 7:
5527                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5528         case 6:
5529                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5530         case 5:
5531                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5532         case 4:
5533                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5534         case 3:
5535                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5536         case 2:
5537         case 1:
5538
5539         default:
5540                 break;
5541         }
5542
5543         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5544
5545         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5546                 tg3_enable_ints(tp);
5547
5548         return 0;
5549 }
5550
5551 /* Called at device open time to get the chip ready for
5552  * packet processing.  Invoked with tp->lock held.
5553  */
5554 static int tg3_init_hw(struct tg3 *tp)
5555 {
5556         int err;
5557
5558         /* Force the chip into D0. */
5559         err = tg3_set_power_state(tp, 0);
5560         if (err)
5561                 goto out;
5562
5563         tg3_switch_clocks(tp);
5564
5565         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5566
5567         err = tg3_reset_hw(tp);
5568
5569 out:
5570         return err;
5571 }
5572
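     /* Accumulate a 32-bit hardware counter into a 64-bit (high/low) software
      * counter.  The low word wraps on overflow; if it ends up smaller than
      * the value just added, a carry is propagated into the high word.
      */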
5573 #define TG3_STAT_ADD32(PSTAT, REG) \
5574 do {    u32 __val = tr32(REG); \
5575         (PSTAT)->low += __val; \
5576         if ((PSTAT)->low < __val) \
5577                 (PSTAT)->high += 1; \
5578 } while (0)
5579
5580 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5581 {
5582         struct tg3_hw_stats *sp = tp->hw_stats;
5583
5584         if (!netif_carrier_ok(tp->dev))
5585                 return;
5586
5587         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5588         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5589         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5590         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5591         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5592         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5593         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5594         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5595         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5596         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5597         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5598         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5599         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5600
5601         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5602         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5603         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5604         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5605         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5606         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5607         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5608         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5609         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5610         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5611         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5612         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5613         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5614         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5615 }
5616
5617 static void tg3_timer(unsigned long __opaque)
5618 {
5619         struct tg3 *tp = (struct tg3 *) __opaque;
5620         unsigned long flags;
5621
5622         spin_lock_irqsave(&tp->lock, flags);
5623         spin_lock(&tp->tx_lock);
5624
5625         /* All of this garbage is needed because, when using
5626          * non-tagged IRQ status, the mailbox/status block protocol
5627          * the chip uses with the CPU is race prone.
5628          */
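             /* If the status block already shows a pending update, force the
              * chip to (re)assert the interrupt via GRC local control;
              * otherwise kick the host coalescing engine so a fresh status
              * block update is generated now.
              */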
5629         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5630                 tw32(GRC_LOCAL_CTRL,
5631                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5632         } else {
5633                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5634                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5635         }
5636
5637         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5638                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5639                 spin_unlock(&tp->tx_lock);
5640                 spin_unlock_irqrestore(&tp->lock, flags);
5641                 schedule_work(&tp->reset_task);
5642                 return;
5643         }
5644
5645         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5646                 tg3_periodic_fetch_stats(tp);
5647
5648         /* This part only runs once per second. */
5649         if (!--tp->timer_counter) {
5650                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5651                         u32 mac_stat;
5652                         int phy_event;
5653
5654                         mac_stat = tr32(MAC_STATUS);
5655
5656                         phy_event = 0;
5657                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5658                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5659                                         phy_event = 1;
5660                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5661                                 phy_event = 1;
5662
5663                         if (phy_event)
5664                                 tg3_setup_phy(tp, 0);
5665                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5666                         u32 mac_stat = tr32(MAC_STATUS);
5667                         int need_setup = 0;
5668
5669                         if (netif_carrier_ok(tp->dev) &&
5670                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5671                                 need_setup = 1;
5672                         }
5673                         if (!netif_carrier_ok(tp->dev) &&
5674                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5675                                          MAC_STATUS_SIGNAL_DET))) {
5676                                 need_setup = 1;
5677                         }
5678                         if (need_setup) {
5679                                 tw32_f(MAC_MODE,
5680                                      (tp->mac_mode &
5681                                       ~MAC_MODE_PORT_MODE_MASK));
5682                                 udelay(40);
5683                                 tw32_f(MAC_MODE, tp->mac_mode);
5684                                 udelay(40);
5685                                 tg3_setup_phy(tp, 0);
5686                         }
5687                 }
5688
5689                 tp->timer_counter = tp->timer_multiplier;
5690         }
5691
5692         /* Heartbeat is only sent once every 120 seconds.  */
5693         if (!--tp->asf_counter) {
5694                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5695                         u32 val;
5696
5697                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5698                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5699                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5700                         val = tr32(GRC_RX_CPU_EVENT);
5701                         val |= (1 << 14);
5702                         tw32(GRC_RX_CPU_EVENT, val);
5703                 }
5704                 tp->asf_counter = tp->asf_multiplier;
5705         }
5706
5707         spin_unlock(&tp->tx_lock);
5708         spin_unlock_irqrestore(&tp->lock, flags);
5709
5710         tp->timer.expires = jiffies + tp->timer_offset;
5711         add_timer(&tp->timer);
5712 }
5713
5714 static int tg3_open(struct net_device *dev)
5715 {
5716         struct tg3 *tp = netdev_priv(dev);
5717         int err;
5718
5719         spin_lock_irq(&tp->lock);
5720         spin_lock(&tp->tx_lock);
5721
5722         tg3_disable_ints(tp);
5723         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5724
5725         spin_unlock(&tp->tx_lock);
5726         spin_unlock_irq(&tp->lock);
5727
5728         /* The placement of this call is tied
5729          * to the setup and use of Host TX descriptors.
5730          */
5731         err = tg3_alloc_consistent(tp);
5732         if (err)
5733                 return err;
5734
5735         err = request_irq(dev->irq, tg3_interrupt,
5736                           SA_SHIRQ, dev->name, dev);
5737
5738         if (err) {
5739                 tg3_free_consistent(tp);
5740                 return err;
5741         }
5742
5743         spin_lock_irq(&tp->lock);
5744         spin_lock(&tp->tx_lock);
5745
5746         err = tg3_init_hw(tp);
5747         if (err) {
5748                 tg3_halt(tp);
5749                 tg3_free_rings(tp);
5750         } else {
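                     /* The timer fires every HZ / 10 jiffies, i.e. ten times
                      * a second; timer_counter then drives the once-per-second
                      * work in tg3_timer, and asf_counter the 120-second
                      * ASF heartbeat.
                      */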
5751                 tp->timer_offset = HZ / 10;
5752                 tp->timer_counter = tp->timer_multiplier = 10;
5753                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5754
5755                 init_timer(&tp->timer);
5756                 tp->timer.expires = jiffies + tp->timer_offset;
5757                 tp->timer.data = (unsigned long) tp;
5758                 tp->timer.function = tg3_timer;
5759                 add_timer(&tp->timer);
5760
5761                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5762         }
5763
5764         spin_unlock(&tp->tx_lock);
5765         spin_unlock_irq(&tp->lock);
5766
5767         if (err) {
5768                 free_irq(dev->irq, dev);
5769                 tg3_free_consistent(tp);
5770                 return err;
5771         }
5772
5773         spin_lock_irq(&tp->lock);
5774         spin_lock(&tp->tx_lock);
5775
5776         tg3_enable_ints(tp);
5777
5778         spin_unlock(&tp->tx_lock);
5779         spin_unlock_irq(&tp->lock);
5780
5781         netif_start_queue(dev);
5782
5783         return 0;
5784 }
5785
5786 #if 0
5787 /*static*/ void tg3_dump_state(struct tg3 *tp)
5788 {
5789         u32 val32, val32_2, val32_3, val32_4, val32_5;
5790         u16 val16;
5791         int i;
5792
5793         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5794         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5795         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5796                val16, val32);
5797
5798         /* MAC block */
5799         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5800                tr32(MAC_MODE), tr32(MAC_STATUS));
5801         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5802                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5803         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5804                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5805         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5806                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5807
5808         /* Send data initiator control block */
5809         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5810                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5811         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5812                tr32(SNDDATAI_STATSCTRL));
5813
5814         /* Send data completion control block */
5815         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5816
5817         /* Send BD ring selector block */
5818         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5819                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5820
5821         /* Send BD initiator control block */
5822         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5823                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5824
5825         /* Send BD completion control block */
5826         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5827
5828         /* Receive list placement control block */
5829         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5830                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5831         printk("       RCVLPC_STATSCTRL[%08x]\n",
5832                tr32(RCVLPC_STATSCTRL));
5833
5834         /* Receive data and receive BD initiator control block */
5835         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5836                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5837
5838         /* Receive data completion control block */
5839         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5840                tr32(RCVDCC_MODE));
5841
5842         /* Receive BD initiator control block */
5843         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5844                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5845
5846         /* Receive BD completion control block */
5847         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5848                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5849
5850         /* Receive list selector control block */
5851         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5852                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5853
5854         /* Mbuf cluster free block */
5855         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5856                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5857
5858         /* Host coalescing control block */
5859         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5860                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5861         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5862                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5863                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5864         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5865                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5866                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5867         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5868                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5869         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5870                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5871
5872         /* Memory arbiter control block */
5873         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5874                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5875
5876         /* Buffer manager control block */
5877         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5878                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5879         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5880                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5881         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5882                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5883                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5884                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5885
5886         /* Read DMA control block */
5887         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5888                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5889
5890         /* Write DMA control block */
5891         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5892                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5893
5894         /* DMA completion block */
5895         printk("DEBUG: DMAC_MODE[%08x]\n",
5896                tr32(DMAC_MODE));
5897
5898         /* GRC block */
5899         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5900                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5901         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5902                tr32(GRC_LOCAL_CTRL));
5903
5904         /* TG3_BDINFOs */
5905         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5906                tr32(RCVDBDI_JUMBO_BD + 0x0),
5907                tr32(RCVDBDI_JUMBO_BD + 0x4),
5908                tr32(RCVDBDI_JUMBO_BD + 0x8),
5909                tr32(RCVDBDI_JUMBO_BD + 0xc));
5910         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5911                tr32(RCVDBDI_STD_BD + 0x0),
5912                tr32(RCVDBDI_STD_BD + 0x4),
5913                tr32(RCVDBDI_STD_BD + 0x8),
5914                tr32(RCVDBDI_STD_BD + 0xc));
5915         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5916                tr32(RCVDBDI_MINI_BD + 0x0),
5917                tr32(RCVDBDI_MINI_BD + 0x4),
5918                tr32(RCVDBDI_MINI_BD + 0x8),
5919                tr32(RCVDBDI_MINI_BD + 0xc));
5920
5921         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5922         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5923         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5924         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5925         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5926                val32, val32_2, val32_3, val32_4);
5927
5928         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5929         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5930         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5931         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5932         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5933                val32, val32_2, val32_3, val32_4);
5934
5935         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5936         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5937         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5938         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5939         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5940         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5941                val32, val32_2, val32_3, val32_4, val32_5);
5942
5943         /* SW status block */
5944         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5945                tp->hw_status->status,
5946                tp->hw_status->status_tag,
5947                tp->hw_status->rx_jumbo_consumer,
5948                tp->hw_status->rx_consumer,
5949                tp->hw_status->rx_mini_consumer,
5950                tp->hw_status->idx[0].rx_producer,
5951                tp->hw_status->idx[0].tx_consumer);
5952
5953         /* SW statistics block */
5954         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5955                ((u32 *)tp->hw_stats)[0],
5956                ((u32 *)tp->hw_stats)[1],
5957                ((u32 *)tp->hw_stats)[2],
5958                ((u32 *)tp->hw_stats)[3]);
5959
5960         /* Mailboxes */
5961         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5962                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5963                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5964                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5965                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5966
5967         /* NIC side send descriptors. */
5968         for (i = 0; i < 6; i++) {
5969                 unsigned long txd;
5970
5971                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5972                         + (i * sizeof(struct tg3_tx_buffer_desc));
5973                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5974                        i,
5975                        readl(txd + 0x0), readl(txd + 0x4),
5976                        readl(txd + 0x8), readl(txd + 0xc));
5977         }
5978
5979         /* NIC side RX descriptors. */
5980         for (i = 0; i < 6; i++) {
5981                 unsigned long rxd;
5982
5983                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5984                         + (i * sizeof(struct tg3_rx_buffer_desc));
5985                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5986                        i,
5987                        readl(rxd + 0x0), readl(rxd + 0x4),
5988                        readl(rxd + 0x8), readl(rxd + 0xc));
5989                 rxd += (4 * sizeof(u32));
5990                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5991                        i,
5992                        readl(rxd + 0x0), readl(rxd + 0x4),
5993                        readl(rxd + 0x8), readl(rxd + 0xc));
5994         }
5995
5996         for (i = 0; i < 6; i++) {
5997                 unsigned long rxd;
5998
5999                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6000                         + (i * sizeof(struct tg3_rx_buffer_desc));
6001                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6002                        i,
6003                        readl(rxd + 0x0), readl(rxd + 0x4),
6004                        readl(rxd + 0x8), readl(rxd + 0xc));
6005                 rxd += (4 * sizeof(u32));
6006                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6007                        i,
6008                        readl(rxd + 0x0), readl(rxd + 0x4),
6009                        readl(rxd + 0x8), readl(rxd + 0xc));
6010         }
6011 }
6012 #endif
6013
6014 static struct net_device_stats *tg3_get_stats(struct net_device *);
6015 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6016
6017 static int tg3_close(struct net_device *dev)
6018 {
6019         struct tg3 *tp = netdev_priv(dev);
6020
6021         netif_stop_queue(dev);
6022
6023         del_timer_sync(&tp->timer);
6024
6025         spin_lock_irq(&tp->lock);
6026         spin_lock(&tp->tx_lock);
6027 #if 0
6028         tg3_dump_state(tp);
6029 #endif
6030
6031         tg3_disable_ints(tp);
6032
6033         tg3_halt(tp);
6034         tg3_free_rings(tp);
6035         tp->tg3_flags &=
6036                 ~(TG3_FLAG_INIT_COMPLETE |
6037                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6038         netif_carrier_off(tp->dev);
6039
6040         spin_unlock(&tp->tx_lock);
6041         spin_unlock_irq(&tp->lock);
6042
6043         free_irq(dev->irq, dev);
6044
6045         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6046                sizeof(tp->net_stats_prev));
6047         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6048                sizeof(tp->estats_prev));
6049
6050         tg3_free_consistent(tp);
6051
6052         return 0;
6053 }
6054
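     /* Fold a 64-bit (high/low) hardware counter into an unsigned long; on
      * 32-bit hosts only the low word is reported.
      */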
6055 static inline unsigned long get_stat64(tg3_stat64_t *val)
6056 {
6057         unsigned long ret;
6058
6059 #if (BITS_PER_LONG == 32)
6060         ret = val->low;
6061 #else
6062         ret = ((u64)val->high << 32) | ((u64)val->low);
6063 #endif
6064         return ret;
6065 }
6066
6067 static unsigned long calc_crc_errors(struct tg3 *tp)
6068 {
6069         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6070
6071         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6072             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6073              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6074                 unsigned long flags;
6075                 u32 val;
6076
6077                 spin_lock_irqsave(&tp->lock, flags);
6078                 if (!tg3_readphy(tp, 0x1e, &val)) {
6079                         tg3_writephy(tp, 0x1e, val | 0x8000);
6080                         tg3_readphy(tp, 0x14, &val);
6081                 } else
6082                         val = 0;
6083                 spin_unlock_irqrestore(&tp->lock, flags);
6084
6085                 tp->phy_crc_errors += val;
6086
6087                 return tp->phy_crc_errors;
6088         }
6089
6090         return get_stat64(&hw_stats->rx_fcs_errors);
6091 }
6092
6093 #define ESTAT_ADD(member) \
6094         estats->member =        old_estats->member + \
6095                                 get_stat64(&hw_stats->member)
6096
6097 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6098 {
6099         struct tg3_ethtool_stats *estats = &tp->estats;
6100         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6101         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6102
6103         if (!hw_stats)
6104                 return old_estats;
6105
6106         ESTAT_ADD(rx_octets);
6107         ESTAT_ADD(rx_fragments);
6108         ESTAT_ADD(rx_ucast_packets);
6109         ESTAT_ADD(rx_mcast_packets);
6110         ESTAT_ADD(rx_bcast_packets);
6111         ESTAT_ADD(rx_fcs_errors);
6112         ESTAT_ADD(rx_align_errors);
6113         ESTAT_ADD(rx_xon_pause_rcvd);
6114         ESTAT_ADD(rx_xoff_pause_rcvd);
6115         ESTAT_ADD(rx_mac_ctrl_rcvd);
6116         ESTAT_ADD(rx_xoff_entered);
6117         ESTAT_ADD(rx_frame_too_long_errors);
6118         ESTAT_ADD(rx_jabbers);
6119         ESTAT_ADD(rx_undersize_packets);
6120         ESTAT_ADD(rx_in_length_errors);
6121         ESTAT_ADD(rx_out_length_errors);
6122         ESTAT_ADD(rx_64_or_less_octet_packets);
6123         ESTAT_ADD(rx_65_to_127_octet_packets);
6124         ESTAT_ADD(rx_128_to_255_octet_packets);
6125         ESTAT_ADD(rx_256_to_511_octet_packets);
6126         ESTAT_ADD(rx_512_to_1023_octet_packets);
6127         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6128         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6129         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6130         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6131         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6132
6133         ESTAT_ADD(tx_octets);
6134         ESTAT_ADD(tx_collisions);
6135         ESTAT_ADD(tx_xon_sent);
6136         ESTAT_ADD(tx_xoff_sent);
6137         ESTAT_ADD(tx_flow_control);
6138         ESTAT_ADD(tx_mac_errors);
6139         ESTAT_ADD(tx_single_collisions);
6140         ESTAT_ADD(tx_mult_collisions);
6141         ESTAT_ADD(tx_deferred);
6142         ESTAT_ADD(tx_excessive_collisions);
6143         ESTAT_ADD(tx_late_collisions);
6144         ESTAT_ADD(tx_collide_2times);
6145         ESTAT_ADD(tx_collide_3times);
6146         ESTAT_ADD(tx_collide_4times);
6147         ESTAT_ADD(tx_collide_5times);
6148         ESTAT_ADD(tx_collide_6times);
6149         ESTAT_ADD(tx_collide_7times);
6150         ESTAT_ADD(tx_collide_8times);
6151         ESTAT_ADD(tx_collide_9times);
6152         ESTAT_ADD(tx_collide_10times);
6153         ESTAT_ADD(tx_collide_11times);
6154         ESTAT_ADD(tx_collide_12times);
6155         ESTAT_ADD(tx_collide_13times);
6156         ESTAT_ADD(tx_collide_14times);
6157         ESTAT_ADD(tx_collide_15times);
6158         ESTAT_ADD(tx_ucast_packets);
6159         ESTAT_ADD(tx_mcast_packets);
6160         ESTAT_ADD(tx_bcast_packets);
6161         ESTAT_ADD(tx_carrier_sense_errors);
6162         ESTAT_ADD(tx_discards);
6163         ESTAT_ADD(tx_errors);
6164
6165         ESTAT_ADD(dma_writeq_full);
6166         ESTAT_ADD(dma_write_prioq_full);
6167         ESTAT_ADD(rxbds_empty);
6168         ESTAT_ADD(rx_discards);
6169         ESTAT_ADD(rx_errors);
6170         ESTAT_ADD(rx_threshold_hit);
6171
6172         ESTAT_ADD(dma_readq_full);
6173         ESTAT_ADD(dma_read_prioq_full);
6174         ESTAT_ADD(tx_comp_queue_full);
6175
6176         ESTAT_ADD(ring_set_send_prod_index);
6177         ESTAT_ADD(ring_status_update);
6178         ESTAT_ADD(nic_irqs);
6179         ESTAT_ADD(nic_avoided_irqs);
6180         ESTAT_ADD(nic_tx_threshold_hit);
6181
6182         return estats;
6183 }
6184
6185 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6186 {
6187         struct tg3 *tp = netdev_priv(dev);
6188         struct net_device_stats *stats = &tp->net_stats;
6189         struct net_device_stats *old_stats = &tp->net_stats_prev;
6190         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6191
6192         if (!hw_stats)
6193                 return old_stats;
6194
6195         stats->rx_packets = old_stats->rx_packets +
6196                 get_stat64(&hw_stats->rx_ucast_packets) +
6197                 get_stat64(&hw_stats->rx_mcast_packets) +
6198                 get_stat64(&hw_stats->rx_bcast_packets);
6199                 
6200         stats->tx_packets = old_stats->tx_packets +
6201                 get_stat64(&hw_stats->tx_ucast_packets) +
6202                 get_stat64(&hw_stats->tx_mcast_packets) +
6203                 get_stat64(&hw_stats->tx_bcast_packets);
6204
6205         stats->rx_bytes = old_stats->rx_bytes +
6206                 get_stat64(&hw_stats->rx_octets);
6207         stats->tx_bytes = old_stats->tx_bytes +
6208                 get_stat64(&hw_stats->tx_octets);
6209
6210         stats->rx_errors = old_stats->rx_errors +
6211                 get_stat64(&hw_stats->rx_errors) +
6212                 get_stat64(&hw_stats->rx_discards);
6213         stats->tx_errors = old_stats->tx_errors +
6214                 get_stat64(&hw_stats->tx_errors) +
6215                 get_stat64(&hw_stats->tx_mac_errors) +
6216                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6217                 get_stat64(&hw_stats->tx_discards);
6218
6219         stats->multicast = old_stats->multicast +
6220                 get_stat64(&hw_stats->rx_mcast_packets);
6221         stats->collisions = old_stats->collisions +
6222                 get_stat64(&hw_stats->tx_collisions);
6223
6224         stats->rx_length_errors = old_stats->rx_length_errors +
6225                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6226                 get_stat64(&hw_stats->rx_undersize_packets);
6227
6228         stats->rx_over_errors = old_stats->rx_over_errors +
6229                 get_stat64(&hw_stats->rxbds_empty);
6230         stats->rx_frame_errors = old_stats->rx_frame_errors +
6231                 get_stat64(&hw_stats->rx_align_errors);
6232         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6233                 get_stat64(&hw_stats->tx_discards);
6234         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6235                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6236
6237         stats->rx_crc_errors = old_stats->rx_crc_errors +
6238                 calc_crc_errors(tp);
6239
6240         return stats;
6241 }
6242
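     /* Bit-by-bit CRC-32 using the reflected Ethernet polynomial 0xedb88320,
      * used below to hash multicast addresses into the MAC hash registers.
      */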
6243 static inline u32 calc_crc(unsigned char *buf, int len)
6244 {
6245         u32 reg;
6246         u32 tmp;
6247         int j, k;
6248
6249         reg = 0xffffffff;
6250
6251         for (j = 0; j < len; j++) {
6252                 reg ^= buf[j];
6253
6254                 for (k = 0; k < 8; k++) {
6255                         tmp = reg & 0x01;
6256
6257                         reg >>= 1;
6258
6259                         if (tmp) {
6260                                 reg ^= 0xedb88320;
6261                         }
6262                 }
6263         }
6264
6265         return ~reg;
6266 }
6267
6268 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6269 {
6270         /* accept or reject all multicast frames */
6271         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6272         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6273         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6274         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6275 }
6276
6277 static void __tg3_set_rx_mode(struct net_device *dev)
6278 {
6279         struct tg3 *tp = netdev_priv(dev);
6280         u32 rx_mode;
6281
6282         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6283                                   RX_MODE_KEEP_VLAN_TAG);
6284
6285         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6286          * flag clear.
6287          */
6288 #if TG3_VLAN_TAG_USED
6289         if (!tp->vlgrp &&
6290             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6291                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6292 #else
6293         /* By definition, VLAN is always disabled in this
6294          * case.
6295          */
6296         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6297                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6298 #endif
6299
6300         if (dev->flags & IFF_PROMISC) {
6301                 /* Promiscuous mode. */
6302                 rx_mode |= RX_MODE_PROMISC;
6303         } else if (dev->flags & IFF_ALLMULTI) {
6304                 /* Accept all multicast. */
6305                 tg3_set_multi(tp, 1);
6306         } else if (dev->mc_count < 1) {
6307                 /* Reject all multicast. */
6308                 tg3_set_multi(tp, 0);
6309         } else {
6310                 /* Accept one or more multicast(s). */
6311                 struct dev_mc_list *mclist;
6312                 unsigned int i;
6313                 u32 mc_filter[4] = { 0, };
6314                 u32 regidx;
6315                 u32 bit;
6316                 u32 crc;
6317
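                     /* Hash each address into one of 128 filter bits: the low
                      * seven bits of the inverted CRC select a hash register
                      * (bits 6:5) and a bit within it (bits 4:0).
                      */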
6318                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6319                      i++, mclist = mclist->next) {
6320
6321                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6322                         bit = ~crc & 0x7f;
6323                         regidx = (bit & 0x60) >> 5;
6324                         bit &= 0x1f;
6325                         mc_filter[regidx] |= (1 << bit);
6326                 }
6327
6328                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6329                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6330                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6331                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6332         }
6333
6334         if (rx_mode != tp->rx_mode) {
6335                 tp->rx_mode = rx_mode;
6336                 tw32_f(MAC_RX_MODE, rx_mode);
6337                 udelay(10);
6338         }
6339 }
6340
6341 static void tg3_set_rx_mode(struct net_device *dev)
6342 {
6343         struct tg3 *tp = netdev_priv(dev);
6344
6345         spin_lock_irq(&tp->lock);
6346         spin_lock(&tp->tx_lock);
6347         __tg3_set_rx_mode(dev);
6348         spin_unlock(&tp->tx_lock);
6349         spin_unlock_irq(&tp->lock);
6350 }
6351
6352 #define TG3_REGDUMP_LEN         (32 * 1024)
6353
6354 static int tg3_get_regs_len(struct net_device *dev)
6355 {
6356         return TG3_REGDUMP_LEN;
6357 }
6358
6359 static void tg3_get_regs(struct net_device *dev,
6360                 struct ethtool_regs *regs, void *_p)
6361 {
6362         u32 *p = _p;
6363         struct tg3 *tp = netdev_priv(dev);
6364         u8 *orig_p = _p;
6365         int i;
6366
6367         regs->version = 0;
6368
6369         memset(p, 0, TG3_REGDUMP_LEN);
6370
6371         spin_lock_irq(&tp->lock);
6372         spin_lock(&tp->tx_lock);
6373
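     /* Dump helpers: each register value is stored into the ethtool
      * buffer at its own hardware offset (orig_p + reg), so holes in
      * the register map stay zero-filled from the memset above.
      */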
6374 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6375 #define GET_REG32_LOOP(base,len)                \
6376 do {    p = (u32 *)(orig_p + (base));           \
6377         for (i = 0; i < len; i += 4)            \
6378                 __GET_REG32((base) + i);        \
6379 } while (0)
6380 #define GET_REG32_1(reg)                        \
6381 do {    p = (u32 *)(orig_p + (reg));            \
6382         __GET_REG32((reg));                     \
6383 } while (0)
6384
6385         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6386         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6387         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6388         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6389         GET_REG32_1(SNDDATAC_MODE);
6390         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6391         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6392         GET_REG32_1(SNDBDC_MODE);
6393         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6394         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6395         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6396         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6397         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6398         GET_REG32_1(RCVDCC_MODE);
6399         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6400         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6401         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6402         GET_REG32_1(MBFREE_MODE);
6403         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6404         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6405         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6406         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6407         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6408         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6409         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6410         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6411         GET_REG32_LOOP(FTQ_RESET, 0x120);
6412         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6413         GET_REG32_1(DMAC_MODE);
6414         GET_REG32_LOOP(GRC_MODE, 0x4c);
6415         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6416                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6417
6418 #undef __GET_REG32
6419 #undef GET_REG32_LOOP
6420 #undef GET_REG32_1
6421
6422         spin_unlock(&tp->tx_lock);
6423         spin_unlock_irq(&tp->lock);
6424 }
6425
6426 static int tg3_get_eeprom_len(struct net_device *dev)
6427 {
6428         struct tg3 *tp = netdev_priv(dev);
6429
6430         return tp->nvram_size;
6431 }
6432
6433 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6434
6435 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6436 {
6437         struct tg3 *tp = netdev_priv(dev);
6438         int ret;
6439         u8  *pd;
6440         u32 i, offset, len, val, b_offset, b_count;
6441
6442         offset = eeprom->offset;
6443         len = eeprom->len;
6444         eeprom->len = 0;
6445
6446         eeprom->magic = TG3_EEPROM_MAGIC;
6447
6448         if (offset & 3) {
6449                 /* adjustments to start on required 4 byte boundary */
6450                 b_offset = offset & 3;
6451                 b_count = 4 - b_offset;
6452                 if (b_count > len) {
6453                         /* i.e. offset=1 len=2 */
6454                         b_count = len;
6455                 }
6456                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6457                 if (ret)
6458                         return ret;
6459                 val = cpu_to_le32(val);
6460                 memcpy(data, ((char*)&val) + b_offset, b_count);
6461                 len -= b_count;
6462                 offset += b_count;
6463                 eeprom->len += b_count;
6464         }
6465
6466         /* read bytes up to the last 4-byte boundary */
6467         pd = &data[eeprom->len];
6468         for (i = 0; i < (len - (len & 3)); i += 4) {
6469                 ret = tg3_nvram_read(tp, offset + i, &val);
6470                 if (ret) {
6471                         eeprom->len += i;
6472                         return ret;
6473                 }
6474                 val = cpu_to_le32(val);
6475                 memcpy(pd + i, &val, 4);
6476         }
6477         eeprom->len += i;
6478
6479         if (len & 3) {
6480                 /* read last bytes not ending on 4 byte boundary */
6481                 pd = &data[eeprom->len];
6482                 b_count = len & 3;
6483                 b_offset = offset + len - b_count;
6484                 ret = tg3_nvram_read(tp, b_offset, &val);
6485                 if (ret)
6486                         return ret;
6487                 val = cpu_to_le32(val);
6488                 memcpy(pd, ((char*)&val), b_count);
6489                 eeprom->len += b_count;
6490         }
6491         return 0;
6492 }
6493
6494 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6495
6496 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6497 {
6498         struct tg3 *tp = netdev_priv(dev);
6499         int ret;
6500         u32 offset, len, b_offset, odd_len, start, end;
6501         u8 *buf;
6502
6503         if (eeprom->magic != TG3_EEPROM_MAGIC)
6504                 return -EINVAL;
6505
6506         offset = eeprom->offset;
6507         len = eeprom->len;
6508
6509         if ((b_offset = (offset & 3))) {
6510                 /* adjustments to start on required 4 byte boundary */
6511                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6512                 if (ret)
6513                         return ret;
6514                 start = cpu_to_le32(start);
6515                 len += b_offset;
6516                 offset &= ~3;
6517         }
6518
6519         odd_len = 0;
6520         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6521                 /* adjustments to end on required 4 byte boundary */
6522                 odd_len = 1;
6523                 len = (len + 3) & ~3;
6524                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6525                 if (ret)
6526                         return ret;
6527                 end = cpu_to_le32(end);
6528         }
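             /* If either end of the request is unaligned, stage the
              * write through a bounce buffer: the words read above
              * patch the head and tail so the NVRAM write below always
              * sees whole, aligned dwords.
              */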
6529
6530         buf = data;
6531         if (b_offset || odd_len) {
6532                 buf = kmalloc(len, GFP_KERNEL);
6533                 if (!buf)
6534                         return -ENOMEM;
6535                 if (b_offset)
6536                         memcpy(buf, &start, 4);
6537                 if (odd_len)
6538                         memcpy(buf+len-4, &end, 4);
6539                 memcpy(buf + b_offset, data, eeprom->len);
6540         }
6541
6542         ret = tg3_nvram_write_block(tp, offset, len, buf);
6543
6544         if (buf != data)
6545                 kfree(buf);
6546
6547         return ret;
6548 }
6549
6550 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6551 {
6552         struct tg3 *tp = netdev_priv(dev);
6553   
6554         cmd->supported = (SUPPORTED_Autoneg);
6555
6556         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6557                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6558                                    SUPPORTED_1000baseT_Full);
6559
6560         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6561                 cmd->supported |= (SUPPORTED_100baseT_Half |
6562                                   SUPPORTED_100baseT_Full |
6563                                   SUPPORTED_10baseT_Half |
6564                                   SUPPORTED_10baseT_Full |
6565                                   SUPPORTED_MII);
6566         else
6567                 cmd->supported |= SUPPORTED_FIBRE;
6568   
6569         cmd->advertising = tp->link_config.advertising;
6570         if (netif_running(dev)) {
6571                 cmd->speed = tp->link_config.active_speed;
6572                 cmd->duplex = tp->link_config.active_duplex;
6573         }
6574         cmd->port = 0;
6575         cmd->phy_address = PHY_ADDR;
6576         cmd->transceiver = 0;
6577         cmd->autoneg = tp->link_config.autoneg;
6578         cmd->maxtxpkt = 0;
6579         cmd->maxrxpkt = 0;
6580         return 0;
6581 }
6582   
6583 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6584 {
6585         struct tg3 *tp = netdev_priv(dev);
6586   
6587         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6588                 /* These are the only valid advertisement bits allowed.  */
6589                 if (cmd->autoneg == AUTONEG_ENABLE &&
6590                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6591                                           ADVERTISED_1000baseT_Full |
6592                                           ADVERTISED_Autoneg |
6593                                           ADVERTISED_FIBRE)))
6594                         return -EINVAL;
6595         }
6596
6597         spin_lock_irq(&tp->lock);
6598         spin_lock(&tp->tx_lock);
6599
6600         tp->link_config.autoneg = cmd->autoneg;
6601         if (cmd->autoneg == AUTONEG_ENABLE) {
6602                 tp->link_config.advertising = cmd->advertising;
6603                 tp->link_config.speed = SPEED_INVALID;
6604                 tp->link_config.duplex = DUPLEX_INVALID;
6605         } else {
6606                 tp->link_config.advertising = 0;
6607                 tp->link_config.speed = cmd->speed;
6608                 tp->link_config.duplex = cmd->duplex;
6609         }
6610   
6611         if (netif_running(dev))
6612                 tg3_setup_phy(tp, 1);
6613
6614         spin_unlock(&tp->tx_lock);
6615         spin_unlock_irq(&tp->lock);
6616   
6617         return 0;
6618 }
6619   
6620 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6621 {
6622         struct tg3 *tp = netdev_priv(dev);
6623   
6624         strcpy(info->driver, DRV_MODULE_NAME);
6625         strcpy(info->version, DRV_MODULE_VERSION);
6626         strcpy(info->bus_info, pci_name(tp->pdev));
6627 }
6628   
6629 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6630 {
6631         struct tg3 *tp = netdev_priv(dev);
6632   
6633         wol->supported = WAKE_MAGIC;
6634         wol->wolopts = 0;
6635         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6636                 wol->wolopts = WAKE_MAGIC;
6637         memset(&wol->sopass, 0, sizeof(wol->sopass));
6638 }
6639   
6640 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6641 {
6642         struct tg3 *tp = netdev_priv(dev);
6643   
6644         if (wol->wolopts & ~WAKE_MAGIC)
6645                 return -EINVAL;
6646         if ((wol->wolopts & WAKE_MAGIC) &&
6647             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6648             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6649                 return -EINVAL;
6650   
6651         spin_lock_irq(&tp->lock);
6652         if (wol->wolopts & WAKE_MAGIC)
6653                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6654         else
6655                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6656         spin_unlock_irq(&tp->lock);
6657   
6658         return 0;
6659 }
6660   
6661 static u32 tg3_get_msglevel(struct net_device *dev)
6662 {
6663         struct tg3 *tp = netdev_priv(dev);
6664         return tp->msg_enable;
6665 }
6666   
6667 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6668 {
6669         struct tg3 *tp = netdev_priv(dev);
6670         tp->msg_enable = value;
6671 }
6672   
6673 #if TG3_TSO_SUPPORT != 0
6674 static int tg3_set_tso(struct net_device *dev, u32 value)
6675 {
6676         struct tg3 *tp = netdev_priv(dev);
6677
6678         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6679                 if (value)
6680                         return -EINVAL;
6681                 return 0;
6682         }
6683         return ethtool_op_set_tso(dev, value);
6684 }
6685 #endif
6686   
6687 static int tg3_nway_reset(struct net_device *dev)
6688 {
6689         struct tg3 *tp = netdev_priv(dev);
6690         u32 bmcr;
6691         int r;
6692   
6693         if (!netif_running(dev))
6694                 return -EAGAIN;
6695
6696         spin_lock_irq(&tp->lock);
6697         r = -EINVAL;
6698         tg3_readphy(tp, MII_BMCR, &bmcr);
6699         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6700             (bmcr & BMCR_ANENABLE)) {
6701                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6702                 r = 0;
6703         }
6704         spin_unlock_irq(&tp->lock);
6705   
6706         return r;
6707 }
6708   
6709 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6710 {
6711         struct tg3 *tp = netdev_priv(dev);
6712   
6713         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6714         ering->rx_mini_max_pending = 0;
6715         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6716
6717         ering->rx_pending = tp->rx_pending;
6718         ering->rx_mini_pending = 0;
6719         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6720         ering->tx_pending = tp->tx_pending;
6721 }
6722   
6723 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6724 {
6725         struct tg3 *tp = netdev_priv(dev);
6726   
6727         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6728             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6729             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6730                 return -EINVAL;
6731   
6732         if (netif_running(dev))
6733                 tg3_netif_stop(tp);
6734
6735         spin_lock_irq(&tp->lock);
6736         spin_lock(&tp->tx_lock);
6737   
6738         tp->rx_pending = ering->rx_pending;
6739
6740         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6741             tp->rx_pending > 63)
6742                 tp->rx_pending = 63;
6743         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6744         tp->tx_pending = ering->tx_pending;
6745
6746         if (netif_running(dev)) {
6747                 tg3_halt(tp);
6748                 tg3_init_hw(tp);
6749                 tg3_netif_start(tp);
6750         }
6751
6752         spin_unlock(&tp->tx_lock);
6753         spin_unlock_irq(&tp->lock);
6754   
6755         return 0;
6756 }
6757   
6758 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6759 {
6760         struct tg3 *tp = netdev_priv(dev);
6761   
6762         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6763         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6764         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6765 }
6766   
6767 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6768 {
6769         struct tg3 *tp = netdev_priv(dev);
6770   
6771         if (netif_running(dev))
6772                 tg3_netif_stop(tp);
6773
6774         spin_lock_irq(&tp->lock);
6775         spin_lock(&tp->tx_lock);
6776         if (epause->autoneg)
6777                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6778         else
6779                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6780         if (epause->rx_pause)
6781                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6782         else
6783                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6784         if (epause->tx_pause)
6785                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6786         else
6787                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6788
6789         if (netif_running(dev)) {
6790                 tg3_halt(tp);
6791                 tg3_init_hw(tp);
6792                 tg3_netif_start(tp);
6793         }
6794         spin_unlock(&tp->tx_lock);
6795         spin_unlock_irq(&tp->lock);
6796   
6797         return 0;
6798 }
6799   
6800 static u32 tg3_get_rx_csum(struct net_device *dev)
6801 {
6802         struct tg3 *tp = netdev_priv(dev);
6803         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6804 }
6805   
6806 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6807 {
6808         struct tg3 *tp = netdev_priv(dev);
6809   
6810         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6811                 if (data != 0)
6812                         return -EINVAL;
6813                 return 0;
6814         }
6815   
6816         spin_lock_irq(&tp->lock);
6817         if (data)
6818                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6819         else
6820                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6821         spin_unlock_irq(&tp->lock);
6822   
6823         return 0;
6824 }
6825   
6826 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6827 {
6828         struct tg3 *tp = netdev_priv(dev);
6829   
6830         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6831                 if (data != 0)
6832                         return -EINVAL;
6833                 return 0;
6834         }
6835   
6836         if (data)
6837                 dev->features |= NETIF_F_IP_CSUM;
6838         else
6839                 dev->features &= ~NETIF_F_IP_CSUM;
6840
6841         return 0;
6842 }
6843
6844 static int tg3_get_stats_count (struct net_device *dev)
6845 {
6846         return TG3_NUM_STATS;
6847 }
6848
6849 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6850 {
6851         switch (stringset) {
6852         case ETH_SS_STATS:
6853                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6854                 break;
6855         default:
6856                 WARN_ON(1);     /* we need a WARN() */
6857                 break;
6858         }
6859 }
6860
6861 static void tg3_get_ethtool_stats (struct net_device *dev,
6862                                    struct ethtool_stats *estats, u64 *tmp_stats)
6863 {
6864         struct tg3 *tp = netdev_priv(dev);
6865         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6866 }
6867
6868 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6869 {
6870         struct mii_ioctl_data *data = if_mii(ifr);
6871         struct tg3 *tp = netdev_priv(dev);
6872         int err;
6873
6874         switch(cmd) {
6875         case SIOCGMIIPHY:
6876                 data->phy_id = PHY_ADDR;
6877
6878                 /* fallthru */
6879         case SIOCGMIIREG: {
6880                 u32 mii_regval;
6881
6882                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6883                         break;                  /* We have no PHY */
6884
6885                 spin_lock_irq(&tp->lock);
6886                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6887                 spin_unlock_irq(&tp->lock);
6888
6889                 data->val_out = mii_regval;
6890
6891                 return err;
6892         }
6893
6894         case SIOCSMIIREG:
6895                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6896                         break;                  /* We have no PHY */
6897
6898                 if (!capable(CAP_NET_ADMIN))
6899                         return -EPERM;
6900
6901                 spin_lock_irq(&tp->lock);
6902                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6903                 spin_unlock_irq(&tp->lock);
6904
6905                 return err;
6906
6907         default:
6908                 /* do nothing */
6909                 break;
6910         }
6911         return -EOPNOTSUPP;
6912 }
6913
6914 #if TG3_VLAN_TAG_USED
6915 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6916 {
6917         struct tg3 *tp = netdev_priv(dev);
6918
6919         spin_lock_irq(&tp->lock);
6920         spin_lock(&tp->tx_lock);
6921
6922         tp->vlgrp = grp;
6923
6924         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6925         __tg3_set_rx_mode(dev);
6926
6927         spin_unlock(&tp->tx_lock);
6928         spin_unlock_irq(&tp->lock);
6929 }
6930
6931 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6932 {
6933         struct tg3 *tp = netdev_priv(dev);
6934
6935         spin_lock_irq(&tp->lock);
6936         spin_lock(&tp->tx_lock);
6937         if (tp->vlgrp)
6938                 tp->vlgrp->vlan_devices[vid] = NULL;
6939         spin_unlock(&tp->tx_lock);
6940         spin_unlock_irq(&tp->lock);
6941 }
6942 #endif
6943
6944 static struct ethtool_ops tg3_ethtool_ops = {
6945         .get_settings           = tg3_get_settings,
6946         .set_settings           = tg3_set_settings,
6947         .get_drvinfo            = tg3_get_drvinfo,
6948         .get_regs_len           = tg3_get_regs_len,
6949         .get_regs               = tg3_get_regs,
6950         .get_wol                = tg3_get_wol,
6951         .set_wol                = tg3_set_wol,
6952         .get_msglevel           = tg3_get_msglevel,
6953         .set_msglevel           = tg3_set_msglevel,
6954         .nway_reset             = tg3_nway_reset,
6955         .get_link               = ethtool_op_get_link,
6956         .get_eeprom_len         = tg3_get_eeprom_len,
6957         .get_eeprom             = tg3_get_eeprom,
6958         .set_eeprom             = tg3_set_eeprom,
6959         .get_ringparam          = tg3_get_ringparam,
6960         .set_ringparam          = tg3_set_ringparam,
6961         .get_pauseparam         = tg3_get_pauseparam,
6962         .set_pauseparam         = tg3_set_pauseparam,
6963         .get_rx_csum            = tg3_get_rx_csum,
6964         .set_rx_csum            = tg3_set_rx_csum,
6965         .get_tx_csum            = ethtool_op_get_tx_csum,
6966         .set_tx_csum            = tg3_set_tx_csum,
6967         .get_sg                 = ethtool_op_get_sg,
6968         .set_sg                 = ethtool_op_set_sg,
6969 #if TG3_TSO_SUPPORT != 0
6970         .get_tso                = ethtool_op_get_tso,
6971         .set_tso                = tg3_set_tso,
6972 #endif
6973         .get_strings            = tg3_get_strings,
6974         .get_stats_count        = tg3_get_stats_count,
6975         .get_ethtool_stats      = tg3_get_ethtool_stats,
6976 };
6977
6978 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6979 {
6980         u32 cursize, val;
6981
6982         tp->nvram_size = EEPROM_CHIP_SIZE;
6983
6984         if (tg3_nvram_read(tp, 0, &val) != 0)
6985                 return;
6986
6987         if (swab32(val) != TG3_EEPROM_MAGIC)
6988                 return;
6989
6990         /*
6991          * Size the chip by reading offsets at increasing powers of two.
6992          * When we encounter our validation signature, we know the addressing
6993          * has wrapped around, and thus have our chip size.
6994          */
6995         cursize = 0x800;
6996
6997         while (cursize < tp->nvram_size) {
6998                 if (tg3_nvram_read(tp, cursize, &val) != 0)
6999                         return;
7000
7001                 if (swab32(val) == TG3_EEPROM_MAGIC)
7002                         break;
7003
7004                 cursize <<= 1;
7005         }
7006
7007         tp->nvram_size = cursize;
7008 }
7009                 
7010 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7011 {
7012         u32 val;
7013
7014         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7015                 if (val != 0) {
7016                         tp->nvram_size = (val >> 16) * 1024;
7017                         return;
7018                 }
7019         }
7020         tp->nvram_size = 0x20000;
7021 }
7022
7023 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7024 {
7025         u32 nvcfg1;
7026
7027         nvcfg1 = tr32(NVRAM_CFG1);
7028         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7029                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7030         }
7031         else {
7032                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7033                 tw32(NVRAM_CFG1, nvcfg1);
7034         }
7035
7036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7037                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7038                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7039                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7040                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7041                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7042                                 break;
7043                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7044                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7045                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7046                                 break;
7047                         case FLASH_VENDOR_ATMEL_EEPROM:
7048                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7049                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7050                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7051                                 break;
7052                         case FLASH_VENDOR_ST:
7053                                 tp->nvram_jedecnum = JEDEC_ST;
7054                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7055                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7056                                 break;
7057                         case FLASH_VENDOR_SAIFUN:
7058                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7059                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7060                                 break;
7061                         case FLASH_VENDOR_SST_SMALL:
7062                         case FLASH_VENDOR_SST_LARGE:
7063                                 tp->nvram_jedecnum = JEDEC_SST;
7064                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7065                                 break;
7066                 }
7067         }
7068         else {
7069                 tp->nvram_jedecnum = JEDEC_ATMEL;
7070                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7071                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7072         }
7073 }
7074
7075 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7076 static void __devinit tg3_nvram_init(struct tg3 *tp)
7077 {
7078         int j;
7079
7080         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7081                 return;
7082
7083         tw32_f(GRC_EEPROM_ADDR,
7084              (EEPROM_ADDR_FSM_RESET |
7085               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7086                EEPROM_ADDR_CLKPERD_SHIFT)));
7087
7088         /* XXX schedule_timeout() ... */
7089         for (j = 0; j < 100; j++)
7090                 udelay(10);
7091
7092         /* Enable seeprom accesses. */
7093         tw32_f(GRC_LOCAL_CTRL,
7094              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7095         udelay(100);
7096
7097         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7098             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7099                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7100
7101                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7102                         u32 nvaccess = tr32(NVRAM_ACCESS);
7103
7104                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7105                 }
7106
7107                 tg3_get_nvram_info(tp);
7108                 tg3_get_nvram_size(tp);
7109
7110                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7111                         u32 nvaccess = tr32(NVRAM_ACCESS);
7112
7113                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7114                 }
7115
7116         } else {
7117                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7118
7119                 tg3_get_eeprom_size(tp);
7120         }
7121 }
7122
7123 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7124                                         u32 offset, u32 *val)
7125 {
7126         u32 tmp;
7127         int i;
7128
7129         if (offset > EEPROM_ADDR_ADDR_MASK ||
7130             (offset % 4) != 0)
7131                 return -EINVAL;
7132
7133         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7134                                         EEPROM_ADDR_DEVID_MASK |
7135                                         EEPROM_ADDR_READ);
7136         tw32(GRC_EEPROM_ADDR,
7137              tmp |
7138              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7139              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7140               EEPROM_ADDR_ADDR_MASK) |
7141              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7142
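             /* Poll for completion; each pass waits 100 us, so the
              * wait is bounded at roughly one second.
              */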
7143         for (i = 0; i < 10000; i++) {
7144                 tmp = tr32(GRC_EEPROM_ADDR);
7145
7146                 if (tmp & EEPROM_ADDR_COMPLETE)
7147                         break;
7148                 udelay(100);
7149         }
7150         if (!(tmp & EEPROM_ADDR_COMPLETE))
7151                 return -EBUSY;
7152
7153         *val = tr32(GRC_EEPROM_DATA);
7154         return 0;
7155 }
7156
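     /* Each poll iteration in tg3_nvram_exec_cmd() waits 10 us, so
      * this bounds the wait for NVRAM_CMD_DONE at roughly 100 ms.
      */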
7157 #define NVRAM_CMD_TIMEOUT 10000
7158
7159 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7160 {
7161         int i;
7162
7163         tw32(NVRAM_CMD, nvram_cmd);
7164         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7165                 udelay(10);
7166                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7167                         udelay(10);
7168                         break;
7169                 }
7170         }
7171         if (i == NVRAM_CMD_TIMEOUT) {
7172                 return -EBUSY;
7173         }
7174         return 0;
7175 }
7176
7177 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7178 {
7179         int ret;
7180
7181         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7182                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7183                 return -EINVAL;
7184         }
7185
7186         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7187                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7188
7189         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7190                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7191                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7192
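                     /* Atmel AT45DB buffered flash is page addressed
                      * rather than linearly addressed: split the flat
                      * offset into a page number (shifted up into the
                      * page-address field) plus the byte within the
                      * page.
                      */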
7193                 offset = ((offset / tp->nvram_pagesize) <<
7194                           ATMEL_AT45DB0X1B_PAGE_POS) +
7195                         (offset % tp->nvram_pagesize);
7196         }
7197
7198         if (offset > NVRAM_ADDR_MSK)
7199                 return -EINVAL;
7200
7201         tg3_nvram_lock(tp);
7202
7203         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7204                 u32 nvaccess = tr32(NVRAM_ACCESS);
7205
7206                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7207         }
7208
7209         tw32(NVRAM_ADDR, offset);
7210         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7211                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7212
7213         if (ret == 0)
7214                 *val = swab32(tr32(NVRAM_RDDATA));
7215
7216         tg3_nvram_unlock(tp);
7217
7218         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7219                 u32 nvaccess = tr32(NVRAM_ACCESS);
7220
7221                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7222         }
7223
7224         return ret;
7225 }
7226
7227 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7228                                     u32 offset, u32 len, u8 *buf)
7229 {
7230         int i, j, rc = 0;
7231         u32 val;
7232
7233         for (i = 0; i < len; i += 4) {
7234                 u32 addr, data;
7235
7236                 addr = offset + i;
7237
7238                 memcpy(&data, buf + i, 4);
7239
7240                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7241
7242                 val = tr32(GRC_EEPROM_ADDR);
7243                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7244
7245                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7246                         EEPROM_ADDR_READ);
7247                 tw32(GRC_EEPROM_ADDR, val |
7248                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7249                         (addr & EEPROM_ADDR_ADDR_MASK) |
7250                         EEPROM_ADDR_START |
7251                         EEPROM_ADDR_WRITE);
7252                 
7253                 for (j = 0; j < 10000; j++) {
7254                         val = tr32(GRC_EEPROM_ADDR);
7255
7256                         if (val & EEPROM_ADDR_COMPLETE)
7257                                 break;
7258                         udelay(100);
7259                 }
7260                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7261                         rc = -EBUSY;
7262                         break;
7263                 }
7264         }
7265
7266         return rc;
7267 }
7268
7269 /* offset and length are dword aligned */
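     /* Unbuffered flash must be rewritten a full page at a time:
      * read the page into a bounce buffer, merge in the new data,
      * issue a write-enable and a page erase, then another
      * write-enable and program the page back one dword at a time
      * (NVRAM_CMD_FIRST on the first word, NVRAM_CMD_LAST on the
      * final one).
      */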
7270 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7271                 u8 *buf)
7272 {
7273         int ret = 0;
7274         u32 pagesize = tp->nvram_pagesize;
7275         u32 pagemask = pagesize - 1;
7276         u32 nvram_cmd;
7277         u8 *tmp;
7278
7279         tmp = kmalloc(pagesize, GFP_KERNEL);
7280         if (tmp == NULL)
7281                 return -ENOMEM;
7282
7283         while (len) {
7284                 int j;
7285                 u32 phy_addr, page_off, size, nvaccess;
7286
7287                 phy_addr = offset & ~pagemask;
7288         
7289                 for (j = 0; j < pagesize; j += 4) {
7290                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7291                                                 (u32 *) (tmp + j))))
7292                                 break;
7293                 }
7294                 if (ret)
7295                         break;
7296
7297                 page_off = offset & pagemask;
7298                 size = pagesize;
7299                 if (len < size)
7300                         size = len;
7301
7302                 len -= size;
7303
7304                 memcpy(tmp + page_off, buf, size);
7305
7306                 offset = offset + (pagesize - page_off);
7307
7308                 nvaccess = tr32(NVRAM_ACCESS);
7309                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7310
7311                 /*
7312                  * Before we can erase the flash page, we need
7313                  * to issue a special "write enable" command.
7314                  */
7315                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7316
7317                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7318                         break;
7319
7320                 /* Erase the target page */
7321                 tw32(NVRAM_ADDR, phy_addr);
7322
7323                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7324                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7325
7326                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7327                         break;
7328
7329                 /* Issue another write enable to start the write. */
7330                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7331
7332                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7333                         break;
7334
7335                 for (j = 0; j < pagesize; j += 4) {
7336                         u32 data;
7337
7338                         data = *((u32 *) (tmp + j));
7339                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7340
7341                         tw32(NVRAM_ADDR, phy_addr + j);
7342
7343                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7344                                 NVRAM_CMD_WR;
7345
7346                         if (j == 0)
7347                                 nvram_cmd |= NVRAM_CMD_FIRST;
7348                         else if (j == (pagesize - 4))
7349                                 nvram_cmd |= NVRAM_CMD_LAST;
7350
7351                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7352                                 break;
7353                 }
7354                 if (ret)
7355                         break;
7356         }
7357
7358         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7359         tg3_nvram_exec_cmd(tp, nvram_cmd);
7360
7361         kfree(tmp);
7362
7363         return ret;
7364 }
7365
7366 /* offset and length are dword aligned */
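     /* Buffered flash and EEPROM parts need no explicit page erase.
      * Tag the first and last dword of each flash page (or every
      * word, for plain EEPROM) with NVRAM_CMD_FIRST/LAST, and issue
      * a write-enable on ST parts whenever NVRAM_CMD_FIRST is set.
      */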
7367 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7368                 u8 *buf)
7369 {
7370         int i, ret = 0;
7371
7372         for (i = 0; i < len; i += 4, offset += 4) {
7373                 u32 data, page_off, phy_addr, nvram_cmd;
7374
7375                 memcpy(&data, buf + i, 4);
7376                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7377
7378                 page_off = offset % tp->nvram_pagesize;
7379
7380                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7381                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7382
7383                         phy_addr = ((offset / tp->nvram_pagesize) <<
7384                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7385                 }
7386                 else {
7387                         phy_addr = offset;
7388                 }
7389
7390                 tw32(NVRAM_ADDR, phy_addr);
7391
7392                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7393
7394                 if ((page_off == 0) || (i == 0))
7395                         nvram_cmd |= NVRAM_CMD_FIRST;
7396                 else if (page_off == (tp->nvram_pagesize - 4))
7397                         nvram_cmd |= NVRAM_CMD_LAST;
7398
7399                 if (i == (len - 4))
7400                         nvram_cmd |= NVRAM_CMD_LAST;
7401
7402                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7403                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7404
7405                         if ((ret = tg3_nvram_exec_cmd(tp,
7406                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7407                                 NVRAM_CMD_DONE)))
7408
7409                                 break;
7410                 }
7411                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7412                         /* We always do complete word writes to eeprom. */
7413                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7414                 }
7415
7416                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7417                         break;
7418         }
7419         return ret;
7420 }
7421
7422 /* offset and length are dword aligned */
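     /* Top-level write path.  When TG3_FLAG_EEPROM_WRITE_PROT is
      * set, GPIO1 is toggled around the operation, apparently driving
      * the protect line low to allow the write and back high again
      * afterwards.
      */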
7423 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7424 {
7425         int ret;
7426
7427         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7428                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7429                 return -EINVAL;
7430         }
7431
7432         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7433                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7434                        GRC_LCLCTRL_GPIO_OE1);
7435                 udelay(40);
7436         }
7437
7438         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7439                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7440         }
7441         else {
7442                 u32 grc_mode;
7443
7444                 tg3_nvram_lock(tp);
7445
7446                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7447                         u32 nvaccess = tr32(NVRAM_ACCESS);
7448
7449                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7450
7451                         tw32(NVRAM_WRITE1, 0x406);
7452                 }
7453
7454                 grc_mode = tr32(GRC_MODE);
7455                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7456
7457                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7458                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7459
7460                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7461                                 buf);
7462                 }
7463                 else {
7464                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7465                                 buf);
7466                 }
7467
7468                 grc_mode = tr32(GRC_MODE);
7469                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7470
7471                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7472                         u32 nvaccess = tr32(NVRAM_ACCESS);
7473
7474                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7475                 }
7476                 tg3_nvram_unlock(tp);
7477         }
7478
7479         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7480                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7481                        GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
7482                 udelay(40);
7483         }
7484
7485         return ret;
7486 }
7487
7488 struct subsys_tbl_ent {
7489         u16 subsys_vendor, subsys_devid;
7490         u32 phy_id;
7491 };
7492
7493 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7494         /* Broadcom boards. */
7495         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7496         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7497         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7498         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7499         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7500         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7501         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7502         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7503         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7504         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7505         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7506
7507         /* 3com boards. */
7508         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7509         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7510         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7511         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7512         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7513
7514         /* DELL boards. */
7515         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7516         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7517         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7518         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7519
7520         /* Compaq boards. */
7521         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7522         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7523         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7524         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7525         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7526
7527         /* IBM boards. */
7528         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7529 };
7530
7531 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7532 {
7533         int i;
7534
7535         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7536                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7537                      tp->pdev->subsystem_vendor) &&
7538                     (subsys_id_to_phy_id[i].subsys_devid ==
7539                      tp->pdev->subsystem_device))
7540                         return &subsys_id_to_phy_id[i];
7541         }
7542         return NULL;
7543 }
7544
7545 /* Since this function may be called in D3-hot power state during
7546  * tg3_init_one(), only config cycles are allowed.
7547  */
7548 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7549 {
7550         u32 val;
7551
7552         /* Make sure register accesses (indirect or otherwise)
7553          * will function correctly.
7554          */
7555         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7556                                tp->misc_host_ctrl);
7557
7558         tp->phy_id = PHY_ID_INVALID;
7559         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7560
7561         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7562         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7563                 u32 nic_cfg, led_cfg;
7564                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7565                 int eeprom_phy_serdes = 0;
7566
7567                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7568                 tp->nic_sram_data_cfg = nic_cfg;
7569
7570                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7571                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7572                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7573                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7574                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7575                     (ver > 0) && (ver < 0x100))
7576                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7577
7578                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7579                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7580                         eeprom_phy_serdes = 1;
7581
7582                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7583                 if (nic_phy_id != 0) {
7584                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7585                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7586
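                             /* Repack the two 16-bit halves of the SRAM
                              * ID word into the driver's internal PHY_ID
                              * layout, matching what tg3_phy_probe()
                              * builds from MII_PHYSID1/MII_PHYSID2.
                              */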
7587                         eeprom_phy_id  = (id1 >> 16) << 10;
7588                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7589                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7590                 } else
7591                         eeprom_phy_id = 0;
7592
7593                 tp->phy_id = eeprom_phy_id;
7594                 if (eeprom_phy_serdes)
7595                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7596
7597                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7598                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7599                                     SHASTA_EXT_LED_MODE_MASK);
7600                 else
7601                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7602
7603                 switch (led_cfg) {
7604                 default:
7605                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7606                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7607                         break;
7608
7609                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7610                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7611                         break;
7612
7613                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7614                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7615                         break;
7616
7617                 case SHASTA_EXT_LED_SHARED:
7618                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7619                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7620                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7621                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7622                                                  LED_CTRL_MODE_PHY_2);
7623                         break;
7624
7625                 case SHASTA_EXT_LED_MAC:
7626                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7627                         break;
7628
7629                 case SHASTA_EXT_LED_COMBO:
7630                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7631                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7632                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7633                                                  LED_CTRL_MODE_PHY_2);
7634                         break;
7635
7636                 }
7637
7638                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7639                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7640                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7641                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7642
7643                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7644                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7645                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7646                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7647
7648                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7649                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7650                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7651                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7652                 }
7653                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7654                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7655
7656                 if (cfg2 & (1 << 17))
7657                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7658
7659                 /* SerDes signal pre-emphasis in register 0x590 is
7660                  * set by the bootcode if bit 18 is set. */
7661                 if (cfg2 & (1 << 18))
7662                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7663         }
7664 }
7665
7666 static int __devinit tg3_phy_probe(struct tg3 *tp)
7667 {
7668         u32 hw_phy_id_1, hw_phy_id_2;
7669         u32 hw_phy_id, hw_phy_id_masked;
7670         int err;
7671
7672         /* Reading the PHY ID register can conflict with ASF
7673          * firmware access to the PHY hardware.
7674          */
7675         err = 0;
7676         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7677                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7678         } else {
7679                 /* Now read the physical PHY_ID from the chip and verify
7680                  * that it is sane.  If it doesn't look good, we fall back
7681                  * to the PHY_ID found in the eeprom area, and failing
7682                  * that, to the hard-coded subsystem-ID table.
7683                  */
7684                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7685                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7686
7687                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7688                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7689                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7690
7691                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7692         }
7693
7694         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7695                 tp->phy_id = hw_phy_id;
7696                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7697                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7698         } else {
7699                 if (tp->phy_id != PHY_ID_INVALID) {
7700                         /* Do nothing, phy ID already set up in
7701                          * tg3_get_eeprom_hw_cfg().
7702                          */
7703                 } else {
7704                         struct subsys_tbl_ent *p;
7705
7706                         /* No eeprom signature?  Try the hardcoded
7707                          * subsys device table.
7708                          */
7709                         p = lookup_by_subsys(tp);
7710                         if (!p)
7711                                 return -ENODEV;
7712
7713                         tp->phy_id = p->phy_id;
7714                         if (!tp->phy_id ||
7715                             tp->phy_id == PHY_ID_BCM8002)
7716                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7717                 }
7718         }
7719
7720         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7721             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7722                 u32 bmsr, adv_reg, tg3_ctrl;
7723
7724                 tg3_readphy(tp, MII_BMSR, &bmsr);
7725                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7726                     (bmsr & BMSR_LSTATUS))
7727                         goto skip_phy_reset;
7728                     
7729                 err = tg3_phy_reset(tp);
7730                 if (err)
7731                         return err;
7732
7733                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7734                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7735                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7736                 tg3_ctrl = 0;
7737                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7738                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7739                                     MII_TG3_CTRL_ADV_1000_FULL);
7740                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7741                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7742                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7743                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7744                 }
7745
7746                 if (!tg3_copper_is_advertising_all(tp)) {
7747                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7748
7749                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7750                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7751
7752                         tg3_writephy(tp, MII_BMCR,
7753                                      BMCR_ANENABLE | BMCR_ANRESTART);
7754                 }
7755                 tg3_phy_set_wirespeed(tp);
7756
7757                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7758                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7759                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7760         }
7761
7762 skip_phy_reset:
7763         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7764                 err = tg3_init_5401phy_dsp(tp);
7765                 if (err)
7766                         return err;
7767         }
7768
7769         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7770                 err = tg3_init_5401phy_dsp(tp);
7771         }
7772
7773         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7774                 tp->link_config.advertising =
7775                         (ADVERTISED_1000baseT_Half |
7776                          ADVERTISED_1000baseT_Full |
7777                          ADVERTISED_Autoneg |
7778                          ADVERTISED_FIBRE);
7779         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7780                 tp->link_config.advertising &=
7781                         ~(ADVERTISED_1000baseT_Half |
7782                           ADVERTISED_1000baseT_Full);
7783
7784         return err;
7785 }
7786
7787 static void __devinit tg3_read_partno(struct tg3 *tp)
7788 {
7789         unsigned char vpd_data[256];
7790         int i;
7791
7792         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7793                 /* Sun decided not to put the necessary bits in the
7794                  * NVRAM of their onboard tg3 parts :(
7795                  */
7796                 strcpy(tp->board_part_number, "Sun 570X");
7797                 return;
7798         }
7799
7800         for (i = 0; i < 256; i += 4) {
7801                 u32 tmp;
7802
7803                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7804                         goto out_not_found;
7805
7806                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7807                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7808                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7809                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7810         }
7811
7812         /* Now parse and find the part number. */
7813         for (i = 0; i < 256; ) {
7814                 unsigned char val = vpd_data[i];
7815                 int block_end;
7816
7817                 if (val == 0x82 || val == 0x91) {
7818                         i = (i + 3 +
7819                              (vpd_data[i + 1] +
7820                               (vpd_data[i + 2] << 8)));
7821                         continue;
7822                 }
7823
7824                 if (val != 0x90)
7825                         goto out_not_found;
7826
7827                 block_end = (i + 3 +
7828                              (vpd_data[i + 1] +
7829                               (vpd_data[i + 2] << 8)));
7830                 i += 3;
7831                 while (i < block_end) {
7832                         if (vpd_data[i + 0] == 'P' &&
7833                             vpd_data[i + 1] == 'N') {
7834                                 int partno_len = vpd_data[i + 2];
7835
7836                                 if (partno_len > 24)
7837                                         goto out_not_found;
7838
7839                                 memcpy(tp->board_part_number,
7840                                        &vpd_data[i + 3],
7841                                        partno_len);
7842
7843                                 /* Success. */
7844                                 return;
7845                         }
                             i += 3 + vpd_data[i + 2];  /* skip keyword, length byte and data */
7846                 }
7847
7848                 /* Part number not found. */
7849                 goto out_not_found;
7850         }
7851
7852 out_not_found:
7853         strcpy(tp->board_part_number, "none");
7854 }
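
/* Illustrative sketch, not used by the driver (hence #if 0): the layout of
 * the PCI VPD large-resource blocks that tg3_read_partno() walks above.
 * Tag 0x82 is the identifier string, 0x91 is VPD-W and 0x90 is VPD-R; the
 * VPD-R payload is a list of two-character keywords such as "PN", each
 * followed by a one-byte length and that many data bytes.  The struct name
 * is made up for this sketch.
 */
#if 0
struct tg3_example_vpd_block {
        unsigned char tag;      /* 0x82, 0x90 or 0x91 */
        unsigned char len_lo;   /* payload length, low byte */
        unsigned char len_hi;   /* payload length, high byte */
        unsigned char data[0];  /* payload bytes follow */
};
#endif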
7855
7856 #ifdef CONFIG_SPARC64
7857 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7858 {
7859         struct pci_dev *pdev = tp->pdev;
7860         struct pcidev_cookie *pcp = pdev->sysdata;
7861
7862         if (pcp != NULL) {
7863                 int node = pcp->prom_node;
7864                 u32 venid;
7865                 int err;
7866
7867                 err = prom_getproperty(node, "subsystem-vendor-id",
7868                                        (char *) &venid, sizeof(venid));
7869                 if (err == 0 || err == -1)
7870                         return 0;
7871                 if (venid == PCI_VENDOR_ID_SUN)
7872                         return 1;
7873         }
7874         return 0;
7875 }
7876 #endif
7877
7878 static int __devinit tg3_get_invariants(struct tg3 *tp)
7879 {
7880         static struct pci_device_id write_reorder_chipsets[] = {
7881                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7882                              PCI_DEVICE_ID_INTEL_82801AA_8) },
7883                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7884                              PCI_DEVICE_ID_INTEL_82801AB_8) },
7885                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7886                              PCI_DEVICE_ID_INTEL_82801BA_11) },
7887                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7888                              PCI_DEVICE_ID_INTEL_82801BA_6) },
7889                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
7890                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
7891                 { },
7892         };
7893         u32 misc_ctrl_reg;
7894         u32 cacheline_sz_reg;
7895         u32 pci_state_reg, grc_misc_cfg;
7896         u32 val;
7897         u16 pci_cmd;
7898         int err;
7899
7900 #ifdef CONFIG_SPARC64
7901         if (tg3_is_sun_570X(tp))
7902                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7903 #endif
7904
7905         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7906          * reordering to the mailbox registers done by the host
7907          * controller can cause major troubles.  We read back from
7908          * every mailbox register write to force the writes to be
7909          * posted to the chip in order.
7910          */
7911         if (pci_dev_present(write_reorder_chipsets))
7912                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7913
7914         /* Force memory write invalidate off.  If we leave it on,
7915          * then on 5700_BX chips we have to enable a workaround.
7916          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7917          * to match the cacheline size.  The Broadcom driver has this
7918          * workaround but turns MWI off all the time, so it never uses
7919          * it.  This seems to suggest that the workaround is insufficient.
7920          */
7921         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7922         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7923         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7924
7925         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7926          * has the register indirect write enable bit set before
7927          * we try to access any of the MMIO registers.  It is also
7928          * critical that the PCI-X hw workaround situation is decided
7929          * before that as well.
7930          */
7931         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7932                               &misc_ctrl_reg);
7933
7934         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7935                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7936
7937         /* Initialize misc host control in PCI block. */
7938         tp->misc_host_ctrl |= (misc_ctrl_reg &
7939                                MISC_HOST_CTRL_CHIPREV);
7940         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7941                                tp->misc_host_ctrl);
7942
7943         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7944                               &cacheline_sz_reg);
7945
7946         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7947         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7948         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7949         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7950
7951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7952             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7954                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
7955
7956         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
7957             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
7958                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
7959
7960         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7961                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7962
7963         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7964                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7965
7966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7967             tp->pci_lat_timer < 64) {
7968                 tp->pci_lat_timer = 64;
7969
7970                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7971                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7972                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7973                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7974
7975                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7976                                        cacheline_sz_reg);
7977         }
7978
7979         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7980                               &pci_state_reg);
7981
7982         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7983                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7984
7985                 /* If this is a 5700 BX chipset, and we are in PCI-X
7986                  * mode, enable register write workaround.
7987                  *
7988                  * The workaround is to use indirect register accesses
7989                  * for all chip writes not to mailbox registers.
7990                  */
7991                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7992                         u32 pm_reg;
7993                         u16 pci_cmd;
7994
7995                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7996
7997                         /* The chip can have its power management PCI config
7998                          * space registers clobbered due to this bug.
7999                          * So explicitly force the chip into D0 here.
8000                          */
8001                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8002                                               &pm_reg);
8003                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8004                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8005                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8006                                                pm_reg);
8007
8008                         /* Also, force SERR#/PERR# in PCI command. */
8009                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8010                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8011                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8012                 }
8013         }
8014
8015         /* Back to back register writes can cause problems on this chip,
8016          * the workaround is to read back all reg writes except those to
8017          * mailbox regs.  See tg3_write_indirect_reg32().
8018          *
8019          * PCI Express 5750_A0 rev chips need this workaround too.
8020          */
8021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8022             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8023              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8024                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8025
8026         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8027                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8028         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8029                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8030
8031         /* Chip-specific fixup from Broadcom driver */
8032         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8033             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8034                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8035                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8036         }
8037
8038         /* Get eeprom hw config before calling tg3_set_power_state().
8039          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8040          * determined before calling tg3_set_power_state() so that
8041          * we know whether or not to switch out of Vaux power.
8042          * When the flag is set, it means that GPIO1 is used for eeprom
8043          * write protect and also implies that it is a LOM where GPIOs
8044          * are not used to switch power.
8045          */ 
8046         tg3_get_eeprom_hw_cfg(tp);
8047
8048         /* Force the chip into D0. */
8049         err = tg3_set_power_state(tp, 0);
8050         if (err) {
8051                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8052                        pci_name(tp->pdev));
8053                 return err;
8054         }
8055
8056         /* 5700 B0 chips do not support checksumming correctly due
8057          * to hardware bugs.
8058          */
8059         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8060                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8061
8062         /* Pseudo-header checksum is done by hardware logic and not
8063          * the offload processors, so make the chip do the pseudo-
8064          * header checksums on receive.  For transmit it is more
8065          * convenient to do the pseudo-header checksum in software
8066          * as Linux does that on transmit for us in all cases.
8067          */
8068         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8069         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8070
8071         /* Derive initial jumbo mode from MTU assigned in
8072          * ether_setup() via the alloc_etherdev() call
8073          */
8074         if (tp->dev->mtu > ETH_DATA_LEN)
8075                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8076
8077         /* Determine WakeOnLan speed to use. */
8078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8079             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8080             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8081             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8082                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8083         } else {
8084                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8085         }
8086
8087         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
8088         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8089             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8090              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8091              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8092                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8093
8094         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8095             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8096                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8097         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8098                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8099
8100         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8101                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8102
8103         /* Only 5701 and later support tagged irq status mode.
8104          * Also, 5788 chips cannot use tagged irq status.
8105          *
8106          * However, since we are using NAPI, avoid tagged irq status
8107          * because the interrupt condition is more difficult to
8108          * fully clear in that mode.
8109          */
8110         tp->coalesce_mode = 0;
8111
8112         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8113             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8114                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8115
8116         /* Initialize MAC MI mode, polling disabled. */
8117         tw32_f(MAC_MI_MODE, tp->mi_mode);
8118         udelay(80);
8119
8120         /* Initialize data/descriptor byte/word swapping. */
8121         val = tr32(GRC_MODE);
8122         val &= GRC_MODE_HOST_STACKUP;
8123         tw32(GRC_MODE, val | tp->grc_mode);
8124
8125         tg3_switch_clocks(tp);
8126
8127         /* Clear this out for sanity. */
8128         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8129
8130         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8131                               &pci_state_reg);
8132         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8133             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8134                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8135
8136                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8137                     chiprevid == CHIPREV_ID_5701_B0 ||
8138                     chiprevid == CHIPREV_ID_5701_B2 ||
8139                     chiprevid == CHIPREV_ID_5701_B5) {
8140                         void __iomem *sram_base;
8141
8142                         /* Write some dummy words into the SRAM status block
8143                          * area, see if it reads back correctly.  If the return
8144                          * value is bad, force enable the PCIX workaround.
8145                          */
8146                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8147
8148                         writel(0x00000000, sram_base);
8149                         writel(0x00000000, sram_base + 4);
8150                         writel(0xffffffff, sram_base + 4);
8151                         if (readl(sram_base) != 0x00000000)
8152                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8153                 }
8154         }
8155
8156         udelay(50);
8157         tg3_nvram_init(tp);
8158
8159         grc_misc_cfg = tr32(GRC_MISC_CFG);
8160         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8161
8162         /* Broadcom's driver says that CIOBE multisplit has a bug */
8163 #if 0
8164         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8165             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8166                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8167                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8168         }
8169 #endif
8170         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8171             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8172              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8173                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8174
8175         /* these are limited to 10/100 only */
8176         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8177              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8178             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8179              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8180              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8181               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8182               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8183             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8184              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8185               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8186                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8187
8188         err = tg3_phy_probe(tp);
8189         if (err) {
8190                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8191                        pci_name(tp->pdev), err);
8192                 /* ... but do not return immediately ... */
8193         }
8194
8195         tg3_read_partno(tp);
8196
8197         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8198                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8199         } else {
8200                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8201                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8202                 else
8203                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8204         }
8205
8206         /* 5700 {AX,BX} chips have a broken status block link
8207          * change bit implementation, so we must use the
8208          * status register in those cases.
8209          */
8210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8211                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8212         else
8213                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8214
8215         /* The led_ctrl is set during tg3_phy_probe; here we might
8216          * have to force the link status polling mechanism based
8217          * upon subsystem IDs.
8218          */
8219         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8220             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8221                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8222                                   TG3_FLAG_USE_LINKCHG_REG);
8223         }
8224
8225         /* For all SERDES we poll the MAC status register. */
8226         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8227                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8228         else
8229                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8230
8231         /* 5700 BX chips need to have their TX producer index mailboxes
8232          * written twice to workaround a bug.
8233          */
8234         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8235                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8236         else
8237                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
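
        /* Sketch of what the flag means for the transmit path (illustration
         * only; the real code is in this file's transmit routine):
         *
         *      update_tx_prod_mailbox(tp, entry);
         *      if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
         *              update_tx_prod_mailbox(tp, entry);   (i.e. write it twice)
         *
         * update_tx_prod_mailbox() is a made-up name for this sketch.
         */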
8238
8239         /* It seems all chips can get confused if TX buffers
8240          * straddle the 4GB address boundary in some cases.
8241          */
8242         tp->dev->hard_start_xmit = tg3_start_xmit;
8243
8244         tp->rx_offset = 2;
8245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8246             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8247                 tp->rx_offset = 0;
8248
8249         /* By default, disable wake-on-lan.  User can change this
8250          * using ETHTOOL_SWOL.
8251          */
8252         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8253
8254         return err;
8255 }
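
/* Illustrative sketch, not used by the driver (hence #if 0): the kind of
 * posted-write flush that tg3_get_invariants() arranges for by setting
 * TG3_FLAG_MBOX_WRITE_REORDER above.  The real accessors live elsewhere in
 * this file; tg3_example_mbox_write() is a made-up name.
 */
#if 0
static void tg3_example_mbox_write(struct tg3 *tp, unsigned long off, u32 val)
{
        writel(val, tp->regs + off);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(tp->regs + off);  /* read back so writes post in order */
}
#endif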
8256
8257 #ifdef CONFIG_SPARC64
8258 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8259 {
8260         struct net_device *dev = tp->dev;
8261         struct pci_dev *pdev = tp->pdev;
8262         struct pcidev_cookie *pcp = pdev->sysdata;
8263
8264         if (pcp != NULL) {
8265                 int node = pcp->prom_node;
8266
8267                 if (prom_getproplen(node, "local-mac-address") == 6) {
8268                         prom_getproperty(node, "local-mac-address",
8269                                          dev->dev_addr, 6);
8270                         return 0;
8271                 }
8272         }
8273         return -ENODEV;
8274 }
8275
8276 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8277 {
8278         struct net_device *dev = tp->dev;
8279
8280         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8281         return 0;
8282 }
8283 #endif
8284
8285 static int __devinit tg3_get_device_address(struct tg3 *tp)
8286 {
8287         struct net_device *dev = tp->dev;
8288         u32 hi, lo, mac_offset;
8289
8290 #ifdef CONFIG_SPARC64
8291         if (!tg3_get_macaddr_sparc(tp))
8292                 return 0;
8293 #endif
8294
8295         mac_offset = 0x7c;
8296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8297             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8298                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8299                         mac_offset = 0xcc;
8300                 if (tg3_nvram_lock(tp))
8301                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8302                 else
8303                         tg3_nvram_unlock(tp);
8304         }
8305
8306         /* First try to get it from MAC address mailbox. */
8307         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8308         if ((hi >> 16) == 0x484b) {
8309                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8310                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8311
8312                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8313                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8314                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8315                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8316                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8317         }
8318         /* Next, try NVRAM. */
8319         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8320                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8321                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8322                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8323                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8324                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8325                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8326                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8327                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8328         }
8329         /* Finally just fetch it out of the MAC control regs. */
8330         else {
8331                 hi = tr32(MAC_ADDR_0_HIGH);
8332                 lo = tr32(MAC_ADDR_0_LOW);
8333
8334                 dev->dev_addr[5] = lo & 0xff;
8335                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8336                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8337                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8338                 dev->dev_addr[1] = hi & 0xff;
8339                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8340         }
8341
8342         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8343 #ifdef CONFIG_SPARC64
8344                 if (!tg3_get_default_macaddr_sparc(tp))
8345                         return 0;
8346 #endif
8347                 return -EINVAL;
8348         }
8349         return 0;
8350 }
8351
8352 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8353 {
8354         struct tg3_internal_buffer_desc test_desc;
8355         u32 sram_dma_descs;
8356         int i, ret;
8357
8358         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8359
8360         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8361         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8362         tw32(RDMAC_STATUS, 0);
8363         tw32(WDMAC_STATUS, 0);
8364
8365         tw32(BUFMGR_MODE, 0);
8366         tw32(FTQ_RESET, 0);
8367
8368         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8369         test_desc.addr_lo = buf_dma & 0xffffffff;
8370         test_desc.nic_mbuf = 0x00002100;
8371         test_desc.len = size;
8372
8373         /*
8374          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8375          * the *second* time the tg3 driver was getting loaded after an
8376          * initial scan.
8377          *
8378          * Broadcom tells me:
8379          *   ...the DMA engine is connected to the GRC block and a DMA
8380          *   reset may affect the GRC block in some unpredictable way...
8381          *   The behavior of resets to individual blocks has not been tested.
8382          *
8383          * Broadcom noted the GRC reset will also reset all sub-components.
8384          */
8385         if (to_device) {
8386                 test_desc.cqid_sqid = (13 << 8) | 2;
8387
8388                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8389                 udelay(40);
8390         } else {
8391                 test_desc.cqid_sqid = (16 << 8) | 7;
8392
8393                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8394                 udelay(40);
8395         }
8396         test_desc.flags = 0x00000005;
8397
8398         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8399                 u32 val;
8400
8401                 val = *(((u32 *)&test_desc) + i);
8402                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8403                                        sram_dma_descs + (i * sizeof(u32)));
8404                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8405         }
8406         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8407
8408         if (to_device) {
8409                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8410         } else {
8411                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8412         }
8413
8414         ret = -ENODEV;
8415         for (i = 0; i < 40; i++) {
8416                 u32 val;
8417
8418                 if (to_device)
8419                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8420                 else
8421                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8422                 if ((val & 0xffff) == sram_dma_descs) {
8423                         ret = 0;
8424                         break;
8425                 }
8426
8427                 udelay(100);
8428         }
8429
8430         return ret;
8431 }
8432
8433 #define TEST_BUFFER_SIZE        0x400
8434
8435 static int __devinit tg3_test_dma(struct tg3 *tp)
8436 {
8437         dma_addr_t buf_dma;
8438         u32 *buf;
8439         int ret;
8440
8441         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8442         if (!buf) {
8443                 ret = -ENOMEM;
8444                 goto out_nofree;
8445         }
8446
8447         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8448                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8449
8450 #ifndef CONFIG_X86
8451         {
8452                 u8 byte;
8453                 int cacheline_size;
8454                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8455
8456                 if (byte == 0)
8457                         cacheline_size = 1024;
8458                 else
8459                         cacheline_size = (int) byte * 4; /* the register counts 32-bit words */
8460
8461                 switch (cacheline_size) {
8462                 case 16:
8463                 case 32:
8464                 case 64:
8465                 case 128:
8466                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8467                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8468                                 tp->dma_rwctrl |=
8469                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8470                                 break;
8471                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8472                                 tp->dma_rwctrl &=
8473                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8474                                 tp->dma_rwctrl |=
8475                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8476                                 break;
8477                         }
8478                         /* fallthrough */
8479                 case 256:
8480                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8481                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8482                                 tp->dma_rwctrl |=
8483                                         DMA_RWCTRL_WRITE_BNDRY_256;
8484                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8485                                 tp->dma_rwctrl |=
8486                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8487                 };
8488         }
8489 #endif
8490
8491         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8492                 /* DMA read watermark not used on PCIE */
8493                 tp->dma_rwctrl |= 0x00180000;
8494         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8496                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8497                         tp->dma_rwctrl |= 0x003f0000;
8498                 else
8499                         tp->dma_rwctrl |= 0x003f000f;
8500         } else {
8501                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8502                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8503                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8504
8505                         if (ccval == 0x6 || ccval == 0x7)
8506                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8507
8508                         /* Set bit 23 to re-enable the PCIX hw bug fix */
8509                         tp->dma_rwctrl |= 0x009f0000;
8510                 } else {
8511                         tp->dma_rwctrl |= 0x001b000f;
8512                 }
8513         }
8514
8515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8516             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8517                 tp->dma_rwctrl &= 0xfffffff0;
8518
8519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8521                 /* Remove this if it causes problems for some boards. */
8522                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8523
8524                 /* On 5700/5701 chips, we need to set this bit.
8525                  * Otherwise the chip will issue cacheline transactions
8526                  * to streamable DMA memory without all of the byte
8527                  * enables turned on.  This is an error on several
8528                  * RISC PCI controllers, in particular sparc64.
8529                  *
8530                  * On 5703/5704 chips, this bit has been reassigned
8531                  * a different meaning.  In particular, it is used
8532                  * on those chips to enable a PCI-X workaround.
8533                  */
8534                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8535         }
8536
8537         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8538
8539 #if 0
8540         /* Unneeded, already done by tg3_get_invariants.  */
8541         tg3_switch_clocks(tp);
8542 #endif
8543
8544         ret = 0;
8545         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8546             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8547                 goto out;
8548
8549         while (1) {
8550                 u32 *p = buf, i;
8551
8552                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8553                         p[i] = i;
8554
8555                 /* Send the buffer to the chip. */
8556                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8557                 if (ret) {
8558                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8559                         break;
8560                 }
8561
8562 #if 0
8563                 /* validate data reached card RAM correctly. */
8564                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8565                         u32 val;
8566                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8567                         if (le32_to_cpu(val) != p[i]) {
8568                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8569                                 /* ret = -ENODEV here? */
8570                         }
8571                         p[i] = 0;
8572                 }
8573 #endif
8574                 /* Now read it back. */
8575                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8576                 if (ret) {
8577                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8578
8579                         break;
8580                 }
8581
8582                 /* Verify it. */
8583                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8584                         if (p[i] == i)
8585                                 continue;
8586
8587                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8588                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8589                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8590                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8591                                 break;
8592                         } else {
8593                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8594                                 ret = -ENODEV;
8595                                 goto out;
8596                         }
8597                 }
8598
8599                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8600                         /* Success. */
8601                         ret = 0;
8602                         break;
8603                 }
8604         }
8605
8606 out:
8607         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8608 out_nofree:
8609         return ret;
8610 }
8611
8612 static void __devinit tg3_init_link_config(struct tg3 *tp)
8613 {
8614         tp->link_config.advertising =
8615                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8616                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8617                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8618                  ADVERTISED_Autoneg | ADVERTISED_MII);
8619         tp->link_config.speed = SPEED_INVALID;
8620         tp->link_config.duplex = DUPLEX_INVALID;
8621         tp->link_config.autoneg = AUTONEG_ENABLE;
8622         netif_carrier_off(tp->dev);
8623         tp->link_config.active_speed = SPEED_INVALID;
8624         tp->link_config.active_duplex = DUPLEX_INVALID;
8625         tp->link_config.phy_is_low_power = 0;
8626         tp->link_config.orig_speed = SPEED_INVALID;
8627         tp->link_config.orig_duplex = DUPLEX_INVALID;
8628         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8629 }
8630
8631 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8632 {
8633         tp->bufmgr_config.mbuf_read_dma_low_water =
8634                 DEFAULT_MB_RDMA_LOW_WATER;
8635         tp->bufmgr_config.mbuf_mac_rx_low_water =
8636                 DEFAULT_MB_MACRX_LOW_WATER;
8637         tp->bufmgr_config.mbuf_high_water =
8638                 DEFAULT_MB_HIGH_WATER;
8639
8640         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8641                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8642         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8643                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8644         tp->bufmgr_config.mbuf_high_water_jumbo =
8645                 DEFAULT_MB_HIGH_WATER_JUMBO;
8646
8647         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8648         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8649 }
8650
8651 static char * __devinit tg3_phy_string(struct tg3 *tp)
8652 {
8653         switch (tp->phy_id & PHY_ID_MASK) {
8654         case PHY_ID_BCM5400:    return "5400";
8655         case PHY_ID_BCM5401:    return "5401";
8656         case PHY_ID_BCM5411:    return "5411";
8657         case PHY_ID_BCM5701:    return "5701";
8658         case PHY_ID_BCM5703:    return "5703";
8659         case PHY_ID_BCM5704:    return "5704";
8660         case PHY_ID_BCM5705:    return "5705";
8661         case PHY_ID_BCM5750:    return "5750";
8662         case PHY_ID_BCM5752:    return "5752";
8663         case PHY_ID_BCM8002:    return "8002/serdes";
8664         case 0:                 return "serdes";
8665         default:                return "unknown";
8666         };
8667 }
8668
8669 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8670 {
8671         struct pci_dev *peer;
8672         unsigned int func, devnr = tp->pdev->devfn & ~7;
8673
8674         for (func = 0; func < 8; func++) {
8675                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8676                 if (peer && peer != tp->pdev)
8677                         break;
8678                 pci_dev_put(peer);
8679         }
8680         if (!peer || peer == tp->pdev)
8681                 BUG();
8682
8683         /*
8684          * We don't need to keep the refcount elevated; there's no way
8685          * to remove one half of this device without removing the other
8686          */
8687         pci_dev_put(peer);
8688
8689         return peer;
8690 }
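
/* Illustrative sketch, not used by the driver (hence #if 0): the same slot
 * walk written with the PCI_SLOT()/PCI_DEVFN() helpers, since devfn packs
 * the slot in bits 7:3 and the function in bits 2:0 ("devfn & ~7" above is
 * the slot's function-0 devfn).  tg3_example_find_peer() is a made-up name.
 */
#if 0
static struct pci_dev *tg3_example_find_peer(struct tg3 *tp)
{
        unsigned int func, slot = PCI_SLOT(tp->pdev->devfn);
        struct pci_dev *peer;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, PCI_DEVFN(slot, func));
                if (peer && peer != tp->pdev)
                        return peer;
                pci_dev_put(peer);
        }
        return NULL;
}
#endif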
8691
8692 static int __devinit tg3_init_one(struct pci_dev *pdev,
8693                                   const struct pci_device_id *ent)
8694 {
8695         static int tg3_version_printed = 0;
8696         unsigned long tg3reg_base, tg3reg_len;
8697         struct net_device *dev;
8698         struct tg3 *tp;
8699         int i, err, pci_using_dac, pm_cap;
8700
8701         if (tg3_version_printed++ == 0)
8702                 printk(KERN_INFO "%s", version);
8703
8704         err = pci_enable_device(pdev);
8705         if (err) {
8706                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8707                        "aborting.\n");
8708                 return err;
8709         }
8710
8711         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8712                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8713                        "base address, aborting.\n");
8714                 err = -ENODEV;
8715                 goto err_out_disable_pdev;
8716         }
8717
8718         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8719         if (err) {
8720                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8721                        "aborting.\n");
8722                 goto err_out_disable_pdev;
8723         }
8724
8725         pci_set_master(pdev);
8726
8727         /* Find power-management capability. */
8728         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8729         if (pm_cap == 0) {
8730                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8731                        "aborting.\n");
8732                 err = -EIO;
8733                 goto err_out_free_res;
8734         }
8735
8736         /* Configure DMA attributes. */
8737         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8738         if (!err) {
8739                 pci_using_dac = 1;
8740                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8741                 if (err < 0) {
8742                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8743                                "for consistent allocations\n");
8744                         goto err_out_free_res;
8745                 }
8746         } else {
8747                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8748                 if (err) {
8749                         printk(KERN_ERR PFX "No usable DMA configuration, "
8750                                "aborting.\n");
8751                         goto err_out_free_res;
8752                 }
8753                 pci_using_dac = 0;
8754         }
8755
8756         tg3reg_base = pci_resource_start(pdev, 0);
8757         tg3reg_len = pci_resource_len(pdev, 0);
8758
8759         dev = alloc_etherdev(sizeof(*tp));
8760         if (!dev) {
8761                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8762                 err = -ENOMEM;
8763                 goto err_out_free_res;
8764         }
8765
8766         SET_MODULE_OWNER(dev);
8767         SET_NETDEV_DEV(dev, &pdev->dev);
8768
8769         if (pci_using_dac)
8770                 dev->features |= NETIF_F_HIGHDMA;
8771         dev->features |= NETIF_F_LLTX;
8772 #if TG3_VLAN_TAG_USED
8773         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8774         dev->vlan_rx_register = tg3_vlan_rx_register;
8775         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8776 #endif
8777
8778         tp = netdev_priv(dev);
8779         tp->pdev = pdev;
8780         tp->dev = dev;
8781         tp->pm_cap = pm_cap;
8782         tp->mac_mode = TG3_DEF_MAC_MODE;
8783         tp->rx_mode = TG3_DEF_RX_MODE;
8784         tp->tx_mode = TG3_DEF_TX_MODE;
8785         tp->mi_mode = MAC_MI_MODE_BASE;
8786         if (tg3_debug > 0)
8787                 tp->msg_enable = tg3_debug;
8788         else
8789                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8790
8791         /* The word/byte swap controls here control register access byte
8792          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8793          * setting below.
8794          */
8795         tp->misc_host_ctrl =
8796                 MISC_HOST_CTRL_MASK_PCI_INT |
8797                 MISC_HOST_CTRL_WORD_SWAP |
8798                 MISC_HOST_CTRL_INDIR_ACCESS |
8799                 MISC_HOST_CTRL_PCISTATE_RW;
8800
8801         /* The NONFRM (non-frame) byte/word swap controls take effect
8802          * on descriptor entries, anything which isn't packet data.
8803          *
8804          * The StrongARM chips on the board (one for tx, one for rx)
8805          * are running in big-endian mode.
8806          */
8807         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8808                         GRC_MODE_WSWAP_NONFRM_DATA);
8809 #ifdef __BIG_ENDIAN
8810         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8811 #endif
8812         spin_lock_init(&tp->lock);
8813         spin_lock_init(&tp->tx_lock);
8814         spin_lock_init(&tp->indirect_lock);
8815         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8816
8817         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8818         if (!tp->regs) {
8819                 printk(KERN_ERR PFX "Cannot map device registers, "
8820                        "aborting.\n");
8821                 err = -ENOMEM;
8822                 goto err_out_free_dev;
8823         }
8824
8825         tg3_init_link_config(tp);
8826
8827         tg3_init_bufmgr_config(tp);
8828
8829         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8830         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8831         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8832
8833         dev->open = tg3_open;
8834         dev->stop = tg3_close;
8835         dev->get_stats = tg3_get_stats;
8836         dev->set_multicast_list = tg3_set_rx_mode;
8837         dev->set_mac_address = tg3_set_mac_addr;
8838         dev->do_ioctl = tg3_ioctl;
8839         dev->tx_timeout = tg3_tx_timeout;
8840         dev->poll = tg3_poll;
8841         dev->ethtool_ops = &tg3_ethtool_ops;
8842         dev->weight = 64;
8843         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8844         dev->change_mtu = tg3_change_mtu;
8845         dev->irq = pdev->irq;
8846 #ifdef CONFIG_NET_POLL_CONTROLLER
8847         dev->poll_controller = tg3_poll_controller;
8848 #endif
8849
8850         err = tg3_get_invariants(tp);
8851         if (err) {
8852                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8853                        "aborting.\n");
8854                 goto err_out_iounmap;
8855         }
8856
8857         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8858                 tp->bufmgr_config.mbuf_read_dma_low_water =
8859                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8860                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8861                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8862                 tp->bufmgr_config.mbuf_high_water =
8863                         DEFAULT_MB_HIGH_WATER_5705;
8864         }
8865
8866 #if TG3_TSO_SUPPORT != 0
8867         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8868                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8869         }
8870         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8871             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8872             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8873             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8874                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8875         } else {
8876                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8877         }
8878
8879         /* TSO is off by default, user can enable using ethtool.  */
8880 #if 0
8881         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8882                 dev->features |= NETIF_F_TSO;
8883 #endif
8884
8885 #endif
8886
8887         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8888             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8889             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8890                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8891                 tp->rx_pending = 63;
8892         }
8893
8894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8895                 tp->pdev_peer = tg3_find_5704_peer(tp);
8896
8897         err = tg3_get_device_address(tp);
8898         if (err) {
8899                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8900                        "aborting.\n");
8901                 goto err_out_iounmap;
8902         }
8903
8904         /*
8905          * Reset the chip in case the UNDI or EFI driver did not shut it
8906          * down.  Otherwise the DMA self test will enable WDMAC and we'll
8907          * see (spurious) pending DMA on the PCI bus at that point.
8908          */
8909         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8910             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8911                 pci_save_state(tp->pdev);
8912                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8913                 tg3_halt(tp);
8914         }
8915
8916         err = tg3_test_dma(tp);
8917         if (err) {
8918                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8919                 goto err_out_iounmap;
8920         }
8921
8922         /* Tigon3 can do ipv4 only... and some chips have buggy
8923          * checksumming.
8924          */
8925         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8926                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8927                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8928         } else
8929                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8930
8931         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8932                 dev->features &= ~NETIF_F_HIGHDMA;
8933
8934         /* flow control autonegotiation is default behavior */
8935         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8936
8937         err = register_netdev(dev);
8938         if (err) {
8939                 printk(KERN_ERR PFX "Cannot register net device, "
8940                        "aborting.\n");
8941                 goto err_out_iounmap;
8942         }
8943
8944         pci_set_drvdata(pdev, dev);
8945
8946         /* Now that we have fully set up the chip, save away a snapshot
8947          * of the PCI config space.  We need to restore this after
8948          * GRC_MISC_CFG core clock resets and some resume events.
8949          */
8950         pci_save_state(tp->pdev);
8951
8952         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8953                dev->name,
8954                tp->board_part_number,
8955                tp->pci_chip_rev_id,
8956                tg3_phy_string(tp),
8957                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8958                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8959                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8960                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8961                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8962                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8963
8964         for (i = 0; i < 6; i++)
8965                 printk("%2.2x%c", dev->dev_addr[i],
8966                        i == 5 ? '\n' : ':');
8967
8968         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8969                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8970                "TSOcap[%d] \n",
8971                dev->name,
8972                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8973                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8974                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8975                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8976                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8977                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8978                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8979
8980         return 0;
8981
8982 err_out_iounmap:
8983         iounmap(tp->regs);
8984
8985 err_out_free_dev:
8986         free_netdev(dev);
8987
8988 err_out_free_res:
8989         pci_release_regions(pdev);
8990
8991 err_out_disable_pdev:
8992         pci_disable_device(pdev);
8993         pci_set_drvdata(pdev, NULL);
8994         return err;
8995 }
8996
8997 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8998 {
8999         struct net_device *dev = pci_get_drvdata(pdev);
9000
9001         if (dev) {
9002                 struct tg3 *tp = netdev_priv(dev);
9003
9004                 unregister_netdev(dev);
9005                 iounmap(tp->regs);
9006                 free_netdev(dev);
9007                 pci_release_regions(pdev);
9008                 pci_disable_device(pdev);
9009                 pci_set_drvdata(pdev, NULL);
9010         }
9011 }
9012
9013 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9014 {
9015         struct net_device *dev = pci_get_drvdata(pdev);
9016         struct tg3 *tp = netdev_priv(dev);
9017         int err;
9018
9019         if (!netif_running(dev))
9020                 return 0;
9021
9022         tg3_netif_stop(tp);
9023
9024         del_timer_sync(&tp->timer);
9025
9026         spin_lock_irq(&tp->lock);
9027         spin_lock(&tp->tx_lock);
9028         tg3_disable_ints(tp);
9029         spin_unlock(&tp->tx_lock);
9030         spin_unlock_irq(&tp->lock);
9031
9032         netif_device_detach(dev);
9033
9034         spin_lock_irq(&tp->lock);
9035         spin_lock(&tp->tx_lock);
9036         tg3_halt(tp);
9037         spin_unlock(&tp->tx_lock);
9038         spin_unlock_irq(&tp->lock);
9039
9040         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9041         if (err) {
9042                 spin_lock_irq(&tp->lock);
9043                 spin_lock(&tp->tx_lock);
9044
9045                 tg3_init_hw(tp);
9046
9047                 tp->timer.expires = jiffies + tp->timer_offset;
9048                 add_timer(&tp->timer);
9049
9050                 netif_device_attach(dev);
9051                 tg3_netif_start(tp);
9052
9053                 spin_unlock(&tp->tx_lock);
9054                 spin_unlock_irq(&tp->lock);
9055         }
9056
9057         return err;
9058 }
9059
9060 static int tg3_resume(struct pci_dev *pdev)
9061 {
9062         struct net_device *dev = pci_get_drvdata(pdev);
9063         struct tg3 *tp = netdev_priv(dev);
9064         int err;
9065
9066         if (!netif_running(dev))
9067                 return 0;
9068
9069         pci_restore_state(tp->pdev);
9070
9071         err = tg3_set_power_state(tp, 0);
9072         if (err)
9073                 return err;
9074
9075         netif_device_attach(dev);
9076
9077         spin_lock_irq(&tp->lock);
9078         spin_lock(&tp->tx_lock);
9079
9080         tg3_init_hw(tp);
9081
9082         tp->timer.expires = jiffies + tp->timer_offset;
9083         add_timer(&tp->timer);
9084
9085         tg3_enable_ints(tp);
9086
9087         tg3_netif_start(tp);
9088
9089         spin_unlock(&tp->tx_lock);
9090         spin_unlock_irq(&tp->lock);
9091
9092         return 0;
9093 }
9094
9095 static struct pci_driver tg3_driver = {
9096         .name           = DRV_MODULE_NAME,
9097         .id_table       = tg3_pci_tbl,
9098         .probe          = tg3_init_one,
9099         .remove         = __devexit_p(tg3_remove_one),
9100         .suspend        = tg3_suspend,
9101         .resume         = tg3_resume
9102 };
9103
9104 static int __init tg3_init(void)
9105 {
9106         return pci_module_init(&tg3_driver);
9107 }
9108
9109 static void __exit tg3_cleanup(void)
9110 {
9111         pci_unregister_driver(&tg3_driver);
9112 }
9113
9114 module_init(tg3_init);
9115 module_exit(tg3_cleanup);