1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.4"
60 #define DRV_MODULE_RELDATE "February 18, 2008"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bnapi->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
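/* Indirect register access: the target offset is written to the PCICFG
 * register window address and the data is then read or written through
 * the window, serialized by indirect_lock.
 */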
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
269 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
271 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
277 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
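/* Write to on-chip context memory.  On the 5709 the write goes through
 * CTX_CTX_DATA/CTX_CTX_CTRL and the WRITE_REQ bit is polled until it
 * clears; older chips write directly through CTX_DATA_ADR/CTX_DATA.
 */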
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
284 spin_lock_bh(&bp->indirect_lock);
285 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
288 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291 for (i = 0; i < 5; i++) {
293 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
299 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300 REG_WR(bp, BNX2_CTX_DATA, val);
302 spin_unlock_bh(&bp->indirect_lock);
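/* MII/MDIO access.  If the PHY is in auto-polling mode, polling is
 * temporarily disabled around the transaction; the command is written
 * to EMAC_MDIO_COMM and the START_BUSY bit is polled until the PHY
 * completes the cycle.
 */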
306 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
311 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
312 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
315 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321 val1 = (bp->phy_addr << 21) | (reg << 16) |
322 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
323 BNX2_EMAC_MDIO_COMM_START_BUSY;
324 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
326 for (i = 0; i < 50; i++) {
329 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
330 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
333 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
340 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
349 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
350 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
353 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
354 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
363 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
368 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
369 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
372 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
373 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
378 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
379 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
380 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
381 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
383 for (i = 0; i < 50; i++) {
386 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
387 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
393 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
398 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
399 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
402 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
403 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
412 bnx2_disable_int(struct bnx2 *bp)
415 struct bnx2_napi *bnapi;
417 for (i = 0; i < bp->irq_nvecs; i++) {
418 bnapi = &bp->bnx2_napi[i];
419 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
422 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
426 bnx2_enable_int(struct bnx2 *bp)
429 struct bnx2_napi *bnapi;
431 for (i = 0; i < bp->irq_nvecs; i++) {
432 bnapi = &bp->bnx2_napi[i];
434 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437 bnapi->last_status_idx);
439 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441 bnapi->last_status_idx);
443 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
447 bnx2_disable_int_sync(struct bnx2 *bp)
451 atomic_inc(&bp->intr_sem);
452 bnx2_disable_int(bp);
453 for (i = 0; i < bp->irq_nvecs; i++)
454 synchronize_irq(bp->irq_tbl[i].vector);
458 bnx2_napi_disable(struct bnx2 *bp)
462 for (i = 0; i < bp->irq_nvecs; i++)
463 napi_disable(&bp->bnx2_napi[i].napi);
467 bnx2_napi_enable(struct bnx2 *bp)
471 for (i = 0; i < bp->irq_nvecs; i++)
472 napi_enable(&bp->bnx2_napi[i].napi);
476 bnx2_netif_stop(struct bnx2 *bp)
478 bnx2_disable_int_sync(bp);
479 if (netif_running(bp->dev)) {
480 bnx2_napi_disable(bp);
481 netif_tx_disable(bp->dev);
482 bp->dev->trans_start = jiffies; /* prevent tx timeout */
487 bnx2_netif_start(struct bnx2 *bp)
489 if (atomic_dec_and_test(&bp->intr_sem)) {
490 if (netif_running(bp->dev)) {
491 netif_wake_queue(bp->dev);
492 bnx2_napi_enable(bp);
499 bnx2_free_mem(struct bnx2 *bp)
503 for (i = 0; i < bp->ctx_pages; i++) {
504 if (bp->ctx_blk[i]) {
505 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
507 bp->ctx_blk_mapping[i]);
508 bp->ctx_blk[i] = NULL;
511 if (bp->status_blk) {
512 pci_free_consistent(bp->pdev, bp->status_stats_size,
513 bp->status_blk, bp->status_blk_mapping);
514 bp->status_blk = NULL;
515 bp->stats_blk = NULL;
517 if (bp->tx_desc_ring) {
518 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
519 bp->tx_desc_ring, bp->tx_desc_mapping);
520 bp->tx_desc_ring = NULL;
522 kfree(bp->tx_buf_ring);
523 bp->tx_buf_ring = NULL;
524 for (i = 0; i < bp->rx_max_ring; i++) {
525 if (bp->rx_desc_ring[i])
526 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
528 bp->rx_desc_mapping[i]);
529 bp->rx_desc_ring[i] = NULL;
531 vfree(bp->rx_buf_ring);
532 bp->rx_buf_ring = NULL;
533 for (i = 0; i < bp->rx_max_pg_ring; i++) {
534 if (bp->rx_pg_desc_ring[i])
535 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
536 bp->rx_pg_desc_ring[i],
537 bp->rx_pg_desc_mapping[i]);
538 bp->rx_pg_desc_ring[i] = NULL;
541 vfree(bp->rx_pg_ring);
542 bp->rx_pg_ring = NULL;
546 bnx2_alloc_mem(struct bnx2 *bp)
548 int i, status_blk_size;
550 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
551 if (bp->tx_buf_ring == NULL)
554 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
555 &bp->tx_desc_mapping);
556 if (bp->tx_desc_ring == NULL)
559 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
560 if (bp->rx_buf_ring == NULL)
563 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
565 for (i = 0; i < bp->rx_max_ring; i++) {
566 bp->rx_desc_ring[i] =
567 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
568 &bp->rx_desc_mapping[i]);
569 if (bp->rx_desc_ring[i] == NULL)
574 if (bp->rx_pg_ring_size) {
575 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
577 if (bp->rx_pg_ring == NULL)
580 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
584 for (i = 0; i < bp->rx_max_pg_ring; i++) {
585 bp->rx_pg_desc_ring[i] =
586 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
587 &bp->rx_pg_desc_mapping[i]);
588 if (bp->rx_pg_desc_ring[i] == NULL)
593 /* Combine status and statistics blocks into one allocation. */
594 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
595 if (bp->flags & BNX2_FLAG_MSIX_CAP)
596 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
597 BNX2_SBLK_MSIX_ALIGN_SIZE);
598 bp->status_stats_size = status_blk_size +
599 sizeof(struct statistics_block);
601 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
602 &bp->status_blk_mapping);
603 if (bp->status_blk == NULL)
606 memset(bp->status_blk, 0, bp->status_stats_size);
608 bp->bnx2_napi[0].status_blk = bp->status_blk;
609 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
610 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
611 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
613 bnapi->status_blk_msix = (void *)
614 ((unsigned long) bp->status_blk +
615 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
616 bnapi->int_num = i << 24;
620 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
623 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
625 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
626 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
627 if (bp->ctx_pages == 0)
629 for (i = 0; i < bp->ctx_pages; i++) {
630 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
632 &bp->ctx_blk_mapping[i]);
633 if (bp->ctx_blk[i] == NULL)
645 bnx2_report_fw_link(struct bnx2 *bp)
647 u32 fw_link_status = 0;
649 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
655 switch (bp->line_speed) {
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_10HALF;
660 fw_link_status = BNX2_LINK_STATUS_10FULL;
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_100HALF;
666 fw_link_status = BNX2_LINK_STATUS_100FULL;
669 if (bp->duplex == DUPLEX_HALF)
670 fw_link_status = BNX2_LINK_STATUS_1000HALF;
672 fw_link_status = BNX2_LINK_STATUS_1000FULL;
675 if (bp->duplex == DUPLEX_HALF)
676 fw_link_status = BNX2_LINK_STATUS_2500HALF;
678 fw_link_status = BNX2_LINK_STATUS_2500FULL;
682 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
685 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
690 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
691 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
692 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
694 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
698 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
700 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
704 bnx2_xceiver_str(struct bnx2 *bp)
706 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
707 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
712 bnx2_report_link(struct bnx2 *bp)
715 netif_carrier_on(bp->dev);
716 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
717 bnx2_xceiver_str(bp));
719 printk("%d Mbps ", bp->line_speed);
721 if (bp->duplex == DUPLEX_FULL)
722 printk("full duplex");
724 printk("half duplex");
727 if (bp->flow_ctrl & FLOW_CTRL_RX) {
728 printk(", receive ");
729 if (bp->flow_ctrl & FLOW_CTRL_TX)
730 printk("& transmit ");
733 printk(", transmit ");
735 printk("flow control ON");
740 netif_carrier_off(bp->dev);
741 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
742 bnx2_xceiver_str(bp));
745 bnx2_report_fw_link(bp);
749 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
751 u32 local_adv, remote_adv;
754 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
755 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
757 if (bp->duplex == DUPLEX_FULL) {
758 bp->flow_ctrl = bp->req_flow_ctrl;
763 if (bp->duplex != DUPLEX_FULL) {
767 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
768 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
771 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
772 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
773 bp->flow_ctrl |= FLOW_CTRL_TX;
774 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
775 bp->flow_ctrl |= FLOW_CTRL_RX;
779 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
780 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
782 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
783 u32 new_local_adv = 0;
784 u32 new_remote_adv = 0;
786 if (local_adv & ADVERTISE_1000XPAUSE)
787 new_local_adv |= ADVERTISE_PAUSE_CAP;
788 if (local_adv & ADVERTISE_1000XPSE_ASYM)
789 new_local_adv |= ADVERTISE_PAUSE_ASYM;
790 if (remote_adv & ADVERTISE_1000XPAUSE)
791 new_remote_adv |= ADVERTISE_PAUSE_CAP;
792 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
793 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
795 local_adv = new_local_adv;
796 remote_adv = new_remote_adv;
799 /* See Table 28B-3 of 802.3ab-1999 spec. */
800 if (local_adv & ADVERTISE_PAUSE_CAP) {
801 if(local_adv & ADVERTISE_PAUSE_ASYM) {
802 if (remote_adv & ADVERTISE_PAUSE_CAP) {
803 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
805 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
806 bp->flow_ctrl = FLOW_CTRL_RX;
810 if (remote_adv & ADVERTISE_PAUSE_CAP) {
811 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
815 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
816 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
817 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
819 bp->flow_ctrl = FLOW_CTRL_TX;
825 bnx2_5709s_linkup(struct bnx2 *bp)
831 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
832 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
833 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
835 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
836 bp->line_speed = bp->req_line_speed;
837 bp->duplex = bp->req_duplex;
840 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
842 case MII_BNX2_GP_TOP_AN_SPEED_10:
843 bp->line_speed = SPEED_10;
845 case MII_BNX2_GP_TOP_AN_SPEED_100:
846 bp->line_speed = SPEED_100;
848 case MII_BNX2_GP_TOP_AN_SPEED_1G:
849 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
850 bp->line_speed = SPEED_1000;
852 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
853 bp->line_speed = SPEED_2500;
856 if (val & MII_BNX2_GP_TOP_AN_FD)
857 bp->duplex = DUPLEX_FULL;
859 bp->duplex = DUPLEX_HALF;
864 bnx2_5708s_linkup(struct bnx2 *bp)
869 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
870 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
871 case BCM5708S_1000X_STAT1_SPEED_10:
872 bp->line_speed = SPEED_10;
874 case BCM5708S_1000X_STAT1_SPEED_100:
875 bp->line_speed = SPEED_100;
877 case BCM5708S_1000X_STAT1_SPEED_1G:
878 bp->line_speed = SPEED_1000;
880 case BCM5708S_1000X_STAT1_SPEED_2G5:
881 bp->line_speed = SPEED_2500;
884 if (val & BCM5708S_1000X_STAT1_FD)
885 bp->duplex = DUPLEX_FULL;
887 bp->duplex = DUPLEX_HALF;
893 bnx2_5706s_linkup(struct bnx2 *bp)
895 u32 bmcr, local_adv, remote_adv, common;
898 bp->line_speed = SPEED_1000;
900 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
908 if (!(bmcr & BMCR_ANENABLE)) {
912 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
913 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
915 common = local_adv & remote_adv;
916 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
918 if (common & ADVERTISE_1000XFULL) {
919 bp->duplex = DUPLEX_FULL;
922 bp->duplex = DUPLEX_HALF;
930 bnx2_copper_linkup(struct bnx2 *bp)
934 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
935 if (bmcr & BMCR_ANENABLE) {
936 u32 local_adv, remote_adv, common;
938 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
939 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
941 common = local_adv & (remote_adv >> 2);
942 if (common & ADVERTISE_1000FULL) {
943 bp->line_speed = SPEED_1000;
944 bp->duplex = DUPLEX_FULL;
946 else if (common & ADVERTISE_1000HALF) {
947 bp->line_speed = SPEED_1000;
948 bp->duplex = DUPLEX_HALF;
951 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
952 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
954 common = local_adv & remote_adv;
955 if (common & ADVERTISE_100FULL) {
956 bp->line_speed = SPEED_100;
957 bp->duplex = DUPLEX_FULL;
959 else if (common & ADVERTISE_100HALF) {
960 bp->line_speed = SPEED_100;
961 bp->duplex = DUPLEX_HALF;
963 else if (common & ADVERTISE_10FULL) {
964 bp->line_speed = SPEED_10;
965 bp->duplex = DUPLEX_FULL;
967 else if (common & ADVERTISE_10HALF) {
968 bp->line_speed = SPEED_10;
969 bp->duplex = DUPLEX_HALF;
978 if (bmcr & BMCR_SPEED100) {
979 bp->line_speed = SPEED_100;
982 bp->line_speed = SPEED_10;
984 if (bmcr & BMCR_FULLDPLX) {
985 bp->duplex = DUPLEX_FULL;
988 bp->duplex = DUPLEX_HALF;
996 bnx2_init_rx_context0(struct bnx2 *bp)
998 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
1000 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1001 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1004 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1005 u32 lo_water, hi_water;
1007 if (bp->flow_ctrl & FLOW_CTRL_TX)
1008 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1010 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1011 if (lo_water >= bp->rx_ring_size)
1014 hi_water = bp->rx_ring_size / 4;
1016 if (hi_water <= lo_water)
1019 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1020 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1024 else if (hi_water == 0)
1026 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1028 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1032 bnx2_set_mac_link(struct bnx2 *bp)
1036 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1037 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1038 (bp->duplex == DUPLEX_HALF)) {
1039 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1042 /* Configure the EMAC mode register. */
1043 val = REG_RD(bp, BNX2_EMAC_MODE);
1045 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1046 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1047 BNX2_EMAC_MODE_25G_MODE);
1050 switch (bp->line_speed) {
1052 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1053 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1058 val |= BNX2_EMAC_MODE_PORT_MII;
1061 val |= BNX2_EMAC_MODE_25G_MODE;
1064 val |= BNX2_EMAC_MODE_PORT_GMII;
1069 val |= BNX2_EMAC_MODE_PORT_GMII;
1072 /* Set the MAC to operate in the appropriate duplex mode. */
1073 if (bp->duplex == DUPLEX_HALF)
1074 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1075 REG_WR(bp, BNX2_EMAC_MODE, val);
1077 /* Enable/disable rx PAUSE. */
1078 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1080 if (bp->flow_ctrl & FLOW_CTRL_RX)
1081 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1082 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1084 /* Enable/disable tx PAUSE. */
1085 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1086 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1088 if (bp->flow_ctrl & FLOW_CTRL_TX)
1089 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1090 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1092 /* Acknowledge the interrupt. */
1093 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1096 bnx2_init_rx_context0(bp);
1102 bnx2_enable_bmsr1(struct bnx2 *bp)
1104 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1105 (CHIP_NUM(bp) == CHIP_NUM_5709))
1106 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1107 MII_BNX2_BLK_ADDR_GP_STATUS);
1111 bnx2_disable_bmsr1(struct bnx2 *bp)
1113 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1114 (CHIP_NUM(bp) == CHIP_NUM_5709))
1115 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1116 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1120 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1125 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1128 if (bp->autoneg & AUTONEG_SPEED)
1129 bp->advertising |= ADVERTISED_2500baseX_Full;
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1132 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1134 bnx2_read_phy(bp, bp->mii_up1, &up1);
1135 if (!(up1 & BCM5708S_UP1_2G5)) {
1136 up1 |= BCM5708S_UP1_2G5;
1137 bnx2_write_phy(bp, bp->mii_up1, up1);
1141 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1142 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1143 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1149 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1154 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1157 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1158 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1160 bnx2_read_phy(bp, bp->mii_up1, &up1);
1161 if (up1 & BCM5708S_UP1_2G5) {
1162 up1 &= ~BCM5708S_UP1_2G5;
1163 bnx2_write_phy(bp, bp->mii_up1, up1);
1167 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1168 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1169 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1175 bnx2_enable_forced_2g5(struct bnx2 *bp)
1179 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1182 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1186 MII_BNX2_BLK_ADDR_SERDES_DIG);
1187 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1188 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1189 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1190 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1193 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1194 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1196 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1197 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1198 bmcr |= BCM5708S_BMCR_FORCE_2500;
1201 if (bp->autoneg & AUTONEG_SPEED) {
1202 bmcr &= ~BMCR_ANENABLE;
1203 if (bp->req_duplex == DUPLEX_FULL)
1204 bmcr |= BMCR_FULLDPLX;
1206 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1210 bnx2_disable_forced_2g5(struct bnx2 *bp)
1214 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1217 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1220 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1221 MII_BNX2_BLK_ADDR_SERDES_DIG);
1222 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1223 val &= ~MII_BNX2_SD_MISC1_FORCE;
1224 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1226 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1227 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1228 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1230 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1231 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1232 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1235 if (bp->autoneg & AUTONEG_SPEED)
1236 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1237 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1241 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1245 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1246 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1248 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1250 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1254 bnx2_set_link(struct bnx2 *bp)
1259 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1264 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1267 link_up = bp->link_up;
1269 bnx2_enable_bmsr1(bp);
1270 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1271 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1272 bnx2_disable_bmsr1(bp);
1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1279 bnx2_5706s_force_link_dn(bp, 0);
1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1282 val = REG_RD(bp, BNX2_EMAC_STATUS);
1284 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1285 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1286 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1288 if ((val & BNX2_EMAC_STATUS_LINK) &&
1289 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1290 bmsr |= BMSR_LSTATUS;
1292 bmsr &= ~BMSR_LSTATUS;
1295 if (bmsr & BMSR_LSTATUS) {
1298 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1299 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1300 bnx2_5706s_linkup(bp);
1301 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1302 bnx2_5708s_linkup(bp);
1303 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1304 bnx2_5709s_linkup(bp);
1307 bnx2_copper_linkup(bp);
1309 bnx2_resolve_flow_ctrl(bp);
1312 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1313 (bp->autoneg & AUTONEG_SPEED))
1314 bnx2_disable_forced_2g5(bp);
1316 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1319 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1320 bmcr |= BMCR_ANENABLE;
1321 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1323 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1328 if (bp->link_up != link_up) {
1329 bnx2_report_link(bp);
1332 bnx2_set_mac_link(bp);
1338 bnx2_reset_phy(struct bnx2 *bp)
1343 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1345 #define PHY_RESET_MAX_WAIT 100
1346 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1349 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1350 if (!(reg & BMCR_RESET)) {
1355 if (i == PHY_RESET_MAX_WAIT) {
1362 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1366 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1367 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1369 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1370 adv = ADVERTISE_1000XPAUSE;
1373 adv = ADVERTISE_PAUSE_CAP;
1376 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1377 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1378 adv = ADVERTISE_1000XPSE_ASYM;
1381 adv = ADVERTISE_PAUSE_ASYM;
1384 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1385 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1386 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1389 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1395 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1398 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1400 u32 speed_arg = 0, pause_adv;
1402 pause_adv = bnx2_phy_get_pause_adv(bp);
1404 if (bp->autoneg & AUTONEG_SPEED) {
1405 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1406 if (bp->advertising & ADVERTISED_10baseT_Half)
1407 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1408 if (bp->advertising & ADVERTISED_10baseT_Full)
1409 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1410 if (bp->advertising & ADVERTISED_100baseT_Half)
1411 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1412 if (bp->advertising & ADVERTISED_100baseT_Full)
1413 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1414 if (bp->advertising & ADVERTISED_1000baseT_Full)
1415 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1416 if (bp->advertising & ADVERTISED_2500baseX_Full)
1417 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1419 if (bp->req_line_speed == SPEED_2500)
1420 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1421 else if (bp->req_line_speed == SPEED_1000)
1422 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1423 else if (bp->req_line_speed == SPEED_100) {
1424 if (bp->req_duplex == DUPLEX_FULL)
1425 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1427 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1428 } else if (bp->req_line_speed == SPEED_10) {
1429 if (bp->req_duplex == DUPLEX_FULL)
1430 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1432 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1436 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1437 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1438 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1439 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1441 if (port == PORT_TP)
1442 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1443 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1445 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1447 spin_unlock_bh(&bp->phy_lock);
1448 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1449 spin_lock_bh(&bp->phy_lock);
1455 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1460 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1461 return (bnx2_setup_remote_phy(bp, port));
1463 if (!(bp->autoneg & AUTONEG_SPEED)) {
1465 int force_link_down = 0;
1467 if (bp->req_line_speed == SPEED_2500) {
1468 if (!bnx2_test_and_enable_2g5(bp))
1469 force_link_down = 1;
1470 } else if (bp->req_line_speed == SPEED_1000) {
1471 if (bnx2_test_and_disable_2g5(bp))
1472 force_link_down = 1;
1474 bnx2_read_phy(bp, bp->mii_adv, &adv);
1475 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1477 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1478 new_bmcr = bmcr & ~BMCR_ANENABLE;
1479 new_bmcr |= BMCR_SPEED1000;
1481 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1482 if (bp->req_line_speed == SPEED_2500)
1483 bnx2_enable_forced_2g5(bp);
1484 else if (bp->req_line_speed == SPEED_1000) {
1485 bnx2_disable_forced_2g5(bp);
1486 new_bmcr &= ~0x2000;
1489 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1490 if (bp->req_line_speed == SPEED_2500)
1491 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1493 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1496 if (bp->req_duplex == DUPLEX_FULL) {
1497 adv |= ADVERTISE_1000XFULL;
1498 new_bmcr |= BMCR_FULLDPLX;
1501 adv |= ADVERTISE_1000XHALF;
1502 new_bmcr &= ~BMCR_FULLDPLX;
1504 if ((new_bmcr != bmcr) || (force_link_down)) {
1505 /* Force a link down visible on the other side */
1507 bnx2_write_phy(bp, bp->mii_adv, adv &
1508 ~(ADVERTISE_1000XFULL |
1509 ADVERTISE_1000XHALF));
1510 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1511 BMCR_ANRESTART | BMCR_ANENABLE);
1514 netif_carrier_off(bp->dev);
1515 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1516 bnx2_report_link(bp);
1518 bnx2_write_phy(bp, bp->mii_adv, adv);
1519 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1521 bnx2_resolve_flow_ctrl(bp);
1522 bnx2_set_mac_link(bp);
1527 bnx2_test_and_enable_2g5(bp);
1529 if (bp->advertising & ADVERTISED_1000baseT_Full)
1530 new_adv |= ADVERTISE_1000XFULL;
1532 new_adv |= bnx2_phy_get_pause_adv(bp);
1534 bnx2_read_phy(bp, bp->mii_adv, &adv);
1535 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1537 bp->serdes_an_pending = 0;
1538 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1539 /* Force a link down visible on the other side */
1541 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1542 spin_unlock_bh(&bp->phy_lock);
1544 spin_lock_bh(&bp->phy_lock);
1547 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1548 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1550 /* Speed up link-up time when the link partner
1551 * does not autonegotiate which is very common
1552 * in blade servers. Some blade servers use
1553 * IPMI for keyboard input and it's important
1554 * to minimize link disruptions. Autoneg. involves
1555 * exchanging base pages plus 3 next pages and
1556 * normally completes in about 120 msec.
1558 bp->current_interval = SERDES_AN_TIMEOUT;
1559 bp->serdes_an_pending = 1;
1560 mod_timer(&bp->timer, jiffies + bp->current_interval);
1562 bnx2_resolve_flow_ctrl(bp);
1563 bnx2_set_mac_link(bp);
1569 #define ETHTOOL_ALL_FIBRE_SPEED \
1570 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1571 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1572 (ADVERTISED_1000baseT_Full)
1574 #define ETHTOOL_ALL_COPPER_SPEED \
1575 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1576 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1577 ADVERTISED_1000baseT_Full)
1579 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1580 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1582 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1585 bnx2_set_default_remote_link(struct bnx2 *bp)
1589 if (bp->phy_port == PORT_TP)
1590 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1592 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1594 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1595 bp->req_line_speed = 0;
1596 bp->autoneg |= AUTONEG_SPEED;
1597 bp->advertising = ADVERTISED_Autoneg;
1598 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1599 bp->advertising |= ADVERTISED_10baseT_Half;
1600 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1601 bp->advertising |= ADVERTISED_10baseT_Full;
1602 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1603 bp->advertising |= ADVERTISED_100baseT_Half;
1604 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1605 bp->advertising |= ADVERTISED_100baseT_Full;
1606 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1607 bp->advertising |= ADVERTISED_1000baseT_Full;
1608 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1609 bp->advertising |= ADVERTISED_2500baseX_Full;
1612 bp->advertising = 0;
1613 bp->req_duplex = DUPLEX_FULL;
1614 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1615 bp->req_line_speed = SPEED_10;
1616 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1617 bp->req_duplex = DUPLEX_HALF;
1619 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1620 bp->req_line_speed = SPEED_100;
1621 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1622 bp->req_duplex = DUPLEX_HALF;
1624 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1625 bp->req_line_speed = SPEED_1000;
1626 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1627 bp->req_line_speed = SPEED_2500;
1632 bnx2_set_default_link(struct bnx2 *bp)
1634 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1635 bnx2_set_default_remote_link(bp);
1639 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1640 bp->req_line_speed = 0;
1641 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1646 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1647 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1648 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1650 bp->req_line_speed = bp->line_speed = SPEED_1000;
1651 bp->req_duplex = DUPLEX_FULL;
1654 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1658 bnx2_send_heart_beat(struct bnx2 *bp)
1663 spin_lock(&bp->indirect_lock);
1664 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1665 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1666 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1667 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1668 spin_unlock(&bp->indirect_lock);
1672 bnx2_remote_phy_event(struct bnx2 *bp)
1675 u8 link_up = bp->link_up;
1678 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1680 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1681 bnx2_send_heart_beat(bp);
1683 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1685 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1691 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1692 bp->duplex = DUPLEX_FULL;
1694 case BNX2_LINK_STATUS_10HALF:
1695 bp->duplex = DUPLEX_HALF;
1696 case BNX2_LINK_STATUS_10FULL:
1697 bp->line_speed = SPEED_10;
1699 case BNX2_LINK_STATUS_100HALF:
1700 bp->duplex = DUPLEX_HALF;
1701 case BNX2_LINK_STATUS_100BASE_T4:
1702 case BNX2_LINK_STATUS_100FULL:
1703 bp->line_speed = SPEED_100;
1705 case BNX2_LINK_STATUS_1000HALF:
1706 bp->duplex = DUPLEX_HALF;
1707 case BNX2_LINK_STATUS_1000FULL:
1708 bp->line_speed = SPEED_1000;
1710 case BNX2_LINK_STATUS_2500HALF:
1711 bp->duplex = DUPLEX_HALF;
1712 case BNX2_LINK_STATUS_2500FULL:
1713 bp->line_speed = SPEED_2500;
1720 spin_lock(&bp->phy_lock);
1722 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1723 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1724 if (bp->duplex == DUPLEX_FULL)
1725 bp->flow_ctrl = bp->req_flow_ctrl;
1727 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1728 bp->flow_ctrl |= FLOW_CTRL_TX;
1729 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1730 bp->flow_ctrl |= FLOW_CTRL_RX;
1733 old_port = bp->phy_port;
1734 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1735 bp->phy_port = PORT_FIBRE;
1737 bp->phy_port = PORT_TP;
1739 if (old_port != bp->phy_port)
1740 bnx2_set_default_link(bp);
1742 spin_unlock(&bp->phy_lock);
1744 if (bp->link_up != link_up)
1745 bnx2_report_link(bp);
1747 bnx2_set_mac_link(bp);
1751 bnx2_set_remote_link(struct bnx2 *bp)
1755 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1757 case BNX2_FW_EVT_CODE_LINK_EVENT:
1758 bnx2_remote_phy_event(bp);
1760 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1762 bnx2_send_heart_beat(bp);
1769 bnx2_setup_copper_phy(struct bnx2 *bp)
1774 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1776 if (bp->autoneg & AUTONEG_SPEED) {
1777 u32 adv_reg, adv1000_reg;
1778 u32 new_adv_reg = 0;
1779 u32 new_adv1000_reg = 0;
1781 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1782 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1783 ADVERTISE_PAUSE_ASYM);
1785 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1786 adv1000_reg &= PHY_ALL_1000_SPEED;
1788 if (bp->advertising & ADVERTISED_10baseT_Half)
1789 new_adv_reg |= ADVERTISE_10HALF;
1790 if (bp->advertising & ADVERTISED_10baseT_Full)
1791 new_adv_reg |= ADVERTISE_10FULL;
1792 if (bp->advertising & ADVERTISED_100baseT_Half)
1793 new_adv_reg |= ADVERTISE_100HALF;
1794 if (bp->advertising & ADVERTISED_100baseT_Full)
1795 new_adv_reg |= ADVERTISE_100FULL;
1796 if (bp->advertising & ADVERTISED_1000baseT_Full)
1797 new_adv1000_reg |= ADVERTISE_1000FULL;
1799 new_adv_reg |= ADVERTISE_CSMA;
1801 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1803 if ((adv1000_reg != new_adv1000_reg) ||
1804 (adv_reg != new_adv_reg) ||
1805 ((bmcr & BMCR_ANENABLE) == 0)) {
1807 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1808 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1809 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1812 else if (bp->link_up) {
1813 /* Flow ctrl may have changed from auto to forced */
1814 /* or vice-versa. */
1816 bnx2_resolve_flow_ctrl(bp);
1817 bnx2_set_mac_link(bp);
1823 if (bp->req_line_speed == SPEED_100) {
1824 new_bmcr |= BMCR_SPEED100;
1826 if (bp->req_duplex == DUPLEX_FULL) {
1827 new_bmcr |= BMCR_FULLDPLX;
1829 if (new_bmcr != bmcr) {
1832 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1833 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1835 if (bmsr & BMSR_LSTATUS) {
1836 /* Force link down */
1837 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1838 spin_unlock_bh(&bp->phy_lock);
1840 spin_lock_bh(&bp->phy_lock);
1842 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1843 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1846 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1848 /* Normally, the new speed is setup after the link has
1849 * gone down and up again. In some cases, link will not go
1850 * down so we need to set up the new speed here.
1852 if (bmsr & BMSR_LSTATUS) {
1853 bp->line_speed = bp->req_line_speed;
1854 bp->duplex = bp->req_duplex;
1855 bnx2_resolve_flow_ctrl(bp);
1856 bnx2_set_mac_link(bp);
1859 bnx2_resolve_flow_ctrl(bp);
1860 bnx2_set_mac_link(bp);
1866 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1868 if (bp->loopback == MAC_LOOPBACK)
1871 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1872 return (bnx2_setup_serdes_phy(bp, port));
1875 return (bnx2_setup_copper_phy(bp));
1880 bnx2_init_5709s_phy(struct bnx2 *bp)
1884 bp->mii_bmcr = MII_BMCR + 0x10;
1885 bp->mii_bmsr = MII_BMSR + 0x10;
1886 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1887 bp->mii_adv = MII_ADVERTISE + 0x10;
1888 bp->mii_lpa = MII_LPA + 0x10;
1889 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1891 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1892 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1894 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1897 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1899 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1900 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1901 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1902 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1904 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1905 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1906 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
1907 val |= BCM5708S_UP1_2G5;
1909 val &= ~BCM5708S_UP1_2G5;
1910 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1912 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1913 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1914 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1915 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1917 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1919 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1920 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1921 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1923 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1929 bnx2_init_5708s_phy(struct bnx2 *bp)
1935 bp->mii_up1 = BCM5708S_UP1;
1937 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1938 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1939 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1941 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1942 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1943 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1945 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1946 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1947 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1949 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
1950 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1951 val |= BCM5708S_UP1_2G5;
1952 bnx2_write_phy(bp, BCM5708S_UP1, val);
1955 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1956 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1957 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1958 /* increase tx signal amplitude */
1959 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1960 BCM5708S_BLK_ADDR_TX_MISC);
1961 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1962 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1963 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1964 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1967 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
1968 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1973 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
1974 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1975 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1976 BCM5708S_BLK_ADDR_TX_MISC);
1977 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1978 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1979 BCM5708S_BLK_ADDR_DIG);
1986 bnx2_init_5706s_phy(struct bnx2 *bp)
1990 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1992 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1993 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1995 if (bp->dev->mtu > 1500) {
1998 /* Set extended packet length bit */
1999 bnx2_write_phy(bp, 0x18, 0x7);
2000 bnx2_read_phy(bp, 0x18, &val);
2001 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2003 bnx2_write_phy(bp, 0x1c, 0x6c00);
2004 bnx2_read_phy(bp, 0x1c, &val);
2005 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2010 bnx2_write_phy(bp, 0x18, 0x7);
2011 bnx2_read_phy(bp, 0x18, &val);
2012 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2014 bnx2_write_phy(bp, 0x1c, 0x6c00);
2015 bnx2_read_phy(bp, 0x1c, &val);
2016 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2023 bnx2_init_copper_phy(struct bnx2 *bp)
2029 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2030 bnx2_write_phy(bp, 0x18, 0x0c00);
2031 bnx2_write_phy(bp, 0x17, 0x000a);
2032 bnx2_write_phy(bp, 0x15, 0x310b);
2033 bnx2_write_phy(bp, 0x17, 0x201f);
2034 bnx2_write_phy(bp, 0x15, 0x9506);
2035 bnx2_write_phy(bp, 0x17, 0x401f);
2036 bnx2_write_phy(bp, 0x15, 0x14e2);
2037 bnx2_write_phy(bp, 0x18, 0x0400);
2040 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2041 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2042 MII_BNX2_DSP_EXPAND_REG | 0x8);
2043 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2045 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2048 if (bp->dev->mtu > 1500) {
2049 /* Set extended packet length bit */
2050 bnx2_write_phy(bp, 0x18, 0x7);
2051 bnx2_read_phy(bp, 0x18, &val);
2052 bnx2_write_phy(bp, 0x18, val | 0x4000);
2054 bnx2_read_phy(bp, 0x10, &val);
2055 bnx2_write_phy(bp, 0x10, val | 0x1);
2058 bnx2_write_phy(bp, 0x18, 0x7);
2059 bnx2_read_phy(bp, 0x18, &val);
2060 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2062 bnx2_read_phy(bp, 0x10, &val);
2063 bnx2_write_phy(bp, 0x10, val & ~0x1);
2066 /* ethernet@wirespeed */
2067 bnx2_write_phy(bp, 0x18, 0x7007);
2068 bnx2_read_phy(bp, 0x18, &val);
2069 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2075 bnx2_init_phy(struct bnx2 *bp)
2080 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2081 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2083 bp->mii_bmcr = MII_BMCR;
2084 bp->mii_bmsr = MII_BMSR;
2085 bp->mii_bmsr1 = MII_BMSR;
2086 bp->mii_adv = MII_ADVERTISE;
2087 bp->mii_lpa = MII_LPA;
2089 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2091 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2094 bnx2_read_phy(bp, MII_PHYSID1, &val);
2095 bp->phy_id = val << 16;
2096 bnx2_read_phy(bp, MII_PHYSID2, &val);
2097 bp->phy_id |= val & 0xffff;
2099 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2100 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2101 rc = bnx2_init_5706s_phy(bp);
2102 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2103 rc = bnx2_init_5708s_phy(bp);
2104 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2105 rc = bnx2_init_5709s_phy(bp);
2108 rc = bnx2_init_copper_phy(bp);
2113 rc = bnx2_setup_phy(bp, bp->phy_port);
2119 bnx2_set_mac_loopback(struct bnx2 *bp)
2123 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2124 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2125 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2126 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2131 static int bnx2_test_link(struct bnx2 *);
2134 bnx2_set_phy_loopback(struct bnx2 *bp)
2139 spin_lock_bh(&bp->phy_lock);
2140 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2142 spin_unlock_bh(&bp->phy_lock);
2146 for (i = 0; i < 10; i++) {
2147 if (bnx2_test_link(bp) == 0)
2152 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2153 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2154 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2155 BNX2_EMAC_MODE_25G_MODE);
2157 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2158 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
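/* Driver/firmware mailbox handshake: the request plus a sequence number
 * is written to the DRV_MB shared memory word, then FW_MB is polled for
 * a matching ACK sequence.  On timeout the firmware is notified with a
 * FW_TIMEOUT code.
 */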
2164 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2170 msg_data |= bp->fw_wr_seq;
2172 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2174 /* wait for an acknowledgement. */
2175 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2178 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2180 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2183 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2186 /* If we timed out, inform the firmware that this is the case. */
2187 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2189 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2192 msg_data &= ~BNX2_DRV_MSG_CODE;
2193 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2195 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2200 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2207 bnx2_init_5709_context(struct bnx2 *bp)
2212 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2213 val |= (BCM_PAGE_BITS - 8) << 16;
2214 REG_WR(bp, BNX2_CTX_COMMAND, val);
2215 for (i = 0; i < 10; i++) {
2216 val = REG_RD(bp, BNX2_CTX_COMMAND);
2217 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2221 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2224 for (i = 0; i < bp->ctx_pages; i++) {
2227 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2228 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2229 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2230 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2231 (u64) bp->ctx_blk_mapping[i] >> 32);
2232 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2233 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2234 for (j = 0; j < 10; j++) {
2236 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2237 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2241 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2250 bnx2_init_context(struct bnx2 *bp)
2256 u32 vcid_addr, pcid_addr, offset;
2261 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2264 vcid_addr = GET_PCID_ADDR(vcid);
2266 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2271 pcid_addr = GET_PCID_ADDR(new_vcid);
2274 vcid_addr = GET_CID_ADDR(vcid);
2275 pcid_addr = vcid_addr;
2278 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2279 vcid_addr += (i << PHY_CTX_SHIFT);
2280 pcid_addr += (i << PHY_CTX_SHIFT);
2282 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2283 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2285 /* Zero out the context. */
2286 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2287 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
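/* Work around bad on-chip rx buffer memory: allocate every free mbuf
 * cluster from the chip, record the ones whose addresses do not have
 * bit 9 set, and free only those back so the bad blocks are never
 * reused.
 */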
2293 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2299 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2300 if (good_mbuf == NULL) {
2301 printk(KERN_ERR PFX "Failed to allocate memory in "
2302 "bnx2_alloc_bad_rbuf\n");
2306 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2307 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2311 /* Allocate a bunch of mbufs and save the good ones in an array. */
2312 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2313 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2314 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2315 BNX2_RBUF_COMMAND_ALLOC_REQ);
2317 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2319 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2321 /* The addresses with Bit 9 set are bad memory blocks. */
2322 if (!(val & (1 << 9))) {
2323 good_mbuf[good_mbuf_cnt] = (u16) val;
2327 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2330 /* Free the good ones back to the mbuf pool thus discarding
2331 * all the bad ones. */
2332 while (good_mbuf_cnt) {
2335 val = good_mbuf[good_mbuf_cnt];
2336 val = (val << 9) | val | 1;
2338 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2345 bnx2_set_mac_addr(struct bnx2 *bp)
2348 u8 *mac_addr = bp->dev->dev_addr;
2350 val = (mac_addr[0] << 8) | mac_addr[1];
2352 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2354 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2355 (mac_addr[4] << 8) | mac_addr[5];
2357 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
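/* Allocate a page for the rx page ring, map it for DMA, and store the
 * mapping in both the software ring entry and the hardware rx_bd
 * address fields.
 */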
2361 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2364 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2365 struct rx_bd *rxbd =
2366 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2367 struct page *page = alloc_page(GFP_ATOMIC);
2371 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2372 PCI_DMA_FROMDEVICE);
2374 pci_unmap_addr_set(rx_pg, mapping, mapping);
2375 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2376 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2381 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2383 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2384 struct page *page = rx_pg->page;
2389 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2390 PCI_DMA_FROMDEVICE);
2397 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2399 struct sk_buff *skb;
2400 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2402 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2403 unsigned long align;
2405 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2410 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2411 skb_reserve(skb, BNX2_RX_ALIGN - align);
2413 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2414 PCI_DMA_FROMDEVICE);
2417 pci_unmap_addr_set(rx_buf, mapping, mapping);
2419 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2420 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2422 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2428 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2430 struct status_block *sblk = bnapi->status_blk;
2431 u32 new_link_state, old_link_state;
2434 new_link_state = sblk->status_attn_bits & event;
2435 old_link_state = sblk->status_attn_bits_ack & event;
2436 if (new_link_state != old_link_state) {
2438 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2440 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2448 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2450 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2451 spin_lock(&bp->phy_lock);
2453 spin_unlock(&bp->phy_lock);
2455 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2456 bnx2_set_remote_link(bp);
2461 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2465 if (bnapi->int_num == 0)
2466 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2468 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
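/* The last BD in each ring page is a next-page pointer rather than a
 * packet descriptor, so when the hardware consumer index lands on it the
 * index is advanced past it (hence the MAX_TX_DESC_CNT check below).
 */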
2470 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2476 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2478 u16 hw_cons, sw_cons, sw_ring_cons;
2481 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2482 sw_cons = bnapi->tx_cons;
2484 while (sw_cons != hw_cons) {
2485 struct sw_bd *tx_buf;
2486 struct sk_buff *skb;
2489 sw_ring_cons = TX_RING_IDX(sw_cons);
2491 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2494 /* partial BD completions possible with TSO packets */
2495 if (skb_is_gso(skb)) {
2496 u16 last_idx, last_ring_idx;
2498 last_idx = sw_cons +
2499 skb_shinfo(skb)->nr_frags + 1;
2500 last_ring_idx = sw_ring_cons +
2501 skb_shinfo(skb)->nr_frags + 1;
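/* last_idx is the index of the final BD of this TSO packet.  If that
 * range runs into the next-page pointer BD, the index is bumped once
 * more; the signed 16-bit subtraction below then checks, modulo the
 * 16-bit index space, whether the hardware consumer has passed the last
 * BD yet.  If not, the packet is only partially completed and processing
 * stops for now.
 */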
2502 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2505 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2510 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2511 skb_headlen(skb), PCI_DMA_TODEVICE);
2514 last = skb_shinfo(skb)->nr_frags;
2516 for (i = 0; i < last; i++) {
2517 sw_cons = NEXT_TX_BD(sw_cons);
2519 pci_unmap_page(bp->pdev,
2521 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2523 skb_shinfo(skb)->frags[i].size,
2527 sw_cons = NEXT_TX_BD(sw_cons);
2531 if (tx_pkt == budget)
2534 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2537 bnapi->hw_tx_cons = hw_cons;
2538 bnapi->tx_cons = sw_cons;
2539 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2540 * before checking for netif_queue_stopped(). Without the
2541 * memory barrier, there is a small possibility that bnx2_start_xmit()
2542 * will miss it and cause the queue to be stopped forever.
2546 if (unlikely(netif_queue_stopped(bp->dev)) &&
2547 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2548 netif_tx_lock(bp->dev);
2549 if ((netif_queue_stopped(bp->dev)) &&
2550 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2551 netif_wake_queue(bp->dev);
2552 netif_tx_unlock(bp->dev);
2558 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2559 struct sk_buff *skb, int count)
2561 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2562 struct rx_bd *cons_bd, *prod_bd;
2565 u16 hw_prod = bnapi->rx_pg_prod, prod;
2566 u16 cons = bnapi->rx_pg_cons;
2568 for (i = 0; i < count; i++) {
2569 prod = RX_PG_RING_IDX(hw_prod);
2571 prod_rx_pg = &bp->rx_pg_ring[prod];
2572 cons_rx_pg = &bp->rx_pg_ring[cons];
2573 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2574 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2576 if (i == 0 && skb) {
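/* A caller failed part-way through building a paged skb: detach the page
 * from the skb's last frag slot, map it again for the device, and hand it
 * back to the ring through the consumer slot so it is not leaked.
 */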
2578 struct skb_shared_info *shinfo;
2580 shinfo = skb_shinfo(skb);
2582 page = shinfo->frags[shinfo->nr_frags].page;
2583 shinfo->frags[shinfo->nr_frags].page = NULL;
2584 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2585 PCI_DMA_FROMDEVICE);
2586 cons_rx_pg->page = page;
2587 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2591 prod_rx_pg->page = cons_rx_pg->page;
2592 cons_rx_pg->page = NULL;
2593 pci_unmap_addr_set(prod_rx_pg, mapping,
2594 pci_unmap_addr(cons_rx_pg, mapping));
2596 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2597 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2600 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2601 hw_prod = NEXT_RX_BD(hw_prod);
2603 bnapi->rx_pg_prod = hw_prod;
2604 bnapi->rx_pg_cons = cons;
2608 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2611 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2612 struct rx_bd *cons_bd, *prod_bd;
2614 cons_rx_buf = &bp->rx_buf_ring[cons];
2615 prod_rx_buf = &bp->rx_buf_ring[prod];
2617 pci_dma_sync_single_for_device(bp->pdev,
2618 pci_unmap_addr(cons_rx_buf, mapping),
2619 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2621 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2623 prod_rx_buf->skb = skb;
2628 pci_unmap_addr_set(prod_rx_buf, mapping,
2629 pci_unmap_addr(cons_rx_buf, mapping));
2631 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2632 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2633 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2634 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2638 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2639 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2643 u16 prod = ring_idx & 0xffff;
2645 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2646 if (unlikely(err)) {
2647 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2649 unsigned int raw_len = len + 4;
2650 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2652 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2657 skb_reserve(skb, bp->rx_offset);
2658 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2659 PCI_DMA_FROMDEVICE);
2665 unsigned int i, frag_len, frag_size, pages;
2666 struct sw_pg *rx_pg;
2667 u16 pg_cons = bnapi->rx_pg_cons;
2668 u16 pg_prod = bnapi->rx_pg_prod;
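/* The hardware places len + 4 bytes (frame plus CRC32) in the buffers.
 * Everything beyond the hdr_len bytes kept in the linear skb lives in
 * page buffers, so 'pages' is the number of page buffers needed to hold
 * the remainder.
 */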
2670 frag_size = len + 4 - hdr_len;
2671 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2672 skb_put(skb, hdr_len);
2674 for (i = 0; i < pages; i++) {
2675 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2676 if (unlikely(frag_len <= 4)) {
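/* The bytes left for this page are all (or mostly) the trailing CRC.
 * Trim the 'tail' bytes that spilled into the previous fragment, recycle
 * the unused pages, and finish the packet without attaching this page.
 */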
2677 unsigned int tail = 4 - frag_len;
2679 bnapi->rx_pg_cons = pg_cons;
2680 bnapi->rx_pg_prod = pg_prod;
2681 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2688 &skb_shinfo(skb)->frags[i - 1];
2690 skb->data_len -= tail;
2691 skb->truesize -= tail;
2695 rx_pg = &bp->rx_pg_ring[pg_cons];
2697 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2698 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2703 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2706 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2707 if (unlikely(err)) {
2708 bnapi->rx_pg_cons = pg_cons;
2709 bnapi->rx_pg_prod = pg_prod;
2710 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2715 frag_size -= frag_len;
2716 skb->data_len += frag_len;
2717 skb->truesize += frag_len;
2718 skb->len += frag_len;
2720 pg_prod = NEXT_RX_BD(pg_prod);
2721 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2723 bnapi->rx_pg_prod = pg_prod;
2724 bnapi->rx_pg_cons = pg_cons;
2730 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2732 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2734 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2740 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2742 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2743 struct l2_fhdr *rx_hdr;
2744 int rx_pkt = 0, pg_ring_used = 0;
2746 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2747 sw_cons = bnapi->rx_cons;
2748 sw_prod = bnapi->rx_prod;
2750 /* Memory barrier necessary as speculative reads of the rx
2751 * buffer can be ahead of the index in the status block
2754 while (sw_cons != hw_cons) {
2755 unsigned int len, hdr_len;
2757 struct sw_bd *rx_buf;
2758 struct sk_buff *skb;
2759 dma_addr_t dma_addr;
2761 sw_ring_cons = RX_RING_IDX(sw_cons);
2762 sw_ring_prod = RX_RING_IDX(sw_prod);
2764 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2769 dma_addr = pci_unmap_addr(rx_buf, mapping);
2771 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2772 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2774 rx_hdr = (struct l2_fhdr *) skb->data;
2775 len = rx_hdr->l2_fhdr_pkt_len;
2777 if ((status = rx_hdr->l2_fhdr_status) &
2778 (L2_FHDR_ERRORS_BAD_CRC |
2779 L2_FHDR_ERRORS_PHY_DECODE |
2780 L2_FHDR_ERRORS_ALIGNMENT |
2781 L2_FHDR_ERRORS_TOO_SHORT |
2782 L2_FHDR_ERRORS_GIANT_FRAME)) {
2784 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2789 if (status & L2_FHDR_STATUS_SPLIT) {
2790 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2792 } else if (len > bp->rx_jumbo_thresh) {
2793 hdr_len = bp->rx_jumbo_thresh;
2799 if (len <= bp->rx_copy_thresh) {
2800 struct sk_buff *new_skb;
2802 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2803 if (new_skb == NULL) {
2804 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2810 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2811 new_skb->data, len + 2);
2812 skb_reserve(new_skb, 2);
2813 skb_put(new_skb, len);
2815 bnx2_reuse_rx_skb(bp, bnapi, skb,
2816 sw_ring_cons, sw_ring_prod);
2819 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2820 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2823 skb->protocol = eth_type_trans(skb, bp->dev);
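/* 0x8100 is the 802.1Q VLAN ethertype; a frame carrying a VLAN tag is
 * legitimately 4 bytes longer than the MTU, so it is not treated as an
 * oversize error here.
 */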
2825 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2826 (ntohs(skb->protocol) != 0x8100)) {
2833 skb->ip_summed = CHECKSUM_NONE;
2835 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2836 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2838 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2839 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2840 skb->ip_summed = CHECKSUM_UNNECESSARY;
2844 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2845 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2846 rx_hdr->l2_fhdr_vlan_tag);
2850 netif_receive_skb(skb);
2852 bp->dev->last_rx = jiffies;
2856 sw_cons = NEXT_RX_BD(sw_cons);
2857 sw_prod = NEXT_RX_BD(sw_prod);
2859 if (rx_pkt == budget)
2862 /* Refresh hw_cons to see if there is new work */
2863 if (sw_cons == hw_cons) {
2864 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2868 bnapi->rx_cons = sw_cons;
2869 bnapi->rx_prod = sw_prod;
2872 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2875 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2877 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2885 /* MSI ISR - The only difference between this and the INTx ISR
2886 * is that the MSI interrupt is always serviced.
2889 bnx2_msi(int irq, void *dev_instance)
2891 struct net_device *dev = dev_instance;
2892 struct bnx2 *bp = netdev_priv(dev);
2893 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2895 prefetch(bnapi->status_blk);
2896 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2897 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2898 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2900 /* Return here if interrupt is disabled. */
2901 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2904 netif_rx_schedule(dev, &bnapi->napi);
2910 bnx2_msi_1shot(int irq, void *dev_instance)
2912 struct net_device *dev = dev_instance;
2913 struct bnx2 *bp = netdev_priv(dev);
2914 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2916 prefetch(bnapi->status_blk);
2918 /* Return here if interrupt is disabled. */
2919 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2922 netif_rx_schedule(dev, &bnapi->napi);
2928 bnx2_interrupt(int irq, void *dev_instance)
2930 struct net_device *dev = dev_instance;
2931 struct bnx2 *bp = netdev_priv(dev);
2932 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2933 struct status_block *sblk = bnapi->status_blk;
2935 /* When using INTx, it is possible for the interrupt to arrive
2936 * at the CPU before the status block write posted prior to the
2937 * interrupt. Reading a register will flush the status block.
2938 * When using MSI, the MSI message will always complete after
2939 * the status block write.
2941 if ((sblk->status_idx == bnapi->last_status_idx) &&
2942 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2943 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2946 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2947 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2948 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2950 /* Read back to deassert IRQ immediately to avoid too many
2951 * spurious interrupts.
2953 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2955 /* Return here if interrupt is shared and is disabled. */
2956 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2959 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2960 bnapi->last_status_idx = sblk->status_idx;
2961 __netif_rx_schedule(dev, &bnapi->napi);
2968 bnx2_tx_msix(int irq, void *dev_instance)
2970 struct net_device *dev = dev_instance;
2971 struct bnx2 *bp = netdev_priv(dev);
2972 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2974 prefetch(bnapi->status_blk_msix);
2976 /* Return here if interrupt is disabled. */
2977 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2980 netif_rx_schedule(dev, &bnapi->napi);
2984 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2985 STATUS_ATTN_BITS_TIMER_ABORT)
2988 bnx2_has_work(struct bnx2_napi *bnapi)
2990 struct status_block *sblk = bnapi->status_blk;
2992 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2993 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2996 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2997 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3003 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3005 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3006 struct bnx2 *bp = bnapi->bp;
3008 struct status_block_msix *sblk = bnapi->status_blk_msix;
3011 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3012 if (unlikely(work_done >= budget))
3015 bnapi->last_status_idx = sblk->status_idx;
3017 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
3019 netif_rx_complete(bp->dev, napi);
3020 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3021 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3022 bnapi->last_status_idx);
3026 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3027 int work_done, int budget)
3029 struct status_block *sblk = bnapi->status_blk;
3030 u32 status_attn_bits = sblk->status_attn_bits;
3031 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3033 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3034 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3036 bnx2_phy_int(bp, bnapi);
3038 /* This is needed to take care of transient status
3039 * during link changes.
3041 REG_WR(bp, BNX2_HC_COMMAND,
3042 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3043 REG_RD(bp, BNX2_HC_COMMAND);
3046 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
3047 bnx2_tx_int(bp, bnapi, 0);
3049 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
3050 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3055 static int bnx2_poll(struct napi_struct *napi, int budget)
3057 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3058 struct bnx2 *bp = bnapi->bp;
3060 struct status_block *sblk = bnapi->status_blk;
3063 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3065 if (unlikely(work_done >= budget))
3068 /* bnapi->last_status_idx is used below to tell the hw how
3069 * much work has been processed, so we must read it before
3070 * checking for more work.
3072 bnapi->last_status_idx = sblk->status_idx;
3074 if (likely(!bnx2_has_work(bnapi))) {
3075 netif_rx_complete(bp->dev, napi);
3076 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3077 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3078 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3079 bnapi->last_status_idx);
3082 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3083 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3084 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3085 bnapi->last_status_idx);
3087 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3088 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3089 bnapi->last_status_idx);
3097 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3098 * from set_multicast.
3101 bnx2_set_rx_mode(struct net_device *dev)
3103 struct bnx2 *bp = netdev_priv(dev);
3104 u32 rx_mode, sort_mode;
3107 spin_lock_bh(&bp->phy_lock);
3109 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3110 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3111 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3113 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3114 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3116 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3117 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3119 if (dev->flags & IFF_PROMISC) {
3120 /* Promiscuous mode. */
3121 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3122 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3123 BNX2_RPM_SORT_USER0_PROM_VLAN;
3125 else if (dev->flags & IFF_ALLMULTI) {
3126 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3127 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3130 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3133 /* Accept one or more multicast(s). */
3134 struct dev_mc_list *mclist;
3135 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3140 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3142 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3143 i++, mclist = mclist->next) {
3145 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
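/* The CRC32 of the address selects one of 256 hash bits spread across
 * the 8 x 32-bit multicast hash registers: the derived 'bit' value picks
 * the register via its top three bits (regidx) and the bit position
 * within that register via the remaining five bits.
 */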
3147 regidx = (bit & 0xe0) >> 5;
3149 mc_filter[regidx] |= (1 << bit);
3152 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3153 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3157 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3160 if (rx_mode != bp->rx_mode) {
3161 bp->rx_mode = rx_mode;
3162 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3165 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3166 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3167 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3169 spin_unlock_bh(&bp->phy_lock);
3173 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3180 for (i = 0; i < rv2p_code_len; i += 8) {
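/* Each RV2P instruction is 8 bytes.  The two 32-bit halves are staged in
 * INSTR_HIGH/INSTR_LOW and then committed by writing the instruction
 * index (i / 8) together with the RDWR command bit.
 */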
3181 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3183 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3186 if (rv2p_proc == RV2P_PROC1) {
3187 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3188 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3191 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3192 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3196 /* Reset the processor; un-stalling is done later. */
3197 if (rv2p_proc == RV2P_PROC1) {
3198 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3201 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3206 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3213 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3214 val |= cpu_reg->mode_value_halt;
3215 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3216 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3218 /* Load the Text area. */
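/* Firmware sections are linked at addresses in the CPU's MIPS view;
 * subtracting mips_view_base and adding spad_base converts them to
 * scratchpad offsets usable with the host's indirect register writes.
 */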
3219 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3223 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3228 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3229 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3233 /* Load the Data area. */
3234 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3238 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3239 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3243 /* Load the SBSS area. */
3244 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3248 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3249 bnx2_reg_wr_ind(bp, offset, 0);
3253 /* Load the BSS area. */
3254 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3258 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3259 bnx2_reg_wr_ind(bp, offset, 0);
3263 /* Load the Read-Only area. */
3264 offset = cpu_reg->spad_base +
3265 (fw->rodata_addr - cpu_reg->mips_view_base);
3269 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3270 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3274 /* Clear the pre-fetch instruction. */
3275 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3276 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3278 /* Start the CPU. */
3279 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3280 val &= ~cpu_reg->mode_value_halt;
3281 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3282 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3288 bnx2_init_cpus(struct bnx2 *bp)
3290 struct cpu_reg cpu_reg;
3295 /* Initialize the RV2P processor. */
3296 text = vmalloc(FW_BUF_SIZE);
3299 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3300 rv2p = bnx2_xi_rv2p_proc1;
3301 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3303 rv2p = bnx2_rv2p_proc1;
3304 rv2p_len = sizeof(bnx2_rv2p_proc1);
3306 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3310 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3312 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3313 rv2p = bnx2_xi_rv2p_proc2;
3314 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3316 rv2p = bnx2_rv2p_proc2;
3317 rv2p_len = sizeof(bnx2_rv2p_proc2);
3319 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3323 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3325 /* Initialize the RX Processor. */
3326 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3327 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3328 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3329 cpu_reg.state = BNX2_RXP_CPU_STATE;
3330 cpu_reg.state_value_clear = 0xffffff;
3331 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3332 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3333 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3334 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3335 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3336 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3337 cpu_reg.mips_view_base = 0x8000000;
3339 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3340 fw = &bnx2_rxp_fw_09;
3342 fw = &bnx2_rxp_fw_06;
3345 rc = load_cpu_fw(bp, &cpu_reg, fw);
3349 /* Initialize the TX Processor. */
3350 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3351 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3352 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3353 cpu_reg.state = BNX2_TXP_CPU_STATE;
3354 cpu_reg.state_value_clear = 0xffffff;
3355 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3356 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3357 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3358 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3359 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3360 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3361 cpu_reg.mips_view_base = 0x8000000;
3363 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3364 fw = &bnx2_txp_fw_09;
3366 fw = &bnx2_txp_fw_06;
3369 rc = load_cpu_fw(bp, &cpu_reg, fw);
3373 /* Initialize the TX Patch-up Processor. */
3374 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3375 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3376 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3377 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3378 cpu_reg.state_value_clear = 0xffffff;
3379 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3380 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3381 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3382 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3383 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3384 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3385 cpu_reg.mips_view_base = 0x8000000;
3387 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3388 fw = &bnx2_tpat_fw_09;
3390 fw = &bnx2_tpat_fw_06;
3393 rc = load_cpu_fw(bp, &cpu_reg, fw);
3397 /* Initialize the Completion Processor. */
3398 cpu_reg.mode = BNX2_COM_CPU_MODE;
3399 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3400 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3401 cpu_reg.state = BNX2_COM_CPU_STATE;
3402 cpu_reg.state_value_clear = 0xffffff;
3403 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3404 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3405 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3406 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3407 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3408 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3409 cpu_reg.mips_view_base = 0x8000000;
3411 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3412 fw = &bnx2_com_fw_09;
3414 fw = &bnx2_com_fw_06;
3417 rc = load_cpu_fw(bp, &cpu_reg, fw);
3421 /* Initialize the Command Processor. */
3422 cpu_reg.mode = BNX2_CP_CPU_MODE;
3423 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3424 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3425 cpu_reg.state = BNX2_CP_CPU_STATE;
3426 cpu_reg.state_value_clear = 0xffffff;
3427 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3428 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3429 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3430 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3431 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3432 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3433 cpu_reg.mips_view_base = 0x8000000;
3435 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3436 fw = &bnx2_cp_fw_09;
3438 fw = &bnx2_cp_fw_06;
3441 rc = load_cpu_fw(bp, &cpu_reg, fw);
3449 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3453 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3459 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3460 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3461 PCI_PM_CTRL_PME_STATUS);
3463 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3464 /* delay required during transition out of D3hot */
3467 val = REG_RD(bp, BNX2_EMAC_MODE);
3468 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3469 val &= ~BNX2_EMAC_MODE_MPKT;
3470 REG_WR(bp, BNX2_EMAC_MODE, val);
3472 val = REG_RD(bp, BNX2_RPM_CONFIG);
3473 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3474 REG_WR(bp, BNX2_RPM_CONFIG, val);
3485 autoneg = bp->autoneg;
3486 advertising = bp->advertising;
3488 if (bp->phy_port == PORT_TP) {
3489 bp->autoneg = AUTONEG_SPEED;
3490 bp->advertising = ADVERTISED_10baseT_Half |
3491 ADVERTISED_10baseT_Full |
3492 ADVERTISED_100baseT_Half |
3493 ADVERTISED_100baseT_Full |
3497 spin_lock_bh(&bp->phy_lock);
3498 bnx2_setup_phy(bp, bp->phy_port);
3499 spin_unlock_bh(&bp->phy_lock);
3501 bp->autoneg = autoneg;
3502 bp->advertising = advertising;
3504 bnx2_set_mac_addr(bp);
3506 val = REG_RD(bp, BNX2_EMAC_MODE);
3508 /* Enable port mode. */
3509 val &= ~BNX2_EMAC_MODE_PORT;
3510 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3511 BNX2_EMAC_MODE_ACPI_RCVD |
3512 BNX2_EMAC_MODE_MPKT;
3513 if (bp->phy_port == PORT_TP)
3514 val |= BNX2_EMAC_MODE_PORT_MII;
3516 val |= BNX2_EMAC_MODE_PORT_GMII;
3517 if (bp->line_speed == SPEED_2500)
3518 val |= BNX2_EMAC_MODE_25G_MODE;
3521 REG_WR(bp, BNX2_EMAC_MODE, val);
3523 /* receive all multicasts */
3524 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3525 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3528 REG_WR(bp, BNX2_EMAC_RX_MODE,
3529 BNX2_EMAC_RX_MODE_SORT_MODE);
3531 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3532 BNX2_RPM_SORT_USER0_MC_EN;
3533 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3534 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3535 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3536 BNX2_RPM_SORT_USER0_ENA);
3538 /* Need to enable EMAC and RPM for WOL. */
3539 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3540 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3541 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3542 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3544 val = REG_RD(bp, BNX2_RPM_CONFIG);
3545 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3546 REG_WR(bp, BNX2_RPM_CONFIG, val);
3548 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3551 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3554 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3555 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3557 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3558 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3559 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3568 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3570 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3573 /* No more memory access after this point until
3574 * device is brought back to D0.
3586 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3591 /* Request access to the flash interface. */
3592 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3593 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3594 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3595 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3601 if (j >= NVRAM_TIMEOUT_COUNT)
3608 bnx2_release_nvram_lock(struct bnx2 *bp)
3613 /* Relinquish nvram interface. */
3614 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3616 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3617 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3618 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3624 if (j >= NVRAM_TIMEOUT_COUNT)
3632 bnx2_enable_nvram_write(struct bnx2 *bp)
3636 val = REG_RD(bp, BNX2_MISC_CFG);
3637 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3639 if (bp->flash_info->flags & BNX2_NV_WREN) {
3642 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3643 REG_WR(bp, BNX2_NVM_COMMAND,
3644 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3646 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3649 val = REG_RD(bp, BNX2_NVM_COMMAND);
3650 if (val & BNX2_NVM_COMMAND_DONE)
3654 if (j >= NVRAM_TIMEOUT_COUNT)
3661 bnx2_disable_nvram_write(struct bnx2 *bp)
3665 val = REG_RD(bp, BNX2_MISC_CFG);
3666 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3671 bnx2_enable_nvram_access(struct bnx2 *bp)
3675 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3676 /* Enable both bits, even on read. */
3677 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3678 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3682 bnx2_disable_nvram_access(struct bnx2 *bp)
3686 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3687 /* Disable both bits, even after read. */
3688 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3689 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3690 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3694 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3699 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3700 /* Buffered flash, no erase needed */
3703 /* Build an erase command */
3704 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3705 BNX2_NVM_COMMAND_DOIT;
3707 /* Need to clear DONE bit separately. */
3708 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3710 /* Address of the NVRAM page to erase. */
3711 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3713 /* Issue an erase command. */
3714 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3716 /* Wait for completion. */
3717 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3722 val = REG_RD(bp, BNX2_NVM_COMMAND);
3723 if (val & BNX2_NVM_COMMAND_DONE)
3727 if (j >= NVRAM_TIMEOUT_COUNT)
3734 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3739 /* Build the command word. */
3740 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3742 /* Calculate the translated offset for buffered flash; not needed for 5709. */
3743 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
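/* Buffered flash parts use page sizes that are not powers of two, so a
 * linear byte offset is converted to (page number << page_bits) plus the
 * byte within the page.  For example, with a hypothetical 264-byte page
 * and page_bits = 9, byte offset 300 becomes (1 << 9) + 36.
 */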
3744 offset = ((offset / bp->flash_info->page_size) <<
3745 bp->flash_info->page_bits) +
3746 (offset % bp->flash_info->page_size);
3749 /* Need to clear DONE bit separately. */
3750 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3752 /* Address of the NVRAM to read from. */
3753 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3755 /* Issue a read command. */
3756 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3758 /* Wait for completion. */
3759 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3764 val = REG_RD(bp, BNX2_NVM_COMMAND);
3765 if (val & BNX2_NVM_COMMAND_DONE) {
3766 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3767 memcpy(ret_val, &v, 4);
3771 if (j >= NVRAM_TIMEOUT_COUNT)
3779 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3785 /* Build the command word. */
3786 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3788 /* Calculate the translated offset for buffered flash; not needed for 5709. */
3789 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3790 offset = ((offset / bp->flash_info->page_size) <<
3791 bp->flash_info->page_bits) +
3792 (offset % bp->flash_info->page_size);
3795 /* Need to clear DONE bit separately. */
3796 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3798 memcpy(&val32, val, 4);
3800 /* Write the data. */
3801 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3803 /* Address of the NVRAM to write to. */
3804 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3806 /* Issue the write command. */
3807 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3809 /* Wait for completion. */
3810 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3813 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3816 if (j >= NVRAM_TIMEOUT_COUNT)
3823 bnx2_init_nvram(struct bnx2 *bp)
3826 int j, entry_count, rc = 0;
3827 struct flash_spec *flash;
3829 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3830 bp->flash_info = &flash_5709;
3831 goto get_flash_size;
3834 /* Determine the selected interface. */
3835 val = REG_RD(bp, BNX2_NVM_CFG1);
3837 entry_count = ARRAY_SIZE(flash_table);
3839 if (val & 0x40000000) {
3841 /* Flash interface has been reconfigured */
3842 for (j = 0, flash = &flash_table[0]; j < entry_count;
3844 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3845 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3846 bp->flash_info = flash;
3853 /* Flash interface has not yet been reconfigured */
3855 if (val & (1 << 23))
3856 mask = FLASH_BACKUP_STRAP_MASK;
3858 mask = FLASH_STRAP_MASK;
3860 for (j = 0, flash = &flash_table[0]; j < entry_count;
3863 if ((val & mask) == (flash->strapping & mask)) {
3864 bp->flash_info = flash;
3866 /* Request access to the flash interface. */
3867 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3870 /* Enable access to flash interface */
3871 bnx2_enable_nvram_access(bp);
3873 /* Reconfigure the flash interface */
3874 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3875 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3876 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3877 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3879 /* Disable access to flash interface */
3880 bnx2_disable_nvram_access(bp);
3881 bnx2_release_nvram_lock(bp);
3886 } /* if (val & 0x40000000) */
3888 if (j == entry_count) {
3889 bp->flash_info = NULL;
3890 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3895 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3896 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3898 bp->flash_size = val;
3900 bp->flash_size = bp->flash_info->total_size;
3906 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3910 u32 cmd_flags, offset32, len32, extra;
3915 /* Request access to the flash interface. */
3916 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3919 /* Enable access to flash interface */
3920 bnx2_enable_nvram_access(bp);
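/* NVRAM is accessed in 32-bit dwords.  If the requested offset is not
 * dword aligned, the containing dword is read first and only the
 * trailing pre_len bytes of it are copied to the caller's buffer.
 */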
3933 pre_len = 4 - (offset & 3);
3935 if (pre_len >= len32) {
3937 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3938 BNX2_NVM_COMMAND_LAST;
3941 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3944 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3949 memcpy(ret_buf, buf + (offset & 3), pre_len);
3956 extra = 4 - (len32 & 3);
3957 len32 = (len32 + 4) & ~3;
3964 cmd_flags = BNX2_NVM_COMMAND_LAST;
3966 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3967 BNX2_NVM_COMMAND_LAST;
3969 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3971 memcpy(ret_buf, buf, 4 - extra);
3973 else if (len32 > 0) {
3976 /* Read the first word. */
3980 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3982 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3984 /* Advance to the next dword. */
3989 while (len32 > 4 && rc == 0) {
3990 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3992 /* Advance to the next dword. */
4001 cmd_flags = BNX2_NVM_COMMAND_LAST;
4002 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4004 memcpy(ret_buf, buf, 4 - extra);
4007 /* Disable access to flash interface */
4008 bnx2_disable_nvram_access(bp);
4010 bnx2_release_nvram_lock(bp);
4016 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4019 u32 written, offset32, len32;
4020 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4022 int align_start, align_end;
4027 align_start = align_end = 0;
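/* Writes must also be dword aligned.  Any partial leading or trailing
 * dword is first read back from NVRAM and merged with the caller's data
 * into align_buf so that only whole dwords are written.
 */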
4029 if ((align_start = (offset32 & 3))) {
4031 len32 += align_start;
4034 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4039 align_end = 4 - (len32 & 3);
4041 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4045 if (align_start || align_end) {
4046 align_buf = kmalloc(len32, GFP_KERNEL);
4047 if (align_buf == NULL)
4050 memcpy(align_buf, start, 4);
4053 memcpy(align_buf + len32 - 4, end, 4);
4055 memcpy(align_buf + align_start, data_buf, buf_size);
4059 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4060 flash_buffer = kmalloc(264, GFP_KERNEL);
4061 if (flash_buffer == NULL) {
4063 goto nvram_write_end;
4068 while ((written < len32) && (rc == 0)) {
4069 u32 page_start, page_end, data_start, data_end;
4070 u32 addr, cmd_flags;
4073 /* Find the page_start addr */
4074 page_start = offset32 + written;
4075 page_start -= (page_start % bp->flash_info->page_size);
4076 /* Find the page_end addr */
4077 page_end = page_start + bp->flash_info->page_size;
4078 /* Find the data_start addr */
4079 data_start = (written == 0) ? offset32 : page_start;
4080 /* Find the data_end addr */
4081 data_end = (page_end > offset32 + len32) ?
4082 (offset32 + len32) : page_end;
4084 /* Request access to the flash interface. */
4085 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4086 goto nvram_write_end;
4088 /* Enable access to flash interface */
4089 bnx2_enable_nvram_access(bp);
4091 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4092 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4095 /* Read the whole page into the buffer
4096 * (non-buffered flash only) */
4097 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4098 if (j == (bp->flash_info->page_size - 4)) {
4099 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4101 rc = bnx2_nvram_read_dword(bp,
4107 goto nvram_write_end;
4113 /* Enable writes to flash interface (unlock write-protect) */
4114 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4115 goto nvram_write_end;
4117 /* Loop to write back the buffer data from page_start to
4120 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4121 /* Erase the page */
4122 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4123 goto nvram_write_end;
4125 /* Re-enable writes for the actual write */
4126 bnx2_enable_nvram_write(bp);
4128 for (addr = page_start; addr < data_start;
4129 addr += 4, i += 4) {
4131 rc = bnx2_nvram_write_dword(bp, addr,
4132 &flash_buffer[i], cmd_flags);
4135 goto nvram_write_end;
4141 /* Loop to write the new data from data_start to data_end */
4142 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4143 if ((addr == page_end - 4) ||
4144 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4145 (addr == data_end - 4))) {
4147 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4149 rc = bnx2_nvram_write_dword(bp, addr, buf,
4153 goto nvram_write_end;
4159 /* Loop to write back the buffer data from data_end
4161 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4162 for (addr = data_end; addr < page_end;
4163 addr += 4, i += 4) {
4165 if (addr == page_end-4) {
4166 cmd_flags = BNX2_NVM_COMMAND_LAST;
4168 rc = bnx2_nvram_write_dword(bp, addr,
4169 &flash_buffer[i], cmd_flags);
4172 goto nvram_write_end;
4178 /* Disable writes to flash interface (lock write-protect) */
4179 bnx2_disable_nvram_write(bp);
4181 /* Disable access to flash interface */
4182 bnx2_disable_nvram_access(bp);
4183 bnx2_release_nvram_lock(bp);
4185 /* Increment written */
4186 written += data_end - data_start;
4190 kfree(flash_buffer);
4196 bnx2_init_remote_phy(struct bnx2 *bp)
4200 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4201 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4204 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4205 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4208 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4209 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4211 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4212 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4213 bp->phy_port = PORT_FIBRE;
4215 bp->phy_port = PORT_TP;
4217 if (netif_running(bp->dev)) {
4220 if (val & BNX2_LINK_STATUS_LINK_UP) {
4222 netif_carrier_on(bp->dev);
4225 netif_carrier_off(bp->dev);
4227 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4228 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4229 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4235 bnx2_setup_msix_tbl(struct bnx2 *bp)
4237 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4239 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4240 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4244 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4250 /* Wait for the current PCI transaction to complete before
4251 * issuing a reset. */
4252 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4253 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4254 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4255 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4256 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4257 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4260 /* Wait for the firmware to tell us it is ok to issue a reset. */
4261 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4263 /* Deposit a driver reset signature so the firmware knows that
4264 * this is a soft reset. */
4265 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4266 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4268 /* Do a dummy read to force the chip to complete all current transactions
4269 * before we issue a reset. */
4270 val = REG_RD(bp, BNX2_MISC_ID);
4272 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4273 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4274 REG_RD(bp, BNX2_MISC_COMMAND);
4277 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4278 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4280 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4283 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4284 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4285 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4288 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4290 /* Reading back any register after chip reset will hang the
4291 * bus on 5706 A0 and A1. The msleep below provides plenty
4292 * of margin for write posting.
4294 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4295 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4298 /* Reset takes approximately 30 usec */
4299 for (i = 0; i < 10; i++) {
4300 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4301 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4302 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4307 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4308 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4309 printk(KERN_ERR PFX "Chip reset did not complete\n");
4314 /* Make sure byte swapping is properly configured. */
4315 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4316 if (val != 0x01020304) {
4317 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4321 /* Wait for the firmware to finish its initialization. */
4322 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4326 spin_lock_bh(&bp->phy_lock);
4327 old_port = bp->phy_port;
4328 bnx2_init_remote_phy(bp);
4329 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4330 old_port != bp->phy_port)
4331 bnx2_set_default_remote_link(bp);
4332 spin_unlock_bh(&bp->phy_lock);
4334 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4335 /* Adjust the voltage regulator two steps lower. The default
4336 * of this register is 0x0000000e. */
4337 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4339 /* Remove bad rbuf memory from the free pool. */
4340 rc = bnx2_alloc_bad_rbuf(bp);
4343 if (bp->flags & BNX2_FLAG_USING_MSIX)
4344 bnx2_setup_msix_tbl(bp);
4350 bnx2_init_chip(struct bnx2 *bp)
4355 /* Make sure the interrupt is not active. */
4356 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4358 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4359 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4361 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4363 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4364 DMA_READ_CHANS << 12 |
4365 DMA_WRITE_CHANS << 16;
4367 val |= (0x2 << 20) | (1 << 11);
4369 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4372 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4373 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4374 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4376 REG_WR(bp, BNX2_DMA_CONFIG, val);
4378 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4379 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4380 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4381 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4384 if (bp->flags & BNX2_FLAG_PCIX) {
4387 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4389 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4390 val16 & ~PCI_X_CMD_ERO);
4393 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4394 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4395 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4396 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4398 /* Initialize context mapping and zero out the quick contexts. The
4399 * context block must have already been enabled. */
4400 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4401 rc = bnx2_init_5709_context(bp);
4405 bnx2_init_context(bp);
4407 if ((rc = bnx2_init_cpus(bp)) != 0)
4410 bnx2_init_nvram(bp);
4412 bnx2_set_mac_addr(bp);
4414 val = REG_RD(bp, BNX2_MQ_CONFIG);
4415 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4416 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4417 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4418 val |= BNX2_MQ_CONFIG_HALT_DIS;
4420 REG_WR(bp, BNX2_MQ_CONFIG, val);
4422 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4423 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4424 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4426 val = (BCM_PAGE_BITS - 8) << 24;
4427 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4429 /* Configure page size. */
4430 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4431 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4432 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4433 REG_WR(bp, BNX2_TBDR_CONFIG, val);
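/* Seed the transmit backoff generator from the station address so that,
 * presumably, different NICs choose different collision backoff slots.
 */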
4435 val = bp->mac_addr[0] +
4436 (bp->mac_addr[1] << 8) +
4437 (bp->mac_addr[2] << 16) +
4439 (bp->mac_addr[4] << 8) +
4440 (bp->mac_addr[5] << 16);
4441 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4443 /* Program the MTU. Also include 4 bytes for CRC32. */
4444 val = bp->dev->mtu + ETH_HLEN + 4;
4445 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4446 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4447 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4449 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4450 bp->bnx2_napi[i].last_status_idx = 0;
4452 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4454 /* Set up how to generate a link change interrupt. */
4455 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4457 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4458 (u64) bp->status_blk_mapping & 0xffffffff);
4459 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4461 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4462 (u64) bp->stats_blk_mapping & 0xffffffff);
4463 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4464 (u64) bp->stats_blk_mapping >> 32);
4466 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4467 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4469 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4470 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4472 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4473 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4475 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4477 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4479 REG_WR(bp, BNX2_HC_COM_TICKS,
4480 (bp->com_ticks_int << 16) | bp->com_ticks);
4482 REG_WR(bp, BNX2_HC_CMD_TICKS,
4483 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4485 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4486 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4488 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4489 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4491 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4492 val = BNX2_HC_CONFIG_COLLECT_STATS;
4494 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4495 BNX2_HC_CONFIG_COLLECT_STATS;
4498 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4499 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4500 BNX2_HC_SB_CONFIG_1;
4502 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4503 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4506 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4507 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4509 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4510 (bp->tx_quick_cons_trip_int << 16) |
4511 bp->tx_quick_cons_trip);
4513 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4514 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4516 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4519 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4520 val |= BNX2_HC_CONFIG_ONE_SHOT;
4522 REG_WR(bp, BNX2_HC_CONFIG, val);
4524 /* Clear internal stats counters. */
4525 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4527 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4529 /* Initialize the receive filter. */
4530 bnx2_set_rx_mode(bp->dev);
4532 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4533 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4534 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4535 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4537 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4540 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4541 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4545 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4551 bnx2_clear_ring_states(struct bnx2 *bp)
4553 struct bnx2_napi *bnapi;
4556 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4557 bnapi = &bp->bnx2_napi[i];
4560 bnapi->hw_tx_cons = 0;
4561 bnapi->rx_prod_bseq = 0;
4564 bnapi->rx_pg_prod = 0;
4565 bnapi->rx_pg_cons = 0;
4570 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4572 u32 val, offset0, offset1, offset2, offset3;
4573 u32 cid_addr = GET_CID_ADDR(cid);
4575 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4576 offset0 = BNX2_L2CTX_TYPE_XI;
4577 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4578 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4579 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4581 offset0 = BNX2_L2CTX_TYPE;
4582 offset1 = BNX2_L2CTX_CMD_TYPE;
4583 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4584 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4586 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4587 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4589 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4590 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4592 val = (u64) bp->tx_desc_mapping >> 32;
4593 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4595 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4596 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4600 bnx2_init_tx_ring(struct bnx2 *bp)
4604 struct bnx2_napi *bnapi;
4607 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4609 bp->tx_vec = BNX2_TX_VEC;
4610 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4613 bnapi = &bp->bnx2_napi[bp->tx_vec];
4615 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4617 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4619 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4620 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4623 bp->tx_prod_bseq = 0;
4625 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4626 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4628 bnx2_init_tx_context(bp, cid);
4632 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4638 for (i = 0; i < num_rings; i++) {
4641 rxbd = &rx_ring[i][0];
4642 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4643 rxbd->rx_bd_len = buf_size;
4644 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4646 if (i == (num_rings - 1))
4650 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4651 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4656 bnx2_init_rx_ring(struct bnx2 *bp)
4659 u16 prod, ring_prod;
4660 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4661 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4663 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4664 bp->rx_buf_use_size, bp->rx_max_ring);
4666 bnx2_init_rx_context0(bp);
4668 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4669 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4670 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4673 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4674 if (bp->rx_pg_ring_size) {
4675 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4676 bp->rx_pg_desc_mapping,
4677 PAGE_SIZE, bp->rx_max_pg_ring);
4678 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4679 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4680 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4681 BNX2_L2CTX_RBDC_JUMBO_KEY);
4683 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4684 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4686 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4687 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4689 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4690 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4693 val = (u64) bp->rx_desc_mapping[0] >> 32;
4694 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4696 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4697 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4699 ring_prod = prod = bnapi->rx_pg_prod;
4700 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4701 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4703 prod = NEXT_RX_BD(prod);
4704 ring_prod = RX_PG_RING_IDX(prod);
4706 bnapi->rx_pg_prod = prod;
4708 ring_prod = prod = bnapi->rx_prod;
4709 for (i = 0; i < bp->rx_ring_size; i++) {
4710 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4713 prod = NEXT_RX_BD(prod);
4714 ring_prod = RX_RING_IDX(prod);
4716 bnapi->rx_prod = prod;
4718 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4720 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4722 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4725 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4727 u32 max, num_rings = 1;
4729 while (ring_size > MAX_RX_DESC_CNT) {
4730 ring_size -= MAX_RX_DESC_CNT;
4733 /* round to next power of 2 */
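/* Illustration: a request needing num_rings = 3 ring pages with a
 * power-of-two maximum of 8 walks 8 -> 4 -> 2, stops when a set bit of
 * num_rings is found, and then doubles back up to 4 rings.
 */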
4735 while ((max & num_rings) == 0)
4738 if (num_rings != max)
4745 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4747 u32 rx_size, rx_space, jumbo_size;
4749 /* 8 for CRC and VLAN */
4750 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4752 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4753 sizeof(struct skb_shared_info);
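/* rx_space is the true per-buffer memory footprint of an rx skb (aligned
 * data area plus skb_shared_info).  If that would exceed one page, the
 * page ring is enabled below and only the headers are kept in the linear
 * buffer.
 */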
4755 bp->rx_copy_thresh = RX_COPY_THRESH;
4756 bp->rx_pg_ring_size = 0;
4757 bp->rx_max_pg_ring = 0;
4758 bp->rx_max_pg_ring_idx = 0;
4759 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
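/* The 40 bytes subtracted here presumably approximate a minimal IPv4 +
 * TCP header that stays in the linear part; the rest of an MTU-sized
 * payload is spread over page buffers.
 */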
4760 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4762 jumbo_size = size * pages;
4763 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4764 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4766 bp->rx_pg_ring_size = jumbo_size;
4767 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4769 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4770 rx_size = RX_COPY_THRESH + bp->rx_offset;
4771 bp->rx_copy_thresh = 0;
4774 bp->rx_buf_use_size = rx_size;
4776 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4777 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4778 bp->rx_ring_size = size;
4779 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4780 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4784 bnx2_free_tx_skbs(struct bnx2 *bp)
4788 if (bp->tx_buf_ring == NULL)
4791 for (i = 0; i < TX_DESC_CNT; ) {
4792 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4793 struct sk_buff *skb = tx_buf->skb;
4801 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4802 skb_headlen(skb), PCI_DMA_TODEVICE);
4806 last = skb_shinfo(skb)->nr_frags;
4807 for (j = 0; j < last; j++) {
4808 tx_buf = &bp->tx_buf_ring[i + j + 1];
4809 pci_unmap_page(bp->pdev,
4810 pci_unmap_addr(tx_buf, mapping),
4811 skb_shinfo(skb)->frags[j].size,
4821 bnx2_free_rx_skbs(struct bnx2 *bp)
4825 if (bp->rx_buf_ring == NULL)
4828 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4829 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4830 struct sk_buff *skb = rx_buf->skb;
4835 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4836 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4842 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4843 bnx2_free_rx_page(bp, i);
4847 bnx2_free_skbs(struct bnx2 *bp)
4849 bnx2_free_tx_skbs(bp);
4850 bnx2_free_rx_skbs(bp);
4854 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4858 rc = bnx2_reset_chip(bp, reset_code);
4863 if ((rc = bnx2_init_chip(bp)) != 0)
4866 bnx2_clear_ring_states(bp);
4867 bnx2_init_tx_ring(bp);
4868 bnx2_init_rx_ring(bp);
4873 bnx2_init_nic(struct bnx2 *bp)
4877 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4880 spin_lock_bh(&bp->phy_lock);
4883 spin_unlock_bh(&bp->phy_lock);
4888 bnx2_test_registers(struct bnx2 *bp)
4892 static const struct {
4895 #define BNX2_FL_NOT_5709 1
4899 { 0x006c, 0, 0x00000000, 0x0000003f },
4900 { 0x0090, 0, 0xffffffff, 0x00000000 },
4901 { 0x0094, 0, 0x00000000, 0x00000000 },
4903 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4904 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4905 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4906 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4907 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4908 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4909 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4910 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4911 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4913 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4914 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4915 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4916 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4917 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4918 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4920 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4921 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4922 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4924 { 0x1000, 0, 0x00000000, 0x00000001 },
4925 { 0x1004, 0, 0x00000000, 0x000f0001 },
4927 { 0x1408, 0, 0x01c00800, 0x00000000 },
4928 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4929 { 0x14a8, 0, 0x00000000, 0x000001ff },
4930 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4931 { 0x14b0, 0, 0x00000002, 0x00000001 },
4932 { 0x14b8, 0, 0x00000000, 0x00000000 },
4933 { 0x14c0, 0, 0x00000000, 0x00000009 },
4934 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4935 { 0x14cc, 0, 0x00000000, 0x00000001 },
4936 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4938 { 0x1800, 0, 0x00000000, 0x00000001 },
4939 { 0x1804, 0, 0x00000000, 0x00000003 },
4941 { 0x2800, 0, 0x00000000, 0x00000001 },
4942 { 0x2804, 0, 0x00000000, 0x00003f01 },
4943 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4944 { 0x2810, 0, 0xffff0000, 0x00000000 },
4945 { 0x2814, 0, 0xffff0000, 0x00000000 },
4946 { 0x2818, 0, 0xffff0000, 0x00000000 },
4947 { 0x281c, 0, 0xffff0000, 0x00000000 },
4948 { 0x2834, 0, 0xffffffff, 0x00000000 },
4949 { 0x2840, 0, 0x00000000, 0xffffffff },
4950 { 0x2844, 0, 0x00000000, 0xffffffff },
4951 { 0x2848, 0, 0xffffffff, 0x00000000 },
4952 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4954 { 0x2c00, 0, 0x00000000, 0x00000011 },
4955 { 0x2c04, 0, 0x00000000, 0x00030007 },
4957 { 0x3c00, 0, 0x00000000, 0x00000001 },
4958 { 0x3c04, 0, 0x00000000, 0x00070000 },
4959 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4960 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4961 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4962 { 0x3c14, 0, 0x00000000, 0xffffffff },
4963 { 0x3c18, 0, 0x00000000, 0xffffffff },
4964 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4965 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4967 { 0x5004, 0, 0x00000000, 0x0000007f },
4968 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4970 { 0x5c00, 0, 0x00000000, 0x00000001 },
4971 { 0x5c04, 0, 0x00000000, 0x0003000f },
4972 { 0x5c08, 0, 0x00000003, 0x00000000 },
4973 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4974 { 0x5c10, 0, 0x00000000, 0xffffffff },
4975 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4976 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4977 { 0x5c88, 0, 0x00000000, 0x00077373 },
4978 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4980 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4981 { 0x680c, 0, 0xffffffff, 0x00000000 },
4982 { 0x6810, 0, 0xffffffff, 0x00000000 },
4983 { 0x6814, 0, 0xffffffff, 0x00000000 },
4984 { 0x6818, 0, 0xffffffff, 0x00000000 },
4985 { 0x681c, 0, 0xffffffff, 0x00000000 },
4986 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4987 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4988 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4989 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4990 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4991 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4992 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4993 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4994 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4995 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4996 { 0x684c, 0, 0xffffffff, 0x00000000 },
4997 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4998 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4999 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5000 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5001 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5002 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5004 { 0xffff, 0, 0x00000000, 0x00000000 },
5009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5012 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5013 u32 offset, rw_mask, ro_mask, save_val, val;
5014 u16 flags = reg_tbl[i].flags;
5016 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5019 offset = (u32) reg_tbl[i].offset;
5020 rw_mask = reg_tbl[i].rw_mask;
5021 ro_mask = reg_tbl[i].ro_mask;
5023 save_val = readl(bp->regview + offset);
5025 writel(0, bp->regview + offset);
5027 val = readl(bp->regview + offset);
5028 if ((val & rw_mask) != 0) {
5032 if ((val & ro_mask) != (save_val & ro_mask)) {
5036 writel(0xffffffff, bp->regview + offset);
5038 val = readl(bp->regview + offset);
5039 if ((val & rw_mask) != rw_mask) {
5043 if ((val & ro_mask) != (save_val & ro_mask)) {
5047 writel(save_val, bp->regview + offset);
5051 writel(save_val, bp->regview + offset);
5059 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5061 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5062 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
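/* Write each pattern to every 32-bit word in [start, start + size)
 * through the indirect register interface and read it back; any
 * mismatch fails the memory test.
 */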
5065 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5068 for (offset = 0; offset < size; offset += 4) {
5070 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5072 if (bnx2_reg_rd_ind(bp, start + offset) !=
5082 bnx2_test_memory(struct bnx2 *bp)
5086 static struct mem_entry {
5089 } mem_tbl_5706[] = {
5090 { 0x60000, 0x4000 },
5091 { 0xa0000, 0x3000 },
5092 { 0xe0000, 0x4000 },
5093 { 0x120000, 0x4000 },
5094 { 0x1a0000, 0x4000 },
5095 { 0x160000, 0x4000 },
5099 { 0x60000, 0x4000 },
5100 { 0xa0000, 0x3000 },
5101 { 0xe0000, 0x4000 },
5102 { 0x120000, 0x4000 },
5103 { 0x1a0000, 0x4000 },
5106 struct mem_entry *mem_tbl;
5108 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5109 mem_tbl = mem_tbl_5709;
5111 mem_tbl = mem_tbl_5706;
5113 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5114 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5115 mem_tbl[i].len)) != 0) {
5123 #define BNX2_MAC_LOOPBACK 0
5124 #define BNX2_PHY_LOOPBACK 1
5127 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5129 unsigned int pkt_size, num_pkts, i;
5130 struct sk_buff *skb, *rx_skb;
5131 unsigned char *packet;
5132 u16 rx_start_idx, rx_idx;
5135 struct sw_bd *rx_buf;
5136 struct l2_fhdr *rx_hdr;
5138 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5141 if (bp->flags & BNX2_FLAG_USING_MSIX)
5142 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5144 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5145 bp->loopback = MAC_LOOPBACK;
5146 bnx2_set_mac_loopback(bp);
5148 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5149 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5152 bp->loopback = PHY_LOOPBACK;
5153 bnx2_set_phy_loopback(bp);
5158 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5159 skb = netdev_alloc_skb(bp->dev, pkt_size);
5162 packet = skb_put(skb, pkt_size);
5163 memcpy(packet, bp->dev->dev_addr, 6);
5164 memset(packet + 6, 0x0, 8);
5165 for (i = 14; i < pkt_size; i++)
5166 packet[i] = (unsigned char) (i & 0xff);
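/* The frame built above carries the device's own MAC address as the
 * destination, eight zero bytes, and an incrementing (i & 0xff)
 * payload; the receive check below looks for the same pattern.
 */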
5168 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5171 REG_WR(bp, BNX2_HC_COMMAND,
5172 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5174 REG_RD(bp, BNX2_HC_COMMAND);
5177 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5181 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5183 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5184 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5185 txbd->tx_bd_mss_nbytes = pkt_size;
5186 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5189 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5190 bp->tx_prod_bseq += pkt_size;
5192 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5193 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5197 REG_WR(bp, BNX2_HC_COMMAND,
5198 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5200 REG_RD(bp, BNX2_HC_COMMAND);
5204 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5207 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5208 goto loopback_test_done;
5210 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5211 if (rx_idx != rx_start_idx + num_pkts) {
5212 goto loopback_test_done;
5215 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5216 rx_skb = rx_buf->skb;
5218 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5219 skb_reserve(rx_skb, bp->rx_offset);
5221 pci_dma_sync_single_for_cpu(bp->pdev,
5222 pci_unmap_addr(rx_buf, mapping),
5223 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5225 if (rx_hdr->l2_fhdr_status &
5226 (L2_FHDR_ERRORS_BAD_CRC |
5227 L2_FHDR_ERRORS_PHY_DECODE |
5228 L2_FHDR_ERRORS_ALIGNMENT |
5229 L2_FHDR_ERRORS_TOO_SHORT |
5230 L2_FHDR_ERRORS_GIANT_FRAME)) {
5232 goto loopback_test_done;
5235 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5236 goto loopback_test_done;
5239 for (i = 14; i < pkt_size; i++) {
5240 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5241 goto loopback_test_done;
5252 #define BNX2_MAC_LOOPBACK_FAILED 1
5253 #define BNX2_PHY_LOOPBACK_FAILED 2
5254 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5255 BNX2_PHY_LOOPBACK_FAILED)
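/* bnx2_test_loopback() returns a bitmask with one bit set per
 * loopback mode (MAC, PHY) that failed.
 */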
5258 bnx2_test_loopback(struct bnx2 *bp)
5262 if (!netif_running(bp->dev))
5263 return BNX2_LOOPBACK_FAILED;
5265 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5266 spin_lock_bh(&bp->phy_lock);
5268 spin_unlock_bh(&bp->phy_lock);
5269 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5270 rc |= BNX2_MAC_LOOPBACK_FAILED;
5271 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5272 rc |= BNX2_PHY_LOOPBACK_FAILED;
5276 #define NVRAM_SIZE 0x200
5277 #define CRC32_RESIDUAL 0xdebb20e3
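/* Running CRC-32 over a region that ends with its own stored CRC
 * leaves this well-known residual, so each 0x100-byte half of the
 * block read below can be validated without locating the CRC field.
 */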
5280 bnx2_test_nvram(struct bnx2 *bp)
5282 __be32 buf[NVRAM_SIZE / 4];
5283 u8 *data = (u8 *) buf;
5287 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5288 goto test_nvram_done;
5290 magic = be32_to_cpu(buf[0]);
5291 if (magic != 0x669955aa) {
5293 goto test_nvram_done;
5296 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5297 goto test_nvram_done;
5299 csum = ether_crc_le(0x100, data);
5300 if (csum != CRC32_RESIDUAL) {
5302 goto test_nvram_done;
5305 csum = ether_crc_le(0x100, data + 0x100);
5306 if (csum != CRC32_RESIDUAL) {
5315 bnx2_test_link(struct bnx2 *bp)
5319 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5324 spin_lock_bh(&bp->phy_lock);
5325 bnx2_enable_bmsr1(bp);
5326 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5327 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5328 bnx2_disable_bmsr1(bp);
5329 spin_unlock_bh(&bp->phy_lock);
5331 if (bmsr & BMSR_LSTATUS) {
5338 bnx2_test_intr(struct bnx2 *bp)
5343 if (!netif_running(bp->dev))
5346 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5348 /* This register is not touched during run-time. */
5349 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5350 REG_RD(bp, BNX2_HC_COMMAND);
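/* Force an immediate interrupt with COAL_NOW and poll for up to
 * roughly 100 ms for the acked status index to advance; if it never
 * changes, interrupt delivery is not working.
 */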
5352 for (i = 0; i < 10; i++) {
5353 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5359 msleep_interruptible(10);
5367 /* Determine whether a link partner is present, for parallel detection. */
5369 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5371 u32 mode_ctl, an_dbg, exp;
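/* Parallel detection: report a link only when signal detect is
 * asserted, the AN debug register shows neither loss of sync nor an
 * invalid RUDI, and the expansion register does not indicate that
 * CONFIG code words are still being received from the partner.
 */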
5373 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5376 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5377 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5379 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5382 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5383 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5384 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5386 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5389 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5390 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5391 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5393 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5400 bnx2_5706_serdes_timer(struct bnx2 *bp)
5404 spin_lock(&bp->phy_lock);
5405 if (bp->serdes_an_pending) {
5406 bp->serdes_an_pending--;
5408 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5411 bp->current_interval = bp->timer_interval;
5413 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5415 if (bmcr & BMCR_ANENABLE) {
5416 if (bnx2_5706_serdes_has_link(bp)) {
5417 bmcr &= ~BMCR_ANENABLE;
5418 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5419 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5420 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5424 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5425 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5428 bnx2_write_phy(bp, 0x17, 0x0f01);
5429 bnx2_read_phy(bp, 0x15, &phy2);
5433 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5434 bmcr |= BMCR_ANENABLE;
5435 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5437 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5440 bp->current_interval = bp->timer_interval;
5445 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5446 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5447 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5449 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5450 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5451 bnx2_5706s_force_link_dn(bp, 1);
5452 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5455 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5458 spin_unlock(&bp->phy_lock);
5462 bnx2_5708_serdes_timer(struct bnx2 *bp)
5464 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5467 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5468 bp->serdes_an_pending = 0;
5472 spin_lock(&bp->phy_lock);
5473 if (bp->serdes_an_pending)
5474 bp->serdes_an_pending--;
5475 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5478 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5479 if (bmcr & BMCR_ANENABLE) {
5480 bnx2_enable_forced_2g5(bp);
5481 bp->current_interval = SERDES_FORCED_TIMEOUT;
5483 bnx2_disable_forced_2g5(bp);
5484 bp->serdes_an_pending = 2;
5485 bp->current_interval = bp->timer_interval;
5489 bp->current_interval = bp->timer_interval;
5491 spin_unlock(&bp->phy_lock);
5495 bnx2_timer(unsigned long data)
5497 struct bnx2 *bp = (struct bnx2 *) data;
5499 if (!netif_running(bp->dev))
5502 if (atomic_read(&bp->intr_sem) != 0)
5503 goto bnx2_restart_timer;
5505 bnx2_send_heart_beat(bp);
5507 bp->stats_blk->stat_FwRxDrop =
5508 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5510 /* Work around occasional corrupted counters. */
5511 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5512 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5513 BNX2_HC_COMMAND_STATS_NOW);
5515 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5516 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5517 bnx2_5706_serdes_timer(bp);
5519 bnx2_5708_serdes_timer(bp);
5523 mod_timer(&bp->timer, jiffies + bp->current_interval);
5527 bnx2_request_irq(struct bnx2 *bp)
5529 struct net_device *dev = bp->dev;
5530 unsigned long flags;
5531 struct bnx2_irq *irq;
5534 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5537 flags = IRQF_SHARED;
5539 for (i = 0; i < bp->irq_nvecs; i++) {
5540 irq = &bp->irq_tbl[i];
5541 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5551 bnx2_free_irq(struct bnx2 *bp)
5553 struct net_device *dev = bp->dev;
5554 struct bnx2_irq *irq;
5557 for (i = 0; i < bp->irq_nvecs; i++) {
5558 irq = &bp->irq_tbl[i];
5560 free_irq(irq->vector, dev);
5563 if (bp->flags & BNX2_FLAG_USING_MSI)
5564 pci_disable_msi(bp->pdev);
5565 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5566 pci_disable_msix(bp->pdev);
5568 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5572 bnx2_enable_msix(struct bnx2 *bp)
5575 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5577 bnx2_setup_msix_tbl(bp);
5578 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5579 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5580 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5582 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5583 msix_ent[i].entry = i;
5584 msix_ent[i].vector = 0;
5587 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5591 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5592 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5594 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5595 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5596 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5597 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5599 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5600 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5601 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5602 bp->irq_tbl[i].vector = msix_ent[i].vector;
5606 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5608 bp->irq_tbl[0].handler = bnx2_interrupt;
5609 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5611 bp->irq_tbl[0].vector = bp->pdev->irq;
5613 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5614 bnx2_enable_msix(bp);
5616 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5617 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5618 if (pci_enable_msi(bp->pdev) == 0) {
5619 bp->flags |= BNX2_FLAG_USING_MSI;
5620 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5621 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5622 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5624 bp->irq_tbl[0].handler = bnx2_msi;
5626 bp->irq_tbl[0].vector = bp->pdev->irq;
5631 /* Called with rtnl_lock */
5633 bnx2_open(struct net_device *dev)
5635 struct bnx2 *bp = netdev_priv(dev);
5638 netif_carrier_off(dev);
5640 bnx2_set_power_state(bp, PCI_D0);
5641 bnx2_disable_int(bp);
5643 rc = bnx2_alloc_mem(bp);
5647 bnx2_setup_int_mode(bp, disable_msi);
5648 bnx2_napi_enable(bp);
5649 rc = bnx2_request_irq(bp);
5652 bnx2_napi_disable(bp);
5657 rc = bnx2_init_nic(bp);
5660 bnx2_napi_disable(bp);
5667 mod_timer(&bp->timer, jiffies + bp->current_interval);
5669 atomic_set(&bp->intr_sem, 0);
5671 bnx2_enable_int(bp);
5673 if (bp->flags & BNX2_FLAG_USING_MSI) {
5674 /* Test MSI to make sure it is working.
5675 * If the MSI test fails, go back to INTx mode.
5677 if (bnx2_test_intr(bp) != 0) {
5678 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5679 " using MSI, switching to INTx mode. Please"
5680 " report this failure to the PCI maintainer"
5681 " and include system chipset information.\n",
5684 bnx2_disable_int(bp);
5687 bnx2_setup_int_mode(bp, 1);
5689 rc = bnx2_init_nic(bp);
5692 rc = bnx2_request_irq(bp);
5695 bnx2_napi_disable(bp);
5698 del_timer_sync(&bp->timer);
5701 bnx2_enable_int(bp);
5704 if (bp->flags & BNX2_FLAG_USING_MSI)
5705 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5706 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5707 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5709 netif_start_queue(dev);
5715 bnx2_reset_task(struct work_struct *work)
5717 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5719 if (!netif_running(bp->dev))
5722 bp->in_reset_task = 1;
5723 bnx2_netif_stop(bp);
5727 atomic_set(&bp->intr_sem, 1);
5728 bnx2_netif_start(bp);
5729 bp->in_reset_task = 0;
5733 bnx2_tx_timeout(struct net_device *dev)
5735 struct bnx2 *bp = netdev_priv(dev);
5737 /* This allows the netif to be shut down gracefully before resetting */
5738 schedule_work(&bp->reset_task);
5742 /* Called with rtnl_lock */
5744 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5746 struct bnx2 *bp = netdev_priv(dev);
5748 bnx2_netif_stop(bp);
5751 bnx2_set_rx_mode(dev);
5753 bnx2_netif_start(bp);
5757 /* Called with netif_tx_lock.
5758 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5759 * netif_wake_queue().
5762 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5764 struct bnx2 *bp = netdev_priv(dev);
5767 struct sw_bd *tx_buf;
5768 u32 len, vlan_tag_flags, last_frag, mss;
5769 u16 prod, ring_prod;
5771 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5773 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5774 (skb_shinfo(skb)->nr_frags + 1))) {
5775 netif_stop_queue(dev);
5776 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5779 return NETDEV_TX_BUSY;
5781 len = skb_headlen(skb);
5783 ring_prod = TX_RING_IDX(prod);
5786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5787 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5790 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5792 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5794 if ((mss = skb_shinfo(skb)->gso_size)) {
5795 u32 tcp_opt_len, ip_tcp_len;
5798 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5800 tcp_opt_len = tcp_optlen(skb);
5802 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5803 u32 tcp_off = skb_transport_offset(skb) -
5804 sizeof(struct ipv6hdr) - ETH_HLEN;
5806 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5807 TX_BD_FLAGS_SW_FLAGS;
5808 if (likely(tcp_off == 0))
5809 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5812 vlan_tag_flags |= ((tcp_off & 0x3) <<
5813 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5814 ((tcp_off & 0x10) <<
5815 TX_BD_FLAGS_TCP6_OFF4_SHL);
5816 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5819 if (skb_header_cloned(skb) &&
5820 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5822 return NETDEV_TX_OK;
5825 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5829 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5830 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5834 if (tcp_opt_len || (iph->ihl > 5)) {
5835 vlan_tag_flags |= ((iph->ihl - 5) +
5836 (tcp_opt_len >> 2)) << 8;
5842 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5844 tx_buf = &bp->tx_buf_ring[ring_prod];
5846 pci_unmap_addr_set(tx_buf, mapping, mapping);
5848 txbd = &bp->tx_desc_ring[ring_prod];
5850 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5851 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5852 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5853 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5855 last_frag = skb_shinfo(skb)->nr_frags;
5857 for (i = 0; i < last_frag; i++) {
5858 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5860 prod = NEXT_TX_BD(prod);
5861 ring_prod = TX_RING_IDX(prod);
5862 txbd = &bp->tx_desc_ring[ring_prod];
5865 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5866 len, PCI_DMA_TODEVICE);
5867 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5870 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5871 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5872 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5873 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5876 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5878 prod = NEXT_TX_BD(prod);
5879 bp->tx_prod_bseq += skb->len;
5881 REG_WR16(bp, bp->tx_bidx_addr, prod);
5882 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5887 dev->trans_start = jiffies;
5889 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5890 netif_stop_queue(dev);
5891 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5892 netif_wake_queue(dev);
5895 return NETDEV_TX_OK;
5898 /* Called with rtnl_lock */
5900 bnx2_close(struct net_device *dev)
5902 struct bnx2 *bp = netdev_priv(dev);
5905 /* Calling flush_scheduled_work() may deadlock because
5906 * linkwatch_event() may be on the workqueue and it will try to get
5907 * the rtnl_lock which we are holding.
5909 while (bp->in_reset_task)
5912 bnx2_disable_int_sync(bp);
5913 bnx2_napi_disable(bp);
5914 del_timer_sync(&bp->timer);
5915 if (bp->flags & BNX2_FLAG_NO_WOL)
5916 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5918 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5920 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5921 bnx2_reset_chip(bp, reset_code);
5926 netif_carrier_off(bp->dev);
5927 bnx2_set_power_state(bp, PCI_D3hot);
5931 #define GET_NET_STATS64(ctr) \
5932 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5933 (unsigned long) (ctr##_lo)
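/* The hardware keeps 64-bit counters as separate _hi/_lo 32-bit
 * words; GET_NET_STATS64 folds them into one unsigned long, and
 * BITS_PER_LONG selects it or the 32-bit variant below.
 */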
5935 #define GET_NET_STATS32(ctr) \
5938 #if (BITS_PER_LONG == 64)
5939 #define GET_NET_STATS GET_NET_STATS64
5941 #define GET_NET_STATS GET_NET_STATS32
5944 static struct net_device_stats *
5945 bnx2_get_stats(struct net_device *dev)
5947 struct bnx2 *bp = netdev_priv(dev);
5948 struct statistics_block *stats_blk = bp->stats_blk;
5949 struct net_device_stats *net_stats = &bp->net_stats;
5951 if (bp->stats_blk == NULL) {
5954 net_stats->rx_packets =
5955 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5956 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5957 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5959 net_stats->tx_packets =
5960 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5961 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5962 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5964 net_stats->rx_bytes =
5965 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5967 net_stats->tx_bytes =
5968 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5970 net_stats->multicast =
5971 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5973 net_stats->collisions =
5974 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5976 net_stats->rx_length_errors =
5977 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5978 stats_blk->stat_EtherStatsOverrsizePkts);
5980 net_stats->rx_over_errors =
5981 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5983 net_stats->rx_frame_errors =
5984 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5986 net_stats->rx_crc_errors =
5987 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5989 net_stats->rx_errors = net_stats->rx_length_errors +
5990 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5991 net_stats->rx_crc_errors;
5993 net_stats->tx_aborted_errors =
5994 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5995 stats_blk->stat_Dot3StatsLateCollisions);
5997 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5998 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5999 net_stats->tx_carrier_errors = 0;
6001 net_stats->tx_carrier_errors =
6003 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6006 net_stats->tx_errors =
6008 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6010 net_stats->tx_aborted_errors +
6011 net_stats->tx_carrier_errors;
6013 net_stats->rx_missed_errors =
6014 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6015 stats_blk->stat_FwRxDrop);
6020 /* All ethtool functions called with rtnl_lock */
6023 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6025 struct bnx2 *bp = netdev_priv(dev);
6026 int support_serdes = 0, support_copper = 0;
6028 cmd->supported = SUPPORTED_Autoneg;
6029 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6032 } else if (bp->phy_port == PORT_FIBRE)
6037 if (support_serdes) {
6038 cmd->supported |= SUPPORTED_1000baseT_Full |
6040 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6041 cmd->supported |= SUPPORTED_2500baseX_Full;
6044 if (support_copper) {
6045 cmd->supported |= SUPPORTED_10baseT_Half |
6046 SUPPORTED_10baseT_Full |
6047 SUPPORTED_100baseT_Half |
6048 SUPPORTED_100baseT_Full |
6049 SUPPORTED_1000baseT_Full |
6054 spin_lock_bh(&bp->phy_lock);
6055 cmd->port = bp->phy_port;
6056 cmd->advertising = bp->advertising;
6058 if (bp->autoneg & AUTONEG_SPEED) {
6059 cmd->autoneg = AUTONEG_ENABLE;
6062 cmd->autoneg = AUTONEG_DISABLE;
6065 if (netif_carrier_ok(dev)) {
6066 cmd->speed = bp->line_speed;
6067 cmd->duplex = bp->duplex;
6073 spin_unlock_bh(&bp->phy_lock);
6075 cmd->transceiver = XCVR_INTERNAL;
6076 cmd->phy_address = bp->phy_addr;
6082 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6084 struct bnx2 *bp = netdev_priv(dev);
6085 u8 autoneg = bp->autoneg;
6086 u8 req_duplex = bp->req_duplex;
6087 u16 req_line_speed = bp->req_line_speed;
6088 u32 advertising = bp->advertising;
6091 spin_lock_bh(&bp->phy_lock);
6093 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6094 goto err_out_unlock;
6096 if (cmd->port != bp->phy_port &&
6097 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6098 goto err_out_unlock;
6100 if (cmd->autoneg == AUTONEG_ENABLE) {
6101 autoneg |= AUTONEG_SPEED;
6103 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6105 /* allow advertising a single speed */
6106 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6107 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6108 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6109 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6111 if (cmd->port == PORT_FIBRE)
6112 goto err_out_unlock;
6114 advertising = cmd->advertising;
6116 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6117 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6118 (cmd->port == PORT_TP))
6119 goto err_out_unlock;
6120 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6121 advertising = cmd->advertising;
6122 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6123 goto err_out_unlock;
6125 if (cmd->port == PORT_FIBRE)
6126 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6128 advertising = ETHTOOL_ALL_COPPER_SPEED;
6130 advertising |= ADVERTISED_Autoneg;
6133 if (cmd->port == PORT_FIBRE) {
6134 if ((cmd->speed != SPEED_1000 &&
6135 cmd->speed != SPEED_2500) ||
6136 (cmd->duplex != DUPLEX_FULL))
6137 goto err_out_unlock;
6139 if (cmd->speed == SPEED_2500 &&
6140 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6141 goto err_out_unlock;
6143 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6144 goto err_out_unlock;
6146 autoneg &= ~AUTONEG_SPEED;
6147 req_line_speed = cmd->speed;
6148 req_duplex = cmd->duplex;
6152 bp->autoneg = autoneg;
6153 bp->advertising = advertising;
6154 bp->req_line_speed = req_line_speed;
6155 bp->req_duplex = req_duplex;
6157 err = bnx2_setup_phy(bp, cmd->port);
6160 spin_unlock_bh(&bp->phy_lock);
6166 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6168 struct bnx2 *bp = netdev_priv(dev);
6170 strcpy(info->driver, DRV_MODULE_NAME);
6171 strcpy(info->version, DRV_MODULE_VERSION);
6172 strcpy(info->bus_info, pci_name(bp->pdev));
6173 strcpy(info->fw_version, bp->fw_version);
6176 #define BNX2_REGDUMP_LEN (32 * 1024)
6179 bnx2_get_regs_len(struct net_device *dev)
6181 return BNX2_REGDUMP_LEN;
6185 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6187 u32 *p = _p, i, offset;
6189 struct bnx2 *bp = netdev_priv(dev);
6190 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6191 0x0800, 0x0880, 0x0c00, 0x0c10,
6192 0x0c30, 0x0d08, 0x1000, 0x101c,
6193 0x1040, 0x1048, 0x1080, 0x10a4,
6194 0x1400, 0x1490, 0x1498, 0x14f0,
6195 0x1500, 0x155c, 0x1580, 0x15dc,
6196 0x1600, 0x1658, 0x1680, 0x16d8,
6197 0x1800, 0x1820, 0x1840, 0x1854,
6198 0x1880, 0x1894, 0x1900, 0x1984,
6199 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6200 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6201 0x2000, 0x2030, 0x23c0, 0x2400,
6202 0x2800, 0x2820, 0x2830, 0x2850,
6203 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6204 0x3c00, 0x3c94, 0x4000, 0x4010,
6205 0x4080, 0x4090, 0x43c0, 0x4458,
6206 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6207 0x4fc0, 0x5010, 0x53c0, 0x5444,
6208 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6209 0x5fc0, 0x6000, 0x6400, 0x6428,
6210 0x6800, 0x6848, 0x684c, 0x6860,
6211 0x6888, 0x6910, 0x8000 };
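/* reg_boundaries lists [start, end) pairs of register ranges to dump;
 * the loop below copies each range, jumps to the next pair's start,
 * and leaves the unread gaps zero-filled, stopping at 0x8000
 * (BNX2_REGDUMP_LEN).
 */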
6215 memset(p, 0, BNX2_REGDUMP_LEN);
6217 if (!netif_running(bp->dev))
6221 offset = reg_boundaries[0];
6223 while (offset < BNX2_REGDUMP_LEN) {
6224 *p++ = REG_RD(bp, offset);
6226 if (offset == reg_boundaries[i + 1]) {
6227 offset = reg_boundaries[i + 2];
6228 p = (u32 *) (orig_p + offset);
6235 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6237 struct bnx2 *bp = netdev_priv(dev);
6239 if (bp->flags & BNX2_FLAG_NO_WOL) {
6244 wol->supported = WAKE_MAGIC;
6246 wol->wolopts = WAKE_MAGIC;
6250 memset(&wol->sopass, 0, sizeof(wol->sopass));
6254 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6256 struct bnx2 *bp = netdev_priv(dev);
6258 if (wol->wolopts & ~WAKE_MAGIC)
6261 if (wol->wolopts & WAKE_MAGIC) {
6262 if (bp->flags & BNX2_FLAG_NO_WOL)
6274 bnx2_nway_reset(struct net_device *dev)
6276 struct bnx2 *bp = netdev_priv(dev);
6279 if (!(bp->autoneg & AUTONEG_SPEED)) {
6283 spin_lock_bh(&bp->phy_lock);
6285 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6288 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6289 spin_unlock_bh(&bp->phy_lock);
6293 /* Force a link down visible on the other side */
6294 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6295 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6296 spin_unlock_bh(&bp->phy_lock);
6300 spin_lock_bh(&bp->phy_lock);
6302 bp->current_interval = SERDES_AN_TIMEOUT;
6303 bp->serdes_an_pending = 1;
6304 mod_timer(&bp->timer, jiffies + bp->current_interval);
6307 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6308 bmcr &= ~BMCR_LOOPBACK;
6309 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6311 spin_unlock_bh(&bp->phy_lock);
6317 bnx2_get_eeprom_len(struct net_device *dev)
6319 struct bnx2 *bp = netdev_priv(dev);
6321 if (bp->flash_info == NULL)
6324 return (int) bp->flash_size;
6328 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6331 struct bnx2 *bp = netdev_priv(dev);
6334 /* parameters already validated in ethtool_get_eeprom */
6336 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6342 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6345 struct bnx2 *bp = netdev_priv(dev);
6348 /* parameters already validated in ethtool_set_eeprom */
6350 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6356 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6358 struct bnx2 *bp = netdev_priv(dev);
6360 memset(coal, 0, sizeof(struct ethtool_coalesce));
6362 coal->rx_coalesce_usecs = bp->rx_ticks;
6363 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6364 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6365 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6367 coal->tx_coalesce_usecs = bp->tx_ticks;
6368 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6369 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6370 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6372 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6378 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6380 struct bnx2 *bp = netdev_priv(dev);
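/* Tick values are clamped to 10 bits (0x3ff) and frame-count trip
 * points to 8 bits (0xff), matching the width of the corresponding
 * host coalescing register fields.
 */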
6382 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6383 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6385 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6386 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6388 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6389 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6391 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6392 if (bp->rx_quick_cons_trip_int > 0xff)
6393 bp->rx_quick_cons_trip_int = 0xff;
6395 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6396 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6398 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6399 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6401 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6402 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6404 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6405 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6408 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6409 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6410 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6411 bp->stats_ticks = USEC_PER_SEC;
6413 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6414 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6415 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6417 if (netif_running(bp->dev)) {
6418 bnx2_netif_stop(bp);
6420 bnx2_netif_start(bp);
6427 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6429 struct bnx2 *bp = netdev_priv(dev);
6431 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6432 ering->rx_mini_max_pending = 0;
6433 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6435 ering->rx_pending = bp->rx_ring_size;
6436 ering->rx_mini_pending = 0;
6437 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6439 ering->tx_max_pending = MAX_TX_DESC_CNT;
6440 ering->tx_pending = bp->tx_ring_size;
6444 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6446 if (netif_running(bp->dev)) {
6447 bnx2_netif_stop(bp);
6448 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6453 bnx2_set_rx_ring_size(bp, rx);
6454 bp->tx_ring_size = tx;
6456 if (netif_running(bp->dev)) {
6459 rc = bnx2_alloc_mem(bp);
6463 bnx2_netif_start(bp);
6469 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6471 struct bnx2 *bp = netdev_priv(dev);
6474 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6475 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6476 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6480 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6485 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6487 struct bnx2 *bp = netdev_priv(dev);
6489 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6490 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6491 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6495 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6497 struct bnx2 *bp = netdev_priv(dev);
6499 bp->req_flow_ctrl = 0;
6500 if (epause->rx_pause)
6501 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6502 if (epause->tx_pause)
6503 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6505 if (epause->autoneg) {
6506 bp->autoneg |= AUTONEG_FLOW_CTRL;
6509 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6512 spin_lock_bh(&bp->phy_lock);
6514 bnx2_setup_phy(bp, bp->phy_port);
6516 spin_unlock_bh(&bp->phy_lock);
6522 bnx2_get_rx_csum(struct net_device *dev)
6524 struct bnx2 *bp = netdev_priv(dev);
6530 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6532 struct bnx2 *bp = netdev_priv(dev);
6539 bnx2_set_tso(struct net_device *dev, u32 data)
6541 struct bnx2 *bp = netdev_priv(dev);
6544 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6545 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6546 dev->features |= NETIF_F_TSO6;
6548 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6553 #define BNX2_NUM_STATS 46
6556 char string[ETH_GSTRING_LEN];
6557 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6559 { "rx_error_bytes" },
6561 { "tx_error_bytes" },
6562 { "rx_ucast_packets" },
6563 { "rx_mcast_packets" },
6564 { "rx_bcast_packets" },
6565 { "tx_ucast_packets" },
6566 { "tx_mcast_packets" },
6567 { "tx_bcast_packets" },
6568 { "tx_mac_errors" },
6569 { "tx_carrier_errors" },
6570 { "rx_crc_errors" },
6571 { "rx_align_errors" },
6572 { "tx_single_collisions" },
6573 { "tx_multi_collisions" },
6575 { "tx_excess_collisions" },
6576 { "tx_late_collisions" },
6577 { "tx_total_collisions" },
6580 { "rx_undersize_packets" },
6581 { "rx_oversize_packets" },
6582 { "rx_64_byte_packets" },
6583 { "rx_65_to_127_byte_packets" },
6584 { "rx_128_to_255_byte_packets" },
6585 { "rx_256_to_511_byte_packets" },
6586 { "rx_512_to_1023_byte_packets" },
6587 { "rx_1024_to_1522_byte_packets" },
6588 { "rx_1523_to_9022_byte_packets" },
6589 { "tx_64_byte_packets" },
6590 { "tx_65_to_127_byte_packets" },
6591 { "tx_128_to_255_byte_packets" },
6592 { "tx_256_to_511_byte_packets" },
6593 { "tx_512_to_1023_byte_packets" },
6594 { "tx_1024_to_1522_byte_packets" },
6595 { "tx_1523_to_9022_byte_packets" },
6596 { "rx_xon_frames" },
6597 { "rx_xoff_frames" },
6598 { "tx_xon_frames" },
6599 { "tx_xoff_frames" },
6600 { "rx_mac_ctrl_frames" },
6601 { "rx_filtered_packets" },
6603 { "rx_fw_discards" },
6606 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
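/* Convert a statistics_block member offset into an index into the
 * u32 view of the hardware statistics block used below.
 */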
6608 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6609 STATS_OFFSET32(stat_IfHCInOctets_hi),
6610 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6611 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6612 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6613 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6614 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6615 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6616 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6617 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6618 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6619 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6620 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6621 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6622 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6623 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6624 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6625 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6626 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6627 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6628 STATS_OFFSET32(stat_EtherStatsCollisions),
6629 STATS_OFFSET32(stat_EtherStatsFragments),
6630 STATS_OFFSET32(stat_EtherStatsJabbers),
6631 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6632 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6633 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6634 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6635 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6636 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6637 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6638 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6639 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6640 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6641 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6642 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6643 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6644 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6645 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6646 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6647 STATS_OFFSET32(stat_XonPauseFramesReceived),
6648 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6649 STATS_OFFSET32(stat_OutXonSent),
6650 STATS_OFFSET32(stat_OutXoffSent),
6651 STATS_OFFSET32(stat_MacControlFramesReceived),
6652 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6653 STATS_OFFSET32(stat_IfInMBUFDiscards),
6654 STATS_OFFSET32(stat_FwRxDrop),
6657 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6658 * skipped because of errata.
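 * A width of 8 or 4 selects a 64- or 32-bit counter read; 0 skips
 * the counter entirely.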
6660 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6661 8,0,8,8,8,8,8,8,8,8,
6662 4,0,4,4,4,4,4,4,4,4,
6663 4,4,4,4,4,4,4,4,4,4,
6664 4,4,4,4,4,4,4,4,4,4,
6668 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6669 8,0,8,8,8,8,8,8,8,8,
6670 4,4,4,4,4,4,4,4,4,4,
6671 4,4,4,4,4,4,4,4,4,4,
6672 4,4,4,4,4,4,4,4,4,4,
6676 #define BNX2_NUM_TESTS 6
6679 char string[ETH_GSTRING_LEN];
6680 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6681 { "register_test (offline)" },
6682 { "memory_test (offline)" },
6683 { "loopback_test (offline)" },
6684 { "nvram_test (online)" },
6685 { "interrupt_test (online)" },
6686 { "link_test (online)" },
6690 bnx2_get_sset_count(struct net_device *dev, int sset)
6694 return BNX2_NUM_TESTS;
6696 return BNX2_NUM_STATS;
6703 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6705 struct bnx2 *bp = netdev_priv(dev);
6707 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6708 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6711 bnx2_netif_stop(bp);
6712 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6715 if (bnx2_test_registers(bp) != 0) {
6717 etest->flags |= ETH_TEST_FL_FAILED;
6719 if (bnx2_test_memory(bp) != 0) {
6721 etest->flags |= ETH_TEST_FL_FAILED;
6723 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6724 etest->flags |= ETH_TEST_FL_FAILED;
6726 if (!netif_running(bp->dev)) {
6727 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6731 bnx2_netif_start(bp);
6734 /* wait for link up */
6735 for (i = 0; i < 7; i++) {
6738 msleep_interruptible(1000);
6742 if (bnx2_test_nvram(bp) != 0) {
6744 etest->flags |= ETH_TEST_FL_FAILED;
6746 if (bnx2_test_intr(bp) != 0) {
6748 etest->flags |= ETH_TEST_FL_FAILED;
6751 if (bnx2_test_link(bp) != 0) {
6753 etest->flags |= ETH_TEST_FL_FAILED;
6759 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6761 switch (stringset) {
6763 memcpy(buf, bnx2_stats_str_arr,
6764 sizeof(bnx2_stats_str_arr));
6767 memcpy(buf, bnx2_tests_str_arr,
6768 sizeof(bnx2_tests_str_arr));
6774 bnx2_get_ethtool_stats(struct net_device *dev,
6775 struct ethtool_stats *stats, u64 *buf)
6777 struct bnx2 *bp = netdev_priv(dev);
6779 u32 *hw_stats = (u32 *) bp->stats_blk;
6780 u8 *stats_len_arr = NULL;
6782 if (hw_stats == NULL) {
6783 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6787 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6788 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6789 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6790 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6791 stats_len_arr = bnx2_5706_stats_len_arr;
6793 stats_len_arr = bnx2_5708_stats_len_arr;
6795 for (i = 0; i < BNX2_NUM_STATS; i++) {
6796 if (stats_len_arr[i] == 0) {
6797 /* skip this counter */
6801 if (stats_len_arr[i] == 4) {
6802 /* 4-byte counter */
6804 *(hw_stats + bnx2_stats_offset_arr[i]);
6807 /* 8-byte counter */
6808 buf[i] = (((u64) *(hw_stats +
6809 bnx2_stats_offset_arr[i])) << 32) +
6810 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6815 bnx2_phys_id(struct net_device *dev, u32 data)
6817 struct bnx2 *bp = netdev_priv(dev);
6824 save = REG_RD(bp, BNX2_MISC_CFG);
6825 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
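/* Override the EMAC LED controls so the port LEDs toggle every
 * 500 ms; the requested 'data' value is therefore roughly the number
 * of seconds to blink before the saved LED mode is restored.
 */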
6827 for (i = 0; i < (data * 2); i++) {
6829 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6832 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6833 BNX2_EMAC_LED_1000MB_OVERRIDE |
6834 BNX2_EMAC_LED_100MB_OVERRIDE |
6835 BNX2_EMAC_LED_10MB_OVERRIDE |
6836 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6837 BNX2_EMAC_LED_TRAFFIC);
6839 msleep_interruptible(500);
6840 if (signal_pending(current))
6843 REG_WR(bp, BNX2_EMAC_LED, 0);
6844 REG_WR(bp, BNX2_MISC_CFG, save);
6849 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6851 struct bnx2 *bp = netdev_priv(dev);
6853 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6854 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6856 return (ethtool_op_set_tx_csum(dev, data));
6859 static const struct ethtool_ops bnx2_ethtool_ops = {
6860 .get_settings = bnx2_get_settings,
6861 .set_settings = bnx2_set_settings,
6862 .get_drvinfo = bnx2_get_drvinfo,
6863 .get_regs_len = bnx2_get_regs_len,
6864 .get_regs = bnx2_get_regs,
6865 .get_wol = bnx2_get_wol,
6866 .set_wol = bnx2_set_wol,
6867 .nway_reset = bnx2_nway_reset,
6868 .get_link = ethtool_op_get_link,
6869 .get_eeprom_len = bnx2_get_eeprom_len,
6870 .get_eeprom = bnx2_get_eeprom,
6871 .set_eeprom = bnx2_set_eeprom,
6872 .get_coalesce = bnx2_get_coalesce,
6873 .set_coalesce = bnx2_set_coalesce,
6874 .get_ringparam = bnx2_get_ringparam,
6875 .set_ringparam = bnx2_set_ringparam,
6876 .get_pauseparam = bnx2_get_pauseparam,
6877 .set_pauseparam = bnx2_set_pauseparam,
6878 .get_rx_csum = bnx2_get_rx_csum,
6879 .set_rx_csum = bnx2_set_rx_csum,
6880 .set_tx_csum = bnx2_set_tx_csum,
6881 .set_sg = ethtool_op_set_sg,
6882 .set_tso = bnx2_set_tso,
6883 .self_test = bnx2_self_test,
6884 .get_strings = bnx2_get_strings,
6885 .phys_id = bnx2_phys_id,
6886 .get_ethtool_stats = bnx2_get_ethtool_stats,
6887 .get_sset_count = bnx2_get_sset_count,
6890 /* Called with rtnl_lock */
6892 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6894 struct mii_ioctl_data *data = if_mii(ifr);
6895 struct bnx2 *bp = netdev_priv(dev);
6900 data->phy_id = bp->phy_addr;
6906 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6909 if (!netif_running(dev))
6912 spin_lock_bh(&bp->phy_lock);
6913 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6914 spin_unlock_bh(&bp->phy_lock);
6916 data->val_out = mii_regval;
6922 if (!capable(CAP_NET_ADMIN))
6925 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6928 if (!netif_running(dev))
6931 spin_lock_bh(&bp->phy_lock);
6932 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6933 spin_unlock_bh(&bp->phy_lock);
6944 /* Called with rtnl_lock */
6946 bnx2_change_mac_addr(struct net_device *dev, void *p)
6948 struct sockaddr *addr = p;
6949 struct bnx2 *bp = netdev_priv(dev);
6951 if (!is_valid_ether_addr(addr->sa_data))
6954 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6955 if (netif_running(dev))
6956 bnx2_set_mac_addr(bp);
6961 /* Called with rtnl_lock */
6963 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6965 struct bnx2 *bp = netdev_priv(dev);
6967 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6968 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6972 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6975 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6977 poll_bnx2(struct net_device *dev)
6979 struct bnx2 *bp = netdev_priv(dev);
6981 disable_irq(bp->pdev->irq);
6982 bnx2_interrupt(bp->pdev->irq, dev);
6983 enable_irq(bp->pdev->irq);
6987 static void __devinit
6988 bnx2_get_5709_media(struct bnx2 *bp)
6990 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6991 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
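/* The 5709 reports its media through the dual-media bond ID; when the
 * bond ID is not conclusive, the strap bits (or their override),
 * decoded per PCI function, select copper or SerDes.
 */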
6994 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6996 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6997 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7001 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7002 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7004 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7006 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7011 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7019 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7025 static void __devinit
7026 bnx2_get_pci_speed(struct bnx2 *bp)
7030 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
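/* Decode bus type and speed from the chip's own status registers:
 * PCI-X speeds come from the clock control bits, while plain PCI
 * falls back to the M66EN strap (66 vs. 33 MHz).
 */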
7031 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7034 bp->flags |= BNX2_FLAG_PCIX;
7036 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7038 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7040 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7041 bp->bus_speed_mhz = 133;
7044 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7045 bp->bus_speed_mhz = 100;
7048 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7049 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7050 bp->bus_speed_mhz = 66;
7053 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7054 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7055 bp->bus_speed_mhz = 50;
7058 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7059 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7060 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7061 bp->bus_speed_mhz = 33;
7066 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7067 bp->bus_speed_mhz = 66;
7069 bp->bus_speed_mhz = 33;
7072 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7073 bp->flags |= BNX2_FLAG_PCI_32BIT;
7077 static int __devinit
7078 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7081 unsigned long mem_len;
7084 u64 dma_mask, persist_dma_mask;
7086 SET_NETDEV_DEV(dev, &pdev->dev);
7087 bp = netdev_priv(dev);
7092 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7093 rc = pci_enable_device(pdev);
7095 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7099 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7101 "Cannot find PCI device base address, aborting.\n");
7103 goto err_out_disable;
7106 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7108 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7109 goto err_out_disable;
7112 pci_set_master(pdev);
7114 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7115 if (bp->pm_cap == 0) {
7117 "Cannot find power management capability, aborting.\n");
7119 goto err_out_release;
7125 spin_lock_init(&bp->phy_lock);
7126 spin_lock_init(&bp->indirect_lock);
7127 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7129 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7130 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7131 dev->mem_end = dev->mem_start + mem_len;
7132 dev->irq = pdev->irq;
7134 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7137 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7139 goto err_out_release;
7142 /* Configure byte swap and enable write to the reg_window registers.
7143 * Rely on the CPU to do target byte swapping on big endian systems.
7144 * The chip's target access swapping will not swap all accesses.
7146 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7147 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7148 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7150 bnx2_set_power_state(bp, PCI_D0);
7152 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7154 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7155 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7157 "Cannot find PCIE capability, aborting.\n");
7161 bp->flags |= BNX2_FLAG_PCIE;
7162 if (CHIP_REV(bp) == CHIP_REV_Ax)
7163 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7165 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7166 if (bp->pcix_cap == 0) {
7168 "Cannot find PCIX capability, aborting.\n");
7174 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7175 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7176 bp->flags |= BNX2_FLAG_MSIX_CAP;
7179 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7180 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7181 bp->flags |= BNX2_FLAG_MSI_CAP;
7184 /* 5708 cannot support DMA addresses > 40-bit. */
7185 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7186 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7188 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7190 /* Configure DMA attributes. */
7191 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7192 dev->features |= NETIF_F_HIGHDMA;
7193 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7196 "pci_set_consistent_dma_mask failed, aborting.\n");
7199 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7200 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7204 if (!(bp->flags & BNX2_FLAG_PCIE))
7205 bnx2_get_pci_speed(bp);
7207 /* 5706A0 may falsely detect SERR and PERR. */
7208 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7209 reg = REG_RD(bp, PCI_COMMAND);
7210 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7211 REG_WR(bp, PCI_COMMAND, reg);
7213 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7214 !(bp->flags & BNX2_FLAG_PCIX)) {
7217 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7221 bnx2_init_nvram(bp);
7223 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7225 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7226 BNX2_SHM_HDR_SIGNATURE_SIG) {
7227 u32 off = PCI_FUNC(pdev->devfn) << 2;
7229 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7231 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7233 /* Get the permanent MAC address. First we need to make sure the
7234 * firmware is actually running.
7236 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7238 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7239 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7240 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7245 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7246 for (i = 0, j = 0; i < 3; i++) {
7249 num = (u8) (reg >> (24 - (i * 8)));
7250 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7251 if (num >= k || !skip0 || k == 1) {
7252 bp->fw_version[j++] = (num / k) + '0';
7257 bp->fw_version[j++] = '.';
7259 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7260 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7263 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7264 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7266 for (i = 0; i < 30; i++) {
7267 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7268 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7273 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7274 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7275 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7276 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7278 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7280 bp->fw_version[j++] = ' ';
7281 for (i = 0; i < 3; i++) {
7282 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7284 memcpy(&bp->fw_version[j], &reg, 4);
7289 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7290 bp->mac_addr[0] = (u8) (reg >> 8);
7291 bp->mac_addr[1] = (u8) reg;
7293 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7294 bp->mac_addr[2] = (u8) (reg >> 24);
7295 bp->mac_addr[3] = (u8) (reg >> 16);
7296 bp->mac_addr[4] = (u8) (reg >> 8);
7297 bp->mac_addr[5] = (u8) reg;
7299 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7301 bp->tx_ring_size = MAX_TX_DESC_CNT;
7302 bnx2_set_rx_ring_size(bp, 255);
7306 bp->tx_quick_cons_trip_int = 20;
7307 bp->tx_quick_cons_trip = 20;
7308 bp->tx_ticks_int = 80;
7311 bp->rx_quick_cons_trip_int = 6;
7312 bp->rx_quick_cons_trip = 6;
7313 bp->rx_ticks_int = 18;
7316 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7318 bp->timer_interval = HZ;
7319 bp->current_interval = HZ;
7323 /* Disable WOL support if we are running on a SERDES chip. */
7324 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7325 bnx2_get_5709_media(bp);
7326 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7327 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7329 bp->phy_port = PORT_TP;
7330 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7331 bp->phy_port = PORT_FIBRE;
7332 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7333 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7334 bp->flags |= BNX2_FLAG_NO_WOL;
7337 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7338 /* Don't do parallel detect on this board because of
7339 * some board problems. The link will not go down
7340 * if we do parallel detect.
7342 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7343 pdev->subsystem_device == 0x310c)
7344 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7347 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7348 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7350 bnx2_init_remote_phy(bp);
7352 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7353 CHIP_NUM(bp) == CHIP_NUM_5708)
7354 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7355 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7356 (CHIP_REV(bp) == CHIP_REV_Ax ||
7357 CHIP_REV(bp) == CHIP_REV_Bx))
7358 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7360 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7361 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7362 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7363 bp->flags |= BNX2_FLAG_NO_WOL;
7367 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7368 bp->tx_quick_cons_trip_int =
7369 bp->tx_quick_cons_trip;
7370 bp->tx_ticks_int = bp->tx_ticks;
7371 bp->rx_quick_cons_trip_int =
7372 bp->rx_quick_cons_trip;
7373 bp->rx_ticks_int = bp->rx_ticks;
7374 bp->comp_prod_trip_int = bp->comp_prod_trip;
7375 bp->com_ticks_int = bp->com_ticks;
7376 bp->cmd_ticks_int = bp->cmd_ticks;
7379 /* Disable MSI on 5706 if AMD 8132 bridge is found.
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132, which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and prefers
	 * to disable MSI locally on that device rather than globally.
	 */
7389 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7390 struct pci_dev *amd_8132 = NULL;
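		/* Descriptive note: walk all AMD 8132 bridges in the system.
		 * pci_get_device() holds a reference on each device it
		 * returns, so the reference must be dropped with
		 * pci_dev_put() when we stop early on an affected revision
		 * (0x10-0x13).
		 */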
7392 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7393 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7396 if (amd_8132->revision >= 0x10 &&
7397 amd_8132->revision <= 0x13) {
7399 pci_dev_put(amd_8132);
7405 bnx2_set_default_link(bp);
7406 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
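	/* Descriptive note: set up, but do not yet arm, the periodic driver
	 * timer; it is started when the interface is brought up.
	 */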
7408 init_timer(&bp->timer);
7409 bp->timer.expires = RUN_AT(bp->timer_interval);
7410 bp->timer.data = (unsigned long) bp;
7411 bp->timer.function = bnx2_timer;
7417 iounmap(bp->regview);
7422 pci_release_regions(pdev);
7425 pci_disable_device(pdev);
7426 pci_set_drvdata(pdev, NULL);
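/* Descriptive note: the helper below formats a human readable description
 * of the bus the device sits on into the caller supplied buffer, e.g.
 * "PCI Express", "PCI-X 64-bit 133MHz" or "PCI 32-bit 66MHz", and returns
 * the buffer so it can be printed directly.
 */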
7432 static char * __devinit
7433 bnx2_bus_string(struct bnx2 *bp, char *str)
7437 if (bp->flags & BNX2_FLAG_PCIE) {
7438 s += sprintf(s, "PCI Express");
7440 s += sprintf(s, "PCI");
7441 if (bp->flags & BNX2_FLAG_PCIX)
7442 s += sprintf(s, "-X");
7443 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7444 s += sprintf(s, " 32-bit");
7446 s += sprintf(s, " 64-bit");
7447 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7452 static void __devinit
7453 bnx2_init_napi(struct bnx2 *bp)
7456 struct bnx2_napi *bnapi;
7458 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7459 bnapi = &bp->bnx2_napi[i];
7462 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7463 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7467 static int __devinit
7468 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	static int version_printed;
7471 struct net_device *dev = NULL;
7475 DECLARE_MAC_BUF(mac);
7477 if (version_printed++ == 0)
7478 printk(KERN_INFO "%s", version);
	/* dev is zeroed by alloc_etherdev() */
7481 dev = alloc_etherdev(sizeof(*bp));
7486 rc = bnx2_init_board(pdev, dev);
7492 dev->open = bnx2_open;
7493 dev->hard_start_xmit = bnx2_start_xmit;
7494 dev->stop = bnx2_close;
7495 dev->get_stats = bnx2_get_stats;
7496 dev->set_multicast_list = bnx2_set_rx_mode;
7497 dev->do_ioctl = bnx2_ioctl;
7498 dev->set_mac_address = bnx2_change_mac_addr;
7499 dev->change_mtu = bnx2_change_mtu;
7500 dev->tx_timeout = bnx2_tx_timeout;
7501 dev->watchdog_timeo = TX_TIMEOUT;
7503 dev->vlan_rx_register = bnx2_vlan_rx_register;
7505 dev->ethtool_ops = &bnx2_ethtool_ops;
7507 bp = netdev_priv(dev);
7510 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7511 dev->poll_controller = poll_bnx2;
7514 pci_set_drvdata(pdev, dev);
7516 memcpy(dev->dev_addr, bp->mac_addr, 6);
7517 memcpy(dev->perm_addr, bp->mac_addr, 6);
7518 bp->name = board_info[ent->driver_data].name;
7520 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7521 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7522 dev->features |= NETIF_F_IPV6_CSUM;
7525 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7527 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7528 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7529 dev->features |= NETIF_F_TSO6;
7531 if ((rc = register_netdev(dev))) {
7532 dev_err(&pdev->dev, "Cannot register net device\n");
7534 iounmap(bp->regview);
7535 pci_release_regions(pdev);
7536 pci_disable_device(pdev);
7537 pci_set_drvdata(pdev, NULL);
7542 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7543 "IRQ %d, node addr %s\n",
7546 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7547 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7548 bnx2_bus_string(bp, str),
7550 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7555 static void __devexit
7556 bnx2_remove_one(struct pci_dev *pdev)
7558 struct net_device *dev = pci_get_drvdata(pdev);
7559 struct bnx2 *bp = netdev_priv(dev);
7561 flush_scheduled_work();
7563 unregister_netdev(dev);
7566 iounmap(bp->regview);
7569 pci_release_regions(pdev);
7570 pci_disable_device(pdev);
7571 pci_set_drvdata(pdev, NULL);
7575 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7577 struct net_device *dev = pci_get_drvdata(pdev);
7578 struct bnx2 *bp = netdev_priv(dev);
	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
7585 pci_save_state(pdev);
7586 if (!netif_running(dev))
7589 flush_scheduled_work();
7590 bnx2_netif_stop(bp);
7591 netif_device_detach(dev);
7592 del_timer_sync(&bp->timer);
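	/* Descriptive note: tell the bootcode how the chip is being quiesced
	 * so it can leave the link and PHY in the right state for
	 * Wake-on-LAN, if enabled.
	 */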
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7599 bnx2_reset_chip(bp, reset_code);
7601 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7606 bnx2_resume(struct pci_dev *pdev)
7608 struct net_device *dev = pci_get_drvdata(pdev);
7609 struct bnx2 *bp = netdev_priv(dev);
7611 pci_restore_state(pdev);
7612 if (!netif_running(dev))
7615 bnx2_set_power_state(bp, PCI_D0);
7616 netif_device_attach(dev);
7618 bnx2_netif_start(bp);
7622 static struct pci_driver bnx2_pci_driver = {
7623 .name = DRV_MODULE_NAME,
7624 .id_table = bnx2_pci_tbl,
7625 .probe = bnx2_init_one,
7626 .remove = __devexit_p(bnx2_remove_one),
7627 .suspend = bnx2_suspend,
7628 .resume = bnx2_resume,
7631 static int __init bnx2_init(void)
7633 return pci_register_driver(&bnx2_pci_driver);
7636 static void __exit bnx2_cleanup(void)
7638 pci_unregister_driver(&bnx2_pci_driver);
7641 module_init(bnx2_init);
7642 module_exit(bnx2_cleanup);