1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 1.7.7";
/*
 * RXD_IS_UP2DT - test whether the hardware has handed a Rx descriptor
 * back to the host: the ownership bit in Control_1 is clear AND the
 * driver's marker in Control_2 has been overwritten by the NIC.
 * NOTE(review): the declaration of 'ret' and the return statement are in
 * lines elided from this view — confirm against the full source.
 */
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
83 * Cards with following subsystem_id have a link state indication
84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85 * macro below identifies these cards given the subsystem_id.
/*
 * Cards whose subsystem_id falls in 0x600B..0x600D or 0x640B..0x640D have
 * a faulty link-state indication; evaluates to 1 for those cards, else 0.
 * Fully parenthesized: the argument may be any expression, and the whole
 * expansion is a single primary expression, so combining the macro with
 * '!', '&&' etc. at the call site parses as intended.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0)
/* TRUE when neither the local nor the remote RMAC fault bit is set in the
 * adapter-status value.  Argument parenthesized so callers may pass an
 * arbitrary expression. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
93 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full ring 'ring' currently is, given
 * 'rxb_size' (the number of RxDs still available to the host).
 * Compares against the ring's total packet count and per-block limit.
 * NOTE(review): the return statements for each level are in lines elided
 * from this view — verify thresholds against the full source.
 */
96 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
99 mac_info_t *mac_control;
101 mac_control = &sp->mac_control;
102 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
112 /* Ethtool related variables and Macros. */
113 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
114 "Register test\t(offline)",
115 "Eeprom test\t(offline)",
116 "Link test\t(online)",
117 "RLDRAM test\t(offline)",
118 "BIST Test\t(offline)"
121 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_data_octets"},
127 {"tmac_pause_ctrl_frms"},
128 {"tmac_any_err_frms"},
129 {"tmac_vld_ip_octets"},
137 {"rmac_data_octets"},
138 {"rmac_fcs_err_frms"},
140 {"rmac_vld_mcst_frms"},
141 {"rmac_vld_bcst_frms"},
142 {"rmac_in_rng_len_err_frms"},
144 {"rmac_pause_ctrl_frms"},
145 {"rmac_discarded_frms"},
146 {"rmac_usized_frms"},
147 {"rmac_osized_frms"},
149 {"rmac_jabber_frms"},
157 {"rmac_err_drp_udp"},
159 {"rmac_accepted_ip"},
161 {"\n DRIVER STATISTICS"},
162 {"single_bit_ecc_errs"},
163 {"double_bit_ecc_errs"},
/* Sizes of the ethtool statistics/self-test string tables above.
 * Each expansion is fully parenthesized so the macros can be embedded in
 * larger expressions (e.g. division or comparison) without misparsing. */
#define S2IO_STAT_LEN		(sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN	(S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * S2IO_TIMER_CONF - initialize 'timer' so that 'handle(arg)' fires 'exp'
 * jiffies from now.
 * Wrapped in do { } while (0) so the macro behaves as a single statement:
 * the original four-statement expansion left only the first statement
 * guarded when used in an unbraced if/else body.  Arguments that may be
 * expressions are parenthesized at their use sites.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&(timer));				\
		(timer).function = handle;			\
		(timer).data = (unsigned long)(arg);		\
		mod_timer(&(timer), (jiffies + (exp)));		\
	} while (0)
/*
 * s2io_vlan_rx_register - net_device vlan_rx_register hook: record the
 * VLAN group under the Tx lock.  NOTE(review): the assignment of 'grp'
 * and the 'flags' declaration are in lines elided from this view.
 */
179 static void s2io_vlan_rx_register(struct net_device *dev,
180 struct vlan_group *grp)
182 nic_t *nic = dev->priv;
185 spin_lock_irqsave(&nic->tx_lock, flags);
187 spin_unlock_irqrestore(&nic->tx_lock, flags);
190 /* Unregister the vlan */
/* Unregister the vlan: clear the per-vid device slot in the VLAN group.
 * Done under the Tx lock so the transmit path never sees a half-updated
 * vlgrp table.  NOTE(review): 'flags' declaration elided from this view. */
191 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193 nic_t *nic = dev->priv;
196 spin_lock_irqsave(&nic->tx_lock, flags);
198 nic->vlgrp->vlan_devices[vid] = NULL;
199 spin_unlock_irqrestore(&nic->tx_lock, flags);
203 * Constants to be programmed into the Xena's registers, to configure
207 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
210 static u64 default_mdio_cfg[] = {
212 0xC001010000000000ULL, 0xC0010100000000E0ULL,
213 0xC0010100008000E4ULL,
214 /* Remove Reset from PMA PLL */
215 0xC001010000000000ULL, 0xC0010100000000E0ULL,
216 0xC0010100000000E4ULL,
220 static u64 default_dtx_cfg[] = {
221 0x8000051500000000ULL, 0x80000515000000E0ULL,
222 0x80000515D93500E4ULL, 0x8001051500000000ULL,
223 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
224 0x8002051500000000ULL, 0x80020515000000E0ULL,
225 0x80020515F21000E4ULL,
226 /* Set PADLOOPBACKN */
227 0x8002051500000000ULL, 0x80020515000000E0ULL,
228 0x80020515B20000E4ULL, 0x8003051500000000ULL,
229 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
230 0x8004051500000000ULL, 0x80040515000000E0ULL,
231 0x80040515B20000E4ULL, 0x8005051500000000ULL,
232 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
234 /* Remove PADLOOPBACKN */
235 0x8002051500000000ULL, 0x80020515000000E0ULL,
236 0x80020515F20000E4ULL, 0x8003051500000000ULL,
237 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
238 0x8004051500000000ULL, 0x80040515000000E0ULL,
239 0x80040515F20000E4ULL, 0x8005051500000000ULL,
240 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
245 * Constants for Fixing the MacAddress problem seen mostly on
248 static u64 fix_mac[] = {
249 0x0060000000000000ULL, 0x0060600000000000ULL,
250 0x0040600000000000ULL, 0x0000600000000000ULL,
251 0x0020600000000000ULL, 0x0060600000000000ULL,
252 0x0020600000000000ULL, 0x0060600000000000ULL,
253 0x0020600000000000ULL, 0x0060600000000000ULL,
254 0x0020600000000000ULL, 0x0060600000000000ULL,
255 0x0020600000000000ULL, 0x0060600000000000ULL,
256 0x0020600000000000ULL, 0x0060600000000000ULL,
257 0x0020600000000000ULL, 0x0060600000000000ULL,
258 0x0020600000000000ULL, 0x0060600000000000ULL,
259 0x0020600000000000ULL, 0x0060600000000000ULL,
260 0x0020600000000000ULL, 0x0060600000000000ULL,
261 0x0020600000000000ULL, 0x0000600000000000ULL,
262 0x0040600000000000ULL, 0x0060600000000000ULL,
266 /* Module Loadable parameters. */
267 static unsigned int tx_fifo_num = 1;
268 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
269 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
270 static unsigned int rx_ring_num = 1;
271 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
272 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
273 static unsigned int rts_frm_len[MAX_RX_RINGS] =
274 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
275 static unsigned int use_continuous_tx_intrs = 1;
276 static unsigned int rmac_pause_time = 65535;
277 static unsigned int mc_pause_threshold_q0q3 = 187;
278 static unsigned int mc_pause_threshold_q4q7 = 187;
279 static unsigned int shared_splits;
280 static unsigned int tmac_util_period = 5;
281 static unsigned int rmac_util_period = 5;
282 #ifndef CONFIG_S2IO_NAPI
283 static unsigned int indicate_max_pkts;
288 * This table lists all the devices that this driver supports.
290 static struct pci_device_id s2io_tbl[] __devinitdata = {
291 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
292 PCI_ANY_ID, PCI_ANY_ID},
293 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
294 PCI_ANY_ID, PCI_ANY_ID},
295 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
296 PCI_ANY_ID, PCI_ANY_ID},
297 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
298 PCI_ANY_ID, PCI_ANY_ID},
302 MODULE_DEVICE_TABLE(pci, s2io_tbl);
304 static struct pci_driver s2io_driver = {
306 .id_table = s2io_tbl,
307 .probe = s2io_init_nic,
308 .remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns():
 * number of pages needed to hold 'len' items at 'per_each' items per page
 * (ceiling division).  Arguments are parenthesized so expressions such as
 * TXD_MEM_PAGE_CNT(n, a + b) evaluate correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
315 * init_shared_mem - Allocation and Initialization of Memory
316 * @nic: Device private variable.
317 * Description: The function allocates all the memory areas shared
318 * between the NIC and the driver. This includes Tx descriptors,
319 * Rx descriptors and the statistics block.
322 static int init_shared_mem(struct s2io_nic *nic)
325 void *tmp_v_addr, *tmp_v_addr_next;
326 dma_addr_t tmp_p_addr, tmp_p_addr_next;
327 RxD_block_t *pre_rxd_blk = NULL;
328 int i, j, blk_cnt, rx_sz, tx_sz;
329 int lst_size, lst_per_page;
330 struct net_device *dev = nic->dev;
331 #ifdef CONFIG_2BUFF_MODE
336 mac_info_t *mac_control;
337 struct config_param *config;
339 mac_control = &nic->mac_control;
340 config = &nic->config;
343 /* Allocation and initialization of TXDLs in FIFOs */
345 for (i = 0; i < config->tx_fifo_num; i++) {
346 size += config->tx_cfg[i].fifo_len;
348 if (size > MAX_AVAILABLE_TXDS) {
349 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
351 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
352 DBG_PRINT(ERR_DBG, "that can be used\n");
356 lst_size = (sizeof(TxD_t) * config->max_txds);
357 tx_sz = lst_size * size;
358 lst_per_page = PAGE_SIZE / lst_size;
360 for (i = 0; i < config->tx_fifo_num; i++) {
361 int fifo_len = config->tx_cfg[i].fifo_len;
362 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
363 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
365 if (!mac_control->fifos[i].list_info) {
367 "Malloc failed for list_info\n");
370 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
372 for (i = 0; i < config->tx_fifo_num; i++) {
373 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
375 mac_control->fifos[i].tx_curr_put_info.offset = 0;
376 mac_control->fifos[i].tx_curr_put_info.fifo_len =
377 config->tx_cfg[i].fifo_len - 1;
378 mac_control->fifos[i].tx_curr_get_info.offset = 0;
379 mac_control->fifos[i].tx_curr_get_info.fifo_len =
380 config->tx_cfg[i].fifo_len - 1;
381 mac_control->fifos[i].fifo_no = i;
382 mac_control->fifos[i].nic = nic;
383 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
385 for (j = 0; j < page_num; j++) {
389 tmp_v = pci_alloc_consistent(nic->pdev,
393 "pci_alloc_consistent ");
394 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
397 while (k < lst_per_page) {
398 int l = (j * lst_per_page) + k;
399 if (l == config->tx_cfg[i].fifo_len)
401 mac_control->fifos[i].list_info[l].list_virt_addr =
402 tmp_v + (k * lst_size);
403 mac_control->fifos[i].list_info[l].list_phy_addr =
404 tmp_p + (k * lst_size);
410 /* Allocation and initialization of RXDs in Rings */
412 for (i = 0; i < config->rx_ring_num; i++) {
413 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
414 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
415 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
417 DBG_PRINT(ERR_DBG, "RxDs per Block");
420 size += config->rx_cfg[i].num_rxd;
421 mac_control->rings[i].block_count =
422 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
423 mac_control->rings[i].pkt_cnt =
424 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
426 size = (size * (sizeof(RxD_t)));
429 for (i = 0; i < config->rx_ring_num; i++) {
430 mac_control->rings[i].rx_curr_get_info.block_index = 0;
431 mac_control->rings[i].rx_curr_get_info.offset = 0;
432 mac_control->rings[i].rx_curr_get_info.ring_len =
433 config->rx_cfg[i].num_rxd - 1;
434 mac_control->rings[i].rx_curr_put_info.block_index = 0;
435 mac_control->rings[i].rx_curr_put_info.offset = 0;
436 mac_control->rings[i].rx_curr_put_info.ring_len =
437 config->rx_cfg[i].num_rxd - 1;
438 mac_control->rings[i].nic = nic;
439 mac_control->rings[i].ring_no = i;
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 /* Allocating all the Rx blocks */
444 for (j = 0; j < blk_cnt; j++) {
445 #ifndef CONFIG_2BUFF_MODE
446 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
448 size = SIZE_OF_BLOCK;
450 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
452 if (tmp_v_addr == NULL) {
454 * In case of failure, free_shared_mem()
455 * is called, which should free any
456 * memory that was alloced till the
459 mac_control->rings[i].rx_blocks[j].block_virt_addr =
463 memset(tmp_v_addr, 0, size);
464 mac_control->rings[i].rx_blocks[j].block_virt_addr =
466 mac_control->rings[i].rx_blocks[j].block_dma_addr =
469 /* Interlinking all Rx Blocks */
470 for (j = 0; j < blk_cnt; j++) {
472 mac_control->rings[i].rx_blocks[j].block_virt_addr;
474 mac_control->rings[i].rx_blocks[(j + 1) %
475 blk_cnt].block_virt_addr;
477 mac_control->rings[i].rx_blocks[j].block_dma_addr;
479 mac_control->rings[i].rx_blocks[(j + 1) %
480 blk_cnt].block_dma_addr;
482 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
483 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
486 #ifndef CONFIG_2BUFF_MODE
487 pre_rxd_blk->reserved_2_pNext_RxD_block =
488 (unsigned long) tmp_v_addr_next;
490 pre_rxd_blk->pNext_RxD_Blk_physical =
491 (u64) tmp_p_addr_next;
495 #ifdef CONFIG_2BUFF_MODE
497 * Allocation of Storages for buffer addresses in 2BUFF mode
498 * and the buffers as well.
500 for (i = 0; i < config->rx_ring_num; i++) {
502 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
503 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
505 if (!mac_control->rings[i].ba)
507 for (j = 0; j < blk_cnt; j++) {
509 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
510 (MAX_RXDS_PER_BLOCK + 1)),
512 if (!mac_control->rings[i].ba[j])
514 while (k != MAX_RXDS_PER_BLOCK) {
515 ba = &mac_control->rings[i].ba[j][k];
517 ba->ba_0_org = (void *) kmalloc
518 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
521 tmp = (u64) ba->ba_0_org;
523 tmp &= ~((u64) ALIGN_SIZE);
524 ba->ba_0 = (void *) tmp;
526 ba->ba_1_org = (void *) kmalloc
527 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
530 tmp = (u64) ba->ba_1_org;
532 tmp &= ~((u64) ALIGN_SIZE);
533 ba->ba_1 = (void *) tmp;
540 /* Allocation and initialization of Statistics block */
541 size = sizeof(StatInfo_t);
542 mac_control->stats_mem = pci_alloc_consistent
543 (nic->pdev, size, &mac_control->stats_mem_phy);
545 if (!mac_control->stats_mem) {
547 * In case of failure, free_shared_mem() is called, which
548 * should free any memory that was alloced till the
553 mac_control->stats_mem_sz = size;
555 tmp_v_addr = mac_control->stats_mem;
556 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
557 memset(tmp_v_addr, 0, size);
558 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
559 (unsigned long long) tmp_p_addr);
565 * free_shared_mem - Free the allocated Memory
566 * @nic: Device private variable.
567 * Description: This function is to free all memory locations allocated by
568 * the init_shared_mem() function and return it to the kernel.
571 static void free_shared_mem(struct s2io_nic *nic)
573 int i, j, blk_cnt, size;
575 dma_addr_t tmp_p_addr;
576 mac_info_t *mac_control;
577 struct config_param *config;
578 int lst_size, lst_per_page;
584 mac_control = &nic->mac_control;
585 config = &nic->config;
587 lst_size = (sizeof(TxD_t) * config->max_txds);
588 lst_per_page = PAGE_SIZE / lst_size;
590 for (i = 0; i < config->tx_fifo_num; i++) {
591 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
593 for (j = 0; j < page_num; j++) {
594 int mem_blks = (j * lst_per_page);
595 if (!mac_control->fifos[i].list_info[mem_blks].
598 pci_free_consistent(nic->pdev, PAGE_SIZE,
599 mac_control->fifos[i].
602 mac_control->fifos[i].
606 kfree(mac_control->fifos[i].list_info);
609 #ifndef CONFIG_2BUFF_MODE
610 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
612 size = SIZE_OF_BLOCK;
614 for (i = 0; i < config->rx_ring_num; i++) {
615 blk_cnt = mac_control->rings[i].block_count;
616 for (j = 0; j < blk_cnt; j++) {
617 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
619 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
621 if (tmp_v_addr == NULL)
623 pci_free_consistent(nic->pdev, size,
624 tmp_v_addr, tmp_p_addr);
628 #ifdef CONFIG_2BUFF_MODE
629 /* Freeing buffer storage addresses in 2BUFF mode. */
630 for (i = 0; i < config->rx_ring_num; i++) {
632 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
633 for (j = 0; j < blk_cnt; j++) {
635 if (!mac_control->rings[i].ba[j])
637 while (k != MAX_RXDS_PER_BLOCK) {
638 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
643 kfree(mac_control->rings[i].ba[j]);
645 if (mac_control->rings[i].ba)
646 kfree(mac_control->rings[i].ba);
650 if (mac_control->stats_mem) {
651 pci_free_consistent(nic->pdev,
652 mac_control->stats_mem_sz,
653 mac_control->stats_mem,
654 mac_control->stats_mem_phy);
659 * init_nic - Initialization of hardware
660 * @nic: device private variable
661 * Description: The function sequentially configures every block
662 * of the H/W from their reset values.
663 * Return Value: SUCCESS on success and
664 * '-1' on failure (endian settings incorrect).
667 static int init_nic(struct s2io_nic *nic)
669 XENA_dev_config_t __iomem *bar0 = nic->bar0;
670 struct net_device *dev = nic->dev;
671 register u64 val64 = 0;
675 mac_info_t *mac_control;
676 struct config_param *config;
677 int mdio_cnt = 0, dtx_cnt = 0;
678 unsigned long long mem_share;
681 mac_control = &nic->mac_control;
682 config = &nic->config;
684 /* to set the swapper control on the card */
685 if(s2io_set_swapper(nic)) {
686 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
690 /* Remove XGXS from reset state */
692 writeq(val64, &bar0->sw_reset);
694 val64 = readq(&bar0->sw_reset);
696 /* Enable Receiving broadcasts */
697 add = &bar0->mac_cfg;
698 val64 = readq(&bar0->mac_cfg);
699 val64 |= MAC_RMAC_BCAST_ENABLE;
700 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
701 writel((u32) val64, add);
702 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
703 writel((u32) (val64 >> 32), (add + 4));
705 /* Read registers in all blocks */
706 val64 = readq(&bar0->mac_int_mask);
707 val64 = readq(&bar0->mc_int_mask);
708 val64 = readq(&bar0->xgxs_int_mask);
712 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
715 * Configuring the XAUI Interface of Xena.
716 * ***************************************
717 * To Configure the Xena's XAUI, one has to write a series
718 * of 64 bit values into two registers in a particular
719 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
720 * which will be defined in the array of configuration values
721 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
722 * to switch writing from one register to another. We continue
723 * writing these values until we encounter the 'END_SIGN' macro.
724 * For example, After making a series of 21 writes into
725 * dtx_control register the 'SWITCH_SIGN' appears and hence we
726 * start writing into mdio_control until we encounter END_SIGN.
730 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
731 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
735 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
736 &bar0->dtx_control, UF);
737 val64 = readq(&bar0->dtx_control);
741 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
742 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
746 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
747 &bar0->mdio_control, UF);
748 val64 = readq(&bar0->mdio_control);
751 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
752 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
759 /* Tx DMA Initialization */
761 writeq(val64, &bar0->tx_fifo_partition_0);
762 writeq(val64, &bar0->tx_fifo_partition_1);
763 writeq(val64, &bar0->tx_fifo_partition_2);
764 writeq(val64, &bar0->tx_fifo_partition_3);
767 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
769 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
770 13) | vBIT(config->tx_cfg[i].fifo_priority,
773 if (i == (config->tx_fifo_num - 1)) {
780 writeq(val64, &bar0->tx_fifo_partition_0);
784 writeq(val64, &bar0->tx_fifo_partition_1);
788 writeq(val64, &bar0->tx_fifo_partition_2);
792 writeq(val64, &bar0->tx_fifo_partition_3);
797 /* Enable Tx FIFO partition 0. */
798 val64 = readq(&bar0->tx_fifo_partition_0);
799 val64 |= BIT(0); /* To enable the FIFO partition. */
800 writeq(val64, &bar0->tx_fifo_partition_0);
803 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
804 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
806 if (get_xena_rev_id(nic->pdev) < 4)
807 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
809 val64 = readq(&bar0->tx_fifo_partition_0);
810 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
811 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
814 * Initialization of Tx_PA_CONFIG register to ignore packet
815 * integrity checking.
817 val64 = readq(&bar0->tx_pa_cfg);
818 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
819 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
820 writeq(val64, &bar0->tx_pa_cfg);
822 /* Rx DMA initialization. */
824 for (i = 0; i < config->rx_ring_num; i++) {
826 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
829 writeq(val64, &bar0->rx_queue_priority);
832 * Allocating equal share of memory to all the
837 for (i = 0; i < config->rx_ring_num; i++) {
840 mem_share = (mem_size / config->rx_ring_num +
841 mem_size % config->rx_ring_num);
842 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
845 mem_share = (mem_size / config->rx_ring_num);
846 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
849 mem_share = (mem_size / config->rx_ring_num);
850 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
853 mem_share = (mem_size / config->rx_ring_num);
854 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
857 mem_share = (mem_size / config->rx_ring_num);
858 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
861 mem_share = (mem_size / config->rx_ring_num);
862 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
865 mem_share = (mem_size / config->rx_ring_num);
866 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
869 mem_share = (mem_size / config->rx_ring_num);
870 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
874 writeq(val64, &bar0->rx_queue_cfg);
877 * Filling Tx round robin registers
878 * as per the number of FIFOs
880 switch (config->tx_fifo_num) {
882 val64 = 0x0000000000000000ULL;
883 writeq(val64, &bar0->tx_w_round_robin_0);
884 writeq(val64, &bar0->tx_w_round_robin_1);
885 writeq(val64, &bar0->tx_w_round_robin_2);
886 writeq(val64, &bar0->tx_w_round_robin_3);
887 writeq(val64, &bar0->tx_w_round_robin_4);
890 val64 = 0x0000010000010000ULL;
891 writeq(val64, &bar0->tx_w_round_robin_0);
892 val64 = 0x0100000100000100ULL;
893 writeq(val64, &bar0->tx_w_round_robin_1);
894 val64 = 0x0001000001000001ULL;
895 writeq(val64, &bar0->tx_w_round_robin_2);
896 val64 = 0x0000010000010000ULL;
897 writeq(val64, &bar0->tx_w_round_robin_3);
898 val64 = 0x0100000000000000ULL;
899 writeq(val64, &bar0->tx_w_round_robin_4);
902 val64 = 0x0001000102000001ULL;
903 writeq(val64, &bar0->tx_w_round_robin_0);
904 val64 = 0x0001020000010001ULL;
905 writeq(val64, &bar0->tx_w_round_robin_1);
906 val64 = 0x0200000100010200ULL;
907 writeq(val64, &bar0->tx_w_round_robin_2);
908 val64 = 0x0001000102000001ULL;
909 writeq(val64, &bar0->tx_w_round_robin_3);
910 val64 = 0x0001020000000000ULL;
911 writeq(val64, &bar0->tx_w_round_robin_4);
914 val64 = 0x0001020300010200ULL;
915 writeq(val64, &bar0->tx_w_round_robin_0);
916 val64 = 0x0100000102030001ULL;
917 writeq(val64, &bar0->tx_w_round_robin_1);
918 val64 = 0x0200010000010203ULL;
919 writeq(val64, &bar0->tx_w_round_robin_2);
920 val64 = 0x0001020001000001ULL;
921 writeq(val64, &bar0->tx_w_round_robin_3);
922 val64 = 0x0203000100000000ULL;
923 writeq(val64, &bar0->tx_w_round_robin_4);
926 val64 = 0x0001000203000102ULL;
927 writeq(val64, &bar0->tx_w_round_robin_0);
928 val64 = 0x0001020001030004ULL;
929 writeq(val64, &bar0->tx_w_round_robin_1);
930 val64 = 0x0001000203000102ULL;
931 writeq(val64, &bar0->tx_w_round_robin_2);
932 val64 = 0x0001020001030004ULL;
933 writeq(val64, &bar0->tx_w_round_robin_3);
934 val64 = 0x0001000000000000ULL;
935 writeq(val64, &bar0->tx_w_round_robin_4);
938 val64 = 0x0001020304000102ULL;
939 writeq(val64, &bar0->tx_w_round_robin_0);
940 val64 = 0x0304050001020001ULL;
941 writeq(val64, &bar0->tx_w_round_robin_1);
942 val64 = 0x0203000100000102ULL;
943 writeq(val64, &bar0->tx_w_round_robin_2);
944 val64 = 0x0304000102030405ULL;
945 writeq(val64, &bar0->tx_w_round_robin_3);
946 val64 = 0x0001000200000000ULL;
947 writeq(val64, &bar0->tx_w_round_robin_4);
950 val64 = 0x0001020001020300ULL;
951 writeq(val64, &bar0->tx_w_round_robin_0);
952 val64 = 0x0102030400010203ULL;
953 writeq(val64, &bar0->tx_w_round_robin_1);
954 val64 = 0x0405060001020001ULL;
955 writeq(val64, &bar0->tx_w_round_robin_2);
956 val64 = 0x0304050000010200ULL;
957 writeq(val64, &bar0->tx_w_round_robin_3);
958 val64 = 0x0102030000000000ULL;
959 writeq(val64, &bar0->tx_w_round_robin_4);
962 val64 = 0x0001020300040105ULL;
963 writeq(val64, &bar0->tx_w_round_robin_0);
964 val64 = 0x0200030106000204ULL;
965 writeq(val64, &bar0->tx_w_round_robin_1);
966 val64 = 0x0103000502010007ULL;
967 writeq(val64, &bar0->tx_w_round_robin_2);
968 val64 = 0x0304010002060500ULL;
969 writeq(val64, &bar0->tx_w_round_robin_3);
970 val64 = 0x0103020400000000ULL;
971 writeq(val64, &bar0->tx_w_round_robin_4);
975 /* Filling the Rx round robin registers as per the
976 * number of Rings and steering based on QoS.
978 switch (config->rx_ring_num) {
980 val64 = 0x8080808080808080ULL;
981 writeq(val64, &bar0->rts_qos_steering);
984 val64 = 0x0000010000010000ULL;
985 writeq(val64, &bar0->rx_w_round_robin_0);
986 val64 = 0x0100000100000100ULL;
987 writeq(val64, &bar0->rx_w_round_robin_1);
988 val64 = 0x0001000001000001ULL;
989 writeq(val64, &bar0->rx_w_round_robin_2);
990 val64 = 0x0000010000010000ULL;
991 writeq(val64, &bar0->rx_w_round_robin_3);
992 val64 = 0x0100000000000000ULL;
993 writeq(val64, &bar0->rx_w_round_robin_4);
995 val64 = 0x8080808040404040ULL;
996 writeq(val64, &bar0->rts_qos_steering);
999 val64 = 0x0001000102000001ULL;
1000 writeq(val64, &bar0->rx_w_round_robin_0);
1001 val64 = 0x0001020000010001ULL;
1002 writeq(val64, &bar0->rx_w_round_robin_1);
1003 val64 = 0x0200000100010200ULL;
1004 writeq(val64, &bar0->rx_w_round_robin_2);
1005 val64 = 0x0001000102000001ULL;
1006 writeq(val64, &bar0->rx_w_round_robin_3);
1007 val64 = 0x0001020000000000ULL;
1008 writeq(val64, &bar0->rx_w_round_robin_4);
1010 val64 = 0x8080804040402020ULL;
1011 writeq(val64, &bar0->rts_qos_steering);
1014 val64 = 0x0001020300010200ULL;
1015 writeq(val64, &bar0->rx_w_round_robin_0);
1016 val64 = 0x0100000102030001ULL;
1017 writeq(val64, &bar0->rx_w_round_robin_1);
1018 val64 = 0x0200010000010203ULL;
1019 writeq(val64, &bar0->rx_w_round_robin_2);
1020 val64 = 0x0001020001000001ULL;
1021 writeq(val64, &bar0->rx_w_round_robin_3);
1022 val64 = 0x0203000100000000ULL;
1023 writeq(val64, &bar0->rx_w_round_robin_4);
1025 val64 = 0x8080404020201010ULL;
1026 writeq(val64, &bar0->rts_qos_steering);
1029 val64 = 0x0001000203000102ULL;
1030 writeq(val64, &bar0->rx_w_round_robin_0);
1031 val64 = 0x0001020001030004ULL;
1032 writeq(val64, &bar0->rx_w_round_robin_1);
1033 val64 = 0x0001000203000102ULL;
1034 writeq(val64, &bar0->rx_w_round_robin_2);
1035 val64 = 0x0001020001030004ULL;
1036 writeq(val64, &bar0->rx_w_round_robin_3);
1037 val64 = 0x0001000000000000ULL;
1038 writeq(val64, &bar0->rx_w_round_robin_4);
1040 val64 = 0x8080404020201008ULL;
1041 writeq(val64, &bar0->rts_qos_steering);
1044 val64 = 0x0001020304000102ULL;
1045 writeq(val64, &bar0->rx_w_round_robin_0);
1046 val64 = 0x0304050001020001ULL;
1047 writeq(val64, &bar0->rx_w_round_robin_1);
1048 val64 = 0x0203000100000102ULL;
1049 writeq(val64, &bar0->rx_w_round_robin_2);
1050 val64 = 0x0304000102030405ULL;
1051 writeq(val64, &bar0->rx_w_round_robin_3);
1052 val64 = 0x0001000200000000ULL;
1053 writeq(val64, &bar0->rx_w_round_robin_4);
1055 val64 = 0x8080404020100804ULL;
1056 writeq(val64, &bar0->rts_qos_steering);
1059 val64 = 0x0001020001020300ULL;
1060 writeq(val64, &bar0->rx_w_round_robin_0);
1061 val64 = 0x0102030400010203ULL;
1062 writeq(val64, &bar0->rx_w_round_robin_1);
1063 val64 = 0x0405060001020001ULL;
1064 writeq(val64, &bar0->rx_w_round_robin_2);
1065 val64 = 0x0304050000010200ULL;
1066 writeq(val64, &bar0->rx_w_round_robin_3);
1067 val64 = 0x0102030000000000ULL;
1068 writeq(val64, &bar0->rx_w_round_robin_4);
1070 val64 = 0x8080402010080402ULL;
1071 writeq(val64, &bar0->rts_qos_steering);
1074 val64 = 0x0001020300040105ULL;
1075 writeq(val64, &bar0->rx_w_round_robin_0);
1076 val64 = 0x0200030106000204ULL;
1077 writeq(val64, &bar0->rx_w_round_robin_1);
1078 val64 = 0x0103000502010007ULL;
1079 writeq(val64, &bar0->rx_w_round_robin_2);
1080 val64 = 0x0304010002060500ULL;
1081 writeq(val64, &bar0->rx_w_round_robin_3);
1082 val64 = 0x0103020400000000ULL;
1083 writeq(val64, &bar0->rx_w_round_robin_4);
1085 val64 = 0x8040201008040201ULL;
1086 writeq(val64, &bar0->rts_qos_steering);
1092 for (i = 0; i < 8; i++)
1093 writeq(val64, &bar0->rts_frm_len_n[i]);
1095 /* Set the default rts frame length for the rings configured */
1096 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1097 for (i = 0 ; i < config->rx_ring_num ; i++)
1098 writeq(val64, &bar0->rts_frm_len_n[i]);
1100 /* Set the frame length for the configured rings
1101 * desired by the user
1103 for (i = 0; i < config->rx_ring_num; i++) {
1104 /* If rts_frm_len[i] == 0 then it is assumed that user not
1105 * specified frame length steering.
1106 * If the user provides the frame length then program
1107 * the rts_frm_len register for those values or else
1108 * leave it as it is.
1110 if (rts_frm_len[i] != 0) {
1111 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1112 &bar0->rts_frm_len_n[i]);
1116 /* Program statistics memory */
1117 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1120 * Initializing the sampling rate for the device to calculate the
1121 * bandwidth utilization.
1123 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1124 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1125 writeq(val64, &bar0->mac_link_util);
1129 * Initializing the Transmit and Receive Traffic Interrupt
1133 * TTI Initialization. Default Tx timer gets us about
1134 * 250 interrupts per sec. Continuous interrupts are enabled
1137 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1138 TTI_DATA1_MEM_TX_URNG_A(0xA) |
1139 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1140 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1141 if (use_continuous_tx_intrs)
1142 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1143 writeq(val64, &bar0->tti_data1_mem);
1145 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1146 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1147 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1148 writeq(val64, &bar0->tti_data2_mem);
1150 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1151 writeq(val64, &bar0->tti_command_mem);
1154 * Once the operation completes, the Strobe bit of the command
1155 * register will be reset. We poll for this particular condition
1156 * We wait for a maximum of 500ms for the operation to complete,
1157 * if it's not complete by then we return error.
1161 val64 = readq(&bar0->tti_command_mem);
1162 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1166 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1174 /* RTI Initialization */
1175 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1176 RTI_DATA1_MEM_RX_URNG_A(0xA) |
1177 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1178 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1180 writeq(val64, &bar0->rti_data1_mem);
1182 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1183 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1184 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1185 writeq(val64, &bar0->rti_data2_mem);
1187 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1188 writeq(val64, &bar0->rti_command_mem);
1191 * Once the operation completes, the Strobe bit of the
1192 * command register will be reset. We poll for this
1193 * particular condition. We wait for a maximum of 500ms
1194 * for the operation to complete, if it's not complete
1195 * by then we return error.
1199 val64 = readq(&bar0->rti_command_mem);
1200 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1204 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1213 * Initializing proper values as Pause threshold into all
1214 * the 8 Queues on Rx side.
1216 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1217 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1219 /* Disable RMAC PAD STRIPPING */
1220 add = (void *) &bar0->mac_cfg;
1221 val64 = readq(&bar0->mac_cfg);
1222 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1223 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1224 writel((u32) (val64), add);
1225 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1226 writel((u32) (val64 >> 32), (add + 4));
1227 val64 = readq(&bar0->mac_cfg);
1230 * Set the time value to be inserted in the pause frame
1231 * generated by xena.
1233 val64 = readq(&bar0->rmac_pause_cfg);
1234 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1235 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1236 writeq(val64, &bar0->rmac_pause_cfg);
1239 * Set the Threshold Limit for Generating the pause frame
1240 * If the amount of data in any Queue exceeds ratio of
1241 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1242 * pause frame is generated
1245 for (i = 0; i < 4; i++) {
1247 (((u64) 0xFF00 | nic->mac_control.
1248 mc_pause_threshold_q0q3)
1251 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1254 for (i = 0; i < 4; i++) {
1256 (((u64) 0xFF00 | nic->mac_control.
1257 mc_pause_threshold_q4q7)
1260 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1263 * TxDMA will stop Read request if the number of read split has
1264 * exceeded the limit pointed by shared_splits
1266 val64 = readq(&bar0->pic_control);
1267 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1268 writeq(val64, &bar0->pic_control);
1274 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1275 * @nic: device private variable,
1276 * @mask: A mask indicating which Intr block must be modified and,
1277 * @flag: A flag indicating whether to enable or disable the Intrs.
1278 * Description: This function will either disable or enable the interrupts
1279 * depending on the flag argument. The mask argument can be used to
1280 * enable/disable any Intr block.
1281 * Return Value: NONE.
1284 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1286 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1287 register u64 val64 = 0, temp64 = 0;
1289 /* Top level interrupt classification */
1290 /* PIC Interrupts */
1291 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1292 /* Enable PIC Intrs in the general intr mask register */
1293 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1294 if (flag == ENABLE_INTRS) {
1295 temp64 = readq(&bar0->general_int_mask);
1296 temp64 &= ~((u64) val64);
1297 writeq(temp64, &bar0->general_int_mask);
1299 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1300 * interrupts for now.
1303 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1305 * No MSI Support is available presently, so TTI and
1306 * RTI interrupts are also disabled.
1308 } else if (flag == DISABLE_INTRS) {
1310 * Disable PIC Intrs in the general
1311 * intr mask register
1313 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1314 temp64 = readq(&bar0->general_int_mask);
1316 writeq(val64, &bar0->general_int_mask);
1320 /* DMA Interrupts */
1321 /* Enabling/Disabling Tx DMA interrupts */
1322 if (mask & TX_DMA_INTR) {
1323 /* Enable TxDMA Intrs in the general intr mask register */
1324 val64 = TXDMA_INT_M;
1325 if (flag == ENABLE_INTRS) {
1326 temp64 = readq(&bar0->general_int_mask);
1327 temp64 &= ~((u64) val64);
1328 writeq(temp64, &bar0->general_int_mask);
1330 * Keep all interrupts other than PFC interrupt
1331 * and PCC interrupt disabled in DMA level.
1333 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1335 writeq(val64, &bar0->txdma_int_mask);
1337 * Enable only the MISC error 1 interrupt in PFC block
1339 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1340 writeq(val64, &bar0->pfc_err_mask);
1342 * Enable only the FB_ECC error interrupt in PCC block
1344 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1345 writeq(val64, &bar0->pcc_err_mask);
1346 } else if (flag == DISABLE_INTRS) {
1348 * Disable TxDMA Intrs in the general intr mask
1351 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1352 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1353 temp64 = readq(&bar0->general_int_mask);
1355 writeq(val64, &bar0->general_int_mask);
1359 /* Enabling/Disabling Rx DMA interrupts */
1360 if (mask & RX_DMA_INTR) {
1361 /* Enable RxDMA Intrs in the general intr mask register */
1362 val64 = RXDMA_INT_M;
1363 if (flag == ENABLE_INTRS) {
1364 temp64 = readq(&bar0->general_int_mask);
1365 temp64 &= ~((u64) val64);
1366 writeq(temp64, &bar0->general_int_mask);
1368 * All RxDMA block interrupts are disabled for now
1371 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1372 } else if (flag == DISABLE_INTRS) {
1374 * Disable RxDMA Intrs in the general intr mask
1377 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1378 temp64 = readq(&bar0->general_int_mask);
1380 writeq(val64, &bar0->general_int_mask);
1384 /* MAC Interrupts */
1385 /* Enabling/Disabling MAC interrupts */
1386 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1387 val64 = TXMAC_INT_M | RXMAC_INT_M;
1388 if (flag == ENABLE_INTRS) {
1389 temp64 = readq(&bar0->general_int_mask);
1390 temp64 &= ~((u64) val64);
1391 writeq(temp64, &bar0->general_int_mask);
1393 * All MAC block error interrupts are disabled for now
1394 * except the link status change interrupt.
1397 val64 = MAC_INT_STATUS_RMAC_INT;
1398 temp64 = readq(&bar0->mac_int_mask);
1399 temp64 &= ~((u64) val64);
1400 writeq(temp64, &bar0->mac_int_mask);
1402 val64 = readq(&bar0->mac_rmac_err_mask);
1403 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1404 writeq(val64, &bar0->mac_rmac_err_mask);
1405 } else if (flag == DISABLE_INTRS) {
1407 * Disable MAC Intrs in the general intr mask register
1409 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1410 writeq(DISABLE_ALL_INTRS,
1411 &bar0->mac_rmac_err_mask);
1413 temp64 = readq(&bar0->general_int_mask);
1415 writeq(val64, &bar0->general_int_mask);
1419 /* XGXS Interrupts */
1420 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1421 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1422 if (flag == ENABLE_INTRS) {
1423 temp64 = readq(&bar0->general_int_mask);
1424 temp64 &= ~((u64) val64);
1425 writeq(temp64, &bar0->general_int_mask);
1427 * All XGXS block error interrupts are disabled for now
1430 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1431 } else if (flag == DISABLE_INTRS) {
1433 * Disable MC Intrs in the general intr mask register
1435 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1436 temp64 = readq(&bar0->general_int_mask);
1438 writeq(val64, &bar0->general_int_mask);
1442 /* Memory Controller(MC) interrupts */
1443 if (mask & MC_INTR) {
1445 if (flag == ENABLE_INTRS) {
1446 temp64 = readq(&bar0->general_int_mask);
1447 temp64 &= ~((u64) val64);
1448 writeq(temp64, &bar0->general_int_mask);
1450 * Enable all MC Intrs.
1452 writeq(0x0, &bar0->mc_int_mask);
1453 writeq(0x0, &bar0->mc_err_mask);
1454 } else if (flag == DISABLE_INTRS) {
1456 * Disable MC Intrs in the general intr mask register
1458 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1459 temp64 = readq(&bar0->general_int_mask);
1461 writeq(val64, &bar0->general_int_mask);
1466 /* Tx traffic interrupts */
1467 if (mask & TX_TRAFFIC_INTR) {
1468 val64 = TXTRAFFIC_INT_M;
1469 if (flag == ENABLE_INTRS) {
1470 temp64 = readq(&bar0->general_int_mask);
1471 temp64 &= ~((u64) val64);
1472 writeq(temp64, &bar0->general_int_mask);
1474 * Enable all the Tx side interrupts
1475 * writing 0 Enables all 64 TX interrupt levels
1477 writeq(0x0, &bar0->tx_traffic_mask);
1478 } else if (flag == DISABLE_INTRS) {
1480 * Disable Tx Traffic Intrs in the general intr mask
1483 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1484 temp64 = readq(&bar0->general_int_mask);
1486 writeq(val64, &bar0->general_int_mask);
1490 /* Rx traffic interrupts */
1491 if (mask & RX_TRAFFIC_INTR) {
1492 val64 = RXTRAFFIC_INT_M;
1493 if (flag == ENABLE_INTRS) {
1494 temp64 = readq(&bar0->general_int_mask);
1495 temp64 &= ~((u64) val64);
1496 writeq(temp64, &bar0->general_int_mask);
1497 /* writing 0 Enables all 8 RX interrupt levels */
1498 writeq(0x0, &bar0->rx_traffic_mask);
1499 } else if (flag == DISABLE_INTRS) {
1501 * Disable Rx Traffic Intrs in the general intr mask
1504 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1505 temp64 = readq(&bar0->general_int_mask);
1507 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - Check PRC/PCC quiescence sub-conditions.
 * @val64: value read from the adapter status register.
 * @flag: whether the adapter enable bit was ever written (FALSE = never).
 * @rev_id: Xena silicon revision; the RMAC_PCC_IDLE vs PCC_FOUR_IDLE
 *          checks differ per revision (selection logic elided here).
 *
 * NOTE(review): the returns and closing braces of this function are not
 * visible in this excerpt — verify the full body before editing.
 */
1512 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
/* Adapter enable was never written: PCC must NOT be idle, PRC quiescent. */
1516 if (flag == FALSE) {
1518 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1519 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1520 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1524 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1525 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1526 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* Adapter was enabled before: PCC idle is expected this time. */
1532 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1533 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1534 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1535 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1536 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1540 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1541 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1542 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1543 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1544 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1553 * verify_xena_quiescence - Checks whether the H/W is ready
1554 * @val64 : Value read from adapter status register.
1555 * @flag : indicates if the adapter enable bit was ever written once
1557 * Description: Returns whether the H/W is ready to go or not. Depending
1558 * on whether adapter enable bit was written or not the comparison
1559 * differs and the calling function passes the input argument flag to
1561 * Return: 1 If Xena is quiescent
1562 * 0 If Xena is not quiescent
1565 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
/* tmp64 is the complement of the status word: a set bit in tmp64 means
 * the corresponding READY/QUIESCENT status bit is NOT yet set. */
1568 u64 tmp64 = ~((u64) val64);
1569 int rev_id = get_xena_rev_id(sp->pdev);
/* All of the listed subsystems must report ready before the PRC/PCC
 * sub-state is examined by check_prc_pcc_state(). */
1573 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1574 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1575 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1576 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1577 ADAPTER_STATUS_P_PLL_LOCK))) {
1578 ret = check_prc_pcc_state(val64, flag, rev_id);
1585 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1586 * @sp: Pointer to device specific structure
1588 * New procedure to clear mac address reading problems on Alpha platforms
1592 void fix_mac_address(nic_t * sp)
1594 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Replay the fix_mac[] write sequence into gpio_control until the
 * END_SIGN sentinel; the final readq flushes the posted writes. */
1598 while (fix_mac[i] != END_SIGN) {
1599 writeq(fix_mac[i++], &bar0->gpio_control);
1601 val64 = readq(&bar0->gpio_control);
1606 * start_nic - Turns the device on
1607 * @nic : device private variable.
1609 * This function actually turns the device on. Before this function is
1610 * called,all Registers are configured from their reset states
1611 * and shared memory is allocated but the NIC is still quiescent. On
1612 * calling this function, the device interrupts are cleared and the NIC is
1613 * literally switched on by writing into the adapter control register.
1615 * SUCCESS on success and -1 on failure.
1618 static int start_nic(struct s2io_nic *nic)
1620 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1621 struct net_device *dev = nic->dev;
1622 register u64 val64 = 0;
1625 mac_info_t *mac_control;
1626 struct config_param *config;
1628 mac_control = &nic->mac_control;
1629 config = &nic->config;
1631 /* PRC Initialization and configuration */
/* Point each PRC at the first RxD block of its ring and enable it. */
1632 for (i = 0; i < config->rx_ring_num; i++) {
1633 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1634 &bar0->prc_rxd0_n[i]);
1636 val64 = readq(&bar0->prc_ctrl_n[i]);
1637 #ifndef CONFIG_2BUFF_MODE
1638 val64 |= PRC_CTRL_RC_ENABLED;
1640 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1642 writeq(val64, &bar0->prc_ctrl_n[i]);
1645 #ifdef CONFIG_2BUFF_MODE
1646 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1647 val64 = readq(&bar0->rx_pa_cfg);
1648 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1649 writeq(val64, &bar0->rx_pa_cfg);
1653 * Enabling MC-RLDRAM. After enabling the device, we timeout
1654 * for around 100ms, which is approximately the time required
1655 * for the device to be ready for operation.
1657 val64 = readq(&bar0->mc_rldram_mrs);
1658 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1659 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1660 val64 = readq(&bar0->mc_rldram_mrs);
1662 msleep(100); /* Delay by around 100 ms. */
1664 /* Enabling ECC Protection. */
/* NOTE(review): the comment says "Enabling" but the code CLEARS
 * ADAPTER_ECC_EN — presumably the bit is active-low (set = ECC
 * disabled); confirm against the Xena register specification. */
1665 val64 = readq(&bar0->adapter_control);
1666 val64 &= ~ADAPTER_ECC_EN;
1667 writeq(val64, &bar0->adapter_control);
1670 * Clearing any possible Link state change interrupts that
1671 * could have popped up just before Enabling the card.
1673 val64 = readq(&bar0->mac_rmac_err_reg);
/* Write-1-to-clear: writing the value back clears the pending bits. */
1675 writeq(val64, &bar0->mac_rmac_err_reg);
1678 * Verify if the device is ready to be enabled, if so enable
1681 val64 = readq(&bar0->adapter_status);
1682 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1683 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1684 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1685 (unsigned long long) val64);
1689 /* Enable select interrupts */
1690 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1691 RX_MAC_INTR | MC_INTR;
1692 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1695 * With some switches, link might be already up at this point.
1696 * Because of this weird behavior, when we enable laser,
1697 * we may not get link. We need to handle this. We cannot
1698 * figure out which switch is misbehaving. So we are forced to
1699 * make a global change.
1702 /* Enabling Laser. */
1703 val64 = readq(&bar0->adapter_control);
1704 val64 |= ADAPTER_EOI_TX_ON;
1705 writeq(val64, &bar0->adapter_control);
1707 /* SXE-002: Initialize link and activity LED */
1708 subid = nic->pdev->subsystem_device;
1709 if ((subid & 0xFF) >= 0x07) {
1710 val64 = readq(&bar0->gpio_control);
1711 val64 |= 0x0000800000000000ULL;
1712 writeq(val64, &bar0->gpio_control);
/* Magic LED programming value written at raw BAR0 offset 0x2700;
 * meaning not derivable from this excerpt — see errata SXE-002. */
1713 val64 = 0x0411040400000000ULL;
1714 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1718 * Don't see link state interrupts on certain switches, so
1719 * directly scheduling a link state task from here.
1721 schedule_work(&nic->set_link_task);
1727 * free_tx_buffers - Free all queued Tx buffers
1728 * @nic : device private variable.
1730 * Free all queued Tx buffers.
1731 * Return Value: void
1734 static void free_tx_buffers(struct s2io_nic *nic)
1736 struct net_device *dev = nic->dev;
1737 struct sk_buff *skb;
1740 mac_info_t *mac_control;
1741 struct config_param *config;
1742 int cnt = 0, frg_cnt;
1744 mac_control = &nic->mac_control;
1745 config = &nic->config;
/* Walk every TxD of every FIFO, unmap DMA buffers, free any queued
 * skbs, and clear the descriptors; then reset the FIFO get/put offsets. */
1747 for (i = 0; i < config->tx_fifo_num; i++) {
1748 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1749 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1752 (struct sk_buff *) ((unsigned long) txdp->
1755 memset(txdp, 0, sizeof(TxD_t) *
1759 frg_cnt = skb_shinfo(skb)->nr_frags;
1760 pci_unmap_single(nic->pdev, (dma_addr_t)
1761 txdp->Buffer_Pointer,
1762 skb->len - skb->data_len,
/* NOTE(review): this fragment loop reuses the outer descriptor index
 * 'j' — confirm against the full source that this does not cut the
 * outer descriptor walk short. */
1768 for (j = 0; j < frg_cnt; j++, txdp++) {
1770 &skb_shinfo(skb)->frags[j];
1771 pci_unmap_page(nic->pdev,
1781 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1785 "%s:forcibly freeing %d skbs on FIFO%d\n",
1787 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1788 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1793 * stop_nic - To stop the nic
1794 * @nic : device private variable.
1796 * This function does exactly the opposite of what the start_nic()
1797 * function does. This function is called to stop the device.
1802 static void stop_nic(struct s2io_nic *nic)
1804 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1805 register u64 val64 = 0;
1806 u16 interruptible, i;
1807 mac_info_t *mac_control;
1808 struct config_param *config;
1810 mac_control = &nic->mac_control;
1811 config = &nic->config;
1813 /* Disable all interrupts */
/* Same interrupt set that start_nic() enabled, now masked again. */
1814 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1815 RX_MAC_INTR | MC_INTR;
1816 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Stop every PRC (receive DMA engine), one per Rx ring. */
1819 for (i = 0; i < config->rx_ring_num; i++) {
1820 val64 = readq(&bar0->prc_ctrl_n[i]);
1821 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1822 writeq(val64, &bar0->prc_ctrl_n[i]);
1827 * fill_rx_buffers - Allocates the Rx side skbs
1828 * @nic: device private variable
1829 * @ring_no: ring number
1831 * The function allocates Rx side skbs and puts the physical
1832 * address of these buffers into the RxD buffer pointers, so that the NIC
1833 * can DMA the received frame into these locations.
1834 * The NIC supports 3 receive modes, viz
1836 * 2. three buffer and
1837 * 3. Five buffer modes.
1838 * Each mode defines how many fragments the received frame will be split
1839 * up into by the NIC. The frame is split into L3 header, L4 Header,
1840 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1841 * is split into 3 fragments. As of now only single buffer mode is
1844 * SUCCESS on success or an appropriate -ve value on failure.
1847 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1849 struct net_device *dev = nic->dev;
1850 struct sk_buff *skb;
1852 int off, off1, size, block_no, block_no1;
1853 int offset, offset1;
1856 mac_info_t *mac_control;
1857 struct config_param *config;
1858 #ifdef CONFIG_2BUFF_MODE
1863 dma_addr_t rxdpphys;
1865 #ifndef CONFIG_S2IO_NAPI
1866 unsigned long flags;
1869 mac_control = &nic->mac_control;
1870 config = &nic->config;
/* Replenish up to (ring capacity - buffers already posted) RxDs. */
1871 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1872 atomic_read(&nic->rx_bufs_left[ring_no]);
/* Worst-case frame size: MTU plus Ethernet/802.2/SNAP headers. */
1873 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1874 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1876 while (alloc_tab < alloc_cnt) {
1877 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1879 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1881 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1882 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1883 #ifndef CONFIG_2BUFF_MODE
1884 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1885 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1887 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1888 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1891 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1892 block_virt_addr + off;
/* Put caught up with get on a still-populated RxD: ring is full. */
1893 if ((offset == offset1) && (rxdp->Host_Control)) {
1894 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1895 DBG_PRINT(INTR_DBG, " info equated\n");
1898 #ifndef CONFIG_2BUFF_MODE
/* End-of-block marker: advance to the next RxD block (link held in
 * Control_2 in 1-buffer mode). */
1899 if (rxdp->Control_1 == END_OF_BLOCK) {
1900 mac_control->rings[ring_no].rx_curr_put_info.
1902 mac_control->rings[ring_no].rx_curr_put_info.
1903 block_index %= mac_control->rings[ring_no].block_count;
1904 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1907 off %= (MAX_RXDS_PER_BLOCK + 1);
1908 mac_control->rings[ring_no].rx_curr_put_info.offset =
1910 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1911 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1914 #ifndef CONFIG_S2IO_NAPI
/* put_pos is shared with rx_intr_handler; protected by put_lock. */
1915 spin_lock_irqsave(&nic->put_lock, flags);
1916 mac_control->rings[ring_no].put_pos =
1917 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1918 spin_unlock_irqrestore(&nic->put_lock, flags);
1921 if (rxdp->Host_Control == END_OF_BLOCK) {
1922 mac_control->rings[ring_no].rx_curr_put_info.
1924 mac_control->rings[ring_no].rx_curr_put_info.block_index
1925 %= mac_control->rings[ring_no].block_count;
1926 block_no = mac_control->rings[ring_no].rx_curr_put_info
1929 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1930 dev->name, block_no,
1931 (unsigned long long) rxdp->Control_1);
1932 mac_control->rings[ring_no].rx_curr_put_info.offset =
1934 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1937 #ifndef CONFIG_S2IO_NAPI
1938 spin_lock_irqsave(&nic->put_lock, flags);
1939 mac_control->rings[ring_no].put_pos = (block_no *
1940 (MAX_RXDS_PER_BLOCK + 1)) + off;
1941 spin_unlock_irqrestore(&nic->put_lock, flags);
1945 #ifndef CONFIG_2BUFF_MODE
/* Skip RxDs still owned by the NIC / still marked buffer-empty. */
1946 if (rxdp->Control_1 & RXD_OWN_XENA)
1948 if (rxdp->Control_2 & BIT(0))
1951 mac_control->rings[ring_no].rx_curr_put_info.
1955 #ifdef CONFIG_2BUFF_MODE
1957 * RxDs Spanning cache lines will be replenished only
1958 * if the succeeding RxD is also owned by Host. It
1959 * will always be the ((8*i)+3) and ((8*i)+6)
1960 * descriptors for the 48 byte descriptor. The offending
1961 * descriptor is of course the 3rd descriptor.
1963 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1964 block_dma_addr + (off * sizeof(RxD_t));
1965 if (((u64) (rxdpphys)) % 128 > 80) {
1966 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1967 block_virt_addr + (off + 1);
1968 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1969 nextblk = (block_no + 1) %
1970 (mac_control->rings[ring_no].block_count);
1971 rxdpnext = mac_control->rings[ring_no].rx_blocks
1972 [nextblk].block_virt_addr;
1974 if (rxdpnext->Control_2 & BIT(0))
1979 #ifndef CONFIG_2BUFF_MODE
1980 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1982 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1985 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1986 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1989 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode: whole frame lands in Buffer0. */
1990 skb_reserve(skb, NET_IP_ALIGN);
1991 memset(rxdp, 0, sizeof(RxD_t));
1992 rxdp->Buffer0_ptr = pci_map_single
1993 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1994 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1995 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1996 rxdp->Host_Control = (unsigned long) (skb);
/* Hand the descriptor to the NIC last, after it is fully set up. */
1997 rxdp->Control_1 |= RXD_OWN_XENA;
1999 off %= (MAX_RXDS_PER_BLOCK + 1);
2000 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
/* 2-buffer mode: Buffer0 = header area (ba_0), Buffer1 = dummy,
 * Buffer2 = skb payload. */
2002 ba = &mac_control->rings[ring_no].ba[block_no][off];
2003 skb_reserve(skb, BUF0_LEN);
2004 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2006 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2008 memset(rxdp, 0, sizeof(RxD_t));
2009 rxdp->Buffer2_ptr = pci_map_single
2010 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2011 PCI_DMA_FROMDEVICE);
2013 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2014 PCI_DMA_FROMDEVICE);
2016 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2017 PCI_DMA_FROMDEVICE);
2019 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2020 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2021 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2022 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2023 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2024 rxdp->Control_1 |= RXD_OWN_XENA;
2026 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2028 rxdp->Control_2 |= SET_RXD_MARKER;
2030 atomic_inc(&nic->rx_bufs_left[ring_no]);
2039 * free_rx_buffers - Frees all Rx buffers
2040 * @sp: device private variable.
2042 * This function will free all Rx buffers allocated by host.
2047 static void free_rx_buffers(struct s2io_nic *sp)
2049 struct net_device *dev = sp->dev;
2050 int i, j, blk = 0, off, buf_cnt = 0;
2052 struct sk_buff *skb;
2053 mac_info_t *mac_control;
2054 struct config_param *config;
2055 #ifdef CONFIG_2BUFF_MODE
2059 mac_control = &sp->mac_control;
2060 config = &sp->config;
/* For every ring, walk all RxDs: unmap DMA buffers, free skbs, zero
 * the descriptors, then reset the ring's get/put bookkeeping. */
2062 for (i = 0; i < config->rx_ring_num; i++) {
2063 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2064 off = j % (MAX_RXDS_PER_BLOCK + 1);
2065 rxdp = mac_control->rings[i].rx_blocks[blk].
2066 block_virt_addr + off;
2068 #ifndef CONFIG_2BUFF_MODE
/* End-of-block link descriptor: follow it to the next block. */
2069 if (rxdp->Control_1 == END_OF_BLOCK) {
2071 (RxD_t *) ((unsigned long) rxdp->
2077 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Descriptor was never given a buffer (not NIC-owned): just clear. */
2083 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2084 memset(rxdp, 0, sizeof(RxD_t));
2089 (struct sk_buff *) ((unsigned long) rxdp->
2092 #ifndef CONFIG_2BUFF_MODE
2093 pci_unmap_single(sp->pdev, (dma_addr_t)
2096 HEADER_ETHERNET_II_802_3_SIZE
2097 + HEADER_802_2_SIZE +
2099 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: unmap all three buffer pointers (ba_0, ba_1, skb). */
2101 ba = &mac_control->rings[i].ba[blk][off];
2102 pci_unmap_single(sp->pdev, (dma_addr_t)
2105 PCI_DMA_FROMDEVICE);
2106 pci_unmap_single(sp->pdev, (dma_addr_t)
2109 PCI_DMA_FROMDEVICE);
2110 pci_unmap_single(sp->pdev, (dma_addr_t)
2112 dev->mtu + BUF0_LEN + 4,
2113 PCI_DMA_FROMDEVICE);
2116 atomic_dec(&sp->rx_bufs_left[i]);
2119 memset(rxdp, 0, sizeof(RxD_t));
2121 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2122 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2123 mac_control->rings[i].rx_curr_put_info.offset = 0;
2124 mac_control->rings[i].rx_curr_get_info.offset = 0;
2125 atomic_set(&sp->rx_bufs_left[i], 0);
2126 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2127 dev->name, buf_cnt, i);
2132 * s2io_poll - Rx interrupt handler for NAPI support
2133 * @dev : pointer to the device structure.
2134 * @budget : The number of packets that were budgeted to be processed
2135 * during one pass through the 'Poll" function.
2137 * Comes into picture only if NAPI support has been incorporated. It does
2138 * the same thing that rx_intr_handler does, but not in an interrupt context
2139 * also It will process only a given number of packets.
2141 * 0 on success and 1 if there are No Rx packets to be processed.
2144 #if defined(CONFIG_S2IO_NAPI)
2145 static int s2io_poll(struct net_device *dev, int *budget)
2147 nic_t *nic = dev->priv;
2148 int pkt_cnt = 0, org_pkts_to_process;
2149 mac_info_t *mac_control;
2150 struct config_param *config;
2151 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2155 atomic_inc(&nic->isr_cnt);
2156 mac_control = &nic->mac_control;
2157 config = &nic->config;
/* Budget is capped by the device quota (pre-2.6.24 NAPI contract). */
2159 nic->pkts_to_process = *budget;
2160 if (nic->pkts_to_process > dev->quota)
2161 nic->pkts_to_process = dev->quota;
2162 org_pkts_to_process = nic->pkts_to_process;
/* Ack pending Rx traffic interrupts (write-back clears them). */
2164 val64 = readq(&bar0->rx_traffic_int);
2165 writeq(val64, &bar0->rx_traffic_int);
2167 for (i = 0; i < config->rx_ring_num; i++) {
2168 rx_intr_handler(&mac_control->rings[i]);
2169 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2170 if (!nic->pkts_to_process) {
2171 /* Quota for the current iteration has been met */
/* All rings drained within budget: complete the poll, replenish Rx
 * buffers, and re-enable Rx interrupts. */
2178 dev->quota -= pkt_cnt;
2180 netif_rx_complete(dev);
2182 for (i = 0; i < config->rx_ring_num; i++) {
2183 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2184 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2185 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2189 /* Re enable the Rx interrupts. */
2190 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2191 atomic_dec(&nic->isr_cnt);
/* Budget exhausted: stay on the poll list (interrupts remain off). */
2195 dev->quota -= pkt_cnt;
2198 for (i = 0; i < config->rx_ring_num; i++) {
2199 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2200 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2201 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2205 atomic_dec(&nic->isr_cnt);
2211 * rx_intr_handler - Rx interrupt handler
2212 * @nic: device private variable.
2214 * If the interrupt is because of a received frame or if the
2215 * receive ring contains fresh as yet un-processed frames,this function is
2216 * called. It picks out the RxD at which place the last Rx processing had
2217 * stopped and sends the skb to the OSM's Rx handler and then increments
2222 static void rx_intr_handler(ring_info_t *ring_data)
2224 nic_t *nic = ring_data->nic;
2225 struct net_device *dev = (struct net_device *) nic->dev;
2226 int get_block, get_offset, put_block, put_offset, ring_bufs;
2227 rx_curr_get_info_t get_info, put_info;
2229 struct sk_buff *skb;
2230 #ifndef CONFIG_S2IO_NAPI
2233 spin_lock(&nic->rx_lock);
/* Bail out early if the card is being reset/brought down. */
2234 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2235 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2236 __FUNCTION__, dev->name);
2237 spin_unlock(&nic->rx_lock);
/* Snapshot the ring's get/put cursors. */
2240 get_info = ring_data->rx_curr_get_info;
2241 get_block = get_info.block_index;
2242 put_info = ring_data->rx_curr_put_info;
2243 put_block = put_info.block_index;
2244 ring_bufs = get_info.ring_len+1;
2245 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2247 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2249 #ifndef CONFIG_S2IO_NAPI
/* put_pos is written by fill_rx_buffers(); read under put_lock. */
2250 spin_lock(&nic->put_lock);
2251 put_offset = ring_data->put_pos;
2252 spin_unlock(&nic->put_lock);
2254 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Process completed RxDs until we hit the put cursor or quota. */
2257 while (RXD_IS_UP2DT(rxdp) &&
2258 (((get_offset + 1) % ring_bufs) != put_offset)) {
2259 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2261 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2263 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2264 spin_unlock(&nic->rx_lock);
2267 #ifndef CONFIG_2BUFF_MODE
2268 pci_unmap_single(nic->pdev, (dma_addr_t)
2271 HEADER_ETHERNET_II_802_3_SIZE +
2274 PCI_DMA_FROMDEVICE);
2276 pci_unmap_single(nic->pdev, (dma_addr_t)
2278 BUF0_LEN, PCI_DMA_FROMDEVICE);
2279 pci_unmap_single(nic->pdev, (dma_addr_t)
2281 BUF1_LEN, PCI_DMA_FROMDEVICE);
2282 pci_unmap_single(nic->pdev, (dma_addr_t)
2284 dev->mtu + BUF0_LEN + 4,
2285 PCI_DMA_FROMDEVICE);
/* Hand the completed descriptor/skb to the OS-level Rx handler. */
2287 rx_osm_handler(ring_data, rxdp);
2289 ring_data->rx_curr_get_info.offset =
2291 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Crossed a block boundary: wrap offset and advance the block index. */
2293 if (get_info.offset &&
2294 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2295 get_info.offset = 0;
2296 ring_data->rx_curr_get_info.offset
2299 get_block %= ring_data->block_count;
2300 ring_data->rx_curr_get_info.block_index
2302 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2305 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2307 #ifdef CONFIG_S2IO_NAPI
2308 nic->pkts_to_process -= 1;
2309 if (!nic->pkts_to_process)
2313 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2317 spin_unlock(&nic->rx_lock);
2321 * tx_intr_handler - Transmit interrupt handler
2322 * @nic : device private variable
2324 * If an interrupt was raised to indicate DMA complete of the
2325 * Tx packet, this function is called. It identifies the last TxD
2326 * whose buffer was freed and frees all skbs whose data have already
2327 * DMA'ed into the NICs internal memory.
2332 static void tx_intr_handler(fifo_info_t *fifo_data)
2334 nic_t *nic = fifo_data->nic;
2335 struct net_device *dev = (struct net_device *) nic->dev;
2336 tx_curr_get_info_t get_info, put_info;
2337 struct sk_buff *skb;
2341 get_info = fifo_data->tx_curr_get_info;
2342 put_info = fifo_data->tx_curr_put_info;
2343 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Reclaim descriptors the NIC has finished with: not NIC-owned, not
 * past the put cursor, and carrying an skb in Host_Control. */
2345 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2346 (get_info.offset != put_info.offset) &&
2347 (txdlp->Host_Control)) {
2348 /* Check for TxD errors */
2349 if (txdlp->Control_1 & TXD_T_CODE) {
2350 unsigned long long err;
2351 err = txdlp->Control_1 & TXD_T_CODE;
2352 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2356 skb = (struct sk_buff *) ((unsigned long)
2357 txdlp->Host_Control);
2359 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2361 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2365 frg_cnt = skb_shinfo(skb)->nr_frags;
2366 nic->tx_pkt_count++;
/* Unmap the linear part, then each paged fragment. */
2368 pci_unmap_single(nic->pdev, (dma_addr_t)
2369 txdlp->Buffer_Pointer,
2370 skb->len - skb->data_len,
2376 for (j = 0; j < frg_cnt; j++, txdlp++) {
2378 &skb_shinfo(skb)->frags[j];
2379 pci_unmap_page(nic->pdev,
2389 (sizeof(TxD_t) * fifo_data->max_txds);
2391 /* Updating the statistics block */
2392 nic->stats.tx_bytes += skb->len;
/* Interrupt context: use the _irq variant of kfree_skb. */
2393 dev_kfree_skb_irq(skb);
2396 get_info.offset %= get_info.fifo_len + 1;
2397 txdlp = (TxD_t *) fifo_data->list_info
2398 [get_info.offset].list_virt_addr;
2399 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed, so the queue can accept packets again. */
2403 spin_lock(&nic->tx_lock);
2404 if (netif_queue_stopped(dev))
2405 netif_wake_queue(dev);
2406 spin_unlock(&nic->tx_lock);
2410 * alarm_intr_handler - Alarm Interrrupt handler
2411 * @nic: device private variable
2412 * Description: If the interrupt was neither because of Rx packet or Tx
2413 * complete, this function is called. If the interrupt was to indicate
2414 * a loss of link, the OSM link status handler is invoked for any other
2415 * alarm interrupt the block that raised the interrupt is displayed
2416 * and a H/W reset is issued.
2421 static void alarm_intr_handler(struct s2io_nic *nic)
2423 struct net_device *dev = (struct net_device *) nic->dev;
2424 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2425 register u64 val64 = 0, err_reg = 0;
2427 /* Handling link status change error Intr */
2428 err_reg = readq(&bar0->mac_rmac_err_reg);
/* Writing the read value back acks/clears the asserted bits. */
2429 writeq(err_reg, &bar0->mac_rmac_err_reg);
2430 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2431 schedule_work(&nic->set_link_task);
2434 /* Handling Ecc errors */
2435 val64 = readq(&bar0->mc_err_reg);
2436 writeq(val64, &bar0->mc_err_reg);
2437 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2438 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2439 nic->mac_control.stats_info->sw_stat.
2441 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2443 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
/* Double-bit ECC is treated as unrecoverable: stop Tx and
 * schedule a deferred device reset. */
2444 netif_stop_queue(dev);
2445 schedule_work(&nic->rst_timer_task);
2447 nic->mac_control.stats_info->sw_stat.
2452 /* In case of a serious error, the device will be Reset. */
2453 val64 = readq(&bar0->serr_source);
2454 if (val64 & SERR_SOURCE_ANY) {
2455 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2456 DBG_PRINT(ERR_DBG, "serious error!!\n");
2457 netif_stop_queue(dev);
2458 schedule_work(&nic->rst_timer_task);
2462 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2463 * Error occurs, the adapter will be recycled by disabling the
2464 * adapter enable bit and enabling it again after the device
2465 * becomes Quiescent.
2467 val64 = readq(&bar0->pcc_err_reg);
2468 writeq(val64, &bar0->pcc_err_reg);
2469 if (val64 & PCC_FB_ECC_DB_ERR) {
2470 u64 ac = readq(&bar0->adapter_control);
2471 ac &= ~(ADAPTER_CNTL_EN);
2472 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the posted disable write; re-enable is done
 * later by the scheduled set_link_task. */
2473 ac = readq(&bar0->adapter_control);
2474 schedule_work(&nic->set_link_task);
2477 /* Other type of interrupts are not being handled now, TODO */
2481 * wait_for_cmd_complete - waits for a command to complete.
2482 * @sp : private member of the device structure, which is a pointer to the
2483 * s2io_nic structure.
2484 * Description: Function that waits for a command to Write into RMAC
2485 * ADDR DATA registers to be completed and returns either success or
2486 * error depending on whether the command was complete or not.
2488 * SUCCESS on success and FAILURE on failure.
2491 int wait_for_cmd_complete(nic_t * sp)
2493 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2494 int ret = FAILURE, cnt = 0;
/* Poll rmac_addr_cmd_mem; the command is done once the strobe
 * "executing" bit clears. (Bounded by cnt — loop tail not visible here.) */
2498 val64 = readq(&bar0->rmac_addr_cmd_mem);
2499 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2512 * s2io_reset - Resets the card.
2513 * @sp : private member of the device structure.
2514 * Description: Function to Reset the card. This function then also
2515 * restores the previously saved PCI configuration space registers as
2516 * the card reset also resets the configuration space.
2521 void s2io_reset(nic_t * sp)
2523 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Writing SW_RESET_ALL to sw_reset resets the whole adapter,
 * including its PCI config space. */
2527 val64 = SW_RESET_ALL;
2528 writeq(val64, &bar0->sw_reset);
2531 * At this stage, if the PCI write is indeed completed, the
2532 * card is reset and so is the PCI Config space of the device.
2533 * So a read cannot be issued at this stage on any of the
2534 * registers to ensure the write into "sw_reset" register
2536 * Question: Is there any system call that will explicitly force
2537 * all the write commands still pending on the bus to be pushed
2539 * As of now I'am just giving a 250ms delay and hoping that the
2540 * PCI write to sw_reset register is done by this time.
2544 /* Restore the PCI state saved during initializarion. */
2545 pci_restore_state(sp->pdev);
2551 /* Set swapper to enable I/O register access */
2552 s2io_set_swapper(sp);
2554 /* Clear certain PCI/PCI-X fields after reset */
2555 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2556 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2557 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2559 val64 = readq(&bar0->txpic_int_reg);
2560 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2561 writeq(val64, &bar0->txpic_int_reg);
2563 /* Clearing PCIX Ecc status register */
/* NOTE(review): 0x68 is a raw config-space offset with no named
 * constant — confirm against the adapter's PCI-X capability layout. */
2564 pci_write_config_dword(sp->pdev, 0x68, 0);
2566 /* Reset device statistics maintained by OS */
2567 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2569 /* SXE-002: Configure link and activity LED to turn it off */
2570 subid = sp->pdev->subsystem_device;
2571 if ((subid & 0xFF) >= 0x07) {
2572 val64 = readq(&bar0->gpio_control);
2573 val64 |= 0x0000800000000000ULL;
2574 writeq(val64, &bar0->gpio_control);
2575 val64 = 0x0411040400000000ULL;
/* Magic write at bar0+0x2700 — presumably an undocumented LED
 * control register per errata SXE-002; verify against H/W spec. */
2576 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2579 sp->device_enabled_once = FALSE;
2583 * s2io_set_swapper - to set the swapper controle on the card
2584 * @sp : private member of the device structure,
2585 * pointer to the s2io_nic structure.
2586 * Description: Function to set the swapper control on the card
2587 * correctly depending on the 'endianness' of the system.
2589 * SUCCESS on success and FAILURE on failure.
2592 int s2io_set_swapper(nic_t * sp)
2594 struct net_device *dev = sp->dev;
2595 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2596 u64 val64, valt, valr;
2599 * Set proper endian settings and verify the same by reading
2600 * the PIF Feed-back register.
2603 val64 = readq(&bar0->pif_rd_swapper_fb);
/* 0x0123456789ABCDEF is the known feedback pattern: a match means
 * byte ordering between host and card is already correct. */
2604 if (val64 != 0x0123456789ABCDEFULL) {
2606 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2607 0x8100008181000081ULL, /* FE=1, SE=0 */
2608 0x4200004242000042ULL, /* FE=0, SE=1 */
2609 0}; /* FE=0, SE=0 */
/* Try each FE/SE swapper combination until the feedback register
 * reads back the expected pattern. */
2612 writeq(value[i], &bar0->swapper_ctrl);
2613 val64 = readq(&bar0->pif_rd_swapper_fb);
2614 if (val64 == 0x0123456789ABCDEFULL)
2619 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2621 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2622 (unsigned long long) val64);
2627 valr = readq(&bar0->swapper_ctrl);
/* Verify write-path swapping by round-tripping a known pattern
 * through the xmsi_address register. */
2630 valt = 0x0123456789ABCDEFULL;
2631 writeq(valt, &bar0->xmsi_address);
2632 val64 = readq(&bar0->xmsi_address);
2636 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2637 0x0081810000818100ULL, /* FE=1, SE=0 */
2638 0x0042420000424200ULL, /* FE=0, SE=1 */
2639 0}; /* FE=0, SE=0 */
2642 writeq((value[i] | valr), &bar0->swapper_ctrl);
2643 writeq(valt, &bar0->xmsi_address);
2644 val64 = readq(&bar0->xmsi_address);
2650 unsigned long long x = val64;
2651 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2652 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2656 val64 = readq(&bar0->swapper_ctrl);
/* Preserve the top 16 bits determined above; rebuild the rest. */
2657 val64 &= 0xFFFF000000000000ULL;
2661 * The device by default set to a big endian format, so a
2662 * big endian driver need not set anything.
2664 val64 |= (SWAPPER_CTRL_TXP_FE |
2665 SWAPPER_CTRL_TXP_SE |
2666 SWAPPER_CTRL_TXD_R_FE |
2667 SWAPPER_CTRL_TXD_W_FE |
2668 SWAPPER_CTRL_TXF_R_FE |
2669 SWAPPER_CTRL_RXD_R_FE |
2670 SWAPPER_CTRL_RXD_W_FE |
2671 SWAPPER_CTRL_RXF_W_FE |
2672 SWAPPER_CTRL_XMSI_FE |
2673 SWAPPER_CTRL_XMSI_SE |
2674 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2675 writeq(val64, &bar0->swapper_ctrl);
2678 * Initially we enable all bits to make it accessible by the
2679 * driver, then we selectively enable only those bits that
/* Little-endian host path: enable both FE and SE swap bits. */
2682 val64 |= (SWAPPER_CTRL_TXP_FE |
2683 SWAPPER_CTRL_TXP_SE |
2684 SWAPPER_CTRL_TXD_R_FE |
2685 SWAPPER_CTRL_TXD_R_SE |
2686 SWAPPER_CTRL_TXD_W_FE |
2687 SWAPPER_CTRL_TXD_W_SE |
2688 SWAPPER_CTRL_TXF_R_FE |
2689 SWAPPER_CTRL_RXD_R_FE |
2690 SWAPPER_CTRL_RXD_R_SE |
2691 SWAPPER_CTRL_RXD_W_FE |
2692 SWAPPER_CTRL_RXD_W_SE |
2693 SWAPPER_CTRL_RXF_W_FE |
2694 SWAPPER_CTRL_XMSI_FE |
2695 SWAPPER_CTRL_XMSI_SE |
2696 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2697 writeq(val64, &bar0->swapper_ctrl);
2699 val64 = readq(&bar0->swapper_ctrl);
2702 * Verifying if endian settings are accurate by reading a
2703 * feedback register.
2705 val64 = readq(&bar0->pif_rd_swapper_fb);
2706 if (val64 != 0x0123456789ABCDEFULL) {
2707 /* Endian settings are incorrect, calls for another dekko. */
2708 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2710 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2711 (unsigned long long) val64);
2718 /* ********************************************************* *
2719 * Functions defined below concern the OS part of the driver *
2720 * ********************************************************* */
2723 * s2io_open - open entry point of the driver
2724 * @dev : pointer to the device structure.
2726 * This function is the open entry point of the driver. It mainly calls a
2727 * function to allocate Rx buffers and inserts them into the buffer
2728 * descriptors and then enables the Rx part of the NIC.
2730 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2734 int s2io_open(struct net_device *dev)
2736 nic_t *sp = dev->priv;
2740 * Make sure you have link off by default every time
2741 * Nic is initialized
2743 netif_carrier_off(dev);
2744 sp->last_link_state = 0; /* Unkown link state */
2746 /* Initialize H/W and enable interrupts */
2747 if (s2io_card_up(sp)) {
2748 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2751 goto hw_init_failed;
2754 /* After proper initialization of H/W, register ISR */
/* SA_SHIRQ: the IRQ line may be shared with other devices. */
2755 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2758 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2760 goto isr_registration_failed;
2763 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2764 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2766 goto setting_mac_address_failed;
2769 netif_start_queue(dev);
/* Error unwind labels: release resources in reverse order of
 * acquisition (classic goto-cleanup idiom). */
2772 setting_mac_address_failed:
2773 free_irq(sp->pdev->irq, dev);
2774 isr_registration_failed:
2775 del_timer_sync(&sp->alarm_timer);
2782 * s2io_close -close entry point of the driver
2783 * @dev : device pointer.
2785 * This is the stop entry point of the driver. It needs to undo exactly
2786 * whatever was done by the open entry point,thus it's usually referred to
2787 * as the close function.Among other things this function mainly stops the
2788 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2790 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2794 int s2io_close(struct net_device *dev)
2796 nic_t *sp = dev->priv;
/* Drain any pending set_link/reset work items before teardown so
 * they cannot run against a half-closed device. */
2797 flush_scheduled_work();
2798 netif_stop_queue(dev);
2799 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2802 free_irq(sp->pdev->irq, dev);
2803 sp->device_close_flag = TRUE; /* Device is shut down. */
2808 * s2io_xmit - Tx entry point of te driver
2809 * @skb : the socket buffer containing the Tx data.
2810 * @dev : device pointer.
2812 * This function is the Tx entry point of the driver. S2IO NIC supports
2813 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2814 * NOTE: when device cant queue the pkt,just the trans_start variable will
2817 * 0 on success & 1 on failure.
2820 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2822 nic_t *sp = dev->priv;
2823 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2826 TxFIFO_element_t __iomem *tx_fifo;
2827 unsigned long flags;
2832 int vlan_priority = 0;
2833 mac_info_t *mac_control;
2834 struct config_param *config;
2836 mac_control = &sp->mac_control;
2837 config = &sp->config;
2839 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2840 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse to queue while the card is being reset. */
2841 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2842 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2844 spin_unlock_irqrestore(&sp->tx_lock, flags);
2851 /* Get Fifo number to Transmit based on vlan priority */
2852 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
2853 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
2854 vlan_priority = vlan_tag >> 13;
2855 queue = config->fifo_mapping[vlan_priority];
2858 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2859 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2860 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2863 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2864 /* Avoid "put" pointer going beyond "get" pointer */
2865 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2866 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2867 netif_stop_queue(dev);
2869 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* LSO/TSO: program the MSS into the first descriptor if the stack
 * requested segmentation offload. */
2873 mss = skb_shinfo(skb)->tso_size;
2875 txdp->Control_1 |= TXD_TCP_LSO_EN;
2876 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2880 frg_cnt = skb_shinfo(skb)->nr_frags;
2881 frg_len = skb->len - skb->data_len;
2883 txdp->Buffer_Pointer = pci_map_single
2884 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
/* Stash the skb pointer so tx_intr_handler can free it later. */
2885 txdp->Host_Control = (unsigned long) skb;
2886 if (skb->ip_summed == CHECKSUM_HW) {
2888 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2892 txdp->Control_2 |= config->tx_intr_type;
2894 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
2895 txdp->Control_2 |= TXD_VLAN_ENABLE;
2896 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
2899 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2900 TXD_GATHER_CODE_FIRST);
2901 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2903 /* For fragmented SKB. */
2904 for (i = 0; i < frg_cnt; i++) {
2905 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2907 txdp->Buffer_Pointer = (u64) pci_map_page
2908 (sp->pdev, frag->page, frag->page_offset,
2909 frag->size, PCI_DMA_TODEVICE);
2910 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2912 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Kick the hardware: write the TxDL physical address and list
 * control word into the per-queue Tx FIFO doorbell. */
2914 tx_fifo = mac_control->tx_FIFO_start[queue];
2915 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2916 writeq(val64, &tx_fifo->TxDL_Pointer);
2920 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2925 val64 |= TX_FIFO_SPECIAL_FUNC;
2927 writeq(val64, &tx_fifo->List_Control);
2930 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2931 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2933 /* Avoid "put" pointer going beyond "get" pointer */
2934 if (((put_off + 1) % queue_len) == get_off) {
2936 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2938 netif_stop_queue(dev);
2941 dev->trans_start = jiffies;
2942 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Timer callback: poll alarm sources every HZ/2 by re-arming itself. */
2948 s2io_alarm_handle(unsigned long data)
2950 nic_t *sp = (nic_t *)data;
2952 alarm_intr_handler(sp);
2953 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
2957 * s2io_isr - ISR handler of the device .
2958 * @irq: the irq of the device.
2959 * @dev_id: a void pointer to the dev structure of the NIC.
2960 * @pt_regs: pointer to the registers pushed on the stack.
2961 * Description: This function is the ISR handler of the device. It
2962 * identifies the reason for the interrupt and calls the relevant
2963 * service routines. As a contongency measure, this ISR allocates the
2964 * recv buffers, if their numbers are below the panic value which is
2965 * presently set to 25% of the original number of rcv buffers allocated.
2967 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2968 * IRQ_NONE: will be returned if interrupt is not from our device
2970 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2972 struct net_device *dev = (struct net_device *) dev_id;
2973 nic_t *sp = dev->priv;
2974 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2976 u64 reason = 0, val64;
2977 mac_info_t *mac_control;
2978 struct config_param *config;
/* isr_cnt tracks in-flight ISR invocations (paired dec on all exits). */
2980 atomic_inc(&sp->isr_cnt);
2981 mac_control = &sp->mac_control;
2982 config = &sp->config;
2985 * Identify the cause for interrupt and call the appropriate
2986 * interrupt handler. Causes for the interrupt could be;
2990 * 4. Error in any functional blocks of the NIC.
2992 reason = readq(&bar0->general_int_status);
2995 /* The interrupt was not raised by Xena. */
2996 atomic_dec(&sp->isr_cnt);
3000 #ifdef CONFIG_S2IO_NAPI
/* NAPI path: mask Rx interrupts and hand work to the poll routine. */
3001 if (reason & GEN_INTR_RXTRAFFIC) {
3002 if (netif_rx_schedule_prep(dev)) {
3003 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3005 __netif_rx_schedule(dev);
3009 /* If Intr is because of Rx Traffic */
3010 if (reason & GEN_INTR_RXTRAFFIC) {
3012 * rx_traffic_int reg is an R1 register, writing all 1's
3013 * will ensure that the actual interrupt causing bit get's
3014 * cleared and hence a read can be avoided.
3016 val64 = 0xFFFFFFFFFFFFFFFFULL;
3017 writeq(val64, &bar0->rx_traffic_int);
3018 for (i = 0; i < config->rx_ring_num; i++) {
3019 rx_intr_handler(&mac_control->rings[i]);
3024 /* If Intr is because of Tx Traffic */
3025 if (reason & GEN_INTR_TXTRAFFIC) {
3027 * tx_traffic_int reg is an R1 register, writing all 1's
3028 * will ensure that the actual interrupt causing bit get's
3029 * cleared and hence a read can be avoided.
3031 val64 = 0xFFFFFFFFFFFFFFFFULL;
3032 writeq(val64, &bar0->tx_traffic_int);
3034 for (i = 0; i < config->tx_fifo_num; i++)
3035 tx_intr_handler(&mac_control->fifos[i]);
3039 * If the Rx buffer count is below the panic threshold then
3040 * reallocate the buffers from the interrupt handler itself,
3041 * else schedule a tasklet to reallocate the buffers.
3043 #ifndef CONFIG_S2IO_NAPI
3044 for (i = 0; i < config->rx_ring_num; i++) {
3046 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3047 int level = rx_buffer_level(sp, rxb_size, i);
/* PANIC level: refill inline inside the ISR; LOW level: defer
 * the refill to the tasklet to keep the ISR short. */
3049 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3050 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3051 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3052 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3053 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3055 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3056 clear_bit(0, (&sp->tasklet_status));
3057 atomic_dec(&sp->isr_cnt);
3060 clear_bit(0, (&sp->tasklet_status));
3061 } else if (level == LOW) {
3062 tasklet_schedule(&sp->task);
3067 atomic_dec(&sp->isr_cnt);
/* Trigger a one-shot hardware statistics DMA and poll until the
 * stat_cfg busy bit (BIT 0) clears; gives up after a bounded count. */
3074 static void s2io_updt_stats(nic_t *sp)
3076 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3080 if (atomic_read(&sp->card_state) == CARD_UP) {
3081 /* Apprx 30us on a 133 MHz bus */
3082 val64 = SET_UPDT_CLICKS(10) |
3083 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3084 writeq(val64, &bar0->stat_cfg);
3087 val64 = readq(&bar0->stat_cfg);
3088 if (!(val64 & BIT(0)))
3092 break; /* Updt failed */
3098 * s2io_get_stats - Updates the device statistics structure.
3099 * @dev : pointer to the device structure.
3101 * This function updates the device statistics structure in the s2io_nic
3102 * structure and returns a pointer to the same.
3104 * pointer to the updated net_device_stats structure.
3107 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3109 nic_t *sp = dev->priv;
3110 mac_info_t *mac_control;
3111 struct config_param *config;
3114 mac_control = &sp->mac_control;
3115 config = &sp->config;
3117 /* Configure Stats for immediate updt */
3118 s2io_updt_stats(sp);
/* Hardware counters are little-endian in the stats block, hence
 * the le32_to_cpu conversions below. */
3120 sp->stats.tx_packets =
3121 le32_to_cpu(mac_control->stats_info->tmac_frms);
3122 sp->stats.tx_errors =
3123 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3124 sp->stats.rx_errors =
3125 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3126 sp->stats.multicast =
3127 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3128 sp->stats.rx_length_errors =
3129 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3131 return (&sp->stats);
3135 * s2io_set_multicast - entry point for multicast address enable/disable.
3136 * @dev : pointer to the device structure
3138 * This function is a driver entry point which gets called by the kernel
3139 * whenever multicast addresses must be enabled/disabled. This also gets
3140 * called to set/reset promiscuous mode. Depending on the deivce flag, we
3141 * determine, if multicast address must be enabled or if promiscuous mode
3142 * is to be disabled etc.
3147 static void s2io_set_multicast(struct net_device *dev)
3150 struct dev_mc_list *mclist;
3151 nic_t *sp = dev->priv;
3152 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3153 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3155 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3158 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3159 /* Enable all Multicast addresses */
/* Program the all-multicast filter slot via the indirect
 * rmac_addr_data/cmd register pair, then strobe the command. */
3160 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3161 &bar0->rmac_addr_data0_mem);
3162 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3163 &bar0->rmac_addr_data1_mem);
3164 val64 = RMAC_ADDR_CMD_MEM_WE |
3165 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3166 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3167 writeq(val64, &bar0->rmac_addr_cmd_mem);
3168 /* Wait till command completes */
3169 wait_for_cmd_complete(sp);
3172 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3173 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3174 /* Disable all Multicast addresses */
3175 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3176 &bar0->rmac_addr_data0_mem);
3177 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3178 &bar0->rmac_addr_data1_mem);
3179 val64 = RMAC_ADDR_CMD_MEM_WE |
3180 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3181 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3182 writeq(val64, &bar0->rmac_addr_cmd_mem);
3183 /* Wait till command completes */
3184 wait_for_cmd_complete(sp);
3187 sp->all_multi_pos = 0;
3190 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3191 /* Put the NIC into promiscuous mode */
3192 add = &bar0->mac_cfg;
3193 val64 = readq(&bar0->mac_cfg);
3194 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half must be unlocked by
 * writing RMAC_CFG_KEY(0x4C0D) immediately before it. */
3196 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3197 writel((u32) val64, add);
3198 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3199 writel((u32) (val64 >> 32), (add + 4));
3201 val64 = readq(&bar0->mac_cfg);
3202 sp->promisc_flg = 1;
3203 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3205 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3206 /* Remove the NIC from promiscuous mode */
3207 add = &bar0->mac_cfg;
3208 val64 = readq(&bar0->mac_cfg);
3209 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3211 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3212 writel((u32) val64, add);
3213 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3214 writel((u32) (val64 >> 32), (add + 4));
3216 val64 = readq(&bar0->mac_cfg);
3217 sp->promisc_flg = 0;
3218 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3222 /* Update individual M_CAST address list */
3223 if ((!sp->m_cast_flg) && dev->mc_count) {
3225 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3226 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3228 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3229 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3233 prev_cnt = sp->mc_addr_count;
3234 sp->mc_addr_count = dev->mc_count;
3236 /* Clear out the previous list of Mc in the H/W. */
3237 for (i = 0; i < prev_cnt; i++) {
3238 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3239 &bar0->rmac_addr_data0_mem);
3240 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3241 &bar0->rmac_addr_data1_mem);
3242 val64 = RMAC_ADDR_CMD_MEM_WE |
3243 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3244 RMAC_ADDR_CMD_MEM_OFFSET
3245 (MAC_MC_ADDR_START_OFFSET + i);
3246 writeq(val64, &bar0->rmac_addr_cmd_mem);
3248 /* Wait for command completes */
3249 if (wait_for_cmd_complete(sp)) {
3250 DBG_PRINT(ERR_DBG, "%s: Adding ",
3252 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3257 /* Create the new Rx filter list and update the same in H/W. */
3258 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3259 i++, mclist = mclist->next) {
3260 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte multicast MAC into a u64 for the data register. */
3262 for (j = 0; j < ETH_ALEN; j++) {
3263 mac_addr |= mclist->dmi_addr[j];
3267 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3268 &bar0->rmac_addr_data0_mem);
3269 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3270 &bar0->rmac_addr_data1_mem);
3271 val64 = RMAC_ADDR_CMD_MEM_WE |
3272 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3273 RMAC_ADDR_CMD_MEM_OFFSET
3274 (i + MAC_MC_ADDR_START_OFFSET);
3275 writeq(val64, &bar0->rmac_addr_cmd_mem);
3277 /* Wait for command completes */
3278 if (wait_for_cmd_complete(sp)) {
3279 DBG_PRINT(ERR_DBG, "%s: Adding ",
3281 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3289 * s2io_set_mac_addr - Programs the Xframe mac address
3290 * @dev : pointer to the device structure.
3291 * @addr: a uchar pointer to the new mac address which is to be set.
3292 * Description : This procedure will program the Xframe to receive
3293 * frames with new Mac Address
3294 * Return value: SUCCESS on success and an appropriate (-)ve integer
3295 * as defined in errno.h file on failure.
3298 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3300 nic_t *sp = dev->priv;
3301 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3302 register u64 val64, mac_addr = 0;
3306 * Set the new MAC address as the new unicast filter and reflect this
3307 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into a u64 for the data register. */
3310 for (i = 0; i < ETH_ALEN; i++) {
3312 mac_addr |= addr[i];
3315 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3316 &bar0->rmac_addr_data0_mem);
/* Offset 0 is the primary unicast filter slot. */
3319 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3320 RMAC_ADDR_CMD_MEM_OFFSET(0);
3321 writeq(val64, &bar0->rmac_addr_cmd_mem);
3322 /* Wait till command completes */
3323 if (wait_for_cmd_complete(sp)) {
3324 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3332 * s2io_ethtool_sset - Sets different link parameters.
3333 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3334 * @info: pointer to the structure with parameters given by ethtool to set
3337 * The function sets different link parameters provided by the user onto
3343 static int s2io_ethtool_sset(struct net_device *dev,
3344 struct ethtool_cmd *info)
3346 nic_t *sp = dev->priv;
/* Only 10G full-duplex fixed mode is valid: reject autoneg or any
 * other speed/duplex combination. */
3347 if ((info->autoneg == AUTONEG_ENABLE) ||
3348 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* Bounce the interface so the (unchanged) settings take effect. */
3351 s2io_close(sp->dev);
3359 * s2io_ethtol_gset - Return link specific information.
3360 * @sp : private member of the device structure, pointer to the
3361 * s2io_nic structure.
3362 * @info : pointer to the structure with parameters given by ethtool
3363 * to return link information.
3365 * Returns link specific information like speed, duplex etc.. to ethtool.
3367 * return 0 on success.
3370 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3372 nic_t *sp = dev->priv;
3373 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3374 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3375 info->port = PORT_FIBRE;
3376 /* info->transceiver?? TODO */
/* Report 10G/full only while the carrier is actually up. */
3378 if (netif_carrier_ok(sp->dev)) {
3379 info->speed = 10000;
3380 info->duplex = DUPLEX_FULL;
3386 info->autoneg = AUTONEG_DISABLE;
3391 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3392 * @sp : private member of the device structure, which is a pointer to the
3393 * s2io_nic structure.
3394 * @info : pointer to the structure with parameters given by ethtool to
3395 * return driver information.
3397 * Returns driver specefic information like name, version etc.. to ethtool.
3402 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3403 struct ethtool_drvinfo *info)
3405 nic_t *sp = dev->priv;
/* NOTE(review): strncpy does not guarantee NUL-termination when the
 * source fills the buffer — acceptable here only if the driver
 * name/version strings are known to be shorter; confirm. */
3407 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3408 strncpy(info->version, s2io_driver_version,
3409 sizeof(s2io_driver_version));
3410 strncpy(info->fw_version, "", 32);
3411 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3412 info->regdump_len = XENA_REG_SPACE;
3413 info->eedump_len = XENA_EEPROM_SPACE;
3414 info->testinfo_len = S2IO_TEST_LEN;
3415 info->n_stats = S2IO_STAT_LEN;
3419 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3420 * @sp: private member of the device structure, which is a pointer to the
3421 * s2io_nic structure.
3422 * @regs : pointer to the structure with parameters given by ethtool for
3423 * dumping the registers.
3424 * @reg_space: The input argumnet into which all the registers are dumped.
3426 * Dumps the entire register space of xFrame NIC into the user given
3432 static void s2io_ethtool_gregs(struct net_device *dev,
3433 struct ethtool_regs *regs, void *space)
3437 u8 *reg_space = (u8 *) space;
3438 nic_t *sp = dev->priv;
3440 regs->len = XENA_REG_SPACE;
3441 regs->version = sp->pdev->subsystem_device;
/* Copy the register space 8 bytes (one readq) at a time. */
3443 for (i = 0; i < regs->len; i += 8) {
3444 reg = readq(sp->bar0 + i);
/* NOTE(review): "®" below looks like a mis-encoded "&reg" (HTML
 * entity mangling) — confirm against the upstream source. */
3445 memcpy((reg_space + i), ®, 8);
3450 * s2io_phy_id - timer function that alternates adapter LED.
3451 * @data : address of the private member of the device structure, which
3452 * is a pointer to the s2io_nic structure, provided as an u32.
3453 * Description: This is actually the timer function that alternates the
3454 * adapter LED bit of the adapter control bit to set/reset every time on
3455 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3456 * once every second.
3458 static void s2io_phy_id(unsigned long data)
3460 nic_t *sp = (nic_t *) data;
3461 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3465 subid = sp->pdev->subsystem_device;
/* Newer subsystem revisions (low byte >= 0x07) drive the LED via
 * GPIO; older boards toggle ADAPTER_LED_ON in adapter_control. */
3466 if ((subid & 0xFF) >= 0x07) {
3467 val64 = readq(&bar0->gpio_control);
3468 val64 ^= GPIO_CTRL_GPIO_0;
3469 writeq(val64, &bar0->gpio_control);
3471 val64 = readq(&bar0->adapter_control);
3472 val64 ^= ADAPTER_LED_ON;
3473 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-period. */
3476 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3480 * s2io_ethtool_idnic - To physically identify the nic on the system.
3481 * @sp : private member of the device structure, which is a pointer to the
3482 * s2io_nic structure.
3483 * @id : pointer to the structure with identification parameters given by
3485 * Description: Used to physically identify the NIC on the system.
3486 * The Link LED will blink for a time specified by the user for
3488 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3489 * identification is possible only if it's link is up.
3491 * int , returns 0 on success
3494 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3496 u64 val64 = 0, last_gpio_ctrl_val;
3497 nic_t *sp = dev->priv;
3498 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3501 subid = sp->pdev->subsystem_device;
/* Save GPIO state so it can be restored after the blink. */
3502 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3503 if ((subid & 0xFF) < 0x07) {
3504 val64 = readq(&bar0->adapter_control);
3505 if (!(val64 & ADAPTER_CNTL_EN)) {
3507 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
3511 if (sp->id_timer.function == NULL) {
3512 init_timer(&sp->id_timer);
3513 sp->id_timer.function = s2io_phy_id;
3514 sp->id_timer.data = (unsigned long) sp;
3516 mod_timer(&sp->id_timer, jiffies);
/* data == 0 means "blink for the default maximum time". */
3518 msleep_interruptible(data * HZ);
3520 msleep_interruptible(MAX_FLICKER_TIME);
3521 del_timer_sync(&sp->id_timer);
3523 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3524 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3525 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3532 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3533 * @sp : private member of the device structure, which is a pointer to the
3534 * s2io_nic structure.
3535 * @ep : pointer to the structure with pause parameters given by ethtool.
3537 * Returns the Pause frame generation and reception capability of the NIC.
3541 static void s2io_ethtool_getpause_data(struct net_device *dev,
3542 struct ethtool_pauseparam *ep)
3545 nic_t *sp = dev->priv;
3546 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Translate the hardware pause-config bits into ethtool fields. */
3548 val64 = readq(&bar0->rmac_pause_cfg);
3549 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3550 ep->tx_pause = TRUE;
3551 if (val64 & RMAC_PAUSE_RX_ENABLE)
3552 ep->rx_pause = TRUE;
3553 ep->autoneg = FALSE;
3557 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3558 * @sp : private member of the device structure, which is a pointer to the
3559 * s2io_nic structure.
3560 * @ep : pointer to the structure with pause parameters given by ethtool.
3562 * It can be used to set or reset Pause frame generation or reception
3563 * support of the NIC.
3565 * int, returns 0 on Success
3568 static int s2io_ethtool_setpause_data(struct net_device *dev,
3569 struct ethtool_pauseparam *ep)
3572 nic_t *sp = dev->priv;
3573 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read-modify-write the pause config: set or clear the generation
 * and reception enable bits per the requested ethtool parameters. */
3575 val64 = readq(&bar0->rmac_pause_cfg);
3577 val64 |= RMAC_PAUSE_GEN_ENABLE;
3579 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3581 val64 |= RMAC_PAUSE_RX_ENABLE;
3583 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3584 writeq(val64, &bar0->rmac_pause_cfg);
3589 * read_eeprom - reads 4 bytes of data from user given offset.
3590 * @sp : private member of the device structure, which is a pointer to the
3591 * s2io_nic structure.
3592 * @off : offset at which the data must be written
3593 * @data : Its an output parameter where the data read at the given
3596 * Will read 4 bytes of data from the user given offset and return the
3598 * NOTE: Will allow to read only part of the EEPROM visible through the
3601 * -1 on failure and 0 on success.
3604 #define S2IO_DEV_ID 5
3605 static int read_eeprom(nic_t * sp, int off, u32 * data)
3610 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3612 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3613 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3614 I2C_CONTROL_CNTL_START;
3615 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3617 while (exit_cnt < 5) {
3618 val64 = readq(&bar0->i2c_control);
3619 if (I2C_CONTROL_CNTL_END(val64)) {
3620 *data = I2C_CONTROL_GET_DATA(val64);
3632 * write_eeprom - actually writes the relevant part of the data value.
3633 * @sp : private member of the device structure, which is a pointer to the
3634 * s2io_nic structure.
3635 * @off : offset at which the data must be written
3636 * @data : The data that is to be written
3637 * @cnt : Number of bytes of the data that are actually to be written into
3638 * the Eeprom. (max of 3)
3640 * Actually writes the relevant part of the data value into the Eeprom
3641 * through the I2C bus.
3643 * 0 on success, -1 on failure.
3646 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3648 int exit_cnt = 0, ret = -1;
3650 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3652 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3653 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3654 I2C_CONTROL_CNTL_START;
3655 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3657 while (exit_cnt < 5) {
3658 val64 = readq(&bar0->i2c_control);
3659 if (I2C_CONTROL_CNTL_END(val64)) {
3660 if (!(val64 & I2C_CONTROL_NACK))
/**
 * s2io_ethtool_geeprom - read EEPROM contents for ethtool.
 * @dev: device whose EEPROM is read.
 * @eeprom: ethtool request; offset/len select the region, magic is filled
 *	with the PCI vendor/device id.
 * @data_buf: output buffer the EEPROM words are copied into.
 *
 * Reads @eeprom->len bytes from @eeprom->offset, 4 bytes at a time, and
 * copies them into @data_buf.  The request is clamped to the EEPROM space
 * visible over I2C (XENA_EEPROM_SPACE).
 * NOTE(review): the derivation of 'valid' from 'data' and the error
 * return are elided in this excerpt; verify against the full source.
 */
static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 * data_buf)
	nic_t *sp = dev->priv;

	/* Magic lets userspace confirm it is talking to this NIC. */
	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	/* Clamp the request to the visible EEPROM region. */
	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
		memcpy((data_buf + i), &valid, 4);
/**
 * s2io_ethtool_seeprom - write user-provided values into the EEPROM.
 * @dev: device whose EEPROM is written.
 * @eeprom: ethtool request; magic must match the PCI vendor/device id.
 * @data_buf: user-supplied bytes to be written.
 *
 * Validates the magic, then writes the buffer one byte at a time via
 * write_eeprom() at increasing offsets.
 * Return: 0 on success, -EFAULT on failure (per surviving header comment).
 * NOTE(review): loop structure and some branches are elided in this
 * excerpt; verify against the full source.
 */
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
	int len = eeprom->len, cnt = 0;
	u32 valid = 0, data;
	nic_t *sp = dev->priv;

	/* Reject requests whose magic does not match this adapter. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		"ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
	/* Take the next byte and position it for the EEPROM write. */
	data = (u32) data_buf[cnt] & 0x000000FF;
		valid = (u32) (data << 24);
	if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
		"ETHTOOL_WRITE_EEPROM Err: Cannot ");
		"write into the specified offset\n");
/**
 * s2io_register_test - read/write registers in all three clock domains.
 * @sp: device private structure.
 * @data: set to the test result (visible failure paths elided here).
 *
 * Reads known-pattern registers in each clock domain and performs two
 * write/read-back checks on xmsi_data to verify register accessibility.
 * NOTE(review): failure-flag updates and the return are elided in this
 * excerpt; verify against the full source.
 */
static int s2io_register_test(nic_t * sp, uint64_t * data)
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	/* Each register below has a fixed expected power-on value. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	val64 = readq(&bar0->rx_queue_cfg);
	if (val64 != 0x0808080808080808ULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");

	/* Write test: pattern must read back unchanged. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");

	/* Second write test with the complementary pattern. */
	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
/**
 * s2io_eeprom_test - verify the EEPROM can be programmed via I2C_CONTROL.
 * @sp: device private structure.
 * @data: receives the test result (failure-count updates elided here).
 *
 * Exercises both expected-failure offsets (writes that must be rejected)
 * and expected-success offsets (write, read back, compare, then restore
 * to 0xFFFFFFFF).
 * NOTE(review): the fail-counter updates and final return are elided in
 * this excerpt; verify against the full source.
 */
static int s2io_eeprom_test(nic_t * sp, uint64_t * data)

	/* Test Write Error at offset 0 (write must fail). */
	if (!write_eeprom(sp, 0, 0, 3))

	/* Test Write at offset 4f0 (write + read-back must succeed). */
	if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
	if (read_eeprom(sp, 0x4F0, &ret_data))
	if (ret_data != 0x01234567)

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c (must fail). */
	if (!write_eeprom(sp, 0x07C, 0, 3))

	/* Test Write Request at offset 0x7fc (must succeed). */
	if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
	if (read_eeprom(sp, 0x7FC, &ret_data))
	if (ret_data != 0x01234567)

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);

	/* Remaining offsets are all expected to reject writes. */
	/* Test Write Error at offset 0x80 */
	if (!write_eeprom(sp, 0x080, 0, 3))

	/* Test Write Error at offset 0xfc */
	if (!write_eeprom(sp, 0x0FC, 0, 3))

	/* Test Write Error at offset 0x100 */
	if (!write_eeprom(sp, 0x100, 0, 3))

	/* Test Write Error at offset 4ec */
	if (!write_eeprom(sp, 0x4EC, 0, 3))
/**
 * s2io_bist_test - invoke the card's built-in self test (MemBist).
 * @sp: device private structure.
 * @data: receives the BIST completion code on success.
 *
 * Sets PCI_BIST_START and polls until the hardware clears it, then
 * reports the BIST result code.  The full source allows about 2 seconds
 * before declaring failure.
 * NOTE(review): the poll loop tail and return are elided in this excerpt.
 */
static int s2io_bist_test(nic_t * sp, uint64_t * data)
	int cnt = 0, ret = -1;

	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	bist |= PCI_BIST_START;
	/* NOTE(review): 'bist' is a byte read via pci_read_config_byte but
	 * written back with pci_write_config_word - a 16-bit write from an
	 * 8-bit value; confirm this is intentional (likely should be
	 * pci_write_config_byte). */
	pci_write_config_word(sp->pdev, PCI_BIST, bist);
	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	if (!(bist & PCI_BIST_START)) {
		/* BIST finished: low bits hold the completion code. */
		*data = (bist & PCI_BIST_CODE_MASK);
3928 * s2io-link_test - verifies the link state of the nic
3929 * @sp ; private member of the device structure, which is a pointer to the
3930 * s2io_nic structure.
3931 * @data: variable that returns the result of each of the test conducted by
3934 * The function verifies the link state of the NIC and updates the input
3935 * argument 'data' appropriately.
3940 static int s2io_link_test(nic_t * sp, uint64_t * data)
3942 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3945 val64 = readq(&bar0->adapter_status);
3946 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
/**
 * s2io_rldram_test - offline read/write test of the RldRam chip.
 * @sp: device private structure.
 * @data: receives the test result.
 *
 * Disables ECC, puts the memory controller into test mode, then runs two
 * iterations: write three 64-bit patterns (inverted on the second pass),
 * trigger the write, poll for completion, trigger the read-back, poll
 * again, and check the PASS bit.
 * NOTE(review): several lines (pattern-OR into val64 before the write
 * trigger, msleeps, pass/fail accounting, cleanup, return) are elided in
 * this excerpt; verify against the full source.
 */
static int s2io_rldram_test(nic_t * sp, uint64_t * data)
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int cnt, iteration = 0, test_pass = 0;

	/* ECC must be off while exercising the RAM with raw patterns. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	writeq(val64, &bar0->mc_rldram_test_ctrl);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: original patterns, then bit-inverted patterns. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Address/size of the region under test. */
		val64 = (u64) (0x0000003fffff0000ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		val64 = MC_RLDRAM_TEST_MODE;
		writeq(val64, &bar0->mc_rldram_test_ctrl);

		MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
		writeq(val64, &bar0->mc_rldram_test_ctrl);

		/* Poll for write-phase completion. */
		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)

		val64 = MC_RLDRAM_TEST_MODE;
		writeq(val64, &bar0->mc_rldram_test_ctrl);

		val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		writeq(val64, &bar0->mc_rldram_test_ctrl);

		/* Poll for read-phase completion. */
		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)

		/* PASS bit reflects the hardware comparison result. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (val64 & MC_RLDRAM_TEST_PASS)
/**
 * s2io_ethtool_test - run the driver's self tests (4 offline, 2 online).
 * @dev: device under test.
 * @ethtest: ethtool test descriptor; FL_FAILED is set on any failure.
 * (third parameter, the result array 'data', is elided in this excerpt)
 *
 * Offline mode closes the interface, runs register/rldram/eeprom/bist
 * tests, then (in the full source) restores the interface to its prior
 * state.  The link test runs in both modes.
 * NOTE(review): the offline/online branching, card re-open, and results
 * zeroing are partially elided here; verify against the full source.
 */
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
	nic_t *sp = dev->priv;
	/* Remember whether the interface was up so it can be restored. */
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;
		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;
		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;
		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;
		"%s: is not up, cannot run test\n",
	/* Online: only the link test can run. */
	if (s2io_link_test(sp, &data[2]))
		ethtest->flags |= ETH_TEST_FL_FAILED;
/**
 * s2io_get_ethtool_stats - fill the ethtool statistics array.
 * @dev: device being queried.
 * @estats: ethtool stats descriptor (unused beyond the ABI).
 * (third parameter, the u64 output array 'tmp_stats', is elided here)
 *
 * Refreshes the hardware statistics block via s2io_updt_stats(), then
 * copies each counter - byte-swapped from the NIC's on-card layout -
 * into the output array in the same order as ethtool_stats_keys.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
	nic_t *sp = dev->priv;
	StatInfo_t *stat_info = sp->mac_control.stats_info;

	/* Pull the latest counters from the adapter before copying. */
	s2io_updt_stats(sp);
	/* Tx MAC counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
	/* Rx MAC counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* Software-maintained counters (host byte order already). */
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4181 int s2io_ethtool_get_regs_len(struct net_device *dev)
4183 return (XENA_REG_SPACE);
4187 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4189 nic_t *sp = dev->priv;
4191 return (sp->rx_csum);
4193 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4195 nic_t *sp = dev->priv;
4204 int s2io_get_eeprom_len(struct net_device *dev)
4206 return (XENA_EEPROM_SPACE);
4209 int s2io_ethtool_self_test_count(struct net_device *dev)
4211 return (S2IO_TEST_LEN);
4213 void s2io_ethtool_get_strings(struct net_device *dev,
4214 u32 stringset, u8 * data)
4216 switch (stringset) {
4218 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4221 memcpy(data, ðtool_stats_keys,
4222 sizeof(ethtool_stats_keys));
4225 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4227 return (S2IO_STAT_LEN);
4230 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4233 dev->features |= NETIF_F_IP_CSUM;
4235 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool operations table wired into the net_device at probe time. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	/* TSO hooks are conditionally compiled in the full source. */
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
4272 * s2io_ioctl - Entry point for the Ioctl
4273 * @dev : Device pointer.
4274 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4275 * a proprietary structure used to pass information to the driver.
4276 * @cmd : This is used to distinguish between the different commands that
4277 * can be passed to the IOCTL functions.
4279 * Currently there are no special functionality supported in IOCTL, hence
4280 * function always return EOPNOTSUPPORTED
4283 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/**
 * s2io_change_mtu - change the device MTU.
 * @dev: device pointer.
 * @new_mtu: requested MTU; must be within [MIN_MTU, S2IO_JUMBO_SIZE].
 *
 * If the interface is running, the card is brought down and back up so
 * the new MTU takes effect; if it is down, the RMAC max payload register
 * is programmed directly.
 * Return: 0 on success, negative errno on failure.
 * NOTE(review): the dev->mtu assignment, card-down call and error
 * returns are elided in this excerpt; verify against the full source.
 */
int s2io_change_mtu(struct net_device *dev, int new_mtu)
	nic_t *sp = dev->priv;

	/* Reject out-of-range MTUs. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
	if (netif_running(dev)) {
		/* Bounce the card so the new MTU is programmed. */
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {		/* Device is down */
		XENA_dev_config_t __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the max payload length directly. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
/**
 * s2io_tasklet - bottom half of the ISR.
 * @dev_addr: net_device pointer cast to unsigned long.
 *
 * Scheduled from the ISR to do low-priority work; currently it only
 * replenishes the Rx buffer descriptors on every ring.  TASKLET_IN_USE
 * guards against re-entry; tasklet_status bit 0 is cleared on exit.
 * NOTE(review): loop braces and some declarations (i, ret) are elided in
 * this excerpt; verify against the full source.
 */
static void s2io_tasklet(unsigned long dev_addr)
	struct net_device *dev = (struct net_device *) dev_addr;
	nic_t *sp = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	if (!TASKLET_IN_USE) {
		/* Top up every Rx ring with fresh buffers. */
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s: Out of ",
				DBG_PRINT(ERR_DBG, "memory in tasklet\n");
			} else if (ret == -EFILL) {
				/* Ring already full - nothing to do. */
				"%s: Rx Ring %d is full\n",
		clear_bit(0, (&sp->tasklet_status));
/**
 * s2io_set_link - bring the adapter's link state in sync with hardware.
 * @data: nic_t pointer cast to unsigned long (work/task argument).
 *
 * Verifies the adapter is quiescent, then either enables the adapter and
 * LED and reports LINK_UP, or turns the LED GPIO off and reports
 * LINK_DOWN.  link_state bit 0 serializes against a concurrent reset.
 * NOTE(review): several closing braces and an else branch are elided in
 * this excerpt; verify control flow against the full source.
 */
static void s2io_set_link(unsigned long data)
	nic_t *nic = (nic_t *) data;
	struct net_device *dev = nic->dev;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;

	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
	subid = nic->pdev->subsystem_device;
	/*
	 * Allow a small delay for the NICs self initiated
	 * cleanup to complete.
	 */
	val64 = readq(&bar0->adapter_status);
	if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		if (LINK_IS_UP(val64)) {
			/* Enable the adapter and light the link LED. */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
				/* Drive the LED via GPIO on these boards. */
				val64 = readq(&bar0->gpio_control);
				val64 |= GPIO_CTRL_GPIO_0;
				writeq(val64, &bar0->gpio_control);
				val64 = readq(&bar0->gpio_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			/* Re-check: link may have dropped while enabling. */
			val64 = readq(&bar0->adapter_status);
			if (!LINK_IS_UP(val64)) {
				DBG_PRINT(ERR_DBG, "%s:", dev->name);
				DBG_PRINT(ERR_DBG, " Link down");
				DBG_PRINT(ERR_DBG, "after ");
				DBG_PRINT(ERR_DBG, "enabling ");
				DBG_PRINT(ERR_DBG, "device \n");
			if (nic->device_enabled_once == FALSE) {
				nic->device_enabled_once = TRUE;
			s2io_link(nic, LINK_UP);
			/* Link down path: turn the LED GPIO off. */
			if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
				val64 = readq(&bar0->gpio_control);
				val64 &= ~GPIO_CTRL_GPIO_0;
				writeq(val64, &bar0->gpio_control);
				val64 = readq(&bar0->gpio_control);
			s2io_link(nic, LINK_DOWN);
	} else {		/* NIC is not Quiescent. */
		DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
		DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
		netif_stop_queue(dev);
	clear_bit(0, &(nic->link_state));
/*
 * s2io_card_down - stop the adapter and release its runtime resources.
 * @sp: device private structure.
 *
 * Stops the alarm timer, waits out a concurrent s2io_set_link, marks the
 * card down, disables traffic, kills the tasklet, waits for the device
 * to go quiescent (then resets it) and for ISRs to drain, and finally
 * frees all Tx/Rx buffers under their respective locks.
 * NOTE(review): the stop-traffic call, reset, and several loop/brace
 * lines are elided in this excerpt; verify against the full source.
 */
static void s2io_card_down(nic_t * sp)
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */

	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	val64 = readq(&bar0->adapter_status);
	if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
		"s2io_close:Device not Quiescent ");
	DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
		  (unsigned long long) val64);

	/* Waiting till all Interrupt handlers are complete */
	if (!atomic_read(&sp->isr_cnt))

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - initialize the hardware and bring the adapter up.
 * @sp: device private structure.
 *
 * Programs the H/W registers, fills every Rx ring with buffers, sets the
 * receive (multicast) mode, arms the replenish tasklet, starts the NIC,
 * arms the alarm timer, and marks the card state UP.
 * NOTE(review): error-return values and some cleanup lines are elided in
 * this excerpt; verify against the full source.
 */
static int s2io_card_up(nic_t * sp)
	mac_info_t *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
			/* Undo any rings already filled before failing. */
			free_rx_buffers(sp);
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		/* Roll back everything acquired so far. */
		tasklet_kill(&sp->task);
		free_irq(dev->irq, dev);
		free_rx_buffers(sp);

	/* Periodic alarm handler fires every half second. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	atomic_set(&sp->card_state, CARD_UP);
/**
 * s2io_restart_nic - reset and restart the NIC (Tx watchdog worker).
 * @data: net_device pointer cast to unsigned long.
 *
 * Scheduled by s2io_tx_watchdog so the reset happens outside the
 * watchdog's locked context.  Brings the card down and back up, then
 * re-wakes the Tx queue.
 * NOTE(review): the s2io_card_down() call preceding s2io_card_up() is
 * elided in this excerpt; verify against the full source.
 */
static void s2io_restart_nic(unsigned long data)
	struct net_device *dev = (struct net_device *) data;
	nic_t *sp = dev->priv;

	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4585 * s2io_tx_watchdog - Watchdog for transmit side.
4586 * @dev : Pointer to net device structure
4588 * This function is triggered if the Tx Queue is stopped
4589 * for a pre-defined amount of time when the Interface is still up.
4590 * If the Interface is jammed in such a situation, the hardware is
4591 * reset (by s2io_close) and restarted again (by s2io_open) to
4592 * overcome any problem that might have been caused in the hardware.
4597 static void s2io_tx_watchdog(struct net_device *dev)
4599 nic_t *sp = dev->priv;
4601 if (netif_carrier_ok(dev)) {
4602 schedule_work(&sp->rst_timer_task);
/**
 * rx_osm_handler - OS-level processing of a received frame.
 * @ring_data: ring the RxD was extracted from.
 * @rxdp: the receive descriptor holding the frame.
 *
 * Checks the descriptor's transfer code, updates statistics, fixes up
 * the skb (2-buffer mode copies buffer 0 in front of buffer 2), sets
 * ip_summed from the NIC's L3/L4 checksum flags, and hands the skb to
 * the stack (NAPI or classic path, with VLAN acceleration if a tag is
 * present).
 * Return: SUCCESS on success, -1 on error (per surviving header text).
 * NOTE(review): several branch/close lines (error return, skb_put for
 * 1-buffer mode, FALSE_INDICATIONS check) are elided in this excerpt.
 */
static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
	nic_t *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* skb pointer was stashed in the descriptor when it was posted. */
	struct sk_buff *skb = (struct sk_buff *)
	    ((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
#ifdef CONFIG_2BUFF_MODE
	int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
	int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
	int get_block = ring_data->rx_curr_get_info.block_index;
	int get_off = ring_data->rx_curr_get_info.offset;
	buffAdd_t *ba = &ring_data->ba[get_block][get_off];
	unsigned char *buff;
	/* NOTE(review): stray double semicolon below (';;'). */
	u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;

	/* Non-zero T_CODE means the transfer had an error. */
	if (rxdp->Control_1 & RXD_T_CODE) {
		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
		sp->stats.rx_crc_errors++;
		atomic_dec(&sp->rx_bufs_left[ring_no]);
		rxdp->Host_Control = 0;

	/* Updating statistics */
	rxdp->Host_Control = 0;
	sp->stats.rx_packets++;
#ifndef CONFIG_2BUFF_MODE
	sp->stats.rx_bytes += len;
	sp->stats.rx_bytes += buf0_len + buf2_len;

#ifndef CONFIG_2BUFF_MODE
	/* 2-buffer mode: prepend buffer 0 (headers) before buffer 2. */
	buff = skb_push(skb, buf0_len);
	memcpy(buff, ba->ba_0, buf0_len);
	skb_put(skb, buf2_len);

	/* Trust the NIC's checksum verdict only for TCP/UDP frames. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		skb->ip_summed = CHECKSUM_NONE;

	skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_S2IO_NAPI
	if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
		/* Queueing the vlan frame to the upper layer */
		vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					 RXD_GET_VLAN_TAG(rxdp->Control_2));
		netif_receive_skb(skb);
	if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
		/* Queueing the vlan frame to the upper layer */
		vlan_hwaccel_rx(skb, sp->vlgrp,
				RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
	atomic_dec(&sp->rx_bufs_left[ring_no]);
4718 * s2io_link - stops/starts the Tx queue.
4719 * @sp : private member of the device structure, which is a pointer to the
4720 * s2io_nic structure.
4721 * @link : inidicates whether link is UP/DOWN.
4723 * This function stops/starts the Tx queue depending on whether the link
4724 * status of the NIC is is down or up. This is called by the Alarm
4725 * interrupt handler whenever a link change interrupt comes up.
4730 void s2io_link(nic_t * sp, int link)
4732 struct net_device *dev = (struct net_device *) sp->dev;
4734 if (link != sp->last_link_state) {
4735 if (link == LINK_DOWN) {
4736 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4737 netif_carrier_off(dev);
4739 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4740 netif_carrier_on(dev);
4743 sp->last_link_state = link;
4747 * get_xena_rev_id - to identify revision ID of xena.
4748 * @pdev : PCI Dev structure
4750 * Function to identify the Revision ID of xena.
4752 * returns the revision ID of the device.
4755 int get_xena_rev_id(struct pci_dev *pdev)
4759 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
/**
 * s2io_init_pci - program recommended PCI/PCI-X configuration values.
 * @sp: device private structure.
 *
 * Enables data-parity-error recovery in the PCI-X command register, sets
 * the PErr response bit in the PCI command register, and disables the
 * card's relaxed-ordering capability.  Each write is followed by a
 * read-back of the same register.
 * NOTE(review): the OR-masks written in the PCI-X updates are elided in
 * this excerpt; verify against the full source.
 */
static void s2io_init_pci(nic_t * sp)
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);

	/* Forcibly disabling relaxed ordering capability of the card. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (described in the file header). */
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/* NOTE(review): perm argument 1 is unusual - sysfs permissions are
 * normally 0 or an octal mode; confirm against the full source. */
module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
#ifndef CONFIG_S2IO_NAPI
/* Only meaningful without NAPI: caps packets processed per interrupt.
 * NOTE(review): matching #endif is elided in this excerpt. */
module_param(indicate_max_pkts, int, 0);
4819 * s2io_init_nic - Initialization of the adapter .
4820 * @pdev : structure containing the PCI related information of the device.
4821 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
4826 * control register is initialized to enable read and write into the I/O
4827 * registers of the device.
4829 * returns 0 on success and negative on failure.
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
	struct net_device *dev;
	int dma_flag = FALSE;	/* becomes TRUE if the 64bit DMA mask is accepted */
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	XENA_dev_config_t __iomem *bar0 = NULL;
	mac_info_t *mac_control;
	struct config_param *config;

#ifdef CONFIG_S2IO_NAPI
	DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");

	if ((ret = pci_enable_device(pdev))) {
		    "s2io_init_nic: pci_enable_device failed\n");

	/* Prefer a 64bit DMA mask, fall back to 32bit, else give up. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			    "Unable to obtain 64bit DMA for \
			    consistent allocations\n");
			pci_disable_device(pdev);
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
		pci_disable_device(pdev);

	if (pci_request_regions(pdev, s2io_driver_name)) {
		/* NOTE(review): trailing comma (comma operator) below looks
		 * like a typo for ';' -- harmless here, but worth fixing. */
		DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
		pci_disable_device(pdev);

	dev = alloc_etherdev(sizeof(nic_t));
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	memset(sp, 0, sizeof(nic_t));
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;

	/* Initialize some PCI/PCI-X fields of the NIC. */

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	tx_fifo_len[0] = DEFAULT_FIFO_LEN;	/* Default value. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* Short FIFOs (< 65 TxDs) need per-list interrupts instead of
		 * utilization-based interrupts to avoid Tx stalls. */
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
	config->max_txds = MAX_SKB_FRAGS;

	/* Rx side parameters. */
	rx_ring_sz[0] = SMALL_BLK_CNT;	/* Default value. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (MAX_RXDS_PER_BLOCK + 1);
		config->rx_cfg[i].ring_priority = i;

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
		goto mem_alloc_failed;

	/* Map BAR0 (device config registers) and BAR1 (Tx FIFO doorbells). */
	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
		goto bar0_remap_failed;

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
		goto bar1_remap_failed;

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	/* Each FIFO doorbell window is 128KB (0x20000) within BAR1. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
		    (sp->bar1 + (j * 0x00020000));

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
#if defined(CONFIG_S2IO_NAPI)
	dev->poll = s2io_poll;

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	/* Deferred-work handlers: NIC restart after Tx watchdog, link setup. */
	INIT_WORK(&sp->rst_timer_task,
		  (void (*)(void *)) s2io_restart_nic, dev);
	INIT_WORK(&sp->set_link_task,
		  (void (*)(void *)) s2io_set_link, sp);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
		goto set_swap_failed;

	/*
	 * Fix for all "FFs" MAC address problems observed on
	 */
	fix_mac_address(sp);

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(sp);

	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int), not ETH_ALEN (6);
	 * only the first 4 bytes get cleared here -- looks like a bug. */
	memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	    "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
	    sp->def_mac_addr[0].mac_addr[0],
	    sp->def_mac_addr[0].mac_addr[1],
	    sp->def_mac_addr[0].mac_addr[2],
	    sp->def_mac_addr[0].mac_addr[3],
	    sp->def_mac_addr[0].mac_addr[4],
	    sp->def_mac_addr[0].mac_addr[5]);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
	spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* NOTE(review): magic offset 0x2700 / LED values presumably
		 * come from the SXE-002 errata -- confirm against hw docs. */
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		goto register_failed;

	/* Initialize device name */
	strcpy(sp->name, dev->name);
	strcat(sp->name, ": Neterion Xframe I 10GbE adapter");

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 */
	netif_carrier_off(dev);

	/* Error unwind: release in reverse order of acquisition. */
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
5156 * s2io_rem_nic - Free the PCI device
5157 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");

	/* Unregister from the net core before tearing down resources so no
	 * new open/xmit calls can race with the teardown below. */
	unregister_netdev(dev);

	free_shared_mem(sp);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	/* Clear drvdata so a stale pointer is never seen after removal. */
	pci_set_drvdata(pdev, NULL);
5188 * s2io_starter - Entry point for the driver
5189 * Description: This function is the entry point for the driver. It verifies
5190 * the module loadable parameters and initializes PCI configuration space.
int __init s2io_starter(void)
	/* Register the PCI driver; probe callbacks do the real init work. */
	return pci_module_init(&s2io_driver);
5199 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
void s2io_closer(void)
	/* Unregistering triggers s2io_rem_nic() for every bound device. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry/exit hooks. */
module_init(s2io_starter);
module_exit(s2io_closer);