]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/s2io.c
[PATCH] S2io: VLAN support
[karo-tx-linux.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_len: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
59
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 1.7.7";
71
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73 {
74         int ret;
75
76         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79         return ret;
80 }
81
82 /*
83  * Cards with following subsystem_id have a link state indication
84  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85  * macro below identifies these cards given the subsystem_id.
86  */
87 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
88                 (((subid >= 0x600B) && (subid <= 0x600D)) || \
89                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
90
91 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
92                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
93 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
94 #define PANIC   1
95 #define LOW     2
96 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
97 {
98         int level = 0;
99         mac_info_t *mac_control;
100
101         mac_control = &sp->mac_control;
102         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
103                 level = LOW;
104                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
105                         level = PANIC;
106                 }
107         }
108
109         return level;
110 }
111
112 /* Ethtool related variables and Macros. */
113 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
114         "Register test\t(offline)",
115         "Eeprom test\t(offline)",
116         "Link test\t(online)",
117         "RLDRAM test\t(offline)",
118         "BIST Test\t(offline)"
119 };
120
121 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
122         {"tmac_frms"},
123         {"tmac_data_octets"},
124         {"tmac_drop_frms"},
125         {"tmac_mcst_frms"},
126         {"tmac_bcst_frms"},
127         {"tmac_pause_ctrl_frms"},
128         {"tmac_any_err_frms"},
129         {"tmac_vld_ip_octets"},
130         {"tmac_vld_ip"},
131         {"tmac_drop_ip"},
132         {"tmac_icmp"},
133         {"tmac_rst_tcp"},
134         {"tmac_tcp"},
135         {"tmac_udp"},
136         {"rmac_vld_frms"},
137         {"rmac_data_octets"},
138         {"rmac_fcs_err_frms"},
139         {"rmac_drop_frms"},
140         {"rmac_vld_mcst_frms"},
141         {"rmac_vld_bcst_frms"},
142         {"rmac_in_rng_len_err_frms"},
143         {"rmac_long_frms"},
144         {"rmac_pause_ctrl_frms"},
145         {"rmac_discarded_frms"},
146         {"rmac_usized_frms"},
147         {"rmac_osized_frms"},
148         {"rmac_frag_frms"},
149         {"rmac_jabber_frms"},
150         {"rmac_ip"},
151         {"rmac_ip_octets"},
152         {"rmac_hdr_err_ip"},
153         {"rmac_drop_ip"},
154         {"rmac_icmp"},
155         {"rmac_tcp"},
156         {"rmac_udp"},
157         {"rmac_err_drp_udp"},
158         {"rmac_pause_cnt"},
159         {"rmac_accepted_ip"},
160         {"rmac_err_tcp"},
161         {"\n DRIVER STATISTICS"},
162         {"single_bit_ecc_errs"},
163         {"double_bit_ecc_errs"},
164 };
165
166 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
167 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
168
169 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
170 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
171
172 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
173                         init_timer(&timer);                     \
174                         timer.function = handle;                \
175                         timer.data = (unsigned long) arg;       \
176                         mod_timer(&timer, (jiffies + exp))      \
177
178 /* Add the vlan */
179 static void s2io_vlan_rx_register(struct net_device *dev,
180                                         struct vlan_group *grp)
181 {
182         nic_t *nic = dev->priv;
183         unsigned long flags;
184
185         spin_lock_irqsave(&nic->tx_lock, flags);
186         nic->vlgrp = grp;
187         spin_unlock_irqrestore(&nic->tx_lock, flags);
188 }
189
190 /* Unregister the vlan */
191 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
192 {
193         nic_t *nic = dev->priv;
194         unsigned long flags;
195
196         spin_lock_irqsave(&nic->tx_lock, flags);
197         if (nic->vlgrp)
198                 nic->vlgrp->vlan_devices[vid] = NULL;
199         spin_unlock_irqrestore(&nic->tx_lock, flags);
200 }
201
202 /*
203  * Constants to be programmed into the Xena's registers, to configure
204  * the XAUI.
205  */
206
207 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
208 #define END_SIGN        0x0
209
210 static u64 default_mdio_cfg[] = {
211         /* Reset PMA PLL */
212         0xC001010000000000ULL, 0xC0010100000000E0ULL,
213         0xC0010100008000E4ULL,
214         /* Remove Reset from PMA PLL */
215         0xC001010000000000ULL, 0xC0010100000000E0ULL,
216         0xC0010100000000E4ULL,
217         END_SIGN
218 };
219
220 static u64 default_dtx_cfg[] = {
221         0x8000051500000000ULL, 0x80000515000000E0ULL,
222         0x80000515D93500E4ULL, 0x8001051500000000ULL,
223         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
224         0x8002051500000000ULL, 0x80020515000000E0ULL,
225         0x80020515F21000E4ULL,
226         /* Set PADLOOPBACKN */
227         0x8002051500000000ULL, 0x80020515000000E0ULL,
228         0x80020515B20000E4ULL, 0x8003051500000000ULL,
229         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
230         0x8004051500000000ULL, 0x80040515000000E0ULL,
231         0x80040515B20000E4ULL, 0x8005051500000000ULL,
232         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
233         SWITCH_SIGN,
234         /* Remove PADLOOPBACKN */
235         0x8002051500000000ULL, 0x80020515000000E0ULL,
236         0x80020515F20000E4ULL, 0x8003051500000000ULL,
237         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
238         0x8004051500000000ULL, 0x80040515000000E0ULL,
239         0x80040515F20000E4ULL, 0x8005051500000000ULL,
240         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
241         END_SIGN
242 };
243
244 /*
245  * Constants for Fixing the MacAddress problem seen mostly on
246  * Alpha machines.
247  */
248 static u64 fix_mac[] = {
249         0x0060000000000000ULL, 0x0060600000000000ULL,
250         0x0040600000000000ULL, 0x0000600000000000ULL,
251         0x0020600000000000ULL, 0x0060600000000000ULL,
252         0x0020600000000000ULL, 0x0060600000000000ULL,
253         0x0020600000000000ULL, 0x0060600000000000ULL,
254         0x0020600000000000ULL, 0x0060600000000000ULL,
255         0x0020600000000000ULL, 0x0060600000000000ULL,
256         0x0020600000000000ULL, 0x0060600000000000ULL,
257         0x0020600000000000ULL, 0x0060600000000000ULL,
258         0x0020600000000000ULL, 0x0060600000000000ULL,
259         0x0020600000000000ULL, 0x0060600000000000ULL,
260         0x0020600000000000ULL, 0x0060600000000000ULL,
261         0x0020600000000000ULL, 0x0000600000000000ULL,
262         0x0040600000000000ULL, 0x0060600000000000ULL,
263         END_SIGN
264 };
265
266 /* Module Loadable parameters. */
267 static unsigned int tx_fifo_num = 1;
268 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
269     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
270 static unsigned int rx_ring_num = 1;
271 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
272     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
273 static unsigned int rts_frm_len[MAX_RX_RINGS] =
274     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
275 static unsigned int use_continuous_tx_intrs = 1;
276 static unsigned int rmac_pause_time = 65535;
277 static unsigned int mc_pause_threshold_q0q3 = 187;
278 static unsigned int mc_pause_threshold_q4q7 = 187;
279 static unsigned int shared_splits;
280 static unsigned int tmac_util_period = 5;
281 static unsigned int rmac_util_period = 5;
282 #ifndef CONFIG_S2IO_NAPI
283 static unsigned int indicate_max_pkts;
284 #endif
285
286 /*
287  * S2IO device table.
288  * This table lists all the devices that this driver supports.
289  */
290 static struct pci_device_id s2io_tbl[] __devinitdata = {
291         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
292          PCI_ANY_ID, PCI_ANY_ID},
293         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
294          PCI_ANY_ID, PCI_ANY_ID},
295         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
296          PCI_ANY_ID, PCI_ANY_ID},
297         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
298          PCI_ANY_ID, PCI_ANY_ID},
299         {0,}
300 };
301
302 MODULE_DEVICE_TABLE(pci, s2io_tbl);
303
304 static struct pci_driver s2io_driver = {
305       .name = "S2IO",
306       .id_table = s2io_tbl,
307       .probe = s2io_init_nic,
308       .remove = __devexit_p(s2io_rem_nic),
309 };
310
311 /* A simplifier macro used both by init and free shared_mem Fns(). */
312 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
313
314 /**
315  * init_shared_mem - Allocation and Initialization of Memory
316  * @nic: Device private variable.
317  * Description: The function allocates all the memory areas shared
318  * between the NIC and the driver. This includes Tx descriptors,
319  * Rx descriptors and the statistics block.
320  */
321
322 static int init_shared_mem(struct s2io_nic *nic)
323 {
324         u32 size;
325         void *tmp_v_addr, *tmp_v_addr_next;
326         dma_addr_t tmp_p_addr, tmp_p_addr_next;
327         RxD_block_t *pre_rxd_blk = NULL;
328         int i, j, blk_cnt, rx_sz, tx_sz;
329         int lst_size, lst_per_page;
330         struct net_device *dev = nic->dev;
331 #ifdef CONFIG_2BUFF_MODE
332         u64 tmp;
333         buffAdd_t *ba;
334 #endif
335
336         mac_info_t *mac_control;
337         struct config_param *config;
338
339         mac_control = &nic->mac_control;
340         config = &nic->config;
341
342
343         /* Allocation and initialization of TXDLs in FIOFs */
344         size = 0;
345         for (i = 0; i < config->tx_fifo_num; i++) {
346                 size += config->tx_cfg[i].fifo_len;
347         }
348         if (size > MAX_AVAILABLE_TXDS) {
349                 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
350                           dev->name);
351                 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
352                 DBG_PRINT(ERR_DBG, "that can be used\n");
353                 return FAILURE;
354         }
355
356         lst_size = (sizeof(TxD_t) * config->max_txds);
357         tx_sz = lst_size * size;
358         lst_per_page = PAGE_SIZE / lst_size;
359
360         for (i = 0; i < config->tx_fifo_num; i++) {
361                 int fifo_len = config->tx_cfg[i].fifo_len;
362                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
363                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
364                                                           GFP_KERNEL);
365                 if (!mac_control->fifos[i].list_info) {
366                         DBG_PRINT(ERR_DBG,
367                                   "Malloc failed for list_info\n");
368                         return -ENOMEM;
369                 }
370                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
371         }
372         for (i = 0; i < config->tx_fifo_num; i++) {
373                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
374                                                 lst_per_page);
375                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
376                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
377                     config->tx_cfg[i].fifo_len - 1;
378                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
379                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
380                     config->tx_cfg[i].fifo_len - 1;
381                 mac_control->fifos[i].fifo_no = i;
382                 mac_control->fifos[i].nic = nic;
383                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
384
385                 for (j = 0; j < page_num; j++) {
386                         int k = 0;
387                         dma_addr_t tmp_p;
388                         void *tmp_v;
389                         tmp_v = pci_alloc_consistent(nic->pdev,
390                                                      PAGE_SIZE, &tmp_p);
391                         if (!tmp_v) {
392                                 DBG_PRINT(ERR_DBG,
393                                           "pci_alloc_consistent ");
394                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
395                                 return -ENOMEM;
396                         }
397                         while (k < lst_per_page) {
398                                 int l = (j * lst_per_page) + k;
399                                 if (l == config->tx_cfg[i].fifo_len)
400                                         break;
401                                 mac_control->fifos[i].list_info[l].list_virt_addr =
402                                     tmp_v + (k * lst_size);
403                                 mac_control->fifos[i].list_info[l].list_phy_addr =
404                                     tmp_p + (k * lst_size);
405                                 k++;
406                         }
407                 }
408         }
409
410         /* Allocation and initialization of RXDs in Rings */
411         size = 0;
412         for (i = 0; i < config->rx_ring_num; i++) {
413                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
414                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
415                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
416                                   i);
417                         DBG_PRINT(ERR_DBG, "RxDs per Block");
418                         return FAILURE;
419                 }
420                 size += config->rx_cfg[i].num_rxd;
421                 mac_control->rings[i].block_count =
422                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
423                 mac_control->rings[i].pkt_cnt =
424                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
425         }
426         size = (size * (sizeof(RxD_t)));
427         rx_sz = size;
428
429         for (i = 0; i < config->rx_ring_num; i++) {
430                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
431                 mac_control->rings[i].rx_curr_get_info.offset = 0;
432                 mac_control->rings[i].rx_curr_get_info.ring_len =
433                     config->rx_cfg[i].num_rxd - 1;
434                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
435                 mac_control->rings[i].rx_curr_put_info.offset = 0;
436                 mac_control->rings[i].rx_curr_put_info.ring_len =
437                     config->rx_cfg[i].num_rxd - 1;
438                 mac_control->rings[i].nic = nic;
439                 mac_control->rings[i].ring_no = i;
440
441                 blk_cnt =
442                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443                 /*  Allocating all the Rx blocks */
444                 for (j = 0; j < blk_cnt; j++) {
445 #ifndef CONFIG_2BUFF_MODE
446                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
447 #else
448                         size = SIZE_OF_BLOCK;
449 #endif
450                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
451                                                           &tmp_p_addr);
452                         if (tmp_v_addr == NULL) {
453                                 /*
454                                  * In case of failure, free_shared_mem()
455                                  * is called, which should free any
456                                  * memory that was alloced till the
457                                  * failure happened.
458                                  */
459                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
460                                     tmp_v_addr;
461                                 return -ENOMEM;
462                         }
463                         memset(tmp_v_addr, 0, size);
464                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
465                                 tmp_v_addr;
466                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
467                                 tmp_p_addr;
468                 }
469                 /* Interlinking all Rx Blocks */
470                 for (j = 0; j < blk_cnt; j++) {
471                         tmp_v_addr =
472                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
473                         tmp_v_addr_next =
474                                 mac_control->rings[i].rx_blocks[(j + 1) %
475                                               blk_cnt].block_virt_addr;
476                         tmp_p_addr =
477                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
478                         tmp_p_addr_next =
479                                 mac_control->rings[i].rx_blocks[(j + 1) %
480                                               blk_cnt].block_dma_addr;
481
482                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
483                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
484                                                                  * marker.
485                                                                  */
486 #ifndef CONFIG_2BUFF_MODE
487                         pre_rxd_blk->reserved_2_pNext_RxD_block =
488                             (unsigned long) tmp_v_addr_next;
489 #endif
490                         pre_rxd_blk->pNext_RxD_Blk_physical =
491                             (u64) tmp_p_addr_next;
492                 }
493         }
494
495 #ifdef CONFIG_2BUFF_MODE
496         /*
497          * Allocation of Storages for buffer addresses in 2BUFF mode
498          * and the buffers as well.
499          */
500         for (i = 0; i < config->rx_ring_num; i++) {
501                 blk_cnt =
502                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
503                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
504                                      GFP_KERNEL);
505                 if (!mac_control->rings[i].ba)
506                         return -ENOMEM;
507                 for (j = 0; j < blk_cnt; j++) {
508                         int k = 0;
509                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
510                                                  (MAX_RXDS_PER_BLOCK + 1)),
511                                                 GFP_KERNEL);
512                         if (!mac_control->rings[i].ba[j])
513                                 return -ENOMEM;
514                         while (k != MAX_RXDS_PER_BLOCK) {
515                                 ba = &mac_control->rings[i].ba[j][k];
516
517                                 ba->ba_0_org = (void *) kmalloc
518                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
519                                 if (!ba->ba_0_org)
520                                         return -ENOMEM;
521                                 tmp = (u64) ba->ba_0_org;
522                                 tmp += ALIGN_SIZE;
523                                 tmp &= ~((u64) ALIGN_SIZE);
524                                 ba->ba_0 = (void *) tmp;
525
526                                 ba->ba_1_org = (void *) kmalloc
527                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
528                                 if (!ba->ba_1_org)
529                                         return -ENOMEM;
530                                 tmp = (u64) ba->ba_1_org;
531                                 tmp += ALIGN_SIZE;
532                                 tmp &= ~((u64) ALIGN_SIZE);
533                                 ba->ba_1 = (void *) tmp;
534                                 k++;
535                         }
536                 }
537         }
538 #endif
539
540         /* Allocation and initialization of Statistics block */
541         size = sizeof(StatInfo_t);
542         mac_control->stats_mem = pci_alloc_consistent
543             (nic->pdev, size, &mac_control->stats_mem_phy);
544
545         if (!mac_control->stats_mem) {
546                 /*
547                  * In case of failure, free_shared_mem() is called, which
548                  * should free any memory that was alloced till the
549                  * failure happened.
550                  */
551                 return -ENOMEM;
552         }
553         mac_control->stats_mem_sz = size;
554
555         tmp_v_addr = mac_control->stats_mem;
556         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
557         memset(tmp_v_addr, 0, size);
558         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
559                   (unsigned long long) tmp_p_addr);
560
561         return SUCCESS;
562 }
563
564 /**
565  * free_shared_mem - Free the allocated Memory
566  * @nic:  Device private variable.
567  * Description: This function is to free all memory locations allocated by
568  * the init_shared_mem() function and return it to the kernel.
569  */
570
571 static void free_shared_mem(struct s2io_nic *nic)
572 {
573         int i, j, blk_cnt, size;
574         void *tmp_v_addr;
575         dma_addr_t tmp_p_addr;
576         mac_info_t *mac_control;
577         struct config_param *config;
578         int lst_size, lst_per_page;
579
580
581         if (!nic)
582                 return;
583
584         mac_control = &nic->mac_control;
585         config = &nic->config;
586
587         lst_size = (sizeof(TxD_t) * config->max_txds);
588         lst_per_page = PAGE_SIZE / lst_size;
589
590         for (i = 0; i < config->tx_fifo_num; i++) {
591                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
592                                                 lst_per_page);
593                 for (j = 0; j < page_num; j++) {
594                         int mem_blks = (j * lst_per_page);
595                         if (!mac_control->fifos[i].list_info[mem_blks].
596                             list_virt_addr)
597                                 break;
598                         pci_free_consistent(nic->pdev, PAGE_SIZE,
599                                             mac_control->fifos[i].
600                                             list_info[mem_blks].
601                                             list_virt_addr,
602                                             mac_control->fifos[i].
603                                             list_info[mem_blks].
604                                             list_phy_addr);
605                 }
606                 kfree(mac_control->fifos[i].list_info);
607         }
608
609 #ifndef CONFIG_2BUFF_MODE
610         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
611 #else
612         size = SIZE_OF_BLOCK;
613 #endif
614         for (i = 0; i < config->rx_ring_num; i++) {
615                 blk_cnt = mac_control->rings[i].block_count;
616                 for (j = 0; j < blk_cnt; j++) {
617                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
618                                 block_virt_addr;
619                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
620                                 block_dma_addr;
621                         if (tmp_v_addr == NULL)
622                                 break;
623                         pci_free_consistent(nic->pdev, size,
624                                             tmp_v_addr, tmp_p_addr);
625                 }
626         }
627
628 #ifdef CONFIG_2BUFF_MODE
629         /* Freeing buffer storage addresses in 2BUFF mode. */
630         for (i = 0; i < config->rx_ring_num; i++) {
631                 blk_cnt =
632                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
633                 for (j = 0; j < blk_cnt; j++) {
634                         int k = 0;
635                         if (!mac_control->rings[i].ba[j])
636                                 continue;
637                         while (k != MAX_RXDS_PER_BLOCK) {
638                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
639                                 kfree(ba->ba_0_org);
640                                 kfree(ba->ba_1_org);
641                                 k++;
642                         }
643                         kfree(mac_control->rings[i].ba[j]);
644                 }
645                 if (mac_control->rings[i].ba)
646                         kfree(mac_control->rings[i].ba);
647         }
648 #endif
649
650         if (mac_control->stats_mem) {
651                 pci_free_consistent(nic->pdev,
652                                     mac_control->stats_mem_sz,
653                                     mac_control->stats_mem,
654                                     mac_control->stats_mem_phy);
655         }
656 }
657
658 /**
659  *  init_nic - Initialization of hardware
660  *  @nic: device peivate variable
661  *  Description: The function sequentially configures every block
662  *  of the H/W from their reset values.
663  *  Return Value:  SUCCESS on success and
664  *  '-1' on failure (endian settings incorrect).
665  */
666
667 static int init_nic(struct s2io_nic *nic)
668 {
669         XENA_dev_config_t __iomem *bar0 = nic->bar0;
670         struct net_device *dev = nic->dev;
671         register u64 val64 = 0;
672         void __iomem *add;
673         u32 time;
674         int i, j;
675         mac_info_t *mac_control;
676         struct config_param *config;
677         int mdio_cnt = 0, dtx_cnt = 0;
678         unsigned long long mem_share;
679         int mem_size;
680
681         mac_control = &nic->mac_control;
682         config = &nic->config;
683
684         /* to set the swapper controle on the card */
685         if(s2io_set_swapper(nic)) {
686                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
687                 return -1;
688         }
689
690         /* Remove XGXS from reset state */
691         val64 = 0;
692         writeq(val64, &bar0->sw_reset);
693         msleep(500);
694         val64 = readq(&bar0->sw_reset);
695
696         /*  Enable Receiving broadcasts */
697         add = &bar0->mac_cfg;
698         val64 = readq(&bar0->mac_cfg);
699         val64 |= MAC_RMAC_BCAST_ENABLE;
700         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
701         writel((u32) val64, add);
702         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
703         writel((u32) (val64 >> 32), (add + 4));
704
705         /* Read registers in all blocks */
706         val64 = readq(&bar0->mac_int_mask);
707         val64 = readq(&bar0->mc_int_mask);
708         val64 = readq(&bar0->xgxs_int_mask);
709
710         /*  Set MTU */
711         val64 = dev->mtu;
712         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
713
714         /*
715          * Configuring the XAUI Interface of Xena.
716          * ***************************************
717          * To Configure the Xena's XAUI, one has to write a series
718          * of 64 bit values into two registers in a particular
719          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
720          * which will be defined in the array of configuration values
721          * (default_dtx_cfg & default_mdio_cfg) at appropriate places
722          * to switch writing from one regsiter to another. We continue
723          * writing these values until we encounter the 'END_SIGN' macro.
724          * For example, After making a series of 21 writes into
725          * dtx_control register the 'SWITCH_SIGN' appears and hence we
726          * start writing into mdio_control until we encounter END_SIGN.
727          */
728         while (1) {
729               dtx_cfg:
730                 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
731                         if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
732                                 dtx_cnt++;
733                                 goto mdio_cfg;
734                         }
735                         SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
736                                           &bar0->dtx_control, UF);
737                         val64 = readq(&bar0->dtx_control);
738                         dtx_cnt++;
739                 }
740               mdio_cfg:
741                 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
742                         if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
743                                 mdio_cnt++;
744                                 goto dtx_cfg;
745                         }
746                         SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
747                                           &bar0->mdio_control, UF);
748                         val64 = readq(&bar0->mdio_control);
749                         mdio_cnt++;
750                 }
751                 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
752                     (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
753                         break;
754                 } else {
755                         goto dtx_cfg;
756                 }
757         }
758
759         /*  Tx DMA Initialization */
760         val64 = 0;
761         writeq(val64, &bar0->tx_fifo_partition_0);
762         writeq(val64, &bar0->tx_fifo_partition_1);
763         writeq(val64, &bar0->tx_fifo_partition_2);
764         writeq(val64, &bar0->tx_fifo_partition_3);
765
766
767         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
768                 val64 |=
769                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
770                          13) | vBIT(config->tx_cfg[i].fifo_priority,
771                                     ((i * 32) + 5), 3);
772
773                 if (i == (config->tx_fifo_num - 1)) {
774                         if (i % 2 == 0)
775                                 i++;
776                 }
777
778                 switch (i) {
779                 case 1:
780                         writeq(val64, &bar0->tx_fifo_partition_0);
781                         val64 = 0;
782                         break;
783                 case 3:
784                         writeq(val64, &bar0->tx_fifo_partition_1);
785                         val64 = 0;
786                         break;
787                 case 5:
788                         writeq(val64, &bar0->tx_fifo_partition_2);
789                         val64 = 0;
790                         break;
791                 case 7:
792                         writeq(val64, &bar0->tx_fifo_partition_3);
793                         break;
794                 }
795         }
796
797         /* Enable Tx FIFO partition 0. */
798         val64 = readq(&bar0->tx_fifo_partition_0);
799         val64 |= BIT(0);        /* To enable the FIFO partition. */
800         writeq(val64, &bar0->tx_fifo_partition_0);
801
802         /*
803          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
804          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
805          */
806         if (get_xena_rev_id(nic->pdev) < 4)
807                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
808
809         val64 = readq(&bar0->tx_fifo_partition_0);
810         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
811                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
812
813         /*
814          * Initialization of Tx_PA_CONFIG register to ignore packet
815          * integrity checking.
816          */
817         val64 = readq(&bar0->tx_pa_cfg);
818         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
819             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
820         writeq(val64, &bar0->tx_pa_cfg);
821
822         /* Rx DMA intialization. */
823         val64 = 0;
824         for (i = 0; i < config->rx_ring_num; i++) {
825                 val64 |=
826                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
827                          3);
828         }
829         writeq(val64, &bar0->rx_queue_priority);
830
831         /*
832          * Allocating equal share of memory to all the
833          * configured Rings.
834          */
835         val64 = 0;
836         mem_size = 64;
837         for (i = 0; i < config->rx_ring_num; i++) {
838                 switch (i) {
839                 case 0:
840                         mem_share = (mem_size / config->rx_ring_num +
841                                      mem_size % config->rx_ring_num);
842                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
843                         continue;
844                 case 1:
845                         mem_share = (mem_size / config->rx_ring_num);
846                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
847                         continue;
848                 case 2:
849                         mem_share = (mem_size / config->rx_ring_num);
850                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
851                         continue;
852                 case 3:
853                         mem_share = (mem_size / config->rx_ring_num);
854                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
855                         continue;
856                 case 4:
857                         mem_share = (mem_size / config->rx_ring_num);
858                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
859                         continue;
860                 case 5:
861                         mem_share = (mem_size / config->rx_ring_num);
862                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
863                         continue;
864                 case 6:
865                         mem_share = (mem_size / config->rx_ring_num);
866                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
867                         continue;
868                 case 7:
869                         mem_share = (mem_size / config->rx_ring_num);
870                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
871                         continue;
872                 }
873         }
874         writeq(val64, &bar0->rx_queue_cfg);
875
876         /*
877          * Filling Tx round robin registers
878          * as per the number of FIFOs
879          */
880         switch (config->tx_fifo_num) {
881         case 1:
882                 val64 = 0x0000000000000000ULL;
883                 writeq(val64, &bar0->tx_w_round_robin_0);
884                 writeq(val64, &bar0->tx_w_round_robin_1);
885                 writeq(val64, &bar0->tx_w_round_robin_2);
886                 writeq(val64, &bar0->tx_w_round_robin_3);
887                 writeq(val64, &bar0->tx_w_round_robin_4);
888                 break;
889         case 2:
890                 val64 = 0x0000010000010000ULL;
891                 writeq(val64, &bar0->tx_w_round_robin_0);
892                 val64 = 0x0100000100000100ULL;
893                 writeq(val64, &bar0->tx_w_round_robin_1);
894                 val64 = 0x0001000001000001ULL;
895                 writeq(val64, &bar0->tx_w_round_robin_2);
896                 val64 = 0x0000010000010000ULL;
897                 writeq(val64, &bar0->tx_w_round_robin_3);
898                 val64 = 0x0100000000000000ULL;
899                 writeq(val64, &bar0->tx_w_round_robin_4);
900                 break;
901         case 3:
902                 val64 = 0x0001000102000001ULL;
903                 writeq(val64, &bar0->tx_w_round_robin_0);
904                 val64 = 0x0001020000010001ULL;
905                 writeq(val64, &bar0->tx_w_round_robin_1);
906                 val64 = 0x0200000100010200ULL;
907                 writeq(val64, &bar0->tx_w_round_robin_2);
908                 val64 = 0x0001000102000001ULL;
909                 writeq(val64, &bar0->tx_w_round_robin_3);
910                 val64 = 0x0001020000000000ULL;
911                 writeq(val64, &bar0->tx_w_round_robin_4);
912                 break;
913         case 4:
914                 val64 = 0x0001020300010200ULL;
915                 writeq(val64, &bar0->tx_w_round_robin_0);
916                 val64 = 0x0100000102030001ULL;
917                 writeq(val64, &bar0->tx_w_round_robin_1);
918                 val64 = 0x0200010000010203ULL;
919                 writeq(val64, &bar0->tx_w_round_robin_2);
920                 val64 = 0x0001020001000001ULL;
921                 writeq(val64, &bar0->tx_w_round_robin_3);
922                 val64 = 0x0203000100000000ULL;
923                 writeq(val64, &bar0->tx_w_round_robin_4);
924                 break;
925         case 5:
926                 val64 = 0x0001000203000102ULL;
927                 writeq(val64, &bar0->tx_w_round_robin_0);
928                 val64 = 0x0001020001030004ULL;
929                 writeq(val64, &bar0->tx_w_round_robin_1);
930                 val64 = 0x0001000203000102ULL;
931                 writeq(val64, &bar0->tx_w_round_robin_2);
932                 val64 = 0x0001020001030004ULL;
933                 writeq(val64, &bar0->tx_w_round_robin_3);
934                 val64 = 0x0001000000000000ULL;
935                 writeq(val64, &bar0->tx_w_round_robin_4);
936                 break;
937         case 6:
938                 val64 = 0x0001020304000102ULL;
939                 writeq(val64, &bar0->tx_w_round_robin_0);
940                 val64 = 0x0304050001020001ULL;
941                 writeq(val64, &bar0->tx_w_round_robin_1);
942                 val64 = 0x0203000100000102ULL;
943                 writeq(val64, &bar0->tx_w_round_robin_2);
944                 val64 = 0x0304000102030405ULL;
945                 writeq(val64, &bar0->tx_w_round_robin_3);
946                 val64 = 0x0001000200000000ULL;
947                 writeq(val64, &bar0->tx_w_round_robin_4);
948                 break;
949         case 7:
950                 val64 = 0x0001020001020300ULL;
951                 writeq(val64, &bar0->tx_w_round_robin_0);
952                 val64 = 0x0102030400010203ULL;
953                 writeq(val64, &bar0->tx_w_round_robin_1);
954                 val64 = 0x0405060001020001ULL;
955                 writeq(val64, &bar0->tx_w_round_robin_2);
956                 val64 = 0x0304050000010200ULL;
957                 writeq(val64, &bar0->tx_w_round_robin_3);
958                 val64 = 0x0102030000000000ULL;
959                 writeq(val64, &bar0->tx_w_round_robin_4);
960                 break;
961         case 8:
962                 val64 = 0x0001020300040105ULL;
963                 writeq(val64, &bar0->tx_w_round_robin_0);
964                 val64 = 0x0200030106000204ULL;
965                 writeq(val64, &bar0->tx_w_round_robin_1);
966                 val64 = 0x0103000502010007ULL;
967                 writeq(val64, &bar0->tx_w_round_robin_2);
968                 val64 = 0x0304010002060500ULL;
969                 writeq(val64, &bar0->tx_w_round_robin_3);
970                 val64 = 0x0103020400000000ULL;
971                 writeq(val64, &bar0->tx_w_round_robin_4);
972                 break;
973         }
974
975         /* Filling the Rx round robin registers as per the
976          * number of Rings and steering based on QoS.
977          */
978         switch (config->rx_ring_num) {
979         case 1:
980                 val64 = 0x8080808080808080ULL;
981                 writeq(val64, &bar0->rts_qos_steering);
982                 break;
983         case 2:
984                 val64 = 0x0000010000010000ULL;
985                 writeq(val64, &bar0->rx_w_round_robin_0);
986                 val64 = 0x0100000100000100ULL;
987                 writeq(val64, &bar0->rx_w_round_robin_1);
988                 val64 = 0x0001000001000001ULL;
989                 writeq(val64, &bar0->rx_w_round_robin_2);
990                 val64 = 0x0000010000010000ULL;
991                 writeq(val64, &bar0->rx_w_round_robin_3);
992                 val64 = 0x0100000000000000ULL;
993                 writeq(val64, &bar0->rx_w_round_robin_4);
994
995                 val64 = 0x8080808040404040ULL;
996                 writeq(val64, &bar0->rts_qos_steering);
997                 break;
998         case 3:
999                 val64 = 0x0001000102000001ULL;
1000                 writeq(val64, &bar0->rx_w_round_robin_0);
1001                 val64 = 0x0001020000010001ULL;
1002                 writeq(val64, &bar0->rx_w_round_robin_1);
1003                 val64 = 0x0200000100010200ULL;
1004                 writeq(val64, &bar0->rx_w_round_robin_2);
1005                 val64 = 0x0001000102000001ULL;
1006                 writeq(val64, &bar0->rx_w_round_robin_3);
1007                 val64 = 0x0001020000000000ULL;
1008                 writeq(val64, &bar0->rx_w_round_robin_4);
1009
1010                 val64 = 0x8080804040402020ULL;
1011                 writeq(val64, &bar0->rts_qos_steering);
1012                 break;
1013         case 4:
1014                 val64 = 0x0001020300010200ULL;
1015                 writeq(val64, &bar0->rx_w_round_robin_0);
1016                 val64 = 0x0100000102030001ULL;
1017                 writeq(val64, &bar0->rx_w_round_robin_1);
1018                 val64 = 0x0200010000010203ULL;
1019                 writeq(val64, &bar0->rx_w_round_robin_2);
1020                 val64 = 0x0001020001000001ULL;  
1021                 writeq(val64, &bar0->rx_w_round_robin_3);
1022                 val64 = 0x0203000100000000ULL;
1023                 writeq(val64, &bar0->rx_w_round_robin_4);
1024
1025                 val64 = 0x8080404020201010ULL;
1026                 writeq(val64, &bar0->rts_qos_steering);
1027                 break;
1028         case 5:
1029                 val64 = 0x0001000203000102ULL;
1030                 writeq(val64, &bar0->rx_w_round_robin_0);
1031                 val64 = 0x0001020001030004ULL;
1032                 writeq(val64, &bar0->rx_w_round_robin_1);
1033                 val64 = 0x0001000203000102ULL;
1034                 writeq(val64, &bar0->rx_w_round_robin_2);
1035                 val64 = 0x0001020001030004ULL;
1036                 writeq(val64, &bar0->rx_w_round_robin_3);
1037                 val64 = 0x0001000000000000ULL;
1038                 writeq(val64, &bar0->rx_w_round_robin_4);
1039
1040                 val64 = 0x8080404020201008ULL;
1041                 writeq(val64, &bar0->rts_qos_steering);
1042                 break;
1043         case 6:
1044                 val64 = 0x0001020304000102ULL;
1045                 writeq(val64, &bar0->rx_w_round_robin_0);
1046                 val64 = 0x0304050001020001ULL;
1047                 writeq(val64, &bar0->rx_w_round_robin_1);
1048                 val64 = 0x0203000100000102ULL;
1049                 writeq(val64, &bar0->rx_w_round_robin_2);
1050                 val64 = 0x0304000102030405ULL;
1051                 writeq(val64, &bar0->rx_w_round_robin_3);
1052                 val64 = 0x0001000200000000ULL;
1053                 writeq(val64, &bar0->rx_w_round_robin_4);
1054
1055                 val64 = 0x8080404020100804ULL;
1056                 writeq(val64, &bar0->rts_qos_steering);
1057                 break;
1058         case 7:
1059                 val64 = 0x0001020001020300ULL;
1060                 writeq(val64, &bar0->rx_w_round_robin_0);
1061                 val64 = 0x0102030400010203ULL;
1062                 writeq(val64, &bar0->rx_w_round_robin_1);
1063                 val64 = 0x0405060001020001ULL;
1064                 writeq(val64, &bar0->rx_w_round_robin_2);
1065                 val64 = 0x0304050000010200ULL;
1066                 writeq(val64, &bar0->rx_w_round_robin_3);
1067                 val64 = 0x0102030000000000ULL;
1068                 writeq(val64, &bar0->rx_w_round_robin_4);
1069
1070                 val64 = 0x8080402010080402ULL;
1071                 writeq(val64, &bar0->rts_qos_steering);
1072                 break;
1073         case 8:
1074                 val64 = 0x0001020300040105ULL;
1075                 writeq(val64, &bar0->rx_w_round_robin_0);
1076                 val64 = 0x0200030106000204ULL;
1077                 writeq(val64, &bar0->rx_w_round_robin_1);
1078                 val64 = 0x0103000502010007ULL;
1079                 writeq(val64, &bar0->rx_w_round_robin_2);
1080                 val64 = 0x0304010002060500ULL;
1081                 writeq(val64, &bar0->rx_w_round_robin_3);
1082                 val64 = 0x0103020400000000ULL;
1083                 writeq(val64, &bar0->rx_w_round_robin_4);
1084
1085                 val64 = 0x8040201008040201ULL;
1086                 writeq(val64, &bar0->rts_qos_steering);
1087                 break;
1088         }
1089
1090         /* UDP Fix */
1091         val64 = 0;
1092         for (i = 0; i < 8; i++)
1093                 writeq(val64, &bar0->rts_frm_len_n[i]);
1094
1095         /* Set the default rts frame length for the rings configured */
1096         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1097         for (i = 0 ; i < config->rx_ring_num ; i++)
1098                 writeq(val64, &bar0->rts_frm_len_n[i]);
1099
1100         /* Set the frame length for the configured rings
1101          * desired by the user
1102          */
1103         for (i = 0; i < config->rx_ring_num; i++) {
1104                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1105                  * specified frame length steering.
1106                  * If the user provides the frame length then program
1107                  * the rts_frm_len register for those values or else
1108                  * leave it as it is.
1109                  */
1110                 if (rts_frm_len[i] != 0) {
1111                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1112                                 &bar0->rts_frm_len_n[i]);
1113                 }
1114         }
1115
1116         /* Program statistics memory */
1117         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1118
1119         /*
1120          * Initializing the sampling rate for the device to calculate the
1121          * bandwidth utilization.
1122          */
1123         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1124             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1125         writeq(val64, &bar0->mac_link_util);
1126
1127
1128         /*
1129          * Initializing the Transmit and Receive Traffic Interrupt
1130          * Scheme.
1131          */
1132         /*
1133          * TTI Initialization. Default Tx timer gets us about
1134          * 250 interrupts per sec. Continuous interrupts are enabled
1135          * by default.
1136          */
1137         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1138             TTI_DATA1_MEM_TX_URNG_A(0xA) |
1139             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1140             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1141         if (use_continuous_tx_intrs)
1142                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1143         writeq(val64, &bar0->tti_data1_mem);
1144
1145         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1146             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1147             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1148         writeq(val64, &bar0->tti_data2_mem);
1149
1150         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1151         writeq(val64, &bar0->tti_command_mem);
1152
1153         /*
1154          * Once the operation completes, the Strobe bit of the command
1155          * register will be reset. We poll for this particular condition
1156          * We wait for a maximum of 500ms for the operation to complete,
1157          * if it's not complete by then we return error.
1158          */
1159         time = 0;
1160         while (TRUE) {
1161                 val64 = readq(&bar0->tti_command_mem);
1162                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1163                         break;
1164                 }
1165                 if (time > 10) {
1166                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1167                                   dev->name);
1168                         return -1;
1169                 }
1170                 msleep(50);
1171                 time++;
1172         }
1173
1174         /* RTI Initialization */
1175         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1176             RTI_DATA1_MEM_RX_URNG_A(0xA) |
1177             RTI_DATA1_MEM_RX_URNG_B(0x10) |
1178             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1179
1180         writeq(val64, &bar0->rti_data1_mem);
1181
1182         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1183             RTI_DATA2_MEM_RX_UFC_B(0x2) |
1184             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1185         writeq(val64, &bar0->rti_data2_mem);
1186
1187         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1188         writeq(val64, &bar0->rti_command_mem);
1189
1190         /*
1191          * Once the operation completes, the Strobe bit of the
1192          * command register will be reset. We poll for this
1193          * particular condition. We wait for a maximum of 500ms
1194          * for the operation to complete, if it's not complete
1195          * by then we return error.
1196          */
1197         time = 0;
1198         while (TRUE) {
1199                 val64 = readq(&bar0->rti_command_mem);
1200                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1201                         break;
1202                 }
1203                 if (time > 10) {
1204                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1205                                   dev->name);
1206                         return -1;
1207                 }
1208                 time++;
1209                 msleep(50);
1210         }
1211
1212         /*
1213          * Initializing proper values as Pause threshold into all
1214          * the 8 Queues on Rx side.
1215          */
1216         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1217         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1218
1219         /* Disable RMAC PAD STRIPPING */
1220         add = (void *) &bar0->mac_cfg;
1221         val64 = readq(&bar0->mac_cfg);
1222         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1223         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1224         writel((u32) (val64), add);
1225         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1226         writel((u32) (val64 >> 32), (add + 4));
1227         val64 = readq(&bar0->mac_cfg);
1228
1229         /*
1230          * Set the time value to be inserted in the pause frame
1231          * generated by xena.
1232          */
1233         val64 = readq(&bar0->rmac_pause_cfg);
1234         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1235         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1236         writeq(val64, &bar0->rmac_pause_cfg);
1237
1238         /*
1239          * Set the Threshold Limit for Generating the pause frame
1240          * If the amount of data in any Queue exceeds ratio of
1241          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1242          * pause frame is generated
1243          */
1244         val64 = 0;
1245         for (i = 0; i < 4; i++) {
1246                 val64 |=
1247                     (((u64) 0xFF00 | nic->mac_control.
1248                       mc_pause_threshold_q0q3)
1249                      << (i * 2 * 8));
1250         }
1251         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1252
1253         val64 = 0;
1254         for (i = 0; i < 4; i++) {
1255                 val64 |=
1256                     (((u64) 0xFF00 | nic->mac_control.
1257                       mc_pause_threshold_q4q7)
1258                      << (i * 2 * 8));
1259         }
1260         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1261
1262         /*
1263          * TxDMA will stop Read request if the number of read split has
1264          * exceeded the limit pointed by shared_splits
1265          */
1266         val64 = readq(&bar0->pic_control);
1267         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1268         writeq(val64, &bar0->pic_control);
1269
1270         return SUCCESS;
1271 }
1272
1273 /**
1274  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1275  *  @nic: device private variable,
1276  *  @mask: A mask indicating which Intr block must be modified and,
1277  *  @flag: A flag indicating whether to enable or disable the Intrs.
1278  *  Description: This function will either disable or enable the interrupts
1279  *  depending on the flag argument. The mask argument can be used to
1280  *  enable/disable any Intr block.
1281  *  Return Value: NONE.
1282  */
1283
1284 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1285 {
1286         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1287         register u64 val64 = 0, temp64 = 0;
1288
1289         /*  Top level interrupt classification */
1290         /*  PIC Interrupts */
1291         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1292                 /*  Enable PIC Intrs in the general intr mask register */
1293                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1294                 if (flag == ENABLE_INTRS) {
1295                         temp64 = readq(&bar0->general_int_mask);
1296                         temp64 &= ~((u64) val64);
1297                         writeq(temp64, &bar0->general_int_mask);
1298                         /*
1299                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1300                          * interrupts for now.
1301                          * TODO
1302                          */
1303                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1304                         /*
1305                          * No MSI Support is available presently, so TTI and
1306                          * RTI interrupts are also disabled.
1307                          */
1308                 } else if (flag == DISABLE_INTRS) {
1309                         /*
1310                          * Disable PIC Intrs in the general
1311                          * intr mask register
1312                          */
1313                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1314                         temp64 = readq(&bar0->general_int_mask);
1315                         val64 |= temp64;
1316                         writeq(val64, &bar0->general_int_mask);
1317                 }
1318         }
1319
1320         /*  DMA Interrupts */
1321         /*  Enabling/Disabling Tx DMA interrupts */
1322         if (mask & TX_DMA_INTR) {
1323                 /* Enable TxDMA Intrs in the general intr mask register */
1324                 val64 = TXDMA_INT_M;
1325                 if (flag == ENABLE_INTRS) {
1326                         temp64 = readq(&bar0->general_int_mask);
1327                         temp64 &= ~((u64) val64);
1328                         writeq(temp64, &bar0->general_int_mask);
1329                         /*
1330                          * Keep all interrupts other than PFC interrupt
1331                          * and PCC interrupt disabled in DMA level.
1332                          */
1333                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1334                                                       TXDMA_PCC_INT_M);
1335                         writeq(val64, &bar0->txdma_int_mask);
1336                         /*
1337                          * Enable only the MISC error 1 interrupt in PFC block
1338                          */
1339                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1340                         writeq(val64, &bar0->pfc_err_mask);
1341                         /*
1342                          * Enable only the FB_ECC error interrupt in PCC block
1343                          */
1344                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1345                         writeq(val64, &bar0->pcc_err_mask);
1346                 } else if (flag == DISABLE_INTRS) {
1347                         /*
1348                          * Disable TxDMA Intrs in the general intr mask
1349                          * register
1350                          */
1351                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1352                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1353                         temp64 = readq(&bar0->general_int_mask);
1354                         val64 |= temp64;
1355                         writeq(val64, &bar0->general_int_mask);
1356                 }
1357         }
1358
1359         /*  Enabling/Disabling Rx DMA interrupts */
1360         if (mask & RX_DMA_INTR) {
1361                 /*  Enable RxDMA Intrs in the general intr mask register */
1362                 val64 = RXDMA_INT_M;
1363                 if (flag == ENABLE_INTRS) {
1364                         temp64 = readq(&bar0->general_int_mask);
1365                         temp64 &= ~((u64) val64);
1366                         writeq(temp64, &bar0->general_int_mask);
1367                         /*
1368                          * All RxDMA block interrupts are disabled for now
1369                          * TODO
1370                          */
1371                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1372                 } else if (flag == DISABLE_INTRS) {
1373                         /*
1374                          * Disable RxDMA Intrs in the general intr mask
1375                          * register
1376                          */
1377                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1378                         temp64 = readq(&bar0->general_int_mask);
1379                         val64 |= temp64;
1380                         writeq(val64, &bar0->general_int_mask);
1381                 }
1382         }
1383
1384         /*  MAC Interrupts */
1385         /*  Enabling/Disabling MAC interrupts */
1386         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1387                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1388                 if (flag == ENABLE_INTRS) {
1389                         temp64 = readq(&bar0->general_int_mask);
1390                         temp64 &= ~((u64) val64);
1391                         writeq(temp64, &bar0->general_int_mask);
1392                         /*
1393                          * All MAC block error interrupts are disabled for now
1394                          * except the link status change interrupt.
1395                          * TODO
1396                          */
1397                         val64 = MAC_INT_STATUS_RMAC_INT;
1398                         temp64 = readq(&bar0->mac_int_mask);
1399                         temp64 &= ~((u64) val64);
1400                         writeq(temp64, &bar0->mac_int_mask);
1401
1402                         val64 = readq(&bar0->mac_rmac_err_mask);
1403                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1404                         writeq(val64, &bar0->mac_rmac_err_mask);
1405                 } else if (flag == DISABLE_INTRS) {
1406                         /*
1407                          * Disable MAC Intrs in the general intr mask register
1408                          */
1409                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1410                         writeq(DISABLE_ALL_INTRS,
1411                                &bar0->mac_rmac_err_mask);
1412
1413                         temp64 = readq(&bar0->general_int_mask);
1414                         val64 |= temp64;
1415                         writeq(val64, &bar0->general_int_mask);
1416                 }
1417         }
1418
1419         /*  XGXS Interrupts */
1420         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1421                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1422                 if (flag == ENABLE_INTRS) {
1423                         temp64 = readq(&bar0->general_int_mask);
1424                         temp64 &= ~((u64) val64);
1425                         writeq(temp64, &bar0->general_int_mask);
1426                         /*
1427                          * All XGXS block error interrupts are disabled for now
1428                          * TODO
1429                          */
1430                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1431                 } else if (flag == DISABLE_INTRS) {
1432                         /*
1433                          * Disable MC Intrs in the general intr mask register
1434                          */
1435                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1436                         temp64 = readq(&bar0->general_int_mask);
1437                         val64 |= temp64;
1438                         writeq(val64, &bar0->general_int_mask);
1439                 }
1440         }
1441
1442         /*  Memory Controller(MC) interrupts */
1443         if (mask & MC_INTR) {
1444                 val64 = MC_INT_M;
1445                 if (flag == ENABLE_INTRS) {
1446                         temp64 = readq(&bar0->general_int_mask);
1447                         temp64 &= ~((u64) val64);
1448                         writeq(temp64, &bar0->general_int_mask);
1449                         /*
1450                          * Enable all MC Intrs.
1451                          */
1452                         writeq(0x0, &bar0->mc_int_mask);
1453                         writeq(0x0, &bar0->mc_err_mask);
1454                 } else if (flag == DISABLE_INTRS) {
1455                         /*
1456                          * Disable MC Intrs in the general intr mask register
1457                          */
1458                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1459                         temp64 = readq(&bar0->general_int_mask);
1460                         val64 |= temp64;
1461                         writeq(val64, &bar0->general_int_mask);
1462                 }
1463         }
1464
1465
1466         /*  Tx traffic interrupts */
1467         if (mask & TX_TRAFFIC_INTR) {
1468                 val64 = TXTRAFFIC_INT_M;
1469                 if (flag == ENABLE_INTRS) {
1470                         temp64 = readq(&bar0->general_int_mask);
1471                         temp64 &= ~((u64) val64);
1472                         writeq(temp64, &bar0->general_int_mask);
1473                         /*
1474                          * Enable all the Tx side interrupts
1475                          * writing 0 Enables all 64 TX interrupt levels
1476                          */
1477                         writeq(0x0, &bar0->tx_traffic_mask);
1478                 } else if (flag == DISABLE_INTRS) {
1479                         /*
1480                          * Disable Tx Traffic Intrs in the general intr mask
1481                          * register.
1482                          */
1483                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1484                         temp64 = readq(&bar0->general_int_mask);
1485                         val64 |= temp64;
1486                         writeq(val64, &bar0->general_int_mask);
1487                 }
1488         }
1489
1490         /*  Rx traffic interrupts */
1491         if (mask & RX_TRAFFIC_INTR) {
1492                 val64 = RXTRAFFIC_INT_M;
1493                 if (flag == ENABLE_INTRS) {
1494                         temp64 = readq(&bar0->general_int_mask);
1495                         temp64 &= ~((u64) val64);
1496                         writeq(temp64, &bar0->general_int_mask);
1497                         /* writing 0 Enables all 8 RX interrupt levels */
1498                         writeq(0x0, &bar0->rx_traffic_mask);
1499                 } else if (flag == DISABLE_INTRS) {
1500                         /*
1501                          * Disable Rx Traffic Intrs in the general intr mask
1502                          * register.
1503                          */
1504                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1505                         temp64 = readq(&bar0->general_int_mask);
1506                         val64 |= temp64;
1507                         writeq(val64, &bar0->general_int_mask);
1508                 }
1509         }
1510 }
1511
1512 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1513 {
1514         int ret = 0;
1515
1516         if (flag == FALSE) {
1517                 if (rev_id >= 4) {
1518                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1519                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1520                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1521                                 ret = 1;
1522                         }
1523                 } else {
1524                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1525                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1526                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1527                                 ret = 1;
1528                         }
1529                 }
1530         } else {
1531                 if (rev_id >= 4) {
1532                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1533                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1534                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1535                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1536                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1537                                 ret = 1;
1538                         }
1539                 } else {
1540                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1541                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1542                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1543                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1544                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1545                                 ret = 1;
1546                         }
1547                 }
1548         }
1549
1550         return ret;
1551 }
1552 /**
1553  *  verify_xena_quiescence - Checks whether the H/W is ready
1554  *  @val64 :  Value read from adapter status register.
1555  *  @flag : indicates if the adapter enable bit was ever written once
1556  *  before.
1557  *  Description: Returns whether the H/W is ready to go or not. Depending
1558  *  on whether adapter enable bit was written or not the comparison
1559  *  differs and the calling function passes the input argument flag to
1560  *  indicate this.
1561  *  Return: 1 If xena is quiescence
1562  *          0 If Xena is not quiescence
1563  */
1564
1565 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1566 {
1567         int ret = 0;
1568         u64 tmp64 = ~((u64) val64);
1569         int rev_id = get_xena_rev_id(sp->pdev);
1570
1571         if (!
1572             (tmp64 &
1573              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1574               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1575               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1576               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1577               ADAPTER_STATUS_P_PLL_LOCK))) {
1578                 ret = check_prc_pcc_state(val64, flag, rev_id);
1579         }
1580
1581         return ret;
1582 }
1583
1584 /**
1585  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1586  * @sp: Pointer to device specifc structure
1587  * Description :
1588  * New procedure to clear mac address reading  problems on Alpha platforms
1589  *
1590  */
1591
1592 void fix_mac_address(nic_t * sp)
1593 {
1594         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1595         u64 val64;
1596         int i = 0;
1597
1598         while (fix_mac[i] != END_SIGN) {
1599                 writeq(fix_mac[i++], &bar0->gpio_control);
1600                 udelay(10);
1601                 val64 = readq(&bar0->gpio_control);
1602         }
1603 }
1604
1605 /**
1606  *  start_nic - Turns the device on
1607  *  @nic : device private variable.
1608  *  Description:
1609  *  This function actually turns the device on. Before this  function is
1610  *  called,all Registers are configured from their reset states
1611  *  and shared memory is allocated but the NIC is still quiescent. On
1612  *  calling this function, the device interrupts are cleared and the NIC is
1613  *  literally switched on by writing into the adapter control register.
1614  *  Return Value:
1615  *  SUCCESS on success and -1 on failure.
1616  */
1617
1618 static int start_nic(struct s2io_nic *nic)
1619 {
1620         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1621         struct net_device *dev = nic->dev;
1622         register u64 val64 = 0;
1623         u16 interruptible;
1624         u16 subid, i;
1625         mac_info_t *mac_control;
1626         struct config_param *config;
1627
1628         mac_control = &nic->mac_control;
1629         config = &nic->config;
1630
1631         /*  PRC Initialization and configuration */
1632         for (i = 0; i < config->rx_ring_num; i++) {
1633                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1634                        &bar0->prc_rxd0_n[i]);
1635
1636                 val64 = readq(&bar0->prc_ctrl_n[i]);
1637 #ifndef CONFIG_2BUFF_MODE
1638                 val64 |= PRC_CTRL_RC_ENABLED;
1639 #else
1640                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1641 #endif
1642                 writeq(val64, &bar0->prc_ctrl_n[i]);
1643         }
1644
1645 #ifdef CONFIG_2BUFF_MODE
1646         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1647         val64 = readq(&bar0->rx_pa_cfg);
1648         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1649         writeq(val64, &bar0->rx_pa_cfg);
1650 #endif
1651
1652         /*
1653          * Enabling MC-RLDRAM. After enabling the device, we timeout
1654          * for around 100ms, which is approximately the time required
1655          * for the device to be ready for operation.
1656          */
1657         val64 = readq(&bar0->mc_rldram_mrs);
1658         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1659         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1660         val64 = readq(&bar0->mc_rldram_mrs);
1661
1662         msleep(100);    /* Delay by around 100 ms. */
1663
1664         /* Enabling ECC Protection. */
1665         val64 = readq(&bar0->adapter_control);
1666         val64 &= ~ADAPTER_ECC_EN;
1667         writeq(val64, &bar0->adapter_control);
1668
1669         /*
1670          * Clearing any possible Link state change interrupts that
1671          * could have popped up just before Enabling the card.
1672          */
1673         val64 = readq(&bar0->mac_rmac_err_reg);
1674         if (val64)
1675                 writeq(val64, &bar0->mac_rmac_err_reg);
1676
1677         /*
1678          * Verify if the device is ready to be enabled, if so enable
1679          * it.
1680          */
1681         val64 = readq(&bar0->adapter_status);
1682         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1683                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1684                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1685                           (unsigned long long) val64);
1686                 return FAILURE;
1687         }
1688
1689         /*  Enable select interrupts */
1690         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1691             RX_MAC_INTR | MC_INTR;
1692         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1693
1694         /*
1695          * With some switches, link might be already up at this point.
1696          * Because of this weird behavior, when we enable laser,
1697          * we may not get link. We need to handle this. We cannot
1698          * figure out which switch is misbehaving. So we are forced to
1699          * make a global change.
1700          */
1701
1702         /* Enabling Laser. */
1703         val64 = readq(&bar0->adapter_control);
1704         val64 |= ADAPTER_EOI_TX_ON;
1705         writeq(val64, &bar0->adapter_control);
1706
1707         /* SXE-002: Initialize link and activity LED */
1708         subid = nic->pdev->subsystem_device;
1709         if ((subid & 0xFF) >= 0x07) {
1710                 val64 = readq(&bar0->gpio_control);
1711                 val64 |= 0x0000800000000000ULL;
1712                 writeq(val64, &bar0->gpio_control);
1713                 val64 = 0x0411040400000000ULL;
1714                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1715         }
1716
1717         /*
1718          * Don't see link state interrupts on certain switches, so
1719          * directly scheduling a link state task from here.
1720          */
1721         schedule_work(&nic->set_link_task);
1722
1723         return SUCCESS;
1724 }
1725
1726 /**
1727  *  free_tx_buffers - Free all queued Tx buffers
1728  *  @nic : device private variable.
1729  *  Description:
1730  *  Free all queued Tx buffers.
1731  *  Return Value: void
1732 */
1733
1734 static void free_tx_buffers(struct s2io_nic *nic)
1735 {
1736         struct net_device *dev = nic->dev;
1737         struct sk_buff *skb;
1738         TxD_t *txdp;
1739         int i, j;
1740         mac_info_t *mac_control;
1741         struct config_param *config;
1742         int cnt = 0, frg_cnt;
1743
1744         mac_control = &nic->mac_control;
1745         config = &nic->config;
1746
1747         for (i = 0; i < config->tx_fifo_num; i++) {
1748                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1749                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1750                             list_virt_addr;
1751                         skb =
1752                             (struct sk_buff *) ((unsigned long) txdp->
1753                                                 Host_Control);
1754                         if (skb == NULL) {
1755                                 memset(txdp, 0, sizeof(TxD_t) *
1756                                        config->max_txds);
1757                                 continue;
1758                         }
1759                         frg_cnt = skb_shinfo(skb)->nr_frags;
1760                         pci_unmap_single(nic->pdev, (dma_addr_t)
1761                                          txdp->Buffer_Pointer,
1762                                          skb->len - skb->data_len,
1763                                          PCI_DMA_TODEVICE);
1764                         if (frg_cnt) {
1765                                 TxD_t *temp;
1766                                 temp = txdp;
1767                                 txdp++;
1768                                 for (j = 0; j < frg_cnt; j++, txdp++) {
1769                                         skb_frag_t *frag =
1770                                             &skb_shinfo(skb)->frags[j];
1771                                         pci_unmap_page(nic->pdev,
1772                                                        (dma_addr_t)
1773                                                        txdp->
1774                                                        Buffer_Pointer,
1775                                                        frag->size,
1776                                                        PCI_DMA_TODEVICE);
1777                                 }
1778                                 txdp = temp;
1779                         }
1780                         dev_kfree_skb(skb);
1781                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1782                         cnt++;
1783                 }
1784                 DBG_PRINT(INTR_DBG,
1785                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1786                           dev->name, cnt, i);
1787                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1788                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1789         }
1790 }
1791
1792 /**
1793  *   stop_nic -  To stop the nic
1794  *   @nic ; device private variable.
1795  *   Description:
1796  *   This function does exactly the opposite of what the start_nic()
1797  *   function does. This function is called to stop the device.
1798  *   Return Value:
1799  *   void.
1800  */
1801
1802 static void stop_nic(struct s2io_nic *nic)
1803 {
1804         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1805         register u64 val64 = 0;
1806         u16 interruptible, i;
1807         mac_info_t *mac_control;
1808         struct config_param *config;
1809
1810         mac_control = &nic->mac_control;
1811         config = &nic->config;
1812
1813         /*  Disable all interrupts */
1814         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1815             RX_MAC_INTR | MC_INTR;
1816         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1817
1818         /*  Disable PRCs */
1819         for (i = 0; i < config->rx_ring_num; i++) {
1820                 val64 = readq(&bar0->prc_ctrl_n[i]);
1821                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1822                 writeq(val64, &bar0->prc_ctrl_n[i]);
1823         }
1824 }
1825
1826 /**
1827  *  fill_rx_buffers - Allocates the Rx side skbs
1828  *  @nic:  device private variable
1829  *  @ring_no: ring number
1830  *  Description:
1831  *  The function allocates Rx side skbs and puts the physical
1832  *  address of these buffers into the RxD buffer pointers, so that the NIC
1833  *  can DMA the received frame into these locations.
1834  *  The NIC supports 3 receive modes, viz
1835  *  1. single buffer,
1836  *  2. three buffer and
1837  *  3. Five buffer modes.
1838  *  Each mode defines how many fragments the received frame will be split
1839  *  up into by the NIC. The frame is split into L3 header, L4 Header,
1840  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1841  *  is split into 3 fragments. As of now only single buffer mode is
1842  *  supported.
1843  *   Return Value:
1844  *  SUCCESS on success or an appropriate -ve value on failure.
1845  */
1846
1847 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1848 {
1849         struct net_device *dev = nic->dev;
1850         struct sk_buff *skb;
1851         RxD_t *rxdp;
1852         int off, off1, size, block_no, block_no1;
1853         int offset, offset1;
1854         u32 alloc_tab = 0;
1855         u32 alloc_cnt;
1856         mac_info_t *mac_control;
1857         struct config_param *config;
1858 #ifdef CONFIG_2BUFF_MODE
1859         RxD_t *rxdpnext;
1860         int nextblk;
1861         u64 tmp;
1862         buffAdd_t *ba;
1863         dma_addr_t rxdpphys;
1864 #endif
1865 #ifndef CONFIG_S2IO_NAPI
1866         unsigned long flags;
1867 #endif
1868
1869         mac_control = &nic->mac_control;
1870         config = &nic->config;
1871         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1872             atomic_read(&nic->rx_bufs_left[ring_no]);
1873         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1874             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1875
1876         while (alloc_tab < alloc_cnt) {
1877                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1878                     block_index;
1879                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1880                     block_index;
1881                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1882                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1883 #ifndef CONFIG_2BUFF_MODE
1884                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1885                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1886 #else
1887                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1888                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1889 #endif
1890
1891                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1892                     block_virt_addr + off;
1893                 if ((offset == offset1) && (rxdp->Host_Control)) {
1894                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1895                         DBG_PRINT(INTR_DBG, " info equated\n");
1896                         goto end;
1897                 }
1898 #ifndef CONFIG_2BUFF_MODE
1899                 if (rxdp->Control_1 == END_OF_BLOCK) {
1900                         mac_control->rings[ring_no].rx_curr_put_info.
1901                             block_index++;
1902                         mac_control->rings[ring_no].rx_curr_put_info.
1903                             block_index %= mac_control->rings[ring_no].block_count;
1904                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
1905                                 block_index;
1906                         off++;
1907                         off %= (MAX_RXDS_PER_BLOCK + 1);
1908                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1909                             off;
1910                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1911                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1912                                   dev->name, rxdp);
1913                 }
1914 #ifndef CONFIG_S2IO_NAPI
1915                 spin_lock_irqsave(&nic->put_lock, flags);
1916                 mac_control->rings[ring_no].put_pos =
1917                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1918                 spin_unlock_irqrestore(&nic->put_lock, flags);
1919 #endif
1920 #else
1921                 if (rxdp->Host_Control == END_OF_BLOCK) {
1922                         mac_control->rings[ring_no].rx_curr_put_info.
1923                             block_index++;
1924                         mac_control->rings[ring_no].rx_curr_put_info.block_index
1925                             %= mac_control->rings[ring_no].block_count;
1926                         block_no = mac_control->rings[ring_no].rx_curr_put_info
1927                             .block_index;
1928                         off = 0;
1929                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1930                                   dev->name, block_no,
1931                                   (unsigned long long) rxdp->Control_1);
1932                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1933                             off;
1934                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1935                             block_virt_addr;
1936                 }
1937 #ifndef CONFIG_S2IO_NAPI
1938                 spin_lock_irqsave(&nic->put_lock, flags);
1939                 mac_control->rings[ring_no].put_pos = (block_no *
1940                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
1941                 spin_unlock_irqrestore(&nic->put_lock, flags);
1942 #endif
1943 #endif
1944
1945 #ifndef CONFIG_2BUFF_MODE
1946                 if (rxdp->Control_1 & RXD_OWN_XENA)
1947 #else
1948                 if (rxdp->Control_2 & BIT(0))
1949 #endif
1950                 {
1951                         mac_control->rings[ring_no].rx_curr_put_info.
1952                             offset = off;
1953                         goto end;
1954                 }
1955 #ifdef  CONFIG_2BUFF_MODE
1956                 /*
1957                  * RxDs Spanning cache lines will be replenished only
1958                  * if the succeeding RxD is also owned by Host. It
1959                  * will always be the ((8*i)+3) and ((8*i)+6)
1960                  * descriptors for the 48 byte descriptor. The offending
1961                  * decsriptor is of-course the 3rd descriptor.
1962                  */
1963                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1964                     block_dma_addr + (off * sizeof(RxD_t));
1965                 if (((u64) (rxdpphys)) % 128 > 80) {
1966                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1967                             block_virt_addr + (off + 1);
1968                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
1969                                 nextblk = (block_no + 1) %
1970                                     (mac_control->rings[ring_no].block_count);
1971                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
1972                                     [nextblk].block_virt_addr;
1973                         }
1974                         if (rxdpnext->Control_2 & BIT(0))
1975                                 goto end;
1976                 }
1977 #endif
1978
1979 #ifndef CONFIG_2BUFF_MODE
1980                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1981 #else
1982                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1983 #endif
1984                 if (!skb) {
1985                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1986                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1987                         return -ENOMEM;
1988                 }
1989 #ifndef CONFIG_2BUFF_MODE
1990                 skb_reserve(skb, NET_IP_ALIGN);
1991                 memset(rxdp, 0, sizeof(RxD_t));
1992                 rxdp->Buffer0_ptr = pci_map_single
1993                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1994                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1995                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1996                 rxdp->Host_Control = (unsigned long) (skb);
1997                 rxdp->Control_1 |= RXD_OWN_XENA;
1998                 off++;
1999                 off %= (MAX_RXDS_PER_BLOCK + 1);
2000                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2001 #else
2002                 ba = &mac_control->rings[ring_no].ba[block_no][off];
2003                 skb_reserve(skb, BUF0_LEN);
2004                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2005                 if (tmp)
2006                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2007
2008                 memset(rxdp, 0, sizeof(RxD_t));
2009                 rxdp->Buffer2_ptr = pci_map_single
2010                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2011                      PCI_DMA_FROMDEVICE);
2012                 rxdp->Buffer0_ptr =
2013                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2014                                    PCI_DMA_FROMDEVICE);
2015                 rxdp->Buffer1_ptr =
2016                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2017                                    PCI_DMA_FROMDEVICE);
2018
2019                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2020                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2021                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2022                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
2023                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2024                 rxdp->Control_1 |= RXD_OWN_XENA;
2025                 off++;
2026                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2027 #endif
2028                 rxdp->Control_2 |= SET_RXD_MARKER;
2029
2030                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2031                 alloc_tab++;
2032         }
2033
2034       end:
2035         return SUCCESS;
2036 }
2037
2038 /**
2039  *  free_rx_buffers - Frees all Rx buffers
2040  *  @sp: device private variable.
2041  *  Description:
2042  *  This function will free all Rx buffers allocated by host.
2043  *  Return Value:
2044  *  NONE.
2045  */
2046
2047 static void free_rx_buffers(struct s2io_nic *sp)
2048 {
2049         struct net_device *dev = sp->dev;
2050         int i, j, blk = 0, off, buf_cnt = 0;
2051         RxD_t *rxdp;
2052         struct sk_buff *skb;
2053         mac_info_t *mac_control;
2054         struct config_param *config;
2055 #ifdef CONFIG_2BUFF_MODE
2056         buffAdd_t *ba;
2057 #endif
2058
2059         mac_control = &sp->mac_control;
2060         config = &sp->config;
2061
2062         for (i = 0; i < config->rx_ring_num; i++) {
2063                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2064                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2065                         rxdp = mac_control->rings[i].rx_blocks[blk].
2066                                 block_virt_addr + off;
2067
2068 #ifndef CONFIG_2BUFF_MODE
2069                         if (rxdp->Control_1 == END_OF_BLOCK) {
2070                                 rxdp =
2071                                     (RxD_t *) ((unsigned long) rxdp->
2072                                                Control_2);
2073                                 j++;
2074                                 blk++;
2075                         }
2076 #else
2077                         if (rxdp->Host_Control == END_OF_BLOCK) {
2078                                 blk++;
2079                                 continue;
2080                         }
2081 #endif
2082
2083                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2084                                 memset(rxdp, 0, sizeof(RxD_t));
2085                                 continue;
2086                         }
2087
2088                         skb =
2089                             (struct sk_buff *) ((unsigned long) rxdp->
2090                                                 Host_Control);
2091                         if (skb) {
2092 #ifndef CONFIG_2BUFF_MODE
2093                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2094                                                  rxdp->Buffer0_ptr,
2095                                                  dev->mtu +
2096                                                  HEADER_ETHERNET_II_802_3_SIZE
2097                                                  + HEADER_802_2_SIZE +
2098                                                  HEADER_SNAP_SIZE,
2099                                                  PCI_DMA_FROMDEVICE);
2100 #else
2101                                 ba = &mac_control->rings[i].ba[blk][off];
2102                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2103                                                  rxdp->Buffer0_ptr,
2104                                                  BUF0_LEN,
2105                                                  PCI_DMA_FROMDEVICE);
2106                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2107                                                  rxdp->Buffer1_ptr,
2108                                                  BUF1_LEN,
2109                                                  PCI_DMA_FROMDEVICE);
2110                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2111                                                  rxdp->Buffer2_ptr,
2112                                                  dev->mtu + BUF0_LEN + 4,
2113                                                  PCI_DMA_FROMDEVICE);
2114 #endif
2115                                 dev_kfree_skb(skb);
2116                                 atomic_dec(&sp->rx_bufs_left[i]);
2117                                 buf_cnt++;
2118                         }
2119                         memset(rxdp, 0, sizeof(RxD_t));
2120                 }
2121                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2122                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2123                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2124                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2125                 atomic_set(&sp->rx_bufs_left[i], 0);
2126                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2127                           dev->name, buf_cnt, i);
2128         }
2129 }
2130
2131 /**
2132  * s2io_poll - Rx interrupt handler for NAPI support
2133  * @dev : pointer to the device structure.
2134  * @budget : The number of packets that were budgeted to be processed
2135  * during  one pass through the 'Poll" function.
2136  * Description:
2137  * Comes into picture only if NAPI support has been incorporated. It does
2138  * the same thing that rx_intr_handler does, but not in a interrupt context
2139  * also It will process only a given number of packets.
2140  * Return value:
2141  * 0 on success and 1 if there are No Rx packets to be processed.
2142  */
2143
2144 #if defined(CONFIG_S2IO_NAPI)
2145 static int s2io_poll(struct net_device *dev, int *budget)
2146 {
2147         nic_t *nic = dev->priv;
2148         int pkt_cnt = 0, org_pkts_to_process;
2149         mac_info_t *mac_control;
2150         struct config_param *config;
2151         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2152         u64 val64;
2153         int i;
2154
2155         atomic_inc(&nic->isr_cnt);
2156         mac_control = &nic->mac_control;
2157         config = &nic->config;
2158
2159         nic->pkts_to_process = *budget;
2160         if (nic->pkts_to_process > dev->quota)
2161                 nic->pkts_to_process = dev->quota;
2162         org_pkts_to_process = nic->pkts_to_process;
2163
2164         val64 = readq(&bar0->rx_traffic_int);
2165         writeq(val64, &bar0->rx_traffic_int);
2166
2167         for (i = 0; i < config->rx_ring_num; i++) {
2168                 rx_intr_handler(&mac_control->rings[i]);
2169                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2170                 if (!nic->pkts_to_process) {
2171                         /* Quota for the current iteration has been met */
2172                         goto no_rx;
2173                 }
2174         }
2175         if (!pkt_cnt)
2176                 pkt_cnt = 1;
2177
2178         dev->quota -= pkt_cnt;
2179         *budget -= pkt_cnt;
2180         netif_rx_complete(dev);
2181
2182         for (i = 0; i < config->rx_ring_num; i++) {
2183                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2184                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2185                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2186                         break;
2187                 }
2188         }
2189         /* Re enable the Rx interrupts. */
2190         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2191         atomic_dec(&nic->isr_cnt);
2192         return 0;
2193
2194 no_rx:
2195         dev->quota -= pkt_cnt;
2196         *budget -= pkt_cnt;
2197
2198         for (i = 0; i < config->rx_ring_num; i++) {
2199                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2200                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2201                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2202                         break;
2203                 }
2204         }
2205         atomic_dec(&nic->isr_cnt);
2206         return 1;
2207 }
2208 #endif
2209
2210 /**
2211  *  rx_intr_handler - Rx interrupt handler
2212  *  @nic: device private variable.
2213  *  Description:
2214  *  If the interrupt is because of a received frame or if the
2215  *  receive ring contains fresh as yet un-processed frames,this function is
2216  *  called. It picks out the RxD at which place the last Rx processing had
2217  *  stopped and sends the skb to the OSM's Rx handler and then increments
2218  *  the offset.
2219  *  Return Value:
2220  *  NONE.
2221  */
2222 static void rx_intr_handler(ring_info_t *ring_data)
2223 {
2224         nic_t *nic = ring_data->nic;
2225         struct net_device *dev = (struct net_device *) nic->dev;
2226         int get_block, get_offset, put_block, put_offset, ring_bufs;
2227         rx_curr_get_info_t get_info, put_info;
2228         RxD_t *rxdp;
2229         struct sk_buff *skb;
2230 #ifndef CONFIG_S2IO_NAPI
2231         int pkt_cnt = 0;
2232 #endif
2233         spin_lock(&nic->rx_lock);
2234         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2235                 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2236                           __FUNCTION__, dev->name);
2237                 spin_unlock(&nic->rx_lock);
2238         }
2239
2240         get_info = ring_data->rx_curr_get_info;
2241         get_block = get_info.block_index;
2242         put_info = ring_data->rx_curr_put_info;
2243         put_block = put_info.block_index;
2244         ring_bufs = get_info.ring_len+1;
2245         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2246                     get_info.offset;
2247         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2248                 get_info.offset;
2249 #ifndef CONFIG_S2IO_NAPI
2250         spin_lock(&nic->put_lock);
2251         put_offset = ring_data->put_pos;
2252         spin_unlock(&nic->put_lock);
2253 #else
2254         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2255                 put_info.offset;
2256 #endif
2257         while (RXD_IS_UP2DT(rxdp) &&
2258                (((get_offset + 1) % ring_bufs) != put_offset)) {
2259                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2260                 if (skb == NULL) {
2261                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2262                                   dev->name);
2263                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2264                         spin_unlock(&nic->rx_lock);
2265                         return;
2266                 }
2267 #ifndef CONFIG_2BUFF_MODE
2268                 pci_unmap_single(nic->pdev, (dma_addr_t)
2269                                  rxdp->Buffer0_ptr,
2270                                  dev->mtu +
2271                                  HEADER_ETHERNET_II_802_3_SIZE +
2272                                  HEADER_802_2_SIZE +
2273                                  HEADER_SNAP_SIZE,
2274                                  PCI_DMA_FROMDEVICE);
2275 #else
2276                 pci_unmap_single(nic->pdev, (dma_addr_t)
2277                                  rxdp->Buffer0_ptr,
2278                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2279                 pci_unmap_single(nic->pdev, (dma_addr_t)
2280                                  rxdp->Buffer1_ptr,
2281                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2282                 pci_unmap_single(nic->pdev, (dma_addr_t)
2283                                  rxdp->Buffer2_ptr,
2284                                  dev->mtu + BUF0_LEN + 4,
2285                                  PCI_DMA_FROMDEVICE);
2286 #endif
2287                 rx_osm_handler(ring_data, rxdp);
2288                 get_info.offset++;
2289                 ring_data->rx_curr_get_info.offset =
2290                     get_info.offset;
2291                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2292                     get_info.offset;
2293                 if (get_info.offset &&
2294                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2295                         get_info.offset = 0;
2296                         ring_data->rx_curr_get_info.offset
2297                             = get_info.offset;
2298                         get_block++;
2299                         get_block %= ring_data->block_count;
2300                         ring_data->rx_curr_get_info.block_index
2301                             = get_block;
2302                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2303                 }
2304
2305                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2306                             get_info.offset;
2307 #ifdef CONFIG_S2IO_NAPI
2308                 nic->pkts_to_process -= 1;
2309                 if (!nic->pkts_to_process)
2310                         break;
2311 #else
2312                 pkt_cnt++;
2313                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2314                         break;
2315 #endif
2316         }
2317         spin_unlock(&nic->rx_lock);
2318 }
2319
2320 /**
2321  *  tx_intr_handler - Transmit interrupt handler
2322  *  @nic : device private variable
2323  *  Description:
2324  *  If an interrupt was raised to indicate DMA complete of the
2325  *  Tx packet, this function is called. It identifies the last TxD
2326  *  whose buffer was freed and frees all skbs whose data have already
2327  *  DMA'ed into the NICs internal memory.
2328  *  Return Value:
2329  *  NONE
2330  */
2331
2332 static void tx_intr_handler(fifo_info_t *fifo_data)
2333 {
2334         nic_t *nic = fifo_data->nic;
2335         struct net_device *dev = (struct net_device *) nic->dev;
2336         tx_curr_get_info_t get_info, put_info;
2337         struct sk_buff *skb;
2338         TxD_t *txdlp;
2339         u16 j, frg_cnt;
2340
2341         get_info = fifo_data->tx_curr_get_info;
2342         put_info = fifo_data->tx_curr_put_info;
2343         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2344             list_virt_addr;
2345         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2346                (get_info.offset != put_info.offset) &&
2347                (txdlp->Host_Control)) {
2348                 /* Check for TxD errors */
2349                 if (txdlp->Control_1 & TXD_T_CODE) {
2350                         unsigned long long err;
2351                         err = txdlp->Control_1 & TXD_T_CODE;
2352                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2353                                   err);
2354                 }
2355
2356                 skb = (struct sk_buff *) ((unsigned long)
2357                                 txdlp->Host_Control);
2358                 if (skb == NULL) {
2359                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2360                         __FUNCTION__);
2361                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2362                         return;
2363                 }
2364
2365                 frg_cnt = skb_shinfo(skb)->nr_frags;
2366                 nic->tx_pkt_count++;
2367
2368                 pci_unmap_single(nic->pdev, (dma_addr_t)
2369                                  txdlp->Buffer_Pointer,
2370                                  skb->len - skb->data_len,
2371                                  PCI_DMA_TODEVICE);
2372                 if (frg_cnt) {
2373                         TxD_t *temp;
2374                         temp = txdlp;
2375                         txdlp++;
2376                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2377                                 skb_frag_t *frag =
2378                                     &skb_shinfo(skb)->frags[j];
2379                                 pci_unmap_page(nic->pdev,
2380                                                (dma_addr_t)
2381                                                txdlp->
2382                                                Buffer_Pointer,
2383                                                frag->size,
2384                                                PCI_DMA_TODEVICE);
2385                         }
2386                         txdlp = temp;
2387                 }
2388                 memset(txdlp, 0,
2389                        (sizeof(TxD_t) * fifo_data->max_txds));
2390
2391                 /* Updating the statistics block */
2392                 nic->stats.tx_bytes += skb->len;
2393                 dev_kfree_skb_irq(skb);
2394
2395                 get_info.offset++;
2396                 get_info.offset %= get_info.fifo_len + 1;
2397                 txdlp = (TxD_t *) fifo_data->list_info
2398                     [get_info.offset].list_virt_addr;
2399                 fifo_data->tx_curr_get_info.offset =
2400                     get_info.offset;
2401         }
2402
2403         spin_lock(&nic->tx_lock);
2404         if (netif_queue_stopped(dev))
2405                 netif_wake_queue(dev);
2406         spin_unlock(&nic->tx_lock);
2407 }
2408
2409 /**
2410  *  alarm_intr_handler - Alarm Interrrupt handler
2411  *  @nic: device private variable
2412  *  Description: If the interrupt was neither because of Rx packet or Tx
2413  *  complete, this function is called. If the interrupt was to indicate
2414  *  a loss of link, the OSM link status handler is invoked for any other
2415  *  alarm interrupt the block that raised the interrupt is displayed
2416  *  and a H/W reset is issued.
2417  *  Return Value:
2418  *  NONE
2419 */
2420
2421 static void alarm_intr_handler(struct s2io_nic *nic)
2422 {
2423         struct net_device *dev = (struct net_device *) nic->dev;
2424         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2425         register u64 val64 = 0, err_reg = 0;
2426
2427         /* Handling link status change error Intr */
2428         err_reg = readq(&bar0->mac_rmac_err_reg);
2429         writeq(err_reg, &bar0->mac_rmac_err_reg);
2430         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2431                 schedule_work(&nic->set_link_task);
2432         }
2433
2434         /* Handling Ecc errors */
2435         val64 = readq(&bar0->mc_err_reg);
2436         writeq(val64, &bar0->mc_err_reg);
2437         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2438                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2439                         nic->mac_control.stats_info->sw_stat.
2440                                 double_ecc_errs++;
2441                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2442                                   dev->name);
2443                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2444                         netif_stop_queue(dev);
2445                         schedule_work(&nic->rst_timer_task);
2446                 } else {
2447                         nic->mac_control.stats_info->sw_stat.
2448                                 single_ecc_errs++;
2449                 }
2450         }
2451
2452         /* In case of a serious error, the device will be Reset. */
2453         val64 = readq(&bar0->serr_source);
2454         if (val64 & SERR_SOURCE_ANY) {
2455                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2456                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2457                 netif_stop_queue(dev);
2458                 schedule_work(&nic->rst_timer_task);
2459         }
2460
2461         /*
2462          * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2463          * Error occurs, the adapter will be recycled by disabling the
2464          * adapter enable bit and enabling it again after the device
2465          * becomes Quiescent.
2466          */
2467         val64 = readq(&bar0->pcc_err_reg);
2468         writeq(val64, &bar0->pcc_err_reg);
2469         if (val64 & PCC_FB_ECC_DB_ERR) {
2470                 u64 ac = readq(&bar0->adapter_control);
2471                 ac &= ~(ADAPTER_CNTL_EN);
2472                 writeq(ac, &bar0->adapter_control);
2473                 ac = readq(&bar0->adapter_control);
2474                 schedule_work(&nic->set_link_task);
2475         }
2476
2477         /* Other type of interrupts are not being handled now,  TODO */
2478 }
2479
2480 /**
2481  *  wait_for_cmd_complete - waits for a command to complete.
2482  *  @sp : private member of the device structure, which is a pointer to the
2483  *  s2io_nic structure.
2484  *  Description: Function that waits for a command to Write into RMAC
2485  *  ADDR DATA registers to be completed and returns either success or
2486  *  error depending on whether the command was complete or not.
2487  *  Return value:
2488  *   SUCCESS on success and FAILURE on failure.
2489  */
2490
2491 int wait_for_cmd_complete(nic_t * sp)
2492 {
2493         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2494         int ret = FAILURE, cnt = 0;
2495         u64 val64;
2496
2497         while (TRUE) {
2498                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2499                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2500                         ret = SUCCESS;
2501                         break;
2502                 }
2503                 msleep(50);
2504                 if (cnt++ > 10)
2505                         break;
2506         }
2507
2508         return ret;
2509 }
2510
2511 /**
2512  *  s2io_reset - Resets the card.
2513  *  @sp : private member of the device structure.
2514  *  Description: Function to Reset the card. This function then also
2515  *  restores the previously saved PCI configuration space registers as
2516  *  the card reset also resets the configuration space.
2517  *  Return value:
2518  *  void.
2519  */
2520
2521 void s2io_reset(nic_t * sp)
2522 {
2523         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2524         u64 val64;
2525         u16 subid, pci_cmd;
2526
2527         val64 = SW_RESET_ALL;
2528         writeq(val64, &bar0->sw_reset);
2529
2530         /*
2531          * At this stage, if the PCI write is indeed completed, the
2532          * card is reset and so is the PCI Config space of the device.
2533          * So a read cannot be issued at this stage on any of the
2534          * registers to ensure the write into "sw_reset" register
2535          * has gone through.
2536          * Question: Is there any system call that will explicitly force
2537          * all the write commands still pending on the bus to be pushed
2538          * through?
2539          * As of now I'am just giving a 250ms delay and hoping that the
2540          * PCI write to sw_reset register is done by this time.
2541          */
2542         msleep(250);
2543
2544         /* Restore the PCI state saved during initializarion. */
2545         pci_restore_state(sp->pdev);
2546
2547         s2io_init_pci(sp);
2548
2549         msleep(250);
2550
2551         /* Set swapper to enable I/O register access */
2552         s2io_set_swapper(sp);
2553
2554         /* Clear certain PCI/PCI-X fields after reset */
2555         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2556         pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2557         pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2558
2559         val64 = readq(&bar0->txpic_int_reg);
2560         val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2561         writeq(val64, &bar0->txpic_int_reg);
2562
2563         /* Clearing PCIX Ecc status register */
2564         pci_write_config_dword(sp->pdev, 0x68, 0);
2565
2566         /* Reset device statistics maintained by OS */
2567         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2568
2569         /* SXE-002: Configure link and activity LED to turn it off */
2570         subid = sp->pdev->subsystem_device;
2571         if ((subid & 0xFF) >= 0x07) {
2572                 val64 = readq(&bar0->gpio_control);
2573                 val64 |= 0x0000800000000000ULL;
2574                 writeq(val64, &bar0->gpio_control);
2575                 val64 = 0x0411040400000000ULL;
2576                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2577         }
2578
2579         sp->device_enabled_once = FALSE;
2580 }
2581
2582 /**
2583  *  s2io_set_swapper - to set the swapper controle on the card
2584  *  @sp : private member of the device structure,
2585  *  pointer to the s2io_nic structure.
2586  *  Description: Function to set the swapper control on the card
2587  *  correctly depending on the 'endianness' of the system.
2588  *  Return value:
2589  *  SUCCESS on success and FAILURE on failure.
2590  */
2591
2592 int s2io_set_swapper(nic_t * sp)
2593 {
2594         struct net_device *dev = sp->dev;
2595         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2596         u64 val64, valt, valr;
2597
2598         /*
2599          * Set proper endian settings and verify the same by reading
2600          * the PIF Feed-back register.
2601          */
2602
2603         val64 = readq(&bar0->pif_rd_swapper_fb);
2604         if (val64 != 0x0123456789ABCDEFULL) {
2605                 int i = 0;
2606                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2607                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2608                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2609                                 0};                     /* FE=0, SE=0 */
2610
2611                 while(i<4) {
2612                         writeq(value[i], &bar0->swapper_ctrl);
2613                         val64 = readq(&bar0->pif_rd_swapper_fb);
2614                         if (val64 == 0x0123456789ABCDEFULL)
2615                                 break;
2616                         i++;
2617                 }
2618                 if (i == 4) {
2619                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2620                                 dev->name);
2621                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2622                                 (unsigned long long) val64);
2623                         return FAILURE;
2624                 }
2625                 valr = value[i];
2626         } else {
2627                 valr = readq(&bar0->swapper_ctrl);
2628         }
2629
2630         valt = 0x0123456789ABCDEFULL;
2631         writeq(valt, &bar0->xmsi_address);
2632         val64 = readq(&bar0->xmsi_address);
2633
2634         if(val64 != valt) {
2635                 int i = 0;
2636                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2637                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2638                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2639                                 0};                     /* FE=0, SE=0 */
2640
2641                 while(i<4) {
2642                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2643                         writeq(valt, &bar0->xmsi_address);
2644                         val64 = readq(&bar0->xmsi_address);
2645                         if(val64 == valt)
2646                                 break;
2647                         i++;
2648                 }
2649                 if(i == 4) {
2650                         unsigned long long x = val64;
2651                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2652                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2653                         return FAILURE;
2654                 }
2655         }
2656         val64 = readq(&bar0->swapper_ctrl);
2657         val64 &= 0xFFFF000000000000ULL;
2658
2659 #ifdef  __BIG_ENDIAN
2660         /*
2661          * The device by default set to a big endian format, so a
2662          * big endian driver need not set anything.
2663          */
2664         val64 |= (SWAPPER_CTRL_TXP_FE |
2665                  SWAPPER_CTRL_TXP_SE |
2666                  SWAPPER_CTRL_TXD_R_FE |
2667                  SWAPPER_CTRL_TXD_W_FE |
2668                  SWAPPER_CTRL_TXF_R_FE |
2669                  SWAPPER_CTRL_RXD_R_FE |
2670                  SWAPPER_CTRL_RXD_W_FE |
2671                  SWAPPER_CTRL_RXF_W_FE |
2672                  SWAPPER_CTRL_XMSI_FE |
2673                  SWAPPER_CTRL_XMSI_SE |
2674                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2675         writeq(val64, &bar0->swapper_ctrl);
2676 #else
2677         /*
2678          * Initially we enable all bits to make it accessible by the
2679          * driver, then we selectively enable only those bits that
2680          * we want to set.
2681          */
2682         val64 |= (SWAPPER_CTRL_TXP_FE |
2683                  SWAPPER_CTRL_TXP_SE |
2684                  SWAPPER_CTRL_TXD_R_FE |
2685                  SWAPPER_CTRL_TXD_R_SE |
2686                  SWAPPER_CTRL_TXD_W_FE |
2687                  SWAPPER_CTRL_TXD_W_SE |
2688                  SWAPPER_CTRL_TXF_R_FE |
2689                  SWAPPER_CTRL_RXD_R_FE |
2690                  SWAPPER_CTRL_RXD_R_SE |
2691                  SWAPPER_CTRL_RXD_W_FE |
2692                  SWAPPER_CTRL_RXD_W_SE |
2693                  SWAPPER_CTRL_RXF_W_FE |
2694                  SWAPPER_CTRL_XMSI_FE |
2695                  SWAPPER_CTRL_XMSI_SE |
2696                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2697         writeq(val64, &bar0->swapper_ctrl);
2698 #endif
2699         val64 = readq(&bar0->swapper_ctrl);
2700
2701         /*
2702          * Verifying if endian settings are accurate by reading a
2703          * feedback register.
2704          */
2705         val64 = readq(&bar0->pif_rd_swapper_fb);
2706         if (val64 != 0x0123456789ABCDEFULL) {
2707                 /* Endian settings are incorrect, calls for another dekko. */
2708                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2709                           dev->name);
2710                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2711                           (unsigned long long) val64);
2712                 return FAILURE;
2713         }
2714
2715         return SUCCESS;
2716 }
2717
2718 /* ********************************************************* *
2719  * Functions defined below concern the OS part of the driver *
2720  * ********************************************************* */
2721
2722 /**
2723  *  s2io_open - open entry point of the driver
2724  *  @dev : pointer to the device structure.
2725  *  Description:
2726  *  This function is the open entry point of the driver. It mainly calls a
2727  *  function to allocate Rx buffers and inserts them into the buffer
2728  *  descriptors and then enables the Rx part of the NIC.
2729  *  Return value:
2730  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2731  *   file on failure.
2732  */
2733
2734 int s2io_open(struct net_device *dev)
2735 {
2736         nic_t *sp = dev->priv;
2737         int err = 0;
2738
2739         /*
2740          * Make sure you have link off by default every time
2741          * Nic is initialized
2742          */
2743         netif_carrier_off(dev);
2744         sp->last_link_state = 0; /* Unkown link state */
2745
2746         /* Initialize H/W and enable interrupts */
2747         if (s2io_card_up(sp)) {
2748                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2749                           dev->name);
2750                 err = -ENODEV;
2751                 goto hw_init_failed;
2752         }
2753
2754         /* After proper initialization of H/W, register ISR */
2755         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2756                           sp->name, dev);
2757         if (err) {
2758                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2759                           dev->name);
2760                 goto isr_registration_failed;
2761         }
2762
2763         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2764                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2765                 err = -ENODEV;
2766                 goto setting_mac_address_failed;
2767         }
2768
2769         netif_start_queue(dev);
2770         return 0;
2771
2772 setting_mac_address_failed:
2773         free_irq(sp->pdev->irq, dev);
2774 isr_registration_failed:
2775         del_timer_sync(&sp->alarm_timer);
2776         s2io_reset(sp);
2777 hw_init_failed:
2778         return err;
2779 }
2780
2781 /**
2782  *  s2io_close -close entry point of the driver
2783  *  @dev : device pointer.
2784  *  Description:
2785  *  This is the stop entry point of the driver. It needs to undo exactly
2786  *  whatever was done by the open entry point,thus it's usually referred to
2787  *  as the close function.Among other things this function mainly stops the
2788  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2789  *  Return value:
2790  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2791  *  file on failure.
2792  */
2793
2794 int s2io_close(struct net_device *dev)
2795 {
2796         nic_t *sp = dev->priv;
2797         flush_scheduled_work();
2798         netif_stop_queue(dev);
2799         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2800         s2io_card_down(sp);
2801
2802         free_irq(sp->pdev->irq, dev);
2803         sp->device_close_flag = TRUE;   /* Device is shut down. */
2804         return 0;
2805 }
2806
2807 /**
2808  *  s2io_xmit - Tx entry point of te driver
2809  *  @skb : the socket buffer containing the Tx data.
2810  *  @dev : device pointer.
2811  *  Description :
2812  *  This function is the Tx entry point of the driver. S2IO NIC supports
2813  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
2814  *  NOTE: when device cant queue the pkt,just the trans_start variable will
2815  *  not be upadted.
2816  *  Return value:
2817  *  0 on success & 1 on failure.
2818  */
2819
2820 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2821 {
2822         nic_t *sp = dev->priv;
2823         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2824         register u64 val64;
2825         TxD_t *txdp;
2826         TxFIFO_element_t __iomem *tx_fifo;
2827         unsigned long flags;
2828 #ifdef NETIF_F_TSO
2829         int mss;
2830 #endif
2831         u16 vlan_tag = 0;
2832         int vlan_priority = 0;
2833         mac_info_t *mac_control;
2834         struct config_param *config;
2835
2836         mac_control = &sp->mac_control;
2837         config = &sp->config;
2838
2839         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2840         spin_lock_irqsave(&sp->tx_lock, flags);
2841         if (atomic_read(&sp->card_state) == CARD_DOWN) {
2842                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2843                           dev->name);
2844                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2845                 dev_kfree_skb(skb);
2846                 return 0;
2847         }
2848
2849         queue = 0;
2850
2851         /* Get Fifo number to Transmit based on vlan priority */
2852         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
2853                 vlan_tag = vlan_tx_tag_get(skb);
2854                 vlan_priority = vlan_tag >> 13;
2855                 queue = config->fifo_mapping[vlan_priority];
2856         }
2857
2858         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2859         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2860         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2861                 list_virt_addr;
2862
2863         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2864         /* Avoid "put" pointer going beyond "get" pointer */
2865         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2866                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2867                 netif_stop_queue(dev);
2868                 dev_kfree_skb(skb);
2869                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2870                 return 0;
2871         }
2872 #ifdef NETIF_F_TSO
2873         mss = skb_shinfo(skb)->tso_size;
2874         if (mss) {
2875                 txdp->Control_1 |= TXD_TCP_LSO_EN;
2876                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2877         }
2878 #endif
2879
2880         frg_cnt = skb_shinfo(skb)->nr_frags;
2881         frg_len = skb->len - skb->data_len;
2882
2883         txdp->Buffer_Pointer = pci_map_single
2884             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2885         txdp->Host_Control = (unsigned long) skb;
2886         if (skb->ip_summed == CHECKSUM_HW) {
2887                 txdp->Control_2 |=
2888                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2889                      TXD_TX_CKO_UDP_EN);
2890         }
2891
2892         txdp->Control_2 |= config->tx_intr_type;
2893
2894         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
2895                 txdp->Control_2 |= TXD_VLAN_ENABLE;
2896                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
2897         }
2898
2899         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2900                             TXD_GATHER_CODE_FIRST);
2901         txdp->Control_1 |= TXD_LIST_OWN_XENA;
2902
2903         /* For fragmented SKB. */
2904         for (i = 0; i < frg_cnt; i++) {
2905                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2906                 txdp++;
2907                 txdp->Buffer_Pointer = (u64) pci_map_page
2908                     (sp->pdev, frag->page, frag->page_offset,
2909                      frag->size, PCI_DMA_TODEVICE);
2910                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2911         }
2912         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2913
2914         tx_fifo = mac_control->tx_FIFO_start[queue];
2915         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2916         writeq(val64, &tx_fifo->TxDL_Pointer);
2917
2918         wmb();
2919
2920         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2921                  TX_FIFO_LAST_LIST);
2922
2923 #ifdef NETIF_F_TSO
2924         if (mss)
2925                 val64 |= TX_FIFO_SPECIAL_FUNC;
2926 #endif
2927         writeq(val64, &tx_fifo->List_Control);
2928
2929         put_off++;
2930         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2931         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2932
2933         /* Avoid "put" pointer going beyond "get" pointer */
2934         if (((put_off + 1) % queue_len) == get_off) {
2935                 DBG_PRINT(TX_DBG,
2936                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2937                           put_off, get_off);
2938                 netif_stop_queue(dev);
2939         }
2940
2941         dev->trans_start = jiffies;
2942         spin_unlock_irqrestore(&sp->tx_lock, flags);
2943
2944         return 0;
2945 }
2946
2947 static void
2948 s2io_alarm_handle(unsigned long data)
2949 {
2950         nic_t *sp = (nic_t *)data;
2951
2952         alarm_intr_handler(sp);
2953         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
2954 }
2955
2956 /**
2957  *  s2io_isr - ISR handler of the device .
2958  *  @irq: the irq of the device.
2959  *  @dev_id: a void pointer to the dev structure of the NIC.
2960  *  @pt_regs: pointer to the registers pushed on the stack.
2961  *  Description:  This function is the ISR handler of the device. It
2962  *  identifies the reason for the interrupt and calls the relevant
2963  *  service routines. As a contongency measure, this ISR allocates the
2964  *  recv buffers, if their numbers are below the panic value which is
2965  *  presently set to 25% of the original number of rcv buffers allocated.
2966  *  Return value:
2967  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
2968  *   IRQ_NONE: will be returned if interrupt is not from our device
2969  */
2970 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2971 {
2972         struct net_device *dev = (struct net_device *) dev_id;
2973         nic_t *sp = dev->priv;
2974         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2975         int i;
2976         u64 reason = 0, val64;
2977         mac_info_t *mac_control;
2978         struct config_param *config;
2979
2980         atomic_inc(&sp->isr_cnt);
2981         mac_control = &sp->mac_control;
2982         config = &sp->config;
2983
2984         /*
2985          * Identify the cause for interrupt and call the appropriate
2986          * interrupt handler. Causes for the interrupt could be;
2987          * 1. Rx of packet.
2988          * 2. Tx complete.
2989          * 3. Link down.
2990          * 4. Error in any functional blocks of the NIC.
2991          */
2992         reason = readq(&bar0->general_int_status);
2993
2994         if (!reason) {
2995                 /* The interrupt was not raised by Xena. */
2996                 atomic_dec(&sp->isr_cnt);
2997                 return IRQ_NONE;
2998         }
2999
3000 #ifdef CONFIG_S2IO_NAPI
3001         if (reason & GEN_INTR_RXTRAFFIC) {
3002                 if (netif_rx_schedule_prep(dev)) {
3003                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3004                                               DISABLE_INTRS);
3005                         __netif_rx_schedule(dev);
3006                 }
3007         }
3008 #else
3009         /* If Intr is because of Rx Traffic */
3010         if (reason & GEN_INTR_RXTRAFFIC) {
3011                 /*
3012                  * rx_traffic_int reg is an R1 register, writing all 1's
3013                  * will ensure that the actual interrupt causing bit get's
3014                  * cleared and hence a read can be avoided.
3015                  */
3016                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3017                 writeq(val64, &bar0->rx_traffic_int);
3018                 for (i = 0; i < config->rx_ring_num; i++) {
3019                         rx_intr_handler(&mac_control->rings[i]);
3020                 }
3021         }
3022 #endif
3023
3024         /* If Intr is because of Tx Traffic */
3025         if (reason & GEN_INTR_TXTRAFFIC) {
3026                 /*
3027                  * tx_traffic_int reg is an R1 register, writing all 1's
3028                  * will ensure that the actual interrupt causing bit get's
3029                  * cleared and hence a read can be avoided.
3030                  */
3031                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3032                 writeq(val64, &bar0->tx_traffic_int);
3033
3034                 for (i = 0; i < config->tx_fifo_num; i++)
3035                         tx_intr_handler(&mac_control->fifos[i]);
3036         }
3037
3038         /*
3039          * If the Rx buffer count is below the panic threshold then
3040          * reallocate the buffers from the interrupt handler itself,
3041          * else schedule a tasklet to reallocate the buffers.
3042          */
3043 #ifndef CONFIG_S2IO_NAPI
3044         for (i = 0; i < config->rx_ring_num; i++) {
3045                 int ret;
3046                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3047                 int level = rx_buffer_level(sp, rxb_size, i);
3048
3049                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3050                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3051                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
3052                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3053                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3054                                           dev->name);
3055                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3056                                 clear_bit(0, (&sp->tasklet_status));
3057                                 atomic_dec(&sp->isr_cnt);
3058                                 return IRQ_HANDLED;
3059                         }
3060                         clear_bit(0, (&sp->tasklet_status));
3061                 } else if (level == LOW) {
3062                         tasklet_schedule(&sp->task);
3063                 }
3064         }
3065 #endif
3066
3067         atomic_dec(&sp->isr_cnt);
3068         return IRQ_HANDLED;
3069 }
3070
3071 /**
3072  * s2io_updt_stats -
3073  */
3074 static void s2io_updt_stats(nic_t *sp)
3075 {
3076         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3077         u64 val64;
3078         int cnt = 0;
3079
3080         if (atomic_read(&sp->card_state) == CARD_UP) {
3081                 /* Apprx 30us on a 133 MHz bus */
3082                 val64 = SET_UPDT_CLICKS(10) |
3083                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3084                 writeq(val64, &bar0->stat_cfg);
3085                 do {
3086                         udelay(100);
3087                         val64 = readq(&bar0->stat_cfg);
3088                         if (!(val64 & BIT(0)))
3089                                 break;
3090                         cnt++;
3091                         if (cnt == 5)
3092                                 break; /* Updt failed */
3093                 } while(1);
3094         }
3095 }
3096
3097 /**
3098  *  s2io_get_stats - Updates the device statistics structure.
3099  *  @dev : pointer to the device structure.
3100  *  Description:
3101  *  This function updates the device statistics structure in the s2io_nic
3102  *  structure and returns a pointer to the same.
3103  *  Return value:
3104  *  pointer to the updated net_device_stats structure.
3105  */
3106
3107 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3108 {
3109         nic_t *sp = dev->priv;
3110         mac_info_t *mac_control;
3111         struct config_param *config;
3112
3113
3114         mac_control = &sp->mac_control;
3115         config = &sp->config;
3116
3117         /* Configure Stats for immediate updt */
3118         s2io_updt_stats(sp);
3119
3120         sp->stats.tx_packets =
3121                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3122         sp->stats.tx_errors =
3123                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3124         sp->stats.rx_errors =
3125                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3126         sp->stats.multicast =
3127                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3128         sp->stats.rx_length_errors =
3129                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3130
3131         return (&sp->stats);
3132 }
3133
3134 /**
3135  *  s2io_set_multicast - entry point for multicast address enable/disable.
3136  *  @dev : pointer to the device structure
3137  *  Description:
3138  *  This function is a driver entry point which gets called by the kernel
3139  *  whenever multicast addresses must be enabled/disabled. This also gets
3140  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
3141  *  determine, if multicast address must be enabled or if promiscuous mode
3142  *  is to be disabled etc.
3143  *  Return value:
3144  *  void.
3145  */
3146
3147 static void s2io_set_multicast(struct net_device *dev)
3148 {
3149         int i, j, prev_cnt;
3150         struct dev_mc_list *mclist;
3151         nic_t *sp = dev->priv;
3152         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3153         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3154             0xfeffffffffffULL;
3155         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3156         void __iomem *add;
3157
3158         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3159                 /*  Enable all Multicast addresses */
3160                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3161                        &bar0->rmac_addr_data0_mem);
3162                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3163                        &bar0->rmac_addr_data1_mem);
3164                 val64 = RMAC_ADDR_CMD_MEM_WE |
3165                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3166                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3167                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3168                 /* Wait till command completes */
3169                 wait_for_cmd_complete(sp);
3170
3171                 sp->m_cast_flg = 1;
3172                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3173         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3174                 /*  Disable all Multicast addresses */
3175                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3176                        &bar0->rmac_addr_data0_mem);
3177                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3178                        &bar0->rmac_addr_data1_mem);
3179                 val64 = RMAC_ADDR_CMD_MEM_WE |
3180                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3181                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3182                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3183                 /* Wait till command completes */
3184                 wait_for_cmd_complete(sp);
3185
3186                 sp->m_cast_flg = 0;
3187                 sp->all_multi_pos = 0;
3188         }
3189
3190         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3191                 /*  Put the NIC into promiscuous mode */
3192                 add = &bar0->mac_cfg;
3193                 val64 = readq(&bar0->mac_cfg);
3194                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3195
3196                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3197                 writel((u32) val64, add);
3198                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3199                 writel((u32) (val64 >> 32), (add + 4));
3200
3201                 val64 = readq(&bar0->mac_cfg);
3202                 sp->promisc_flg = 1;
3203                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3204                           dev->name);
3205         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3206                 /*  Remove the NIC from promiscuous mode */
3207                 add = &bar0->mac_cfg;
3208                 val64 = readq(&bar0->mac_cfg);
3209                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3210
3211                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3212                 writel((u32) val64, add);
3213                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3214                 writel((u32) (val64 >> 32), (add + 4));
3215
3216                 val64 = readq(&bar0->mac_cfg);
3217                 sp->promisc_flg = 0;
3218                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3219                           dev->name);
3220         }
3221
3222         /*  Update individual M_CAST address list */
3223         if ((!sp->m_cast_flg) && dev->mc_count) {
3224                 if (dev->mc_count >
3225                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3226                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3227                                   dev->name);
3228                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3229                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3230                         return;
3231                 }
3232
3233                 prev_cnt = sp->mc_addr_count;
3234                 sp->mc_addr_count = dev->mc_count;
3235
3236                 /* Clear out the previous list of Mc in the H/W. */
3237                 for (i = 0; i < prev_cnt; i++) {
3238                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3239                                &bar0->rmac_addr_data0_mem);
3240                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3241                                 &bar0->rmac_addr_data1_mem);
3242                         val64 = RMAC_ADDR_CMD_MEM_WE |
3243                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3244                             RMAC_ADDR_CMD_MEM_OFFSET
3245                             (MAC_MC_ADDR_START_OFFSET + i);
3246                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3247
3248                         /* Wait for command completes */
3249                         if (wait_for_cmd_complete(sp)) {
3250                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3251                                           dev->name);
3252                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3253                                 return;
3254                         }
3255                 }
3256
3257                 /* Create the new Rx filter list and update the same in H/W. */
3258                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3259                      i++, mclist = mclist->next) {
3260                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3261                                ETH_ALEN);
3262                         for (j = 0; j < ETH_ALEN; j++) {
3263                                 mac_addr |= mclist->dmi_addr[j];
3264                                 mac_addr <<= 8;
3265                         }
3266                         mac_addr >>= 8;
3267                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3268                                &bar0->rmac_addr_data0_mem);
3269                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3270                                 &bar0->rmac_addr_data1_mem);
3271                         val64 = RMAC_ADDR_CMD_MEM_WE |
3272                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3273                             RMAC_ADDR_CMD_MEM_OFFSET
3274                             (i + MAC_MC_ADDR_START_OFFSET);
3275                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3276
3277                         /* Wait for command completes */
3278                         if (wait_for_cmd_complete(sp)) {
3279                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3280                                           dev->name);
3281                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3282                                 return;
3283                         }
3284                 }
3285         }
3286 }
3287
3288 /**
3289  *  s2io_set_mac_addr - Programs the Xframe mac address
3290  *  @dev : pointer to the device structure.
3291  *  @addr: a uchar pointer to the new mac address which is to be set.
3292  *  Description : This procedure will program the Xframe to receive
3293  *  frames with new Mac Address
3294  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3295  *  as defined in errno.h file on failure.
3296  */
3297
3298 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3299 {
3300         nic_t *sp = dev->priv;
3301         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3302         register u64 val64, mac_addr = 0;
3303         int i;
3304
3305         /*
3306          * Set the new MAC address as the new unicast filter and reflect this
3307          * change on the device address registered with the OS. It will be
3308          * at offset 0.
3309          */
3310         for (i = 0; i < ETH_ALEN; i++) {
3311                 mac_addr <<= 8;
3312                 mac_addr |= addr[i];
3313         }
3314
3315         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3316                &bar0->rmac_addr_data0_mem);
3317
3318         val64 =
3319             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3320             RMAC_ADDR_CMD_MEM_OFFSET(0);
3321         writeq(val64, &bar0->rmac_addr_cmd_mem);
3322         /* Wait till command completes */
3323         if (wait_for_cmd_complete(sp)) {
3324                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3325                 return FAILURE;
3326         }
3327
3328         return SUCCESS;
3329 }
3330
3331 /**
3332  * s2io_ethtool_sset - Sets different link parameters.
3333  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
3334  * @info: pointer to the structure with parameters given by ethtool to set
3335  * link information.
3336  * Description:
3337  * The function sets different link parameters provided by the user onto
3338  * the NIC.
3339  * Return value:
3340  * 0 on success.
3341 */
3342
3343 static int s2io_ethtool_sset(struct net_device *dev,
3344                              struct ethtool_cmd *info)
3345 {
3346         nic_t *sp = dev->priv;
3347         if ((info->autoneg == AUTONEG_ENABLE) ||
3348             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3349                 return -EINVAL;
3350         else {
3351                 s2io_close(sp->dev);
3352                 s2io_open(sp->dev);
3353         }
3354
3355         return 0;
3356 }
3357
3358 /**
3359  * s2io_ethtol_gset - Return link specific information.
3360  * @sp : private member of the device structure, pointer to the
3361  *      s2io_nic structure.
3362  * @info : pointer to the structure with parameters given by ethtool
3363  * to return link information.
3364  * Description:
3365  * Returns link specific information like speed, duplex etc.. to ethtool.
3366  * Return value :
3367  * return 0 on success.
3368  */
3369
3370 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3371 {
3372         nic_t *sp = dev->priv;
3373         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3374         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3375         info->port = PORT_FIBRE;
3376         /* info->transceiver?? TODO */
3377
3378         if (netif_carrier_ok(sp->dev)) {
3379                 info->speed = 10000;
3380                 info->duplex = DUPLEX_FULL;
3381         } else {
3382                 info->speed = -1;
3383                 info->duplex = -1;
3384         }
3385
3386         info->autoneg = AUTONEG_DISABLE;
3387         return 0;
3388 }
3389
3390 /**
3391  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3392  * @sp : private member of the device structure, which is a pointer to the
3393  * s2io_nic structure.
3394  * @info : pointer to the structure with parameters given by ethtool to
3395  * return driver information.
3396  * Description:
3397  * Returns driver specefic information like name, version etc.. to ethtool.
3398  * Return value:
3399  *  void
3400  */
3401
3402 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3403                                   struct ethtool_drvinfo *info)
3404 {
3405         nic_t *sp = dev->priv;
3406
3407         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3408         strncpy(info->version, s2io_driver_version,
3409                 sizeof(s2io_driver_version));
3410         strncpy(info->fw_version, "", 32);
3411         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3412         info->regdump_len = XENA_REG_SPACE;
3413         info->eedump_len = XENA_EEPROM_SPACE;
3414         info->testinfo_len = S2IO_TEST_LEN;
3415         info->n_stats = S2IO_STAT_LEN;
3416 }
3417
3418 /**
3419  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3420  *  @sp: private member of the device structure, which is a pointer to the
3421  *  s2io_nic structure.
3422  *  @regs : pointer to the structure with parameters given by ethtool for
3423  *  dumping the registers.
3424  *  @reg_space: The input argumnet into which all the registers are dumped.
3425  *  Description:
3426  *  Dumps the entire register space of xFrame NIC into the user given
3427  *  buffer area.
3428  * Return value :
3429  * void .
3430 */
3431
3432 static void s2io_ethtool_gregs(struct net_device *dev,
3433                                struct ethtool_regs *regs, void *space)
3434 {
3435         int i;
3436         u64 reg;
3437         u8 *reg_space = (u8 *) space;
3438         nic_t *sp = dev->priv;
3439
3440         regs->len = XENA_REG_SPACE;
3441         regs->version = sp->pdev->subsystem_device;
3442
3443         for (i = 0; i < regs->len; i += 8) {
3444                 reg = readq(sp->bar0 + i);
3445                 memcpy((reg_space + i), &reg, 8);
3446         }
3447 }
3448
3449 /**
3450  *  s2io_phy_id  - timer function that alternates adapter LED.
3451  *  @data : address of the private member of the device structure, which
3452  *  is a pointer to the s2io_nic structure, provided as an u32.
3453  * Description: This is actually the timer function that alternates the
3454  * adapter LED bit of the adapter control bit to set/reset every time on
3455  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3456  *  once every second.
3457 */
3458 static void s2io_phy_id(unsigned long data)
3459 {
3460         nic_t *sp = (nic_t *) data;
3461         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3462         u64 val64 = 0;
3463         u16 subid;
3464
3465         subid = sp->pdev->subsystem_device;
3466         if ((subid & 0xFF) >= 0x07) {
3467                 val64 = readq(&bar0->gpio_control);
3468                 val64 ^= GPIO_CTRL_GPIO_0;
3469                 writeq(val64, &bar0->gpio_control);
3470         } else {
3471                 val64 = readq(&bar0->adapter_control);
3472                 val64 ^= ADAPTER_LED_ON;
3473                 writeq(val64, &bar0->adapter_control);
3474         }
3475
3476         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3477 }
3478
3479 /**
3480  * s2io_ethtool_idnic - To physically identify the nic on the system.
3481  * @sp : private member of the device structure, which is a pointer to the
3482  * s2io_nic structure.
3483  * @id : pointer to the structure with identification parameters given by
3484  * ethtool.
3485  * Description: Used to physically identify the NIC on the system.
3486  * The Link LED will blink for a time specified by the user for
3487  * identification.
3488  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3489  * identification is possible only if it's link is up.
3490  * Return value:
3491  * int , returns 0 on success
3492  */
3493
3494 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3495 {
3496         u64 val64 = 0, last_gpio_ctrl_val;
3497         nic_t *sp = dev->priv;
3498         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3499         u16 subid;
3500
3501         subid = sp->pdev->subsystem_device;
3502         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3503         if ((subid & 0xFF) < 0x07) {
3504                 val64 = readq(&bar0->adapter_control);
3505                 if (!(val64 & ADAPTER_CNTL_EN)) {
3506                         printk(KERN_ERR
3507                                "Adapter Link down, cannot blink LED\n");
3508                         return -EFAULT;
3509                 }
3510         }
3511         if (sp->id_timer.function == NULL) {
3512                 init_timer(&sp->id_timer);
3513                 sp->id_timer.function = s2io_phy_id;
3514                 sp->id_timer.data = (unsigned long) sp;
3515         }
3516         mod_timer(&sp->id_timer, jiffies);
3517         if (data)
3518                 msleep_interruptible(data * HZ);
3519         else
3520                 msleep_interruptible(MAX_FLICKER_TIME);
3521         del_timer_sync(&sp->id_timer);
3522
3523         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3524                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3525                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3526         }
3527
3528         return 0;
3529 }
3530
3531 /**
3532  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3533  * @sp : private member of the device structure, which is a pointer to the
3534  *      s2io_nic structure.
3535  * @ep : pointer to the structure with pause parameters given by ethtool.
3536  * Description:
3537  * Returns the Pause frame generation and reception capability of the NIC.
3538  * Return value:
3539  *  void
3540  */
3541 static void s2io_ethtool_getpause_data(struct net_device *dev,
3542                                        struct ethtool_pauseparam *ep)
3543 {
3544         u64 val64;
3545         nic_t *sp = dev->priv;
3546         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3547
3548         val64 = readq(&bar0->rmac_pause_cfg);
3549         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3550                 ep->tx_pause = TRUE;
3551         if (val64 & RMAC_PAUSE_RX_ENABLE)
3552                 ep->rx_pause = TRUE;
3553         ep->autoneg = FALSE;
3554 }
3555
3556 /**
3557  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3558  * @sp : private member of the device structure, which is a pointer to the
3559  *      s2io_nic structure.
3560  * @ep : pointer to the structure with pause parameters given by ethtool.
3561  * Description:
3562  * It can be used to set or reset Pause frame generation or reception
3563  * support of the NIC.
3564  * Return value:
3565  * int, returns 0 on Success
3566  */
3567
3568 static int s2io_ethtool_setpause_data(struct net_device *dev,
3569                                struct ethtool_pauseparam *ep)
3570 {
3571         u64 val64;
3572         nic_t *sp = dev->priv;
3573         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3574
3575         val64 = readq(&bar0->rmac_pause_cfg);
3576         if (ep->tx_pause)
3577                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3578         else
3579                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3580         if (ep->rx_pause)
3581                 val64 |= RMAC_PAUSE_RX_ENABLE;
3582         else
3583                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3584         writeq(val64, &bar0->rmac_pause_cfg);
3585         return 0;
3586 }
3587
3588 /**
3589  * read_eeprom - reads 4 bytes of data from user given offset.
3590  * @sp : private member of the device structure, which is a pointer to the
3591  *      s2io_nic structure.
3592  * @off : offset at which the data must be written
3593  * @data : Its an output parameter where the data read at the given
3594  *      offset is stored.
3595  * Description:
3596  * Will read 4 bytes of data from the user given offset and return the
3597  * read data.
3598  * NOTE: Will allow to read only part of the EEPROM visible through the
3599  *   I2C bus.
3600  * Return value:
3601  *  -1 on failure and 0 on success.
3602  */
3603
3604 #define S2IO_DEV_ID             5
3605 static int read_eeprom(nic_t * sp, int off, u32 * data)
3606 {
3607         int ret = -1;
3608         u32 exit_cnt = 0;
3609         u64 val64;
3610         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3611
3612         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3613             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3614             I2C_CONTROL_CNTL_START;
3615         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3616
3617         while (exit_cnt < 5) {
3618                 val64 = readq(&bar0->i2c_control);
3619                 if (I2C_CONTROL_CNTL_END(val64)) {
3620                         *data = I2C_CONTROL_GET_DATA(val64);
3621                         ret = 0;
3622                         break;
3623                 }
3624                 msleep(50);
3625                 exit_cnt++;
3626         }
3627
3628         return ret;
3629 }
3630
3631 /**
3632  *  write_eeprom - actually writes the relevant part of the data value.
3633  *  @sp : private member of the device structure, which is a pointer to the
3634  *       s2io_nic structure.
3635  *  @off : offset at which the data must be written
3636  *  @data : The data that is to be written
3637  *  @cnt : Number of bytes of the data that are actually to be written into
3638  *  the Eeprom. (max of 3)
3639  * Description:
3640  *  Actually writes the relevant part of the data value into the Eeprom
3641  *  through the I2C bus.
3642  * Return value:
3643  *  0 on success, -1 on failure.
3644  */
3645
3646 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3647 {
3648         int exit_cnt = 0, ret = -1;
3649         u64 val64;
3650         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3651
3652         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3653             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3654             I2C_CONTROL_CNTL_START;
3655         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3656
3657         while (exit_cnt < 5) {
3658                 val64 = readq(&bar0->i2c_control);
3659                 if (I2C_CONTROL_CNTL_END(val64)) {
3660                         if (!(val64 & I2C_CONTROL_NACK))
3661                                 ret = 0;
3662                         break;
3663                 }
3664                 msleep(50);
3665                 exit_cnt++;
3666         }
3667
3668         return ret;
3669 }
3670
3671 /**
3672  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
3673  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
3674  *  @eeprom : pointer to the user level structure provided by ethtool,
3675  *  containing all relevant information.
3676  *  @data_buf : user defined value to be written into Eeprom.
3677  *  Description: Reads the values stored in the Eeprom at given offset
3678  *  for a given length. Stores these values int the input argument data
3679  *  buffer 'data_buf' and returns these to the caller (ethtool.)
3680  *  Return value:
3681  *  int  0 on success
3682  */
3683
3684 static int s2io_ethtool_geeprom(struct net_device *dev,
3685                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3686 {
3687         u32 data, i, valid;
3688         nic_t *sp = dev->priv;
3689
3690         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3691
3692         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3693                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3694
3695         for (i = 0; i < eeprom->len; i += 4) {
3696                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3697                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3698                         return -EFAULT;
3699                 }
3700                 valid = INV(data);
3701                 memcpy((data_buf + i), &valid, 4);
3702         }
3703         return 0;
3704 }
3705
3706 /**
3707  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3708  *  @sp : private member of the device structure, which is a pointer to the
3709  *  s2io_nic structure.
3710  *  @eeprom : pointer to the user level structure provided by ethtool,
3711  *  containing all relevant information.
3712  *  @data_buf ; user defined value to be written into Eeprom.
3713  *  Description:
3714  *  Tries to write the user provided value in the Eeprom, at the offset
3715  *  given by the user.
3716  *  Return value:
3717  *  0 on success, -EFAULT on failure.
3718  */
3719
3720 static int s2io_ethtool_seeprom(struct net_device *dev,
3721                                 struct ethtool_eeprom *eeprom,
3722                                 u8 * data_buf)
3723 {
3724         int len = eeprom->len, cnt = 0;
3725         u32 valid = 0, data;
3726         nic_t *sp = dev->priv;
3727
3728         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3729                 DBG_PRINT(ERR_DBG,
3730                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3731                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3732                           eeprom->magic);
3733                 return -EFAULT;
3734         }
3735
3736         while (len) {
3737                 data = (u32) data_buf[cnt] & 0x000000FF;
3738                 if (data) {
3739                         valid = (u32) (data << 24);
3740                 } else
3741                         valid = data;
3742
3743                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3744                         DBG_PRINT(ERR_DBG,
3745                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3746                         DBG_PRINT(ERR_DBG,
3747                                   "write into the specified offset\n");
3748                         return -EFAULT;
3749                 }
3750                 cnt++;
3751                 len--;
3752         }
3753
3754         return 0;
3755 }
3756
3757 /**
3758  * s2io_register_test - reads and writes into all clock domains.
3759  * @sp : private member of the device structure, which is a pointer to the
3760  * s2io_nic structure.
3761  * @data : variable that returns the result of each of the test conducted b
3762  * by the driver.
3763  * Description:
3764  * Read and write into all clock domains. The NIC has 3 clock domains,
3765  * see that registers in all the three regions are accessible.
3766  * Return value:
3767  * 0 on success.
3768  */
3769
3770 static int s2io_register_test(nic_t * sp, uint64_t * data)
3771 {
3772         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3773         u64 val64 = 0;
3774         int fail = 0;
3775
3776         val64 = readq(&bar0->pif_rd_swapper_fb);
3777         if (val64 != 0x123456789abcdefULL) {
3778                 fail = 1;
3779                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3780         }
3781
3782         val64 = readq(&bar0->rmac_pause_cfg);
3783         if (val64 != 0xc000ffff00000000ULL) {
3784                 fail = 1;
3785                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3786         }
3787
3788         val64 = readq(&bar0->rx_queue_cfg);
3789         if (val64 != 0x0808080808080808ULL) {
3790                 fail = 1;
3791                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3792         }
3793
3794         val64 = readq(&bar0->xgxs_efifo_cfg);
3795         if (val64 != 0x000000001923141EULL) {
3796                 fail = 1;
3797                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3798         }
3799
3800         val64 = 0x5A5A5A5A5A5A5A5AULL;
3801         writeq(val64, &bar0->xmsi_data);
3802         val64 = readq(&bar0->xmsi_data);
3803         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3804                 fail = 1;
3805                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3806         }
3807
3808         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3809         writeq(val64, &bar0->xmsi_data);
3810         val64 = readq(&bar0->xmsi_data);
3811         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3812                 fail = 1;
3813                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3814         }
3815
3816         *data = fail;
3817         return 0;
3818 }
3819
3820 /**
3821  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3822  * @sp : private member of the device structure, which is a pointer to the
3823  * s2io_nic structure.
3824  * @data:variable that returns the result of each of the test conducted by
3825  * the driver.
3826  * Description:
3827  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3828  * register.
3829  * Return value:
3830  * 0 on success.
3831  */
3832
3833 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3834 {
3835         int fail = 0;
3836         u32 ret_data;
3837
3838         /* Test Write Error at offset 0 */
3839         if (!write_eeprom(sp, 0, 0, 3))
3840                 fail = 1;
3841
3842         /* Test Write at offset 4f0 */
3843         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3844                 fail = 1;
3845         if (read_eeprom(sp, 0x4F0, &ret_data))
3846                 fail = 1;
3847
3848         if (ret_data != 0x01234567)
3849                 fail = 1;
3850
3851         /* Reset the EEPROM data go FFFF */
3852         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3853
3854         /* Test Write Request Error at offset 0x7c */
3855         if (!write_eeprom(sp, 0x07C, 0, 3))
3856                 fail = 1;
3857
3858         /* Test Write Request at offset 0x7fc */
3859         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3860                 fail = 1;
3861         if (read_eeprom(sp, 0x7FC, &ret_data))
3862                 fail = 1;
3863
3864         if (ret_data != 0x01234567)
3865                 fail = 1;
3866
3867         /* Reset the EEPROM data go FFFF */
3868         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3869
3870         /* Test Write Error at offset 0x80 */
3871         if (!write_eeprom(sp, 0x080, 0, 3))
3872                 fail = 1;
3873
3874         /* Test Write Error at offset 0xfc */
3875         if (!write_eeprom(sp, 0x0FC, 0, 3))
3876                 fail = 1;
3877
3878         /* Test Write Error at offset 0x100 */
3879         if (!write_eeprom(sp, 0x100, 0, 3))
3880                 fail = 1;
3881
3882         /* Test Write Error at offset 4ec */
3883         if (!write_eeprom(sp, 0x4EC, 0, 3))
3884                 fail = 1;
3885
3886         *data = fail;
3887         return 0;
3888 }
3889
3890 /**
3891  * s2io_bist_test - invokes the MemBist test of the card .
3892  * @sp : private member of the device structure, which is a pointer to the
3893  * s2io_nic structure.
3894  * @data:variable that returns the result of each of the test conducted by
3895  * the driver.
3896  * Description:
3897  * This invokes the MemBist test of the card. We give around
3898  * 2 secs time for the Test to complete. If it's still not complete
3899  * within this peiod, we consider that the test failed.
3900  * Return value:
3901  * 0 on success and -1 on failure.
3902  */
3903
3904 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3905 {
3906         u8 bist = 0;
3907         int cnt = 0, ret = -1;
3908
3909         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3910         bist |= PCI_BIST_START;
3911         pci_write_config_word(sp->pdev, PCI_BIST, bist);
3912
3913         while (cnt < 20) {
3914                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3915                 if (!(bist & PCI_BIST_START)) {
3916                         *data = (bist & PCI_BIST_CODE_MASK);
3917                         ret = 0;
3918                         break;
3919                 }
3920                 msleep(100);
3921                 cnt++;
3922         }
3923
3924         return ret;
3925 }
3926
3927 /**
3928  * s2io-link_test - verifies the link state of the nic
3929  * @sp ; private member of the device structure, which is a pointer to the
3930  * s2io_nic structure.
3931  * @data: variable that returns the result of each of the test conducted by
3932  * the driver.
3933  * Description:
3934  * The function verifies the link state of the NIC and updates the input
3935  * argument 'data' appropriately.
3936  * Return value:
3937  * 0 on success.
3938  */
3939
3940 static int s2io_link_test(nic_t * sp, uint64_t * data)
3941 {
3942         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3943         u64 val64;
3944
3945         val64 = readq(&bar0->adapter_status);
3946         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3947                 *data = 1;
3948
3949         return 0;
3950 }
3951
3952 /**
3953  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3954  * @sp - private member of the device structure, which is a pointer to the
3955  * s2io_nic structure.
3956  * @data - variable that returns the result of each of the test
3957  * conducted by the driver.
3958  * Description:
3959  *  This is one of the offline test that tests the read and write
3960  *  access to the RldRam chip on the NIC.
3961  * Return value:
3962  *  0 on success.
3963  */
3964
3965 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3966 {
3967         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3968         u64 val64;
3969         int cnt, iteration = 0, test_pass = 0;
3970
3971         val64 = readq(&bar0->adapter_control);
3972         val64 &= ~ADAPTER_ECC_EN;
3973         writeq(val64, &bar0->adapter_control);
3974
3975         val64 = readq(&bar0->mc_rldram_test_ctrl);
3976         val64 |= MC_RLDRAM_TEST_MODE;
3977         writeq(val64, &bar0->mc_rldram_test_ctrl);
3978
3979         val64 = readq(&bar0->mc_rldram_mrs);
3980         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3981         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3982
3983         val64 |= MC_RLDRAM_MRS_ENABLE;
3984         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3985
3986         while (iteration < 2) {
3987                 val64 = 0x55555555aaaa0000ULL;
3988                 if (iteration == 1) {
3989                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3990                 }
3991                 writeq(val64, &bar0->mc_rldram_test_d0);
3992
3993                 val64 = 0xaaaa5a5555550000ULL;
3994                 if (iteration == 1) {
3995                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3996                 }
3997                 writeq(val64, &bar0->mc_rldram_test_d1);
3998
3999                 val64 = 0x55aaaaaaaa5a0000ULL;
4000                 if (iteration == 1) {
4001                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4002                 }
4003                 writeq(val64, &bar0->mc_rldram_test_d2);
4004
4005                 val64 = (u64) (0x0000003fffff0000ULL);
4006                 writeq(val64, &bar0->mc_rldram_test_add);
4007
4008
4009                 val64 = MC_RLDRAM_TEST_MODE;
4010                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4011
4012                 val64 |=
4013                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4014                     MC_RLDRAM_TEST_GO;
4015                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4016
4017                 for (cnt = 0; cnt < 5; cnt++) {
4018                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4019                         if (val64 & MC_RLDRAM_TEST_DONE)
4020                                 break;
4021                         msleep(200);
4022                 }
4023
4024                 if (cnt == 5)
4025                         break;
4026
4027                 val64 = MC_RLDRAM_TEST_MODE;
4028                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4029
4030                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4031                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4032
4033                 for (cnt = 0; cnt < 5; cnt++) {
4034                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4035                         if (val64 & MC_RLDRAM_TEST_DONE)
4036                                 break;
4037                         msleep(500);
4038                 }
4039
4040                 if (cnt == 5)
4041                         break;
4042
4043                 val64 = readq(&bar0->mc_rldram_test_ctrl);
4044                 if (val64 & MC_RLDRAM_TEST_PASS)
4045                         test_pass = 1;
4046
4047                 iteration++;
4048         }
4049
4050         if (!test_pass)
4051                 *data = 1;
4052         else
4053                 *data = 0;
4054
4055         return 0;
4056 }
4057
4058 /**
4059  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
4060  *  @sp : private member of the device structure, which is a pointer to the
4061  *  s2io_nic structure.
4062  *  @ethtest : pointer to a ethtool command specific structure that will be
4063  *  returned to the user.
4064  *  @data : variable that returns the result of each of the test
4065  * conducted by the driver.
4066  * Description:
4067  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
4068  *  the health of the card.
4069  * Return value:
4070  *  void
4071  */
4072
4073 static void s2io_ethtool_test(struct net_device *dev,
4074                               struct ethtool_test *ethtest,
4075                               uint64_t * data)
4076 {
4077         nic_t *sp = dev->priv;
4078         int orig_state = netif_running(sp->dev);
4079
4080         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4081                 /* Offline Tests. */
4082                 if (orig_state)
4083                         s2io_close(sp->dev);
4084
4085                 if (s2io_register_test(sp, &data[0]))
4086                         ethtest->flags |= ETH_TEST_FL_FAILED;
4087
4088                 s2io_reset(sp);
4089
4090                 if (s2io_rldram_test(sp, &data[3]))
4091                         ethtest->flags |= ETH_TEST_FL_FAILED;
4092
4093                 s2io_reset(sp);
4094
4095                 if (s2io_eeprom_test(sp, &data[1]))
4096                         ethtest->flags |= ETH_TEST_FL_FAILED;
4097
4098                 if (s2io_bist_test(sp, &data[4]))
4099                         ethtest->flags |= ETH_TEST_FL_FAILED;
4100
4101                 if (orig_state)
4102                         s2io_open(sp->dev);
4103
4104                 data[2] = 0;
4105         } else {
4106                 /* Online Tests. */
4107                 if (!orig_state) {
4108                         DBG_PRINT(ERR_DBG,
4109                                   "%s: is not up, cannot run test\n",
4110                                   dev->name);
4111                         data[0] = -1;
4112                         data[1] = -1;
4113                         data[2] = -1;
4114                         data[3] = -1;
4115                         data[4] = -1;
4116                 }
4117
4118                 if (s2io_link_test(sp, &data[2]))
4119                         ethtest->flags |= ETH_TEST_FL_FAILED;
4120
4121                 data[0] = 0;
4122                 data[1] = 0;
4123                 data[3] = 0;
4124                 data[4] = 0;
4125         }
4126 }
4127
4128 static void s2io_get_ethtool_stats(struct net_device *dev,
4129                                    struct ethtool_stats *estats,
4130                                    u64 * tmp_stats)
4131 {
4132         int i = 0;
4133         nic_t *sp = dev->priv;
4134         StatInfo_t *stat_info = sp->mac_control.stats_info;
4135
4136         s2io_updt_stats(sp);
4137         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4138         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4139         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4140         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4141         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4142         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4143         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4144         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4145         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4146         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4147         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4148         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4149         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4150         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4151         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4152         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4153         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4154         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4155         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4156         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4157         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4158         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4159         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4160         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4161         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4162         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4163         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4164         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4165         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4166         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4167         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4168         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4169         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4170         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4171         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4172         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4173         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4174         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4175         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4176         tmp_stats[i++] = 0;
4177         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4178         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4179 }
4180
4181 int s2io_ethtool_get_regs_len(struct net_device *dev)
4182 {
4183         return (XENA_REG_SPACE);
4184 }
4185
4186
4187 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4188 {
4189         nic_t *sp = dev->priv;
4190
4191         return (sp->rx_csum);
4192 }
4193 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4194 {
4195         nic_t *sp = dev->priv;
4196
4197         if (data)
4198                 sp->rx_csum = 1;
4199         else
4200                 sp->rx_csum = 0;
4201
4202         return 0;
4203 }
4204 int s2io_get_eeprom_len(struct net_device *dev)
4205 {
4206         return (XENA_EEPROM_SPACE);
4207 }
4208
4209 int s2io_ethtool_self_test_count(struct net_device *dev)
4210 {
4211         return (S2IO_TEST_LEN);
4212 }
4213 void s2io_ethtool_get_strings(struct net_device *dev,
4214                               u32 stringset, u8 * data)
4215 {
4216         switch (stringset) {
4217         case ETH_SS_TEST:
4218                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4219                 break;
4220         case ETH_SS_STATS:
4221                 memcpy(data, &ethtool_stats_keys,
4222                        sizeof(ethtool_stats_keys));
4223         }
4224 }
4225 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4226 {
4227         return (S2IO_STAT_LEN);
4228 }
4229
4230 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4231 {
4232         if (data)
4233                 dev->features |= NETIF_F_IP_CSUM;
4234         else
4235                 dev->features &= ~NETIF_F_IP_CSUM;
4236
4237         return 0;
4238 }
4239
4240
4241 static struct ethtool_ops netdev_ethtool_ops = {
4242         .get_settings = s2io_ethtool_gset,
4243         .set_settings = s2io_ethtool_sset,
4244         .get_drvinfo = s2io_ethtool_gdrvinfo,
4245         .get_regs_len = s2io_ethtool_get_regs_len,
4246         .get_regs = s2io_ethtool_gregs,
4247         .get_link = ethtool_op_get_link,
4248         .get_eeprom_len = s2io_get_eeprom_len,
4249         .get_eeprom = s2io_ethtool_geeprom,
4250         .set_eeprom = s2io_ethtool_seeprom,
4251         .get_pauseparam = s2io_ethtool_getpause_data,
4252         .set_pauseparam = s2io_ethtool_setpause_data,
4253         .get_rx_csum = s2io_ethtool_get_rx_csum,
4254         .set_rx_csum = s2io_ethtool_set_rx_csum,
4255         .get_tx_csum = ethtool_op_get_tx_csum,
4256         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4257         .get_sg = ethtool_op_get_sg,
4258         .set_sg = ethtool_op_set_sg,
4259 #ifdef NETIF_F_TSO
4260         .get_tso = ethtool_op_get_tso,
4261         .set_tso = ethtool_op_set_tso,
4262 #endif
4263         .self_test_count = s2io_ethtool_self_test_count,
4264         .self_test = s2io_ethtool_test,
4265         .get_strings = s2io_ethtool_get_strings,
4266         .phys_id = s2io_ethtool_idnic,
4267         .get_stats_count = s2io_ethtool_get_stats_count,
4268         .get_ethtool_stats = s2io_get_ethtool_stats
4269 };
4270
4271 /**
4272  *  s2io_ioctl - Entry point for the Ioctl
4273  *  @dev :  Device pointer.
4274  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
4275  *  a proprietary structure used to pass information to the driver.
4276  *  @cmd :  This is used to distinguish between the different commands that
4277  *  can be passed to the IOCTL functions.
4278  *  Description:
4279  *  Currently there are no special functionality supported in IOCTL, hence
4280  *  function always return EOPNOTSUPPORTED
4281  */
4282
4283 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4284 {
4285         return -EOPNOTSUPP;
4286 }
4287
4288 /**
4289  *  s2io_change_mtu - entry point to change MTU size for the device.
4290  *   @dev : device pointer.
4291  *   @new_mtu : the new MTU size for the device.
4292  *   Description: A driver entry point to change MTU size for the device.
4293  *   Before changing the MTU the device must be stopped.
4294  *  Return value:
4295  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4296  *   file on failure.
4297  */
4298
4299 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4300 {
4301         nic_t *sp = dev->priv;
4302
4303         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4304                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4305                           dev->name);
4306                 return -EPERM;
4307         }
4308
4309         dev->mtu = new_mtu;
4310         if (netif_running(dev)) {
4311                 s2io_card_down(sp);
4312                 netif_stop_queue(dev);
4313                 if (s2io_card_up(sp)) {
4314                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4315                                   __FUNCTION__);
4316                 }
4317                 if (netif_queue_stopped(dev))
4318                         netif_wake_queue(dev);
4319         } else { /* Device is down */
4320                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4321                 u64 val64 = new_mtu;
4322
4323                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4324         }
4325
4326         return 0;
4327 }
4328
4329 /**
4330  *  s2io_tasklet - Bottom half of the ISR.
4331  *  @dev_adr : address of the device structure in dma_addr_t format.
4332  *  Description:
4333  *  This is the tasklet or the bottom half of the ISR. This is
4334  *  an extension of the ISR which is scheduled by the scheduler to be run
4335  *  when the load on the CPU is low. All low priority tasks of the ISR can
4336  *  be pushed into the tasklet. For now the tasklet is used only to
4337  *  replenish the Rx buffers in the Rx buffer descriptors.
4338  *  Return value:
4339  *  void.
4340  */
4341
4342 static void s2io_tasklet(unsigned long dev_addr)
4343 {
4344         struct net_device *dev = (struct net_device *) dev_addr;
4345         nic_t *sp = dev->priv;
4346         int i, ret;
4347         mac_info_t *mac_control;
4348         struct config_param *config;
4349
4350         mac_control = &sp->mac_control;
4351         config = &sp->config;
4352
4353         if (!TASKLET_IN_USE) {
4354                 for (i = 0; i < config->rx_ring_num; i++) {
4355                         ret = fill_rx_buffers(sp, i);
4356                         if (ret == -ENOMEM) {
4357                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4358                                           dev->name);
4359                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4360                                 break;
4361                         } else if (ret == -EFILL) {
4362                                 DBG_PRINT(ERR_DBG,
4363                                           "%s: Rx Ring %d is full\n",
4364                                           dev->name, i);
4365                                 break;
4366                         }
4367                 }
4368                 clear_bit(0, (&sp->tasklet_status));
4369         }
4370 }
4371
4372 /**
4373  * s2io_set_link - Set the LInk status
4374  * @data: long pointer to device private structue
4375  * Description: Sets the link status for the adapter
4376  */
4377
4378 static void s2io_set_link(unsigned long data)
4379 {
4380         nic_t *nic = (nic_t *) data;
4381         struct net_device *dev = nic->dev;
4382         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4383         register u64 val64;
4384         u16 subid;
4385
4386         if (test_and_set_bit(0, &(nic->link_state))) {
4387                 /* The card is being reset, no point doing anything */
4388                 return;
4389         }
4390
4391         subid = nic->pdev->subsystem_device;
4392         /*
4393          * Allow a small delay for the NICs self initiated
4394          * cleanup to complete.
4395          */
4396         msleep(100);
4397
4398         val64 = readq(&bar0->adapter_status);
4399         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4400                 if (LINK_IS_UP(val64)) {
4401                         val64 = readq(&bar0->adapter_control);
4402                         val64 |= ADAPTER_CNTL_EN;
4403                         writeq(val64, &bar0->adapter_control);
4404                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4405                                 val64 = readq(&bar0->gpio_control);
4406                                 val64 |= GPIO_CTRL_GPIO_0;
4407                                 writeq(val64, &bar0->gpio_control);
4408                                 val64 = readq(&bar0->gpio_control);
4409                         } else {
4410                                 val64 |= ADAPTER_LED_ON;
4411                                 writeq(val64, &bar0->adapter_control);
4412                         }
4413                         val64 = readq(&bar0->adapter_status);
4414                         if (!LINK_IS_UP(val64)) {
4415                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4416                                 DBG_PRINT(ERR_DBG, " Link down");
4417                                 DBG_PRINT(ERR_DBG, "after ");
4418                                 DBG_PRINT(ERR_DBG, "enabling ");
4419                                 DBG_PRINT(ERR_DBG, "device \n");
4420                         }
4421                         if (nic->device_enabled_once == FALSE) {
4422                                 nic->device_enabled_once = TRUE;
4423                         }
4424                         s2io_link(nic, LINK_UP);
4425                 } else {
4426                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4427                                 val64 = readq(&bar0->gpio_control);
4428                                 val64 &= ~GPIO_CTRL_GPIO_0;
4429                                 writeq(val64, &bar0->gpio_control);
4430                                 val64 = readq(&bar0->gpio_control);
4431                         }
4432                         s2io_link(nic, LINK_DOWN);
4433                 }
4434         } else {                /* NIC is not Quiescent. */
4435                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4436                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4437                 netif_stop_queue(dev);
4438         }
4439         clear_bit(0, &(nic->link_state));
4440 }
4441
4442 static void s2io_card_down(nic_t * sp)
4443 {
4444         int cnt = 0;
4445         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4446         unsigned long flags;
4447         register u64 val64 = 0;
4448
4449         del_timer_sync(&sp->alarm_timer);
4450         /* If s2io_set_link task is executing, wait till it completes. */
4451         while (test_and_set_bit(0, &(sp->link_state))) {
4452                 msleep(50);
4453         }
4454         atomic_set(&sp->card_state, CARD_DOWN);
4455
4456         /* disable Tx and Rx traffic on the NIC */
4457         stop_nic(sp);
4458
4459         /* Kill tasklet. */
4460         tasklet_kill(&sp->task);
4461
4462         /* Check if the device is Quiescent and then Reset the NIC */
4463         do {
4464                 val64 = readq(&bar0->adapter_status);
4465                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4466                         break;
4467                 }
4468
4469                 msleep(50);
4470                 cnt++;
4471                 if (cnt == 10) {
4472                         DBG_PRINT(ERR_DBG,
4473                                   "s2io_close:Device not Quiescent ");
4474                         DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4475                                   (unsigned long long) val64);
4476                         break;
4477                 }
4478         } while (1);
4479         s2io_reset(sp);
4480
4481         /* Waiting till all Interrupt handlers are complete */
4482         cnt = 0;
4483         do {
4484                 msleep(10);
4485                 if (!atomic_read(&sp->isr_cnt))
4486                         break;
4487                 cnt++;
4488         } while(cnt < 5);
4489
4490         spin_lock_irqsave(&sp->tx_lock, flags);
4491         /* Free all Tx buffers */
4492         free_tx_buffers(sp);
4493         spin_unlock_irqrestore(&sp->tx_lock, flags);
4494
4495         /* Free all Rx buffers */
4496         spin_lock_irqsave(&sp->rx_lock, flags);
4497         free_rx_buffers(sp);
4498         spin_unlock_irqrestore(&sp->rx_lock, flags);
4499
4500         clear_bit(0, &(sp->link_state));
4501 }
4502
4503 static int s2io_card_up(nic_t * sp)
4504 {
4505         int i, ret;
4506         mac_info_t *mac_control;
4507         struct config_param *config;
4508         struct net_device *dev = (struct net_device *) sp->dev;
4509
4510         /* Initialize the H/W I/O registers */
4511         if (init_nic(sp) != 0) {
4512                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4513                           dev->name);
4514                 return -ENODEV;
4515         }
4516
4517         /*
4518          * Initializing the Rx buffers. For now we are considering only 1
4519          * Rx ring and initializing buffers into 30 Rx blocks
4520          */
4521         mac_control = &sp->mac_control;
4522         config = &sp->config;
4523
4524         for (i = 0; i < config->rx_ring_num; i++) {
4525                 if ((ret = fill_rx_buffers(sp, i))) {
4526                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4527                                   dev->name);
4528                         s2io_reset(sp);
4529                         free_rx_buffers(sp);
4530                         return -ENOMEM;
4531                 }
4532                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4533                           atomic_read(&sp->rx_bufs_left[i]));
4534         }
4535
4536         /* Setting its receive mode */
4537         s2io_set_multicast(dev);
4538
4539         /* Enable tasklet for the device */
4540         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4541
4542         /* Enable Rx Traffic and interrupts on the NIC */
4543         if (start_nic(sp)) {
4544                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4545                 tasklet_kill(&sp->task);
4546                 s2io_reset(sp);
4547                 free_irq(dev->irq, dev);
4548                 free_rx_buffers(sp);
4549                 return -ENODEV;
4550         }
4551
4552         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4553
4554         atomic_set(&sp->card_state, CARD_UP);
4555         return 0;
4556 }
4557
4558 /**
4559  * s2io_restart_nic - Resets the NIC.
4560  * @data : long pointer to the device private structure
4561  * Description:
4562  * This function is scheduled to be run by the s2io_tx_watchdog
4563  * function after 0.5 secs to reset the NIC. The idea is to reduce
4564  * the run time of the watch dog routine which is run holding a
4565  * spin lock.
4566  */
4567
4568 static void s2io_restart_nic(unsigned long data)
4569 {
4570         struct net_device *dev = (struct net_device *) data;
4571         nic_t *sp = dev->priv;
4572
4573         s2io_card_down(sp);
4574         if (s2io_card_up(sp)) {
4575                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4576                           dev->name);
4577         }
4578         netif_wake_queue(dev);
4579         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4580                   dev->name);
4581
4582 }
4583
4584 /**
4585  *  s2io_tx_watchdog - Watchdog for transmit side.
4586  *  @dev : Pointer to net device structure
4587  *  Description:
4588  *  This function is triggered if the Tx Queue is stopped
4589  *  for a pre-defined amount of time when the Interface is still up.
4590  *  If the Interface is jammed in such a situation, the hardware is
4591  *  reset (by s2io_close) and restarted again (by s2io_open) to
4592  *  overcome any problem that might have been caused in the hardware.
4593  *  Return value:
4594  *  void
4595  */
4596
4597 static void s2io_tx_watchdog(struct net_device *dev)
4598 {
4599         nic_t *sp = dev->priv;
4600
4601         if (netif_carrier_ok(dev)) {
4602                 schedule_work(&sp->rst_timer_task);
4603         }
4604 }
4605
4606 /**
4607  *   rx_osm_handler - To perform some OS related operations on SKB.
4608  *   @sp: private member of the device structure,pointer to s2io_nic structure.
4609  *   @skb : the socket buffer pointer.
4610  *   @len : length of the packet
4611  *   @cksum : FCS checksum of the frame.
4612  *   @ring_no : the ring from which this RxD was extracted.
4613  *   Description:
4614  *   This function is called by the Tx interrupt serivce routine to perform
4615  *   some OS related operations on the SKB before passing it to the upper
4616  *   layers. It mainly checks if the checksum is OK, if so adds it to the
4617  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
4618  *   to the upper layer. If the checksum is wrong, it increments the Rx
4619  *   packet error count, frees the SKB and returns error.
4620  *   Return value:
4621  *   SUCCESS on success and -1 on failure.
4622  */
4623 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4624 {
4625         nic_t *sp = ring_data->nic;
4626         struct net_device *dev = (struct net_device *) sp->dev;
4627         struct sk_buff *skb = (struct sk_buff *)
4628                 ((unsigned long) rxdp->Host_Control);
4629         int ring_no = ring_data->ring_no;
4630         u16 l3_csum, l4_csum;
4631 #ifdef CONFIG_2BUFF_MODE
4632         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4633         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4634         int get_block = ring_data->rx_curr_get_info.block_index;
4635         int get_off = ring_data->rx_curr_get_info.offset;
4636         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4637         unsigned char *buff;
4638 #else
4639         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
4640 #endif
4641         skb->dev = dev;
4642         if (rxdp->Control_1 & RXD_T_CODE) {
4643                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4644                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4645                           dev->name, err);
4646                 dev_kfree_skb(skb);
4647                 sp->stats.rx_crc_errors++;
4648                 atomic_dec(&sp->rx_bufs_left[ring_no]);
4649                 rxdp->Host_Control = 0;
4650                 return 0;
4651         }
4652
4653         /* Updating statistics */
4654         rxdp->Host_Control = 0;
4655         sp->rx_pkt_count++;
4656         sp->stats.rx_packets++;
4657 #ifndef CONFIG_2BUFF_MODE
4658         sp->stats.rx_bytes += len;
4659 #else
4660         sp->stats.rx_bytes += buf0_len + buf2_len;
4661 #endif
4662
4663 #ifndef CONFIG_2BUFF_MODE
4664         skb_put(skb, len);
4665 #else
4666         buff = skb_push(skb, buf0_len);
4667         memcpy(buff, ba->ba_0, buf0_len);
4668         skb_put(skb, buf2_len);
4669 #endif
4670
4671         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4672             (sp->rx_csum)) {
4673                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4674                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4675                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4676                         /*
4677                          * NIC verifies if the Checksum of the received
4678                          * frame is Ok or not and accordingly returns
4679                          * a flag in the RxD.
4680                          */
4681                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4682                 } else {
4683                         /*
4684                          * Packet with erroneous checksum, let the
4685                          * upper layers deal with it.
4686                          */
4687                         skb->ip_summed = CHECKSUM_NONE;
4688                 }
4689         } else {
4690                 skb->ip_summed = CHECKSUM_NONE;
4691         }
4692
4693         skb->protocol = eth_type_trans(skb, dev);
4694 #ifdef CONFIG_S2IO_NAPI
4695         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4696                 /* Queueing the vlan frame to the upper layer */
4697                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
4698                         RXD_GET_VLAN_TAG(rxdp->Control_2));
4699         } else {
4700                 netif_receive_skb(skb);
4701         }
4702 #else
4703         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4704                 /* Queueing the vlan frame to the upper layer */
4705                 vlan_hwaccel_rx(skb, sp->vlgrp,
4706                         RXD_GET_VLAN_TAG(rxdp->Control_2));
4707         } else {
4708                 netif_rx(skb);
4709         }
4710 #endif
4711
4712         dev->last_rx = jiffies;
4713         atomic_dec(&sp->rx_bufs_left[ring_no]);
4714         return SUCCESS;
4715 }
4716
4717 /**
4718  *  s2io_link - stops/starts the Tx queue.
4719  *  @sp : private member of the device structure, which is a pointer to the
4720  *  s2io_nic structure.
4721  *  @link : inidicates whether link is UP/DOWN.
4722  *  Description:
4723  *  This function stops/starts the Tx queue depending on whether the link
4724  *  status of the NIC is is down or up. This is called by the Alarm
4725  *  interrupt handler whenever a link change interrupt comes up.
4726  *  Return value:
4727  *  void.
4728  */
4729
4730 void s2io_link(nic_t * sp, int link)
4731 {
4732         struct net_device *dev = (struct net_device *) sp->dev;
4733
4734         if (link != sp->last_link_state) {
4735                 if (link == LINK_DOWN) {
4736                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4737                         netif_carrier_off(dev);
4738                 } else {
4739                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4740                         netif_carrier_on(dev);
4741                 }
4742         }
4743         sp->last_link_state = link;
4744 }
4745
4746 /**
4747  *  get_xena_rev_id - to identify revision ID of xena.
4748  *  @pdev : PCI Dev structure
4749  *  Description:
4750  *  Function to identify the Revision ID of xena.
4751  *  Return value:
4752  *  returns the revision ID of the device.
4753  */
4754
4755 int get_xena_rev_id(struct pci_dev *pdev)
4756 {
4757         u8 id = 0;
4758         int ret;
4759         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4760         return id;
4761 }
4762
4763 /**
4764  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4765  *  @sp : private member of the device structure, which is a pointer to the
4766  *  s2io_nic structure.
4767  *  Description:
4768  *  This function initializes a few of the PCI and PCI-X configuration registers
4769  *  with recommended values.
4770  *  Return value:
4771  *  void
4772  */
4773
4774 static void s2io_init_pci(nic_t * sp)
4775 {
4776         u16 pci_cmd = 0, pcix_cmd = 0;
4777
4778         /* Enable Data Parity Error Recovery in PCI-X command register. */
4779         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4780                              &(pcix_cmd));
4781         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4782                               (pcix_cmd | 1));
4783         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4784                              &(pcix_cmd));
4785
4786         /* Set the PErr Response bit in PCI command register. */
4787         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4788         pci_write_config_word(sp->pdev, PCI_COMMAND,
4789                               (pci_cmd | PCI_COMMAND_PARITY));
4790         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4791
4792         /* Forcibly disabling relaxed ordering capability of the card. */
4793         pcix_cmd &= 0xfffd;
4794         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4795                               pcix_cmd);
4796         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4797                              &(pcix_cmd));
4798 }
4799
4800 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4801 MODULE_LICENSE("GPL");
4802 module_param(tx_fifo_num, int, 0);
4803 module_param(rx_ring_num, int, 0);
4804 module_param_array(tx_fifo_len, uint, NULL, 0);
4805 module_param_array(rx_ring_sz, uint, NULL, 0);
4806 module_param_array(rts_frm_len, uint, NULL, 0);
4807 module_param(use_continuous_tx_intrs, int, 1);
4808 module_param(rmac_pause_time, int, 0);
4809 module_param(mc_pause_threshold_q0q3, int, 0);
4810 module_param(mc_pause_threshold_q4q7, int, 0);
4811 module_param(shared_splits, int, 0);
4812 module_param(tmac_util_period, int, 0);
4813 module_param(rmac_util_period, int, 0);
4814 #ifndef CONFIG_S2IO_NAPI
4815 module_param(indicate_max_pkts, int, 0);
4816 #endif
4817
4818 /**
4819  *  s2io_init_nic - Initialization of the adapter .
4820  *  @pdev : structure containing the PCI related information of the device.
4821  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4822  *  Description:
4823  *  The function initializes an adapter identified by the pci_dec structure.
4824  *  All OS related initialization including memory and device structure and
4825  *  initlaization of the device private variable is done. Also the swapper
4826  *  control register is initialized to enable read and write into the I/O
4827  *  registers of the device.
4828  *  Return value:
4829  *  returns 0 on success and negative on failure.
4830  */
4831
4832 static int __devinit
4833 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4834 {
4835         nic_t *sp;
4836         struct net_device *dev;
4837         int i, j, ret;
4838         int dma_flag = FALSE;
4839         u32 mac_up, mac_down;
4840         u64 val64 = 0, tmp64 = 0;
4841         XENA_dev_config_t __iomem *bar0 = NULL;
4842         u16 subid;
4843         mac_info_t *mac_control;
4844         struct config_param *config;
4845
4846 #ifdef CONFIG_S2IO_NAPI
4847         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4848 #endif
4849
4850         if ((ret = pci_enable_device(pdev))) {
4851                 DBG_PRINT(ERR_DBG,
4852                           "s2io_init_nic: pci_enable_device failed\n");
4853                 return ret;
4854         }
4855
4856         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4857                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4858                 dma_flag = TRUE;
4859                 if (pci_set_consistent_dma_mask
4860                     (pdev, DMA_64BIT_MASK)) {
4861                         DBG_PRINT(ERR_DBG,
4862                                   "Unable to obtain 64bit DMA for \
4863                                         consistent allocations\n");
4864                         pci_disable_device(pdev);
4865                         return -ENOMEM;
4866                 }
4867         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4868                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4869         } else {
4870                 pci_disable_device(pdev);
4871                 return -ENOMEM;
4872         }
4873
4874         if (pci_request_regions(pdev, s2io_driver_name)) {
4875                 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4876                     pci_disable_device(pdev);
4877                 return -ENODEV;
4878         }
4879
4880         dev = alloc_etherdev(sizeof(nic_t));
4881         if (dev == NULL) {
4882                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4883                 pci_disable_device(pdev);
4884                 pci_release_regions(pdev);
4885                 return -ENODEV;
4886         }
4887
4888         pci_set_master(pdev);
4889         pci_set_drvdata(pdev, dev);
4890         SET_MODULE_OWNER(dev);
4891         SET_NETDEV_DEV(dev, &pdev->dev);
4892
4893         /*  Private member variable initialized to s2io NIC structure */
4894         sp = dev->priv;
4895         memset(sp, 0, sizeof(nic_t));
4896         sp->dev = dev;
4897         sp->pdev = pdev;
4898         sp->high_dma_flag = dma_flag;
4899         sp->device_enabled_once = FALSE;
4900
4901         /* Initialize some PCI/PCI-X fields of the NIC. */
4902         s2io_init_pci(sp);
4903
4904         /*
4905          * Setting the device configuration parameters.
4906          * Most of these parameters can be specified by the user during
4907          * module insertion as they are module loadable parameters. If
4908          * these parameters are not not specified during load time, they
4909          * are initialized with default values.
4910          */
4911         mac_control = &sp->mac_control;
4912         config = &sp->config;
4913
4914         /* Tx side parameters. */
4915         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
4916         config->tx_fifo_num = tx_fifo_num;
4917         for (i = 0; i < MAX_TX_FIFOS; i++) {
4918                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4919                 config->tx_cfg[i].fifo_priority = i;
4920         }
4921
4922         /* mapping the QoS priority to the configured fifos */
4923         for (i = 0; i < MAX_TX_FIFOS; i++)
4924                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4925
4926         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4927         for (i = 0; i < config->tx_fifo_num; i++) {
4928                 config->tx_cfg[i].f_no_snoop =
4929                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4930                 if (config->tx_cfg[i].fifo_len < 65) {
4931                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4932                         break;
4933                 }
4934         }
4935         config->max_txds = MAX_SKB_FRAGS;
4936
4937         /* Rx side parameters. */
4938         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
4939         config->rx_ring_num = rx_ring_num;
4940         for (i = 0; i < MAX_RX_RINGS; i++) {
4941                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4942                     (MAX_RXDS_PER_BLOCK + 1);
4943                 config->rx_cfg[i].ring_priority = i;
4944         }
4945
4946         for (i = 0; i < rx_ring_num; i++) {
4947                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4948                 config->rx_cfg[i].f_no_snoop =
4949                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4950         }
4951
4952         /*  Setting Mac Control parameters */
4953         mac_control->rmac_pause_time = rmac_pause_time;
4954         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4955         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4956
4957
4958         /* Initialize Ring buffer parameters. */
4959         for (i = 0; i < config->rx_ring_num; i++)
4960                 atomic_set(&sp->rx_bufs_left[i], 0);
4961
4962         /* Initialize the number of ISRs currently running */
4963         atomic_set(&sp->isr_cnt, 0);
4964
4965         /*  initialize the shared memory used by the NIC and the host */
4966         if (init_shared_mem(sp)) {
4967                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4968                           dev->name);
4969                 ret = -ENOMEM;
4970                 goto mem_alloc_failed;
4971         }
4972
4973         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4974                                      pci_resource_len(pdev, 0));
4975         if (!sp->bar0) {
4976                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4977                           dev->name);
4978                 ret = -ENOMEM;
4979                 goto bar0_remap_failed;
4980         }
4981
4982         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4983                                      pci_resource_len(pdev, 2));
4984         if (!sp->bar1) {
4985                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4986                           dev->name);
4987                 ret = -ENOMEM;
4988                 goto bar1_remap_failed;
4989         }
4990
4991         dev->irq = pdev->irq;
4992         dev->base_addr = (unsigned long) sp->bar0;
4993
4994         /* Initializing the BAR1 address as the start of the FIFO pointer. */
4995         for (j = 0; j < MAX_TX_FIFOS; j++) {
4996                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4997                     (sp->bar1 + (j * 0x00020000));
4998         }
4999
5000         /*  Driver entry points */
5001         dev->open = &s2io_open;
5002         dev->stop = &s2io_close;
5003         dev->hard_start_xmit = &s2io_xmit;
5004         dev->get_stats = &s2io_get_stats;
5005         dev->set_multicast_list = &s2io_set_multicast;
5006         dev->do_ioctl = &s2io_ioctl;
5007         dev->change_mtu = &s2io_change_mtu;
5008         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5009         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5010         dev->vlan_rx_register = s2io_vlan_rx_register;
5011         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5012
5013         /*
5014          * will use eth_mac_addr() for  dev->set_mac_address
5015          * mac address will be set every time dev->open() is called
5016          */
5017 #if defined(CONFIG_S2IO_NAPI)
5018         dev->poll = s2io_poll;
5019         dev->weight = 32;
5020 #endif
5021
5022         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5023         if (sp->high_dma_flag == TRUE)
5024                 dev->features |= NETIF_F_HIGHDMA;
5025 #ifdef NETIF_F_TSO
5026         dev->features |= NETIF_F_TSO;
5027 #endif
5028
5029         dev->tx_timeout = &s2io_tx_watchdog;
5030         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5031         INIT_WORK(&sp->rst_timer_task,
5032                   (void (*)(void *)) s2io_restart_nic, dev);
5033         INIT_WORK(&sp->set_link_task,
5034                   (void (*)(void *)) s2io_set_link, sp);
5035
5036         pci_save_state(sp->pdev);
5037
5038         /* Setting swapper control on the NIC, for proper reset operation */
5039         if (s2io_set_swapper(sp)) {
5040                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5041                           dev->name);
5042                 ret = -EAGAIN;
5043                 goto set_swap_failed;
5044         }
5045
5046         /*
5047          * Fix for all "FFs" MAC address problems observed on
5048          * Alpha platforms
5049          */
5050         fix_mac_address(sp);
5051         s2io_reset(sp);
5052
5053         /*
5054          * MAC address initialization.
5055          * For now only one mac address will be read and used.
5056          */
5057         bar0 = sp->bar0;
5058         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5059             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5060         writeq(val64, &bar0->rmac_addr_cmd_mem);
5061         wait_for_cmd_complete(sp);
5062
5063         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5064         mac_down = (u32) tmp64;
5065         mac_up = (u32) (tmp64 >> 32);
5066
5067         memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
5068
5069         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5070         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5071         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5072         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5073         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5074         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5075
5076         DBG_PRINT(INIT_DBG,
5077                   "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5078                   sp->def_mac_addr[0].mac_addr[0],
5079                   sp->def_mac_addr[0].mac_addr[1],
5080                   sp->def_mac_addr[0].mac_addr[2],
5081                   sp->def_mac_addr[0].mac_addr[3],
5082                   sp->def_mac_addr[0].mac_addr[4],
5083                   sp->def_mac_addr[0].mac_addr[5]);
5084
5085         /*  Set the factory defined MAC address initially   */
5086         dev->addr_len = ETH_ALEN;
5087         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5088
5089         /*
5090          * Initialize the tasklet status and link state flags
5091          * and the card statte parameter
5092          */
5093         atomic_set(&(sp->card_state), 0);
5094         sp->tasklet_status = 0;
5095         sp->link_state = 0;
5096
5097         /* Initialize spinlocks */
5098         spin_lock_init(&sp->tx_lock);
5099 #ifndef CONFIG_S2IO_NAPI
5100         spin_lock_init(&sp->put_lock);
5101 #endif
5102         spin_lock_init(&sp->rx_lock);
5103
5104         /*
5105          * SXE-002: Configure link and activity LED to init state
5106          * on driver load.
5107          */
5108         subid = sp->pdev->subsystem_device;
5109         if ((subid & 0xFF) >= 0x07) {
5110                 val64 = readq(&bar0->gpio_control);
5111                 val64 |= 0x0000800000000000ULL;
5112                 writeq(val64, &bar0->gpio_control);
5113                 val64 = 0x0411040400000000ULL;
5114                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5115                 val64 = readq(&bar0->gpio_control);
5116         }
5117
5118         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5119
5120         if (register_netdev(dev)) {
5121                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5122                 ret = -ENODEV;
5123                 goto register_failed;
5124         }
5125
5126         /* Initialize device name */
5127         strcpy(sp->name, dev->name);
5128         strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5129
5130         /*
5131          * Make Link state as off at this point, when the Link change
5132          * interrupt comes the state will be automatically changed to
5133          * the right state.
5134          */
5135         netif_carrier_off(dev);
5136
5137         return 0;
5138
5139       register_failed:
5140       set_swap_failed:
5141         iounmap(sp->bar1);
5142       bar1_remap_failed:
5143         iounmap(sp->bar0);
5144       bar0_remap_failed:
5145       mem_alloc_failed:
5146         free_shared_mem(sp);
5147         pci_disable_device(pdev);
5148         pci_release_regions(pdev);
5149         pci_set_drvdata(pdev, NULL);
5150         free_netdev(dev);
5151
5152         return ret;
5153 }
5154
5155 /**
5156  * s2io_rem_nic - Free the PCI device
5157  * @pdev: structure containing the PCI related information of the device.
5158  * Description: This function is called by the Pci subsystem to release a
5159  * PCI device and free up all resource held up by the device. This could
5160  * be in response to a Hot plug event or when the driver is to be removed
5161  * from memory.
5162  */
5163
5164 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5165 {
5166         struct net_device *dev =
5167             (struct net_device *) pci_get_drvdata(pdev);
5168         nic_t *sp;
5169
5170         if (dev == NULL) {
5171                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5172                 return;
5173         }
5174
5175         sp = dev->priv;
5176         unregister_netdev(dev);
5177
5178         free_shared_mem(sp);
5179         iounmap(sp->bar0);
5180         iounmap(sp->bar1);
5181         pci_disable_device(pdev);
5182         pci_release_regions(pdev);
5183         pci_set_drvdata(pdev, NULL);
5184         free_netdev(dev);
5185 }
5186
5187 /**
5188  * s2io_starter - Entry point for the driver
5189  * Description: This function is the entry point for the driver. It verifies
5190  * the module loadable parameters and initializes PCI configuration space.
5191  */
5192
5193 int __init s2io_starter(void)
5194 {
5195         return pci_module_init(&s2io_driver);
5196 }
5197
5198 /**
5199  * s2io_closer - Cleanup routine for the driver
5200  * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
5201  */
5202
5203 void s2io_closer(void)
5204 {
5205         pci_unregister_driver(&s2io_driver);
5206         DBG_PRINT(INIT_DBG, "cleanup done\n");
5207 }
5208
5209 module_init(s2io_starter);
5210 module_exit(s2io_closer);