]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/3com/typhoon.c
a0cacbe846ba3347cf5d1d62a8e3ad84a5b83428
[karo-tx-linux.git] / drivers / net / ethernet / 3com / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
28                 issue. Hopefully 3Com will fix it.
29         *) Waiting for a command response takes 8ms due to non-preemptable
30                 polling. Only significant for getting stats and creating
31                 SAs, but an ugly wart never the less.
32
33         TODO:
34         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
35         *) Add more support for ethtool (especially for NIC stats)
36         *) Allow disabling of RX checksum offloading
37         *) Fix MAC changing to work while the interface is up
38                 (Need to put commands on the TX ring, which changes
39                 the locking)
40         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
41                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
42 */
43
44 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
45  * Setting to > 1518 effectively disables this feature.
46  */
47 static int rx_copybreak = 200;
48
49 /* Should we use MMIO or Port IO?
50  * 0: Port IO
51  * 1: MMIO
52  * 2: Try MMIO, fallback to Port IO
53  */
54 static unsigned int use_mmio = 2;
55
56 /* end user-configurable values */
57
58 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
59  */
60 static const int multicast_filter_limit = 32;
61
62 /* Operational parameters that are set at compile time. */
63
64 /* Keep the ring sizes a power of two for compile efficiency.
65  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
66  * Making the Tx ring too large decreases the effectiveness of channel
67  * bonding and packet priority.
68  * There are no ill effects from too-large receive rings.
69  *
70  * We don't currently use the Hi Tx ring so, don't make it very big.
71  *
72  * Beware that if we start using the Hi Tx ring, we will need to change
73  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
74  */
75 #define TXHI_ENTRIES            2
76 #define TXLO_ENTRIES            128
77 #define RX_ENTRIES              32
78 #define COMMAND_ENTRIES         16
79 #define RESPONSE_ENTRIES        32
80
81 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
82 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
83
84 /* The 3XP will preload and remove 64 entries from the free buffer
85  * list, and we need one entry to keep the ring from wrapping, so
86  * to keep this a power of two, we use 128 entries.
87  */
88 #define RXFREE_ENTRIES          128
89 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
90
91 /* Operational parameters that usually are not changed. */
92
93 /* Time in jiffies before concluding the transmitter is hung. */
94 #define TX_TIMEOUT  (2*HZ)
95
96 #define PKT_BUF_SZ              1536
97 #define FIRMWARE_NAME           "3com/typhoon.bin"
98
99 #define pr_fmt(fmt)             KBUILD_MODNAME " " fmt
100
101 #include <linux/module.h>
102 #include <linux/kernel.h>
103 #include <linux/sched.h>
104 #include <linux/string.h>
105 #include <linux/timer.h>
106 #include <linux/errno.h>
107 #include <linux/ioport.h>
108 #include <linux/interrupt.h>
109 #include <linux/pci.h>
110 #include <linux/netdevice.h>
111 #include <linux/etherdevice.h>
112 #include <linux/skbuff.h>
113 #include <linux/mm.h>
114 #include <linux/init.h>
115 #include <linux/delay.h>
116 #include <linux/ethtool.h>
117 #include <linux/if_vlan.h>
118 #include <linux/crc32.h>
119 #include <linux/bitops.h>
120 #include <asm/processor.h>
121 #include <asm/io.h>
122 #include <asm/uaccess.h>
123 #include <linux/in6.h>
124 #include <linux/dma-mapping.h>
125 #include <linux/firmware.h>
126
127 #include "typhoon.h"
128
129 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
130 MODULE_VERSION("1.0");
131 MODULE_LICENSE("GPL");
132 MODULE_FIRMWARE(FIRMWARE_NAME);
133 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
134 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
135                                "the buffer given back to the NIC. Default "
136                                "is 200.");
137 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
138                            "Default is to try MMIO and fallback to PIO.");
139 module_param(rx_copybreak, int, 0);
140 module_param(use_mmio, int, 0);
141
142 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
143 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
144 #undef NETIF_F_TSO
145 #endif
146
147 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
148 #error TX ring too small!
149 #endif
150
/* Static description of one card variant: the marketing name plus a
 * bitmask of TYPHOON_* capability flags (crypto engines, fiber media,
 * wakeup quirks) defined below.
 */
struct typhoon_card_info {
	const char *name;		/* printable model name */
	const int capabilities;		/* OR of TYPHOON_* flag bits */
};
155
156 #define TYPHOON_CRYPTO_NONE             0x00
157 #define TYPHOON_CRYPTO_DES              0x01
158 #define TYPHOON_CRYPTO_3DES             0x02
159 #define TYPHOON_CRYPTO_VARIABLE         0x04
160 #define TYPHOON_FIBER                   0x08
161 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
162
/* Card model identifiers. These values are used as the driver_data in
 * typhoon_pci_tbl and directly index typhoon_card_info[] -- keep the
 * two in sync if entries are added or reordered.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
169
/* directly indexed by enum typhoon_cards, above -- the order of these
 * entries is significant and must match the enum exactly.
 */
static struct typhoon_card_info typhoon_card_info[] = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
199
200 /* Notes on the new subsystem numbering scheme:
201  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
202  * bit 4 indicates if this card has secured firmware (we don't support it)
203  * bit 8 indicates if this is a (0) copper or (1) fiber card
204  * bits 12-16 indicate card type: (0) client and (1) server
205  */
206 static const struct pci_device_id typhoon_pci_tbl[] = {
207         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
209         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
211         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
213         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
214           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
215         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
216           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
217         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
218           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
219         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
220           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
221         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
222           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
223         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
224           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
225         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
226           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
227         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
229         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
231         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
233         { 0, }
234 };
235 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
236
237 /* Define the shared memory area
238  * Align everything the 3XP will normally be using.
239  * We'll need to move/align txHi if we start using that ring.
240  */
/* Host/NIC shared memory area. Every member the 3XP accesses is
 * cacheline-aligned; the struct is __packed so the layout seen by the
 * card is exactly what is declared here -- do not reorder fields.
 * txHi is unaligned because it is currently unused (see note above).
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __packed;
254
/* Per-slot Rx bookkeeping: the skb posted to the card and the DMA
 * address it was mapped at (needed for the later unmap).
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
259
/* Per-adapter driver state. Members are grouped by access pattern and
 * aligned so the Tx path, the Irq/Rx path, and the (slow) command path
 * each start on their own cache line -- keep that grouping when adding
 * fields.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;	/* bus address of txLo ring */

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into shared area */
	u8			awaiting_resp;	/* a command response is pending */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats stats;
	struct net_device_stats stats_saved;	/* snapshot across sleep */
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
298
/* Wait policies for typhoon_reset(): don't wait at all, busy-wait with
 * udelay() (usable when sleeping is not allowed), or poll with
 * schedule_timeout_uninterruptible() between reads.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
302
/* These are the values for the typhoon.card_state variable.
 * They determine where the statistics will come from in get_stats() --
 * the sleep image does not support the statistics we need, so a
 * Sleeping card must report the saved snapshot instead.
 */
enum state_values {
	Sleeping = 0, Running,
};
310
311 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
312  * cannot pass a read, so this forces current writes to post.
313  */
314 #define typhoon_post_pci_writes(x) \
315         do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
316
317 /* We'll wait up to six seconds for a reset, and half a second normally.
318  */
319 #define TYPHOON_UDELAY                  50
320 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
321 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
322 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
323
324 #if defined(NETIF_F_TSO)
325 #define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
326 #define TSO_NUM_DESCRIPTORS     2
327 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
328 #else
329 #define NETIF_F_TSO             0
330 #define skb_tso_size(x)         0
331 #define TSO_NUM_DESCRIPTORS     0
332 #define TSO_OFFLOAD_ON          0
333 #endif
334
335 static inline void
336 typhoon_inc_index(u32 *index, const int count, const int num_entries)
337 {
338         /* Increment a ring index -- we can use this for all rings execept
339          * the Rx rings, as they use different size descriptors
340          * otherwise, everything is the same size as a cmd_desc
341          */
342         *index += count * sizeof(struct cmd_desc);
343         *index %= num_entries * sizeof(struct cmd_desc);
344 }
345
/* Advance a command-ring index by @count slots, wrapping as needed. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
351
/* Advance a response-ring index by @count slots, wrapping as needed. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
357
/* Advance a free-buffer-ring index by @count slots, wrapping as needed. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
363
/* Advance a Tx-ring index by @count slots, wrapping as needed. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating --
	 * TXLO_ENTRIES is hard-coded here
	 */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
370
371 static inline void
372 typhoon_inc_rx_index(u32 *index, const int count)
373 {
374         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
375         *index += count * sizeof(struct rx_desc);
376         *index %= RX_ENTRIES * sizeof(struct rx_desc);
377 }
378
/* Soft-reset the 3XP and (optionally) wait for it to come back up.
 *
 * @ioaddr:    mapped register base of the NIC
 * @wait_type: NoWait (fire and forget), WaitNoSleep (busy-wait via
 *             udelay), or WaitSleep (may schedule between polls)
 *
 * Returns 0 once the card reports TYPHOON_STATUS_WAITING_FOR_HOST,
 * -ETIMEDOUT if it never does. With NoWait, always returns 0.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before touching the reset register */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* assert full reset, force the posted write out, then release */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* re-mask and ack anything that fired during the reset */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
433
434 static int
435 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
436 {
437         int i, err = 0;
438
439         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
440                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
441                         goto out;
442                 udelay(TYPHOON_UDELAY);
443         }
444
445         err = -ETIMEDOUT;
446
447 out:
448         return err;
449 }
450
451 static inline void
452 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
453 {
454         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
455                 netif_carrier_off(dev);
456         else
457                 netif_carrier_on(dev);
458 }
459
/* Answer the card's "hello" keepalive by posting a no-response
 * HELLO_RESP command on the command ring.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible in memory before the doorbell */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
480
/* Drain the response ring, dispatching each entry.
 *
 * @resp_size: capacity of @resp_save in resp_desc units
 * @resp_save: optional buffer for a sequenced command response; may be
 *             NULL when the caller is not waiting for one
 *
 * Sequenced responses (seqNo set) are copied into @resp_save, handling
 * the copy in two pieces when the descriptors wrap past the end of the
 * ring. Unsolicited media-status and hello responses are handled in
 * place; anything else is logged and discarded.
 *
 * Returns nonzero if the awaited response was captured (i.e. resp_save
 * was consumed or was never requested), 0 if it is still outstanding.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* numDesc counts the extra descriptors after this one */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* caller's buffer is too small; flag the
				 * error and skip the whole response
				 */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				/* response wraps: split into tail + head */
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as delivered */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish the consumed index back to the card */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
539
540 static inline int
541 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
542 {
543         /* this works for all descriptors but rx_desc, as they are a
544          * different size than the cmd_desc -- everyone else is the same
545          */
546         lastWrite /= sizeof(struct cmd_desc);
547         lastRead /= sizeof(struct cmd_desc);
548         return (ringSize + lastRead - lastWrite - 1) % ringSize;
549 }
550
551 static inline int
552 typhoon_num_free_cmd(struct typhoon *tp)
553 {
554         int lastWrite = tp->cmdRing.lastWrite;
555         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
556
557         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
558 }
559
560 static inline int
561 typhoon_num_free_resp(struct typhoon *tp)
562 {
563         int respReady = le32_to_cpu(tp->indexes->respReady);
564         int respCleared = le32_to_cpu(tp->indexes->respCleared);
565
566         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
567 }
568
569 static inline int
570 typhoon_num_free_tx(struct transmit_ring *ring)
571 {
572         /* if we start using the Hi Tx ring, this needs updating */
573         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
574 }
575
/* Post @num_cmd command descriptors to the 3XP and, if the command
 * requests a response, busy-wait (up to ~500ms) for @num_resp response
 * descriptors to arrive.
 *
 * @cmd:  array of num_cmd descriptors to copy into the command ring
 * @resp: optional buffer for the response; when NULL but the command
 *        has TYPHOON_CMD_RESPOND set, a local scratch buffer is used
 *
 * Runs entirely under tp->command_lock. Returns 0 on success, -ENOMEM
 * if the rings lack space, -ETIMEDOUT if no response arrives, or -EIO
 * if the card flags the response as an error.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command(s) into the ring, in two pieces if they wrap
	 * past the end of the ring buffer
	 */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 *
	 * Descriptors must hit memory before the doorbell write below.
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
690
/* Write a TCP-segmentation (TSO) option descriptor for @skb into the
 * Tx ring.
 *
 * @ring_dma: bus address of the start of the ring; used to compute the
 *            bus address of this descriptor's bytesTx field, which is
 *            handed to the card as the response address
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	/* compute the descriptor's own bus address before bumping the
	 * write index past it
	 */
	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* one option descriptor covers the whole skb (FIRST and LAST) */
	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
711
712 static netdev_tx_t
713 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
714 {
715         struct typhoon *tp = netdev_priv(dev);
716         struct transmit_ring *txRing;
717         struct tx_desc *txd, *first_txd;
718         dma_addr_t skb_dma;
719         int numDesc;
720
721         /* we have two rings to choose from, but we only use txLo for now
722          * If we start using the Hi ring as well, we'll need to update
723          * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
724          * and TXHI_ENTRIES to match, as well as update the TSO code below
725          * to get the right DMA address
726          */
727         txRing = &tp->txLoRing;
728
729         /* We need one descriptor for each fragment of the sk_buff, plus the
730          * one for the ->data area of it.
731          *
732          * The docs say a maximum of 16 fragment descriptors per TCP option
733          * descriptor, then make a new packet descriptor and option descriptor
734          * for the next 16 fragments. The engineers say just an option
735          * descriptor is needed. I've tested up to 26 fragments with a single
736          * packet descriptor/option descriptor combo, so I use that for now.
737          *
738          * If problems develop with TSO, check this first.
739          */
740         numDesc = skb_shinfo(skb)->nr_frags + 1;
741         if (skb_is_gso(skb))
742                 numDesc++;
743
744         /* When checking for free space in the ring, we need to also
745          * account for the initial Tx descriptor, and we always must leave
746          * at least one descriptor unused in the ring so that it doesn't
747          * wrap and look empty.
748          *
749          * The only time we should loop here is when we hit the race
750          * between marking the queue awake and updating the cleared index.
751          * Just loop and it will appear. This comes from the acenic driver.
752          */
753         while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
754                 smp_rmb();
755
756         first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
757         typhoon_inc_tx_index(&txRing->lastWrite, 1);
758
759         first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
760         first_txd->numDesc = 0;
761         first_txd->len = 0;
762         first_txd->tx_addr = (u64)((unsigned long) skb);
763         first_txd->processFlags = 0;
764
765         if(skb->ip_summed == CHECKSUM_PARTIAL) {
766                 /* The 3XP will figure out if this is UDP/TCP */
767                 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
768                 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
769                 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
770         }
771
772         if (skb_vlan_tag_present(skb)) {
773                 first_txd->processFlags |=
774                     TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
775                 first_txd->processFlags |=
776                     cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
777                                 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
778         }
779
780         if (skb_is_gso(skb)) {
781                 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
782                 first_txd->numDesc++;
783
784                 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
785         }
786
787         txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
788         typhoon_inc_tx_index(&txRing->lastWrite, 1);
789
790         /* No need to worry about padding packet -- the firmware pads
791          * it with zeros to ETH_ZLEN for us.
792          */
793         if(skb_shinfo(skb)->nr_frags == 0) {
794                 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
795                                        PCI_DMA_TODEVICE);
796                 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
797                 txd->len = cpu_to_le16(skb->len);
798                 txd->frag.addr = cpu_to_le32(skb_dma);
799                 txd->frag.addrHi = 0;
800                 first_txd->numDesc++;
801         } else {
802                 int i, len;
803
804                 len = skb_headlen(skb);
805                 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
806                                          PCI_DMA_TODEVICE);
807                 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
808                 txd->len = cpu_to_le16(len);
809                 txd->frag.addr = cpu_to_le32(skb_dma);
810                 txd->frag.addrHi = 0;
811                 first_txd->numDesc++;
812
813                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
814                         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
815                         void *frag_addr;
816
817                         txd = (struct tx_desc *) (txRing->ringBase +
818                                                 txRing->lastWrite);
819                         typhoon_inc_tx_index(&txRing->lastWrite, 1);
820
821                         len = skb_frag_size(frag);
822                         frag_addr = skb_frag_address(frag);
823                         skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
824                                          PCI_DMA_TODEVICE);
825                         txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
826                         txd->len = cpu_to_le16(len);
827                         txd->frag.addr = cpu_to_le32(skb_dma);
828                         txd->frag.addrHi = 0;
829                         first_txd->numDesc++;
830                 }
831         }
832
833         /* Kick the 3XP
834          */
835         wmb();
836         iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
837
838         /* If we don't have room to put the worst case packet on the
839          * queue, then we must stop the queue. We need 2 extra
840          * descriptors -- one to prevent ring wrap, and one for the
841          * Tx header.
842          */
843         numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
844
845         if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
846                 netif_stop_queue(dev);
847
848                 /* A Tx complete IRQ could have gotten between, making
849                  * the ring free again. Only need to recheck here, since
850                  * Tx is serialized.
851                  */
852                 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
853                         netif_wake_queue(dev);
854         }
855
856         return NETDEV_TX_OK;
857 }
858
859 static void
860 typhoon_set_rx_mode(struct net_device *dev)
861 {
862         struct typhoon *tp = netdev_priv(dev);
863         struct cmd_desc xp_cmd;
864         u32 mc_filter[2];
865         __le16 filter;
866
867         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
868         if(dev->flags & IFF_PROMISC) {
869                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
870         } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
871                   (dev->flags & IFF_ALLMULTI)) {
872                 /* Too many to match, or accept all multicasts. */
873                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
874         } else if (!netdev_mc_empty(dev)) {
875                 struct netdev_hw_addr *ha;
876
877                 memset(mc_filter, 0, sizeof(mc_filter));
878                 netdev_for_each_mc_addr(ha, dev) {
879                         int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
880                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
881                 }
882
883                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
884                                          TYPHOON_CMD_SET_MULTICAST_HASH);
885                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
886                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
887                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
888                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
889
890                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
891         }
892
893         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
894         xp_cmd.parm1 = filter;
895         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
896 }
897
/* Read the hardware statistics from the 3XP via a READ_STATS command and
 * fold them, together with the counters saved across the last reset
 * (tp->stats_saved), into tp->stats.  Also refreshes tp->speed and
 * tp->duplex from the firmware's link status word.
 *
 * NOTE: issuing the command polls for the response (see the file header's
 * note about the ~8ms non-preemptable wait for stats).
 *
 * Returns 0 on success, or the negative error from typhoon_issue_command().
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	/* NOTE(review): tx_errors counts only carrier losses here, so it
	 * equals tx_carrier_errors below.
	 */
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	/* rx_errors aggregates FIFO overruns, SSD errors, and CRC errors */
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	/* cache the current link state for get_link_ksettings() */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
949
950 static struct net_device_stats *
951 typhoon_get_stats(struct net_device *dev)
952 {
953         struct typhoon *tp = netdev_priv(dev);
954         struct net_device_stats *stats = &tp->stats;
955         struct net_device_stats *saved = &tp->stats_saved;
956
957         smp_rmb();
958         if(tp->card_state == Sleeping)
959                 return saved;
960
961         if(typhoon_do_get_stats(tp) < 0) {
962                 netdev_err(dev, "error getting stats\n");
963                 return saved;
964         }
965
966         return stats;
967 }
968
969 static void
970 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
971 {
972         struct typhoon *tp = netdev_priv(dev);
973         struct pci_dev *pci_dev = tp->pdev;
974         struct cmd_desc xp_cmd;
975         struct resp_desc xp_resp[3];
976
977         smp_rmb();
978         if(tp->card_state == Sleeping) {
979                 strlcpy(info->fw_version, "Sleep image",
980                         sizeof(info->fw_version));
981         } else {
982                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
983                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
984                         strlcpy(info->fw_version, "Unknown runtime",
985                                 sizeof(info->fw_version));
986                 } else {
987                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
988                         snprintf(info->fw_version, sizeof(info->fw_version),
989                                 "%02x.%03x.%03x", sleep_ver >> 24,
990                                 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
991                 }
992         }
993
994         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
995         strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
996 }
997
998 static int
999 typhoon_get_link_ksettings(struct net_device *dev,
1000                            struct ethtool_link_ksettings *cmd)
1001 {
1002         struct typhoon *tp = netdev_priv(dev);
1003         u32 supported, advertising = 0;
1004
1005         supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1006                                 SUPPORTED_Autoneg;
1007
1008         switch (tp->xcvr_select) {
1009         case TYPHOON_XCVR_10HALF:
1010                 advertising = ADVERTISED_10baseT_Half;
1011                 break;
1012         case TYPHOON_XCVR_10FULL:
1013                 advertising = ADVERTISED_10baseT_Full;
1014                 break;
1015         case TYPHOON_XCVR_100HALF:
1016                 advertising = ADVERTISED_100baseT_Half;
1017                 break;
1018         case TYPHOON_XCVR_100FULL:
1019                 advertising = ADVERTISED_100baseT_Full;
1020                 break;
1021         case TYPHOON_XCVR_AUTONEG:
1022                 advertising = ADVERTISED_10baseT_Half |
1023                                             ADVERTISED_10baseT_Full |
1024                                             ADVERTISED_100baseT_Half |
1025                                             ADVERTISED_100baseT_Full |
1026                                             ADVERTISED_Autoneg;
1027                 break;
1028         }
1029
1030         if(tp->capabilities & TYPHOON_FIBER) {
1031                 supported |= SUPPORTED_FIBRE;
1032                 advertising |= ADVERTISED_FIBRE;
1033                 cmd->base.port = PORT_FIBRE;
1034         } else {
1035                 supported |= SUPPORTED_10baseT_Half |
1036                                         SUPPORTED_10baseT_Full |
1037                                         SUPPORTED_TP;
1038                 advertising |= ADVERTISED_TP;
1039                 cmd->base.port = PORT_TP;
1040         }
1041
1042         /* need to get stats to make these link speed/duplex valid */
1043         typhoon_do_get_stats(tp);
1044         cmd->base.speed = tp->speed;
1045         cmd->base.duplex = tp->duplex;
1046         cmd->base.phy_address = 0;
1047         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1048                 cmd->base.autoneg = AUTONEG_ENABLE;
1049         else
1050                 cmd->base.autoneg = AUTONEG_DISABLE;
1051
1052         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1053                                                 supported);
1054         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1055                                                 advertising);
1056
1057         return 0;
1058 }
1059
1060 static int
1061 typhoon_set_link_ksettings(struct net_device *dev,
1062                            const struct ethtool_link_ksettings *cmd)
1063 {
1064         struct typhoon *tp = netdev_priv(dev);
1065         u32 speed = cmd->base.speed;
1066         struct cmd_desc xp_cmd;
1067         __le16 xcvr;
1068         int err;
1069
1070         err = -EINVAL;
1071         if (cmd->base.autoneg == AUTONEG_ENABLE) {
1072                 xcvr = TYPHOON_XCVR_AUTONEG;
1073         } else {
1074                 if (cmd->base.duplex == DUPLEX_HALF) {
1075                         if (speed == SPEED_10)
1076                                 xcvr = TYPHOON_XCVR_10HALF;
1077                         else if (speed == SPEED_100)
1078                                 xcvr = TYPHOON_XCVR_100HALF;
1079                         else
1080                                 goto out;
1081                 } else if (cmd->base.duplex == DUPLEX_FULL) {
1082                         if (speed == SPEED_10)
1083                                 xcvr = TYPHOON_XCVR_10FULL;
1084                         else if (speed == SPEED_100)
1085                                 xcvr = TYPHOON_XCVR_100FULL;
1086                         else
1087                                 goto out;
1088                 } else
1089                         goto out;
1090         }
1091
1092         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1093         xp_cmd.parm1 = xcvr;
1094         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1095         if(err < 0)
1096                 goto out;
1097
1098         tp->xcvr_select = xcvr;
1099         if (cmd->base.autoneg == AUTONEG_ENABLE) {
1100                 tp->speed = 0xff;       /* invalid */
1101                 tp->duplex = 0xff;      /* invalid */
1102         } else {
1103                 tp->speed = speed;
1104                 tp->duplex = cmd->base.duplex;
1105         }
1106
1107 out:
1108         return err;
1109 }
1110
1111 static void
1112 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1113 {
1114         struct typhoon *tp = netdev_priv(dev);
1115
1116         wol->supported = WAKE_PHY | WAKE_MAGIC;
1117         wol->wolopts = 0;
1118         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1119                 wol->wolopts |= WAKE_PHY;
1120         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1121                 wol->wolopts |= WAKE_MAGIC;
1122         memset(&wol->sopass, 0, sizeof(wol->sopass));
1123 }
1124
1125 static int
1126 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1127 {
1128         struct typhoon *tp = netdev_priv(dev);
1129
1130         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1131                 return -EINVAL;
1132
1133         tp->wol_events = 0;
1134         if(wol->wolopts & WAKE_PHY)
1135                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1136         if(wol->wolopts & WAKE_MAGIC)
1137                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1138
1139         return 0;
1140 }
1141
1142 static void
1143 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1144 {
1145         ering->rx_max_pending = RXENT_ENTRIES;
1146         ering->tx_max_pending = TXLO_ENTRIES - 1;
1147
1148         ering->rx_pending = RXENT_ENTRIES;
1149         ering->tx_pending = TXLO_ENTRIES - 1;
1150 }
1151
/* ethtool entry points for this driver; link settings use the modern
 * {get,set}_link_ksettings interface.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= typhoon_get_ringparam,
	.get_link_ksettings	= typhoon_get_link_ksettings,
	.set_link_ksettings	= typhoon_set_link_ksettings,
};
1161
1162 static int
1163 typhoon_wait_interrupt(void __iomem *ioaddr)
1164 {
1165         int i, err = 0;
1166
1167         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1168                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1169                    TYPHOON_INTR_BOOTCMD)
1170                         goto out;
1171                 udelay(TYPHOON_UDELAY);
1172         }
1173
1174         err = -ETIMEDOUT;
1175
1176 out:
1177         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1178         return err;
1179 }
1180
1181 #define shared_offset(x)        offsetof(struct typhoon_shared, x)
1182
/* Lay out the host/NIC shared memory block and fill in the boot record
 * (struct typhoon_interface) with the DMA address and size of each ring.
 * Only the low 32 bits of each DMA address are programmed; the *Hi
 * members stay zero from the memset() -- presumably the shared block is
 * allocated below 4GB (TODO confirm at the allocation site).  Also sets
 * up the host-side ring bookkeeping and default offload flags, and ends
 * with a wmb() so the 3XP sees a consistent boot record.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side (virtual address) views of the same rings. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	/* Default offloads: IP/TCP/UDP checksum, TSO, VLAN tagging. */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1252
1253 static void
1254 typhoon_init_rings(struct typhoon *tp)
1255 {
1256         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1257
1258         tp->txLoRing.lastWrite = 0;
1259         tp->txHiRing.lastWrite = 0;
1260         tp->rxLoRing.lastWrite = 0;
1261         tp->rxHiRing.lastWrite = 0;
1262         tp->rxBuffRing.lastWrite = 0;
1263         tp->cmdRing.lastWrite = 0;
1264         tp->respRing.lastWrite = 0;
1265
1266         tp->txLoRing.lastRead = 0;
1267         tp->txHiRing.lastRead = 0;
1268 }
1269
1270 static const struct firmware *typhoon_fw;
1271
1272 static int
1273 typhoon_request_firmware(struct typhoon *tp)
1274 {
1275         const struct typhoon_file_header *fHdr;
1276         const struct typhoon_section_header *sHdr;
1277         const u8 *image_data;
1278         u32 numSections;
1279         u32 section_len;
1280         u32 remaining;
1281         int err;
1282
1283         if (typhoon_fw)
1284                 return 0;
1285
1286         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1287         if (err) {
1288                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1289                            FIRMWARE_NAME);
1290                 return err;
1291         }
1292
1293         image_data = typhoon_fw->data;
1294         remaining = typhoon_fw->size;
1295         if (remaining < sizeof(struct typhoon_file_header))
1296                 goto invalid_fw;
1297
1298         fHdr = (struct typhoon_file_header *) image_data;
1299         if (memcmp(fHdr->tag, "TYPHOON", 8))
1300                 goto invalid_fw;
1301
1302         numSections = le32_to_cpu(fHdr->numSections);
1303         image_data += sizeof(struct typhoon_file_header);
1304         remaining -= sizeof(struct typhoon_file_header);
1305
1306         while (numSections--) {
1307                 if (remaining < sizeof(struct typhoon_section_header))
1308                         goto invalid_fw;
1309
1310                 sHdr = (struct typhoon_section_header *) image_data;
1311                 image_data += sizeof(struct typhoon_section_header);
1312                 section_len = le32_to_cpu(sHdr->len);
1313
1314                 if (remaining < section_len)
1315                         goto invalid_fw;
1316
1317                 image_data += section_len;
1318                 remaining -= section_len;
1319         }
1320
1321         return 0;
1322
1323 invalid_fw:
1324         netdev_err(tp->dev, "Invalid firmware image\n");
1325         release_firmware(typhoon_fw);
1326         typhoon_fw = NULL;
1327         return -EINVAL;
1328 }
1329
/* Push the (already validated) runtime firmware image to the 3XP.  Each
 * section is copied in PAGE_SIZE chunks through a single coherent DMA
 * page; for every chunk we wait for the card's segment-ready handshake,
 * program length/checksum/destination/source registers, then issue
 * SEG_AVAILABLE.  The HMAC digest from the file header is programmed
 * before the transfer starts.  The boot-command interrupt is enabled and
 * masked for the duration so it can be polled via typhoon_wait_interrupt()
 * without reaching the host.
 *
 * Returns 0 on success, -ENOMEM if the DMA page cannot be allocated, or
 * -ETIMEDOUT if any handshake with the card times out.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* Enable (and mask) the boot-command interrupt so we can poll
	 * for it; the original settings are restored at err_out_irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Program the image start address and the five HMAC digest words,
	 * then tell the card a runtime image is coming.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the card to consume the final segment before telling
	 * it the download is complete.
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1470
/* Boot the 3XP once it has reached initial_status: hand it the DMA
 * address of the shared boot record, wait for it to report RUNNING,
 * clear the ready registers, and issue the BOOT command.
 *
 * Returns 0 on success or -ETIMEDOUT if the card never reaches the
 * expected status.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* Only the low 32 bits of the boot record address are used. */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1506
1507 static u32
1508 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1509                         volatile __le32 * index)
1510 {
1511         u32 lastRead = txRing->lastRead;
1512         struct tx_desc *tx;
1513         dma_addr_t skb_dma;
1514         int dma_len;
1515         int type;
1516
1517         while(lastRead != le32_to_cpu(*index)) {
1518                 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1519                 type = tx->flags & TYPHOON_TYPE_MASK;
1520
1521                 if(type == TYPHOON_TX_DESC) {
1522                         /* This tx_desc describes a packet.
1523                          */
1524                         unsigned long ptr = tx->tx_addr;
1525                         struct sk_buff *skb = (struct sk_buff *) ptr;
1526                         dev_kfree_skb_irq(skb);
1527                 } else if(type == TYPHOON_FRAG_DESC) {
1528                         /* This tx_desc describes a memory mapping. Free it.
1529                          */
1530                         skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1531                         dma_len = le16_to_cpu(tx->len);
1532                         pci_unmap_single(tp->pdev, skb_dma, dma_len,
1533                                        PCI_DMA_TODEVICE);
1534                 }
1535
1536                 tx->flags = 0;
1537                 typhoon_inc_tx_index(&lastRead, 1);
1538         }
1539
1540         return lastRead;
1541 }
1542
1543 static void
1544 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1545                         volatile __le32 * index)
1546 {
1547         u32 lastRead;
1548         int numDesc = MAX_SKB_FRAGS + 1;
1549
1550         /* This will need changing if we start to use the Hi Tx ring. */
1551         lastRead = typhoon_clean_tx(tp, txRing, index);
1552         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1553                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1554                 netif_wake_queue(tp->dev);
1555
1556         txRing->lastRead = lastRead;
1557         smp_wmb();
1558 }
1559
/* Hand the Rx buffer at @idx back to the NIC via the free-buffer ring
 * so it can be reused without a fresh allocation.  If the free ring is
 * full, the skb is dropped and the slot is left empty instead.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* The ring is full when advancing lastWrite by one descriptor
	 * would collide with the index the NIC has consumed up to.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it -- the descriptor contents must be
	 * globally visible before the ready index is advanced.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1586
1587 static int
1588 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1589 {
1590         struct typhoon_indexes *indexes = tp->indexes;
1591         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1592         struct basic_ring *ring = &tp->rxBuffRing;
1593         struct rx_free *r;
1594         struct sk_buff *skb;
1595         dma_addr_t dma_addr;
1596
1597         rxb->skb = NULL;
1598
1599         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1600                                 le32_to_cpu(indexes->rxBuffCleared))
1601                 return -ENOMEM;
1602
1603         skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
1604         if(!skb)
1605                 return -ENOMEM;
1606
1607 #if 0
1608         /* Please, 3com, fix the firmware to allow DMA to a unaligned
1609          * address! Pretty please?
1610          */
1611         skb_reserve(skb, 2);
1612 #endif
1613
1614         dma_addr = pci_map_single(tp->pdev, skb->data,
1615                                   PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1616
1617         /* Since no card does 64 bit DAC, the high bits will never
1618          * change from zero.
1619          */
1620         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1621         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1622         r->virtAddr = idx;
1623         r->physAddr = cpu_to_le32(dma_addr);
1624         rxb->skb = skb;
1625         rxb->dma_addr = dma_addr;
1626
1627         /* Tell the card about it */
1628         wmb();
1629         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1630         return 0;
1631 }
1632
/* Drain received packets from an Rx ring, delivering at most @budget
 * of them to the stack.
 *
 * @rxRing:  the descriptor ring to walk
 * @ready:   index the NIC has filled up to (shared indexes page)
 * @cleared: index we have consumed up to; updated on return
 * @budget:  NAPI quota for this call
 *
 * Small frames (below rx_copybreak) are copied into a fresh skb so the
 * original DMA buffer can be recycled in place; larger frames are
 * passed up directly and the slot gets a newly allocated buffer.
 * Returns the number of packets delivered (errored frames are recycled
 * without counting against the budget).
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* addr echoes the rxbuffers[] index we posted in the
		 * free descriptor's virtAddr field.
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* bad frame: return the buffer to the NIC as-is */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
			/* Copy small frames so the large DMA buffer can
			 * be recycled; reserve 2 bytes so the IP header
			 * lands aligned.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Pass the mapped buffer straight up the stack
			 * and post a fresh one in its place.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Trust the hardware checksum only when the IP check and
		 * exactly one of the TCP/UDP checks passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1709
1710 static void
1711 typhoon_fill_free_ring(struct typhoon *tp)
1712 {
1713         u32 i;
1714
1715         for(i = 0; i < RXENT_ENTRIES; i++) {
1716                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1717                 if(rxb->skb)
1718                         continue;
1719                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1720                         break;
1721         }
1722 }
1723
/* NAPI poll callback: process command responses, Tx completions, and
 * Rx work, then re-enable the chip interrupt once less than a full
 * @budget of Rx work was done.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* order our reads of the shared indexes page against the NIC's
	 * DMA writes into it
	 */
	rmb();
	/* Only consume responses here when no command issuer is
	 * currently waiting on one.
	 */
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* drain the high-priority Rx ring before the low one */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* all caught up: leave polled mode and unmask the
		 * chip interrupt
		 */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1764
1765 static irqreturn_t
1766 typhoon_interrupt(int irq, void *dev_instance)
1767 {
1768         struct net_device *dev = dev_instance;
1769         struct typhoon *tp = netdev_priv(dev);
1770         void __iomem *ioaddr = tp->ioaddr;
1771         u32 intr_status;
1772
1773         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1774         if(!(intr_status & TYPHOON_INTR_HOST_INT))
1775                 return IRQ_NONE;
1776
1777         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1778
1779         if (napi_schedule_prep(&tp->napi)) {
1780                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1781                 typhoon_post_pci_writes(ioaddr);
1782                 __napi_schedule(&tp->napi);
1783         } else {
1784                 netdev_err(dev, "Error, poll already scheduled\n");
1785         }
1786         return IRQ_HANDLED;
1787 }
1788
1789 static void
1790 typhoon_free_rx_rings(struct typhoon *tp)
1791 {
1792         u32 i;
1793
1794         for(i = 0; i < RXENT_ENTRIES; i++) {
1795                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1796                 if(rxb->skb) {
1797                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1798                                        PCI_DMA_FROMDEVICE);
1799                         dev_kfree_skb(rxb->skb);
1800                         rxb->skb = NULL;
1801                 }
1802         }
1803 }
1804
/* Program the requested wake @events into the card, issue the sleep
 * command, and move the PCI device into low-power @state.
 *
 * Returns 0 on success, the negative command error, or -ETIMEDOUT if
 * the card never reaches TYPHOON_STATUS_SLEEPING.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	/* arm PME for the chosen wake events, then power down */
	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1841
1842 static int
1843 typhoon_wakeup(struct typhoon *tp, int wait_type)
1844 {
1845         struct pci_dev *pdev = tp->pdev;
1846         void __iomem *ioaddr = tp->ioaddr;
1847
1848         pci_set_power_state(pdev, PCI_D0);
1849         pci_restore_state(pdev);
1850
1851         /* Post 2.x.x versions of the Sleep Image require a reset before
1852          * we can download the Runtime Image. But let's not make users of
1853          * the old firmware pay for the reset.
1854          */
1855         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1856         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1857                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1858                 return typhoon_reset(ioaddr, wait_type);
1859
1860         return 0;
1861 }
1862
/* Download and boot the runtime image, then configure the adapter for
 * normal operation (max packet size, MAC address, IRQ coalescing,
 * transceiver, VLAN ethertype, offloads, Rx filter) and enable Tx/Rx
 * and interrupts.
 *
 * Returns 0 on success.  On any failure the adapter is reset and the
 * rings reinitialized before the error is returned.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* the MAC address goes out as two parameters: the first 16 bits
	 * in parm1 and the remaining 32 bits in parm2
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* the same offload mask is passed in both parm2 and parm3 --
	 * NOTE(review): presumably Tx and Rx task masks; confirm
	 * against the firmware command spec.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* publish Running before interrupts can fire */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1954
/* Quiesce the running firmware: disable Rx, drain outstanding Tx,
 * disable Tx, snapshot the statistics, halt the 3XP, and reset it.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset itself fails;
 * halt/Tx-drain timeouts are logged but not fatal.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2015
2016 static void
2017 typhoon_tx_timeout(struct net_device *dev)
2018 {
2019         struct typhoon *tp = netdev_priv(dev);
2020
2021         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2022                 netdev_warn(dev, "could not reset in tx timeout\n");
2023                 goto truly_dead;
2024         }
2025
2026         /* If we ever start using the Hi ring, it will need cleaning too */
2027         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2028         typhoon_free_rx_rings(tp);
2029
2030         if(typhoon_start_runtime(tp) < 0) {
2031                 netdev_err(dev, "could not start runtime in tx timeout\n");
2032                 goto truly_dead;
2033         }
2034
2035         netif_wake_queue(dev);
2036         return;
2037
2038 truly_dead:
2039         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2040         typhoon_reset(tp->ioaddr, NoWait);
2041         netif_carrier_off(dev);
2042 }
2043
/* ndo_open: request firmware, wake the card, grab the IRQ, and start
 * the runtime image.  On failure, unwind and try to leave the card
 * asleep in its sleep image.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* reboot into the sleep image before sleeping; if even that
	 * fails, just reset the card and give up.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2092
/* ndo_stop: quiesce the adapter, release the IRQ and Rx buffers, then
 * boot the sleep image and put the card into D3hot.  Always returns 0;
 * failures along the way are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2118
2119 #ifdef CONFIG_PM
2120 static int
2121 typhoon_resume(struct pci_dev *pdev)
2122 {
2123         struct net_device *dev = pci_get_drvdata(pdev);
2124         struct typhoon *tp = netdev_priv(dev);
2125
2126         /* If we're down, resume when we are upped.
2127          */
2128         if(!netif_running(dev))
2129                 return 0;
2130
2131         if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2132                 netdev_err(dev, "critical: could not wake up in resume\n");
2133                 goto reset;
2134         }
2135
2136         if(typhoon_start_runtime(tp) < 0) {
2137                 netdev_err(dev, "critical: could not start runtime in resume\n");
2138                 goto reset;
2139         }
2140
2141         netif_device_attach(dev);
2142         return 0;
2143
2144 reset:
2145         typhoon_reset(tp->ioaddr, NoWait);
2146         return -EBUSY;
2147 }
2148
/* PM suspend callback: stop the runtime, boot the sleep image,
 * reprogram the MAC address and a minimal Rx filter for wake-on-LAN,
 * and put the card to sleep with the configured wake events.
 *
 * On any failure we resume the device and return -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* first 16 bits of the MAC in parm1, last 32 bits in parm2 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	/* only directed and broadcast frames may wake us */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2206 #endif
2207
/* Probe whether MMIO access to the card works by sending ourselves a
 * self-interrupt through BAR 1 and checking whether it shows up in the
 * interrupt status register.
 *
 * Returns 1 if MMIO works, 0 to fall back to port IO.
 */
static int
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* only safe to poke at the card while it's idle, waiting on us */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore a quiet interrupt state before handing the card on */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2253
/* net_device callback table for the typhoon driver */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open               = typhoon_open,
	.ndo_stop               = typhoon_close,
	.ndo_start_xmit         = typhoon_start_tx,
	.ndo_set_rx_mode        = typhoon_set_rx_mode,
	.ndo_tx_timeout         = typhoon_tx_timeout,
	.ndo_get_stats          = typhoon_get_stats,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = eth_mac_addr,
};
2264
2265 static int
2266 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2267 {
2268         struct net_device *dev;
2269         struct typhoon *tp;
2270         int card_id = (int) ent->driver_data;
2271         void __iomem *ioaddr;
2272         void *shared;
2273         dma_addr_t shared_dma;
2274         struct cmd_desc xp_cmd;
2275         struct resp_desc xp_resp[3];
2276         int err = 0;
2277         const char *err_msg;
2278
2279         dev = alloc_etherdev(sizeof(*tp));
2280         if(dev == NULL) {
2281                 err_msg = "unable to alloc new net device";
2282                 err = -ENOMEM;
2283                 goto error_out;
2284         }
2285         SET_NETDEV_DEV(dev, &pdev->dev);
2286
2287         err = pci_enable_device(pdev);
2288         if(err < 0) {
2289                 err_msg = "unable to enable device";
2290                 goto error_out_dev;
2291         }
2292
2293         err = pci_set_mwi(pdev);
2294         if(err < 0) {
2295                 err_msg = "unable to set MWI";
2296                 goto error_out_disable;
2297         }
2298
2299         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2300         if(err < 0) {
2301                 err_msg = "No usable DMA configuration";
2302                 goto error_out_mwi;
2303         }
2304
2305         /* sanity checks on IO and MMIO BARs
2306          */
2307         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2308                 err_msg = "region #1 not a PCI IO resource, aborting";
2309                 err = -ENODEV;
2310                 goto error_out_mwi;
2311         }
2312         if(pci_resource_len(pdev, 0) < 128) {
2313                 err_msg = "Invalid PCI IO region size, aborting";
2314                 err = -ENODEV;
2315                 goto error_out_mwi;
2316         }
2317         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2318                 err_msg = "region #1 not a PCI MMIO resource, aborting";
2319                 err = -ENODEV;
2320                 goto error_out_mwi;
2321         }
2322         if(pci_resource_len(pdev, 1) < 128) {
2323                 err_msg = "Invalid PCI MMIO region size, aborting";
2324                 err = -ENODEV;
2325                 goto error_out_mwi;
2326         }
2327
2328         err = pci_request_regions(pdev, KBUILD_MODNAME);
2329         if(err < 0) {
2330                 err_msg = "could not request regions";
2331                 goto error_out_mwi;
2332         }
2333
2334         /* map our registers
2335          */
2336         if(use_mmio != 0 && use_mmio != 1)
2337                 use_mmio = typhoon_test_mmio(pdev);
2338
2339         ioaddr = pci_iomap(pdev, use_mmio, 128);
2340         if (!ioaddr) {
2341                 err_msg = "cannot remap registers, aborting";
2342                 err = -EIO;
2343                 goto error_out_regions;
2344         }
2345
2346         /* allocate pci dma space for rx and tx descriptor rings
2347          */
2348         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2349                                       &shared_dma);
2350         if(!shared) {
2351                 err_msg = "could not allocate DMA memory";
2352                 err = -ENOMEM;
2353                 goto error_out_remap;
2354         }
2355
2356         dev->irq = pdev->irq;
2357         tp = netdev_priv(dev);
2358         tp->shared = shared;
2359         tp->shared_dma = shared_dma;
2360         tp->pdev = pdev;
2361         tp->tx_pdev = pdev;
2362         tp->ioaddr = ioaddr;
2363         tp->tx_ioaddr = ioaddr;
2364         tp->dev = dev;
2365
2366         /* Init sequence:
2367          * 1) Reset the adapter to clear any bad juju
2368          * 2) Reload the sleep image
2369          * 3) Boot the sleep image
2370          * 4) Get the hardware address.
2371          * 5) Put the card to sleep.
2372          */
2373         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2374                 err_msg = "could not reset 3XP";
2375                 err = -EIO;
2376                 goto error_out_dma;
2377         }
2378
2379         /* Now that we've reset the 3XP and are sure it's not going to
2380          * write all over memory, enable bus mastering, and save our
2381          * state for resuming after a suspend.
2382          */
2383         pci_set_master(pdev);
2384         pci_save_state(pdev);
2385
2386         typhoon_init_interface(tp);
2387         typhoon_init_rings(tp);
2388
2389         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2390                 err_msg = "cannot boot 3XP sleep image";
2391                 err = -EIO;
2392                 goto error_out_reset;
2393         }
2394
2395         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2396         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2397                 err_msg = "cannot read MAC address";
2398                 err = -EIO;
2399                 goto error_out_reset;
2400         }
2401
2402         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2403         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2404
2405         if(!is_valid_ether_addr(dev->dev_addr)) {
2406                 err_msg = "Could not obtain valid ethernet address, aborting";
2407                 goto error_out_reset;
2408         }
2409
2410         /* Read the Sleep Image version last, so the response is valid
2411          * later when we print out the version reported.
2412          */
2413         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2414         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2415                 err_msg = "Could not get Sleep Image version";
2416                 goto error_out_reset;
2417         }
2418
2419         tp->capabilities = typhoon_card_info[card_id].capabilities;
2420         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2421
2422         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2423          * READ_VERSIONS command. Those versions are OK after waking up
2424          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2425          * seem to need a little extra help to get started. Since we don't
2426          * know how to nudge it along, just kick it.
2427          */
2428         if(xp_resp[0].numDesc != 0)
2429                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2430
2431         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2432                 err_msg = "cannot put adapter to sleep";
2433                 err = -EIO;
2434                 goto error_out_reset;
2435         }
2436
2437         /* The chip-specific entries in the device structure. */
2438         dev->netdev_ops         = &typhoon_netdev_ops;
2439         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2440         dev->watchdog_timeo     = TX_TIMEOUT;
2441
2442         dev->ethtool_ops = &typhoon_ethtool_ops;
2443
2444         /* We can handle scatter gather, up to 16 entries, and
2445          * we can do IP checksumming (only version 4, doh...)
2446          *
2447          * There's no way to turn off the RX VLAN offloading and stripping
2448          * on the current 3XP firmware -- it does not respect the offload
2449          * settings -- so we only allow the user to toggle the TX processing.
2450          */
2451         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2452                 NETIF_F_HW_VLAN_CTAG_TX;
2453         dev->features = dev->hw_features |
2454                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2455
2456         if(register_netdev(dev) < 0) {
2457                 err_msg = "unable to register netdev";
2458                 goto error_out_reset;
2459         }
2460
2461         pci_set_drvdata(pdev, dev);
2462
2463         netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2464                     typhoon_card_info[card_id].name,
2465                     use_mmio ? "MMIO" : "IO",
2466                     (unsigned long long)pci_resource_start(pdev, use_mmio),
2467                     dev->dev_addr);
2468
2469         /* xp_resp still contains the response to the READ_VERSIONS command.
2470          * For debugging, let the user know what version he has.
2471          */
2472         if(xp_resp[0].numDesc == 0) {
2473                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2474                  * of version is Month/Day of build.
2475                  */
2476                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2477                 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2478                             monthday >> 8, monthday & 0xff);
2479         } else if(xp_resp[0].numDesc == 2) {
2480                 /* This is the Typhoon 1.1+ type Sleep Image
2481                  */
2482                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2483                 u8 *ver_string = (u8 *) &xp_resp[1];
2484                 ver_string[25] = 0;
2485                 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2486                             sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2487                             sleep_ver & 0xfff, ver_string);
2488         } else {
2489                 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2490                             xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2491         }
2492
2493         return 0;
2494
2495 error_out_reset:
2496         typhoon_reset(ioaddr, NoWait);
2497
2498 error_out_dma:
2499         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2500                             shared, shared_dma);
2501 error_out_remap:
2502         pci_iounmap(pdev, ioaddr);
2503 error_out_regions:
2504         pci_release_regions(pdev);
2505 error_out_mwi:
2506         pci_clear_mwi(pdev);
2507 error_out_disable:
2508         pci_disable_device(pdev);
2509 error_out_dev:
2510         free_netdev(dev);
2511 error_out:
2512         pr_err("%s: %s\n", pci_name(pdev), err_msg);
2513         return err;
2514 }
2515
static void
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* Detach from the network stack first so no new I/O can reach
	 * the hardware while we tear it down.
	 */
	unregister_netdev(dev);
	/* The adapter is normally left asleep in D3hot; wake it to D0
	 * and restore the config space saved during probe so the reset
	 * below actually reaches working hardware.
	 */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	/* Release resources in the reverse order they were acquired in
	 * typhoon_init_one(): mapping, DMA ring memory, BAR regions,
	 * MWI, the PCI device itself, and finally the netdev.
	 */
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2534
/* PCI driver glue: probe/remove (and, with CONFIG_PM, legacy
 * suspend/resume) entry points for every 3CR990-family device listed
 * in typhoon_pci_tbl.
 */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= typhoon_remove_one,
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2545
2546 static int __init
2547 typhoon_init(void)
2548 {
2549         return pci_register_driver(&typhoon_driver);
2550 }
2551
static void __exit
typhoon_cleanup(void)
{
	/* Drop the cached reference to the 3XP runtime firmware image;
	 * release_firmware() is a no-op if typhoon_fw was never loaded.
	 */
	release_firmware(typhoon_fw);
	pci_unregister_driver(&typhoon_driver);
}
2558
/* Wire module load/unload to the driver's init/cleanup routines. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);