1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/mii.h>
23 #include <linux/usb.h>
24 #include <linux/crc32.h>
25 #include <linux/signal.h>
26 #include <linux/slab.h>
27 #include <linux/if_vlan.h>
28 #include <linux/uaccess.h>
29 #include <linux/list.h>
30 #include <linux/ip.h>
31 #include <linux/ipv6.h>
32 #include <linux/mdio.h>
33 #include <net/ip6_checksum.h>
34 #include "lan78xx.h"
35
36 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME     "lan78xx"
39 #define DRIVER_VERSION  "1.0.0"
40
41 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
42 #define THROTTLE_JIFFIES                (HZ / 8)
43 #define UNLINK_TIMEOUT_MS               3
44
45 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
46
47 #define SS_USB_PKT_SIZE                 (1024)
48 #define HS_USB_PKT_SIZE                 (512)
49 #define FS_USB_PKT_SIZE                 (64)
50
51 #define MAX_RX_FIFO_SIZE                (12 * 1024)
52 #define MAX_TX_FIFO_SIZE                (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY           (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE          (9000)
56 #define DEFAULT_TX_CSUM_ENABLE          (true)
57 #define DEFAULT_RX_CSUM_ENABLE          (true)
58 #define DEFAULT_TSO_CSUM_ENABLE         (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
60 #define INTERNAL_PHY_ID                 (2)     /* 2: GMII */
61 #define TX_OVERHEAD                     (8)
62 #define RXW_PADDING                     2
63
64 #define LAN78XX_USB_VENDOR_ID           (0x0424)
65 #define LAN7800_USB_PRODUCT_ID          (0x7800)
66 #define LAN7850_USB_PRODUCT_ID          (0x7850)
67 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
68 #define LAN78XX_OTP_MAGIC               (0x78F3)
69
70 #define MII_READ                        1
71 #define MII_WRITE                       0
72
73 #define EEPROM_INDICATOR                (0xA5)
74 #define EEPROM_MAC_OFFSET               (0x01)
75 #define MAX_EEPROM_SIZE                 512
76 #define OTP_INDICATOR_1                 (0xF3)
77 #define OTP_INDICATOR_2                 (0xF7)
78
79 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
80                                          WAKE_MCAST | WAKE_BCAST | \
81                                          WAKE_ARP | WAKE_MAGIC)
82
83 /* USB related defines */
84 #define BULK_IN_PIPE                    1
85 #define BULK_OUT_PIPE                   2
86
87 /* default autosuspend delay (msec) */
88 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
89
90 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
91         "RX FCS Errors",
92         "RX Alignment Errors",
93         "RX Fragment Errors",
94         "RX Jabber Errors",
95         "RX Undersize Frame Errors",
96         "RX Oversize Frame Errors",
97         "RX Dropped Frames",
98         "RX Unicast Byte Count",
99         "RX Broadcast Byte Count",
100         "RX Multicast Byte Count",
101         "RX Unicast Frames",
102         "RX Broadcast Frames",
103         "RX Multicast Frames",
104         "RX Pause Frames",
105         "RX 64 Byte Frames",
106         "RX 65 - 127 Byte Frames",
107         "RX 128 - 255 Byte Frames",
108         "RX 256 - 511 Bytes Frames",
109         "RX 512 - 1023 Byte Frames",
110         "RX 1024 - 1518 Byte Frames",
111         "RX Greater 1518 Byte Frames",
112         "EEE RX LPI Transitions",
113         "EEE RX LPI Time",
114         "TX FCS Errors",
115         "TX Excess Deferral Errors",
116         "TX Carrier Errors",
117         "TX Bad Byte Count",
118         "TX Single Collisions",
119         "TX Multiple Collisions",
120         "TX Excessive Collision",
121         "TX Late Collisions",
122         "TX Unicast Byte Count",
123         "TX Broadcast Byte Count",
124         "TX Multicast Byte Count",
125         "TX Unicast Frames",
126         "TX Broadcast Frames",
127         "TX Multicast Frames",
128         "TX Pause Frames",
129         "TX 64 Byte Frames",
130         "TX 65 - 127 Byte Frames",
131         "TX 128 - 255 Byte Frames",
132         "TX 256 - 511 Bytes Frames",
133         "TX 512 - 1023 Byte Frames",
134         "TX 1024 - 1518 Byte Frames",
135         "TX Greater 1518 Byte Frames",
136         "EEE TX LPI Transitions",
137         "EEE TX LPI Time",
138 };
139
140 struct lan78xx_statstage {
141         u32 rx_fcs_errors;
142         u32 rx_alignment_errors;
143         u32 rx_fragment_errors;
144         u32 rx_jabber_errors;
145         u32 rx_undersize_frame_errors;
146         u32 rx_oversize_frame_errors;
147         u32 rx_dropped_frames;
148         u32 rx_unicast_byte_count;
149         u32 rx_broadcast_byte_count;
150         u32 rx_multicast_byte_count;
151         u32 rx_unicast_frames;
152         u32 rx_broadcast_frames;
153         u32 rx_multicast_frames;
154         u32 rx_pause_frames;
155         u32 rx_64_byte_frames;
156         u32 rx_65_127_byte_frames;
157         u32 rx_128_255_byte_frames;
158         u32 rx_256_511_bytes_frames;
159         u32 rx_512_1023_byte_frames;
160         u32 rx_1024_1518_byte_frames;
161         u32 rx_greater_1518_byte_frames;
162         u32 eee_rx_lpi_transitions;
163         u32 eee_rx_lpi_time;
164         u32 tx_fcs_errors;
165         u32 tx_excess_deferral_errors;
166         u32 tx_carrier_errors;
167         u32 tx_bad_byte_count;
168         u32 tx_single_collisions;
169         u32 tx_multiple_collisions;
170         u32 tx_excessive_collision;
171         u32 tx_late_collisions;
172         u32 tx_unicast_byte_count;
173         u32 tx_broadcast_byte_count;
174         u32 tx_multicast_byte_count;
175         u32 tx_unicast_frames;
176         u32 tx_broadcast_frames;
177         u32 tx_multicast_frames;
178         u32 tx_pause_frames;
179         u32 tx_64_byte_frames;
180         u32 tx_65_127_byte_frames;
181         u32 tx_128_255_byte_frames;
182         u32 tx_256_511_bytes_frames;
183         u32 tx_512_1023_byte_frames;
184         u32 tx_1024_1518_byte_frames;
185         u32 tx_greater_1518_byte_frames;
186         u32 eee_tx_lpi_transitions;
187         u32 eee_tx_lpi_time;
188 };
189
190 struct lan78xx_net;
191
192 struct lan78xx_priv {
193         struct lan78xx_net *dev;
194         u32 rfe_ctl;
195         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
196         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
197         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
198         struct mutex dataport_mutex; /* for dataport access */
199         spinlock_t rfe_ctl_lock; /* for rfe register access */
200         struct work_struct set_multicast;
201         struct work_struct set_vlan;
202         u32 wol;
203 };
204
205 enum skb_state {
206         illegal = 0,
207         tx_start,
208         tx_done,
209         rx_start,
210         rx_done,
211         rx_cleanup,
212         unlink_start
213 };
214
215 struct skb_data {               /* skb->cb is one of these */
216         struct urb *urb;
217         struct lan78xx_net *dev;
218         enum skb_state state;
219         size_t length;
220 };
221
222 struct usb_context {
223         struct usb_ctrlrequest req;
224         struct lan78xx_net *dev;
225 };
226
227 #define EVENT_TX_HALT                   0
228 #define EVENT_RX_HALT                   1
229 #define EVENT_RX_MEMORY                 2
230 #define EVENT_STS_SPLIT                 3
231 #define EVENT_LINK_RESET                4
232 #define EVENT_RX_PAUSED                 5
233 #define EVENT_DEV_WAKING                6
234 #define EVENT_DEV_ASLEEP                7
235 #define EVENT_DEV_OPEN                  8
236
237 struct lan78xx_net {
238         struct net_device       *net;
239         struct usb_device       *udev;
240         struct usb_interface    *intf;
241         void                    *driver_priv;
242
243         int                     rx_qlen;
244         int                     tx_qlen;
245         struct sk_buff_head     rxq;
246         struct sk_buff_head     txq;
247         struct sk_buff_head     done;
248         struct sk_buff_head     rxq_pause;
249         struct sk_buff_head     txq_pend;
250
251         struct tasklet_struct   bh;
252         struct delayed_work     wq;
253
254         struct usb_host_endpoint *ep_blkin;
255         struct usb_host_endpoint *ep_blkout;
256         struct usb_host_endpoint *ep_intr;
257
258         int                     msg_enable;
259
260         struct urb              *urb_intr;
261         struct usb_anchor       deferred;
262
263         struct mutex            phy_mutex; /* for phy access */
264         unsigned                pipe_in, pipe_out, pipe_intr;
265
266         u32                     hard_mtu;       /* count any extra framing */
267         size_t                  rx_urb_size;    /* size for rx urbs */
268
269         unsigned long           flags;
270
271         wait_queue_head_t       *wait;
272         unsigned char           suspend_count;
273
274         unsigned                maxpacket;
275         struct timer_list       delay;
276
277         unsigned long           data[5];
278         struct mii_if_info      mii;
279
280         int                     link_on;
281         u8                      mdix_ctrl;
282 };
283
284 /* use ethtool to change the level for any given device */
285 static int msg_level = -1;
286 module_param(msg_level, int, 0);
287 MODULE_PARM_DESC(msg_level, "Override default message level");
288
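/* Read a 32-bit device register over the USB control pipe using the
 * vendor-specific READ_REGISTER request; the little-endian value is
 * converted to CPU byte order and returned in *data.
 * Illustrative use: lan78xx_read_reg(dev, MAC_CR, &val).
 */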
289 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
290 {
291         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
292         int ret;
293
294         if (!buf)
295                 return -ENOMEM;
296
297         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
298                               USB_VENDOR_REQUEST_READ_REGISTER,
299                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
301         if (likely(ret >= 0)) {
302                 le32_to_cpus(buf);
303                 *data = *buf;
304         } else {
305                 netdev_warn(dev->net,
306                             "Failed to read register index 0x%08x. ret = %d",
307                             index, ret);
308         }
309
310         kfree(buf);
311
312         return ret;
313 }
314
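/* Write a 32-bit device register via the vendor-specific WRITE_REGISTER
 * control request; the value is converted to little-endian before transfer.
 */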
315 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
316 {
317         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
318         int ret;
319
320         if (!buf)
321                 return -ENOMEM;
322
323         *buf = data;
324         cpu_to_le32s(buf);
325
326         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
327                               USB_VENDOR_REQUEST_WRITE_REGISTER,
328                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
329                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
330         if (unlikely(ret < 0)) {
331                 netdev_warn(dev->net,
332                             "Failed to write register index 0x%08x. ret = %d",
333                             index, ret);
334         }
335
336         kfree(buf);
337
338         return ret;
339 }
340
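/* Fetch the hardware statistics block with a single vendor control
 * transfer and copy it into *data, converting each 32-bit counter from
 * little-endian to CPU byte order.
 */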
341 static int lan78xx_read_stats(struct lan78xx_net *dev,
342                               struct lan78xx_statstage *data)
343 {
344         int ret = 0;
345         int i;
346         struct lan78xx_statstage *stats;
347         u32 *src;
348         u32 *dst;
349
350         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
351         if (!stats)
352                 return -ENOMEM;
353
354         ret = usb_control_msg(dev->udev,
355                               usb_rcvctrlpipe(dev->udev, 0),
356                               USB_VENDOR_REQUEST_GET_STATS,
357                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
358                               0,
359                               0,
360                               (void *)stats,
361                               sizeof(*stats),
362                               USB_CTRL_SET_TIMEOUT);
363         if (likely(ret >= 0)) {
364                 src = (u32 *)stats;
365                 dst = (u32 *)data;
366                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
367                         le32_to_cpus(&src[i]);
368                         dst[i] = src[i];
369                 }
370         } else {
371                 netdev_warn(dev->net,
372                             "Failed to read stat ret = 0x%x", ret);
373         }
374
375         kfree(stats);
376
377         return ret;
378 }
379
380 /* Loop until the read completes or times out; called with phy_mutex held */
381 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
382 {
383         unsigned long start_time = jiffies;
384         u32 val;
385         int ret;
386
387         do {
388                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
389                 if (unlikely(ret < 0))
390                         return -EIO;
391
392                 if (!(val & MII_ACC_MII_BUSY_))
393                         return 0;
394         } while (!time_after(jiffies, start_time + HZ));
395
396         return -EIO;
397 }
398
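/* Compose an MII_ACC register value from the PHY address, register index
 * and transfer direction, with the BUSY bit set to start the transaction.
 * Illustrative use: mii_access(phy_id, MII_BMSR, MII_READ) encodes a read
 * of the basic status register.
 */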
399 static inline u32 mii_access(int id, int index, int read)
400 {
401         u32 ret;
402
403         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
404         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
405         if (read)
406                 ret |= MII_ACC_MII_READ_;
407         else
408                 ret |= MII_ACC_MII_WRITE_;
409         ret |= MII_ACC_MII_BUSY_;
410
411         return ret;
412 }
413
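/* Read a PHY register through the MAC's MII interface: wait for MII_ACC
 * to go idle, start a read transaction, then return the low 16 bits of
 * MII_DATA.  Serialized by phy_mutex.
 */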
414 static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
415 {
416         struct lan78xx_net *dev = netdev_priv(netdev);
417         u32 val, addr;
418         int ret;
419
420         ret = usb_autopm_get_interface(dev->intf);
421         if (ret < 0)
422                 return ret;
423
424         mutex_lock(&dev->phy_mutex);
425
426         /* confirm MII not busy */
427         ret = lan78xx_phy_wait_not_busy(dev);
428         if (ret < 0)
429                 goto done;
430
431         /* set the address, index & direction (read from PHY) */
432         phy_id &= dev->mii.phy_id_mask;
433         idx &= dev->mii.reg_num_mask;
434         addr = mii_access(phy_id, idx, MII_READ);
435         ret = lan78xx_write_reg(dev, MII_ACC, addr);
436
437         ret = lan78xx_phy_wait_not_busy(dev);
438         if (ret < 0)
439                 goto done;
440
441         ret = lan78xx_read_reg(dev, MII_DATA, &val);
442
443         ret = (int)(val & 0xFFFF);
444
445 done:
446         mutex_unlock(&dev->phy_mutex);
447         usb_autopm_put_interface(dev->intf);
448         return ret;
449 }
450
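/* Write a PHY register: load the value into MII_DATA, then start a write
 * transaction through MII_ACC.  Serialized by phy_mutex.
 */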
451 static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
452                                int idx, int regval)
453 {
454         struct lan78xx_net *dev = netdev_priv(netdev);
455         u32 val, addr;
456         int ret;
457
458         if (usb_autopm_get_interface(dev->intf) < 0)
459                 return;
460
461         mutex_lock(&dev->phy_mutex);
462
463         /* confirm MII not busy */
464         ret = lan78xx_phy_wait_not_busy(dev);
465         if (ret < 0)
466                 goto done;
467
468         val = regval;
469         ret = lan78xx_write_reg(dev, MII_DATA, val);
470
471         /* set the address, index & direction (write to PHY) */
472         phy_id &= dev->mii.phy_id_mask;
473         idx &= dev->mii.reg_num_mask;
474         addr = mii_access(phy_id, idx, MII_WRITE);
475         ret = lan78xx_write_reg(dev, MII_ACC, addr);
476
477         ret = lan78xx_phy_wait_not_busy(dev);
478         if (ret < 0)
479                 goto done;
480
481 done:
482         mutex_unlock(&dev->phy_mutex);
483         usb_autopm_put_interface(dev->intf);
484 }
485
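/* Write a PHY MMD register using the indirect access registers: program
 * the MMD device address and register index through PHY_MMD_CTL and
 * PHY_MMD_REG_DATA, then write the data word.
 */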
486 static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
487                               int mmddev, int mmdidx, int regval)
488 {
489         struct lan78xx_net *dev = netdev_priv(netdev);
490         u32 val, addr;
491         int ret;
492
493         if (usb_autopm_get_interface(dev->intf) < 0)
494                 return;
495
496         mutex_lock(&dev->phy_mutex);
497
498         /* confirm MII not busy */
499         ret = lan78xx_phy_wait_not_busy(dev);
500         if (ret < 0)
501                 goto done;
502
503         mmddev &= 0x1F;
504
505         /* set up device address for MMD */
506         ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
507
508         phy_id &= dev->mii.phy_id_mask;
509         addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
510         ret = lan78xx_write_reg(dev, MII_ACC, addr);
511
512         ret = lan78xx_phy_wait_not_busy(dev);
513         if (ret < 0)
514                 goto done;
515
516         /* select register of MMD */
517         val = mmdidx;
518         ret = lan78xx_write_reg(dev, MII_DATA, val);
519
520         phy_id &= dev->mii.phy_id_mask;
521         addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
522         ret = lan78xx_write_reg(dev, MII_ACC, addr);
523
524         ret = lan78xx_phy_wait_not_busy(dev);
525         if (ret < 0)
526                 goto done;
527
528         /* select register data for MMD */
529         val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
530         ret = lan78xx_write_reg(dev, MII_DATA, val);
531
532         phy_id &= dev->mii.phy_id_mask;
533         addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
534         ret = lan78xx_write_reg(dev, MII_ACC, addr);
535
536         ret = lan78xx_phy_wait_not_busy(dev);
537         if (ret < 0)
538                 goto done;
539
540         /* write to MMD */
541         val = regval;
542         ret = lan78xx_write_reg(dev, MII_DATA, val);
543
544         phy_id &= dev->mii.phy_id_mask;
545         addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
546         ret = lan78xx_write_reg(dev, MII_ACC, addr);
547
548         ret = lan78xx_phy_wait_not_busy(dev);
549         if (ret < 0)
550                 goto done;
551
552 done:
553         mutex_unlock(&dev->phy_mutex);
554         usb_autopm_put_interface(dev->intf);
555 }
556
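/* Read a PHY MMD register through the same indirect PHY_MMD_CTL /
 * PHY_MMD_REG_DATA sequence as lan78xx_mmd_write(), returning the low
 * 16 bits of MII_DATA.
 */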
557 static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
558                             int mmddev, int mmdidx)
559 {
560         struct lan78xx_net *dev = netdev_priv(netdev);
561         u32 val, addr;
562         int ret;
563
564         ret = usb_autopm_get_interface(dev->intf);
565         if (ret < 0)
566                 return ret;
567
568         mutex_lock(&dev->phy_mutex);
569
570         /* confirm MII not busy */
571         ret = lan78xx_phy_wait_not_busy(dev);
572         if (ret < 0)
573                 goto done;
574
575         /* set up device address for MMD */
576         ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
577
578         phy_id &= dev->mii.phy_id_mask;
579         addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
580         ret = lan78xx_write_reg(dev, MII_ACC, addr);
581
582         ret = lan78xx_phy_wait_not_busy(dev);
583         if (ret < 0)
584                 goto done;
585
586         /* select register of MMD */
587         val = mmdidx;
588         ret = lan78xx_write_reg(dev, MII_DATA, val);
589
590         phy_id &= dev->mii.phy_id_mask;
591         addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
592         ret = lan78xx_write_reg(dev, MII_ACC, addr);
593
594         ret = lan78xx_phy_wait_not_busy(dev);
595         if (ret < 0)
596                 goto done;
597
598         /* select register data for MMD */
599         val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
600         ret = lan78xx_write_reg(dev, MII_DATA, val);
601
602         phy_id &= dev->mii.phy_id_mask;
603         addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
604         ret = lan78xx_write_reg(dev, MII_ACC, addr);
605
606         ret = lan78xx_phy_wait_not_busy(dev);
607         if (ret < 0)
608                 goto done;
609
610         /* set the address, index & direction (read from PHY) */
611         phy_id &= dev->mii.phy_id_mask;
612         addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
613         ret = lan78xx_write_reg(dev, MII_ACC, addr);
614
615         ret = lan78xx_phy_wait_not_busy(dev);
616         if (ret < 0)
617                 goto done;
618
619         /* read from MMD */
620         ret = lan78xx_read_reg(dev, MII_DATA, &val);
621
622         ret = (int)(val & 0xFFFF);
623
624 done:
625         mutex_unlock(&dev->phy_mutex);
626         usb_autopm_put_interface(dev->intf);
627         return ret;
628 }
629
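/* Poll E2P_CMD until the pending EEPROM command is no longer busy;
 * return -EIO if the controller reports a timeout or the command is
 * still busy after roughly one second.
 */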
630 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
631 {
632         unsigned long start_time = jiffies;
633         u32 val;
634         int ret;
635
636         do {
637                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
638                 if (unlikely(ret < 0))
639                         return -EIO;
640
641                 if (!(val & E2P_CMD_EPC_BUSY_) ||
642                     (val & E2P_CMD_EPC_TIMEOUT_))
643                         break;
644                 usleep_range(40, 100);
645         } while (!time_after(jiffies, start_time + HZ));
646
647         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
648                 netdev_warn(dev->net, "EEPROM read operation timeout");
649                 return -EIO;
650         }
651
652         return 0;
653 }
654
655 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
656 {
657         unsigned long start_time = jiffies;
658         u32 val;
659         int ret;
660
661         do {
662                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
663                 if (unlikely(ret < 0))
664                         return -EIO;
665
666                 if (!(val & E2P_CMD_EPC_BUSY_))
667                         return 0;
668
669                 usleep_range(40, 100);
670         } while (!time_after(jiffies, start_time + HZ));
671
672         netdev_warn(dev->net, "EEPROM is busy");
673         return -EIO;
674 }
675
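/* Read 'length' bytes from the EEPROM, one byte per READ command,
 * collecting each result from E2P_DATA.
 */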
676 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
677                                    u32 length, u8 *data)
678 {
679         u32 val;
680         int i, ret;
681
682         ret = lan78xx_eeprom_confirm_not_busy(dev);
683         if (ret)
684                 return ret;
685
686         for (i = 0; i < length; i++) {
687                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
688                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
689                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
690                 if (unlikely(ret < 0))
691                         return -EIO;
692
693                 ret = lan78xx_wait_eeprom(dev);
694                 if (ret < 0)
695                         return ret;
696
697                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
698                 if (unlikely(ret < 0))
699                         return -EIO;
700
701                 data[i] = val & 0xFF;
702                 offset++;
703         }
704
705         return 0;
706 }
707
708 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
709                                u32 length, u8 *data)
710 {
711         u8 sig;
712         int ret;
713
714         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
715         if ((ret == 0) && (sig == EEPROM_INDICATOR))
716                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
717         else
718                 ret = -EINVAL;
719
720         return ret;
721 }
722
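/* Write 'length' bytes to the EEPROM: issue a write/erase-enable (EWEN)
 * command first, then one WRITE command per byte, waiting for each to
 * complete.
 */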
723 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
724                                     u32 length, u8 *data)
725 {
726         u32 val;
727         int i, ret;
728
729         ret = lan78xx_eeprom_confirm_not_busy(dev);
730         if (ret)
731                 return ret;
732
733         /* Issue write/erase enable command */
734         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
735         ret = lan78xx_write_reg(dev, E2P_CMD, val);
736         if (unlikely(ret < 0))
737                 return -EIO;
738
739         ret = lan78xx_wait_eeprom(dev);
740         if (ret < 0)
741                 return ret;
742
743         for (i = 0; i < length; i++) {
744                 /* Fill data register */
745                 val = data[i];
746                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
747                 if (ret < 0)
748                         return ret;
749
750                 /* Send "write" command */
751                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
752                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
753                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
754                 if (ret < 0)
755                         return ret;
756
757                 ret = lan78xx_wait_eeprom(dev);
758                 if (ret < 0)
759                         return ret;
760
761                 offset++;
762         }
763
764         return 0;
765 }
766
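/* Read bytes from the OTP array: power up the OTP block if necessary,
 * then for each byte program OTP_ADDR1/OTP_ADDR2, issue a READ command
 * and poll OTP_STATUS until it completes.
 */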
767 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
768                                 u32 length, u8 *data)
769 {
770         int i;
771         int ret;
772         u32 buf;
773         unsigned long timeout;
774
775         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
776
777         if (buf & OTP_PWR_DN_PWRDN_N_) {
778                 /* clear it and wait to be cleared */
779                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
780
781                 timeout = jiffies + HZ;
782                 do {
783                         usleep_range(1, 10);
784                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
785                         if (time_after(jiffies, timeout)) {
786                                 netdev_warn(dev->net,
787                                             "timeout on OTP_PWR_DN");
788                                 return -EIO;
789                         }
790                 } while (buf & OTP_PWR_DN_PWRDN_N_);
791         }
792
793         for (i = 0; i < length; i++) {
794                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
795                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
796                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
797                                         ((offset + i) & OTP_ADDR2_10_3));
798
799                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
800                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
801
802                 timeout = jiffies + HZ;
803                 do {
804                         udelay(1);
805                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
806                         if (time_after(jiffies, timeout)) {
807                                 netdev_warn(dev->net,
808                                             "timeout on OTP_STATUS");
809                                 return -EIO;
810                         }
811                 } while (buf & OTP_STATUS_BUSY_);
812
813                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
814
815                 data[i] = (u8)(buf & 0xFF);
816         }
817
818         return 0;
819 }
820
821 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
822                             u32 length, u8 *data)
823 {
824         u8 sig;
825         int ret;
826
827         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
828
829         if (ret == 0) {
830                 if (sig == OTP_INDICATOR_2)
831                         offset += 0x100;
832                 else if (sig != OTP_INDICATOR_1)
833                         return -EINVAL;
834
835                 ret = lan78xx_read_raw_otp(dev, offset, length, data);
837         }
838
839         return ret;
840 }
841
842 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
843 {
844         int i, ret;
845
846         for (i = 0; i < 100; i++) {
847                 u32 dp_sel;
848
849                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
850                 if (unlikely(ret < 0))
851                         return -EIO;
852
853                 if (dp_sel & DP_SEL_DPRDY_)
854                         return 0;
855
856                 usleep_range(40, 100);
857         }
858
859         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
860
861         return -EIO;
862 }
863
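/* Write a block of 32-bit words into the internal RAM selected by
 * 'ram_select': set DP_SEL, then issue DP_ADDR/DP_DATA/DP_CMD_WRITE_
 * for each word.  Serialized by dataport_mutex.
 */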
864 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
865                                   u32 addr, u32 length, u32 *buf)
866 {
867         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
868         u32 dp_sel;
869         int i, ret;
870
871         if (usb_autopm_get_interface(dev->intf) < 0)
872                 return 0;
873
874         mutex_lock(&pdata->dataport_mutex);
875
876         ret = lan78xx_dataport_wait_not_busy(dev);
877         if (ret < 0)
878                 goto done;
879
880         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
881
882         dp_sel &= ~DP_SEL_RSEL_MASK_;
883         dp_sel |= ram_select;
884         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
885
886         for (i = 0; i < length; i++) {
887                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
888
889                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
890
891                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
892
893                 ret = lan78xx_dataport_wait_not_busy(dev);
894                 if (ret < 0)
895                         goto done;
896         }
897
898 done:
899         mutex_unlock(&pdata->dataport_mutex);
900         usb_autopm_put_interface(dev->intf);
901
902         return ret;
903 }
904
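/* Pack a MAC address into perfect-filter slot 'index': bytes 0-3 go into
 * the MAF_LO image, bytes 4-5 plus the VALID and TYPE_DST flags into the
 * MAF_HI image.  The table is flushed to hardware by the deferred
 * multicast work.
 */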
905 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
906                                     int index, u8 addr[ETH_ALEN])
907 {
908         u32     temp;
909
910         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
911                 temp = addr[3];
912                 temp = addr[2] | (temp << 8);
913                 temp = addr[1] | (temp << 8);
914                 temp = addr[0] | (temp << 8);
915                 pdata->pfilter_table[index][1] = temp;
916                 temp = addr[5];
917                 temp = addr[4] | (temp << 8);
918                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
919                 pdata->pfilter_table[index][0] = temp;
920         }
921 }
922
923 /* returns hash bit number for given MAC address */
924 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
925 {
926         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
927 }
928
929 static void lan78xx_deferred_multicast_write(struct work_struct *param)
930 {
931         struct lan78xx_priv *pdata =
932                         container_of(param, struct lan78xx_priv, set_multicast);
933         struct lan78xx_net *dev = pdata->dev;
934         int i;
935         int ret;
936
937         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
938                   pdata->rfe_ctl);
939
940         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
941                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
942
943         for (i = 1; i < NUM_OF_MAF; i++) {
944                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
945                 ret = lan78xx_write_reg(dev, MAF_LO(i),
946                                         pdata->pfilter_table[i][1]);
947                 ret = lan78xx_write_reg(dev, MAF_HI(i),
948                                         pdata->pfilter_table[i][0]);
949         }
950
951         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
952 }
953
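/* Rebuild the perfect-filter and multicast hash tables under rfe_ctl_lock
 * and defer the actual register writes to the set_multicast work item,
 * since this may be called from atomic context.
 */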
954 static void lan78xx_set_multicast(struct net_device *netdev)
955 {
956         struct lan78xx_net *dev = netdev_priv(netdev);
957         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
958         unsigned long flags;
959         int i;
960
961         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
962
963         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
964                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
965
966         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
967                 pdata->mchash_table[i] = 0;
968         /* pfilter_table[0] has own HW address */
969         for (i = 1; i < NUM_OF_MAF; i++) {
970                 pdata->pfilter_table[i][0] =
971                         pdata->pfilter_table[i][1] = 0;
972         }
973
974         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
975
976         if (dev->net->flags & IFF_PROMISC) {
977                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
978                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
979         } else {
980                 if (dev->net->flags & IFF_ALLMULTI) {
981                         netif_dbg(dev, drv, dev->net,
982                                   "receive all multicast enabled");
983                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
984                 }
985         }
986
987         if (netdev_mc_count(dev->net)) {
988                 struct netdev_hw_addr *ha;
989                 int i;
990
991                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
992
993                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
994
995                 i = 1;
996                 netdev_for_each_mc_addr(ha, netdev) {
997                         /* set first 32 into Perfect Filter */
998                         if (i < 33) {
999                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1000                         } else {
1001                                 u32 bitnum = lan78xx_hash(ha->addr);
1002
1003                                 pdata->mchash_table[bitnum / 32] |=
1004                                                         (1 << (bitnum % 32));
1005                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1006                         }
1007                         i++;
1008                 }
1009         }
1010
1011         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1012
1013         /* defer register writes to a sleepable context */
1014         schedule_work(&pdata->set_multicast);
1015 }
1016
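/* Resolve TX/RX pause settings from the local and link-partner
 * advertisements and program the FCT_FLOW thresholds and the FLOW
 * enable register accordingly.
 */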
1017 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1018                                       u16 lcladv, u16 rmtadv)
1019 {
1020         u32 flow = 0, fct_flow = 0;
1021         int ret;
1022
1023         u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1024
1025         if (cap & FLOW_CTRL_TX)
1026                 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
1027
1028         if (cap & FLOW_CTRL_RX)
1029                 flow |= FLOW_CR_RX_FCEN_;
1030
1031         if (dev->udev->speed == USB_SPEED_SUPER)
1032                 fct_flow = 0x817;
1033         else if (dev->udev->speed == USB_SPEED_HIGH)
1034                 fct_flow = 0x211;
1035
1036         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1037                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1038                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1039
1040         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1041
1042         /* threshold value should be set before enabling flow */
1043         ret = lan78xx_write_reg(dev, FLOW, flow);
1044
1045         return 0;
1046 }
1047
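/* Handle a PHY interrupt: acknowledge it, reset the MAC when the link
 * goes down, and on link up resolve speed/duplex, adjust U1/U2 LPM for
 * SuperSpeed, update flow control and report carrier on.
 */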
1048 static int lan78xx_link_reset(struct lan78xx_net *dev)
1049 {
1050         struct mii_if_info *mii = &dev->mii;
1051         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1052         int ladv, radv, ret;
1053         u32 buf;
1054
1055         /* clear PHY interrupt status */
1056         /* VTSE PHY */
1057         ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
1058         if (unlikely(ret < 0))
1059                 return -EIO;
1060
1061         /* clear LAN78xx interrupt status */
1062         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1063         if (unlikely(ret < 0))
1064                 return -EIO;
1065
1066         if (!mii_link_ok(mii) && dev->link_on) {
1067                 dev->link_on = false;
1068                 netif_carrier_off(dev->net);
1069
1070                 /* reset MAC */
1071                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1072                 if (unlikely(ret < 0))
1073                         return -EIO;
1074                 buf |= MAC_CR_RST_;
1075                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1076                 if (unlikely(ret < 0))
1077                         return -EIO;
1078         } else if (mii_link_ok(mii) && !dev->link_on) {
1079                 dev->link_on = true;
1080
1081                 mii_check_media(mii, 1, 1);
1082                 mii_ethtool_gset(&dev->mii, &ecmd);
1083
1084                 mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
1085
1086                 if (dev->udev->speed == USB_SPEED_SUPER) {
1087                         if (ethtool_cmd_speed(&ecmd) == 1000) {
1088                                 /* disable U2 */
1089                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1090                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1091                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1092                                 /* enable U1 */
1093                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1094                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1095                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1096                         } else {
1097                                 /* enable U1 & U2 */
1098                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1099                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1100                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1101                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1102                         }
1103                 }
1104
1105                 ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
1106                 if (ladv < 0)
1107                         return ladv;
1108
1109                 radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
1110                 if (radv < 0)
1111                         return radv;
1112
1113                 netif_dbg(dev, link, dev->net,
1114                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1115                           ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1116
1117                 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1118                 netif_carrier_on(dev->net);
1119         }
1120
1121         return ret;
1122 }
1123
1124 /* some work can't be done in tasklets, so we use keventd
1125  *
1126  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1127  * but tasklet_schedule() doesn't.  Hope the failure is rare.
1128  */
1129 void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1130 {
1131         set_bit(work, &dev->flags);
1132         if (!schedule_delayed_work(&dev->wq, 0))
1133                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1134 }
1135
1136 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1137 {
1138         u32 intdata;
1139
1140         if (urb->actual_length != 4) {
1141                 netdev_warn(dev->net,
1142                             "unexpected urb length %d", urb->actual_length);
1143                 return;
1144         }
1145
1146         memcpy(&intdata, urb->transfer_buffer, 4);
1147         le32_to_cpus(&intdata);
1148
1149         if (intdata & INT_ENP_PHY_INT) {
1150                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1151                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1152         } else
1153                 netdev_warn(dev->net,
1154                             "unexpected interrupt: 0x%08x\n", intdata);
1155 }
1156
1157 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1158 {
1159         return MAX_EEPROM_SIZE;
1160 }
1161
1162 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1163                                       struct ethtool_eeprom *ee, u8 *data)
1164 {
1165         struct lan78xx_net *dev = netdev_priv(netdev);
1166
1167         ee->magic = LAN78XX_EEPROM_MAGIC;
1168
1169         return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1170 }
1171
1172 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1173                                       struct ethtool_eeprom *ee, u8 *data)
1174 {
1175         struct lan78xx_net *dev = netdev_priv(netdev);
1176
1177         /* Allow entire eeprom update only */
1178         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1179             (ee->offset == 0) &&
1180             (ee->len == 512) &&
1181             (data[0] == EEPROM_INDICATOR))
1182                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1183         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1184                  (ee->offset == 0) &&
1185                  (ee->len == 512) &&
1186                  (data[0] == OTP_INDICATOR_1))
1187                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1188
1189         return -EINVAL;
1190 }
1191
1192 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1193                                 u8 *data)
1194 {
1195         if (stringset == ETH_SS_STATS)
1196                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1197 }
1198
1199 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1200 {
1201         if (sset == ETH_SS_STATS)
1202                 return ARRAY_SIZE(lan78xx_gstrings);
1203         else
1204                 return -EOPNOTSUPP;
1205 }
1206
1207 static void lan78xx_get_stats(struct net_device *netdev,
1208                               struct ethtool_stats *stats, u64 *data)
1209 {
1210         struct lan78xx_net *dev = netdev_priv(netdev);
1211         struct lan78xx_statstage lan78xx_stat;
1212         u32 *p;
1213         int i;
1214
1215         if (usb_autopm_get_interface(dev->intf) < 0)
1216                 return;
1217
1218         if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1219                 p = (u32 *)&lan78xx_stat;
1220                 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1221                         data[i] = p[i];
1222         }
1223
1224         usb_autopm_put_interface(dev->intf);
1225 }
1226
1227 static void lan78xx_get_wol(struct net_device *netdev,
1228                             struct ethtool_wolinfo *wol)
1229 {
1230         struct lan78xx_net *dev = netdev_priv(netdev);
1231         int ret;
1232         u32 buf;
1233         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1234
1235         if (usb_autopm_get_interface(dev->intf) < 0)
1236                 return;
1237
1238         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1239         if (unlikely(ret < 0)) {
1240                 wol->supported = 0;
1241                 wol->wolopts = 0;
1242         } else {
1243                 if (buf & USB_CFG_RMT_WKP_) {
1244                         wol->supported = WAKE_ALL;
1245                         wol->wolopts = pdata->wol;
1246                 } else {
1247                         wol->supported = 0;
1248                         wol->wolopts = 0;
1249                 }
1250         }
1251
1252         usb_autopm_put_interface(dev->intf);
1253 }
1254
1255 static int lan78xx_set_wol(struct net_device *netdev,
1256                            struct ethtool_wolinfo *wol)
1257 {
1258         struct lan78xx_net *dev = netdev_priv(netdev);
1259         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1260         int ret;
1261
1262         ret = usb_autopm_get_interface(dev->intf);
1263         if (ret < 0)
1264                 return ret;
1265
1266         pdata->wol = 0;
1267         if (wol->wolopts & WAKE_UCAST)
1268                 pdata->wol |= WAKE_UCAST;
1269         if (wol->wolopts & WAKE_MCAST)
1270                 pdata->wol |= WAKE_MCAST;
1271         if (wol->wolopts & WAKE_BCAST)
1272                 pdata->wol |= WAKE_BCAST;
1273         if (wol->wolopts & WAKE_MAGIC)
1274                 pdata->wol |= WAKE_MAGIC;
1275         if (wol->wolopts & WAKE_PHY)
1276                 pdata->wol |= WAKE_PHY;
1277         if (wol->wolopts & WAKE_ARP)
1278                 pdata->wol |= WAKE_ARP;
1279
1280         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1281
1282         usb_autopm_put_interface(dev->intf);
1283
1284         return ret;
1285 }
1286
1287 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1288 {
1289         struct lan78xx_net *dev = netdev_priv(net);
1290         int ret;
1291         u32 buf;
1292         u32 adv, lpadv;
1293
1294         ret = usb_autopm_get_interface(dev->intf);
1295         if (ret < 0)
1296                 return ret;
1297
1298         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1299         if (buf & MAC_CR_EEE_EN_) {
1300                 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1301                                        PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
1302                 adv = mmd_eee_adv_to_ethtool_adv_t(buf);
1303                 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1304                                        PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1305                 lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1306
1307                 edata->eee_enabled = true;
1308                 edata->supported = true;
1309                 edata->eee_active = !!(adv & lpadv);
1310                 edata->advertised = adv;
1311                 edata->lp_advertised = lpadv;
1312                 edata->tx_lpi_enabled = true;
1313                 /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer are both in microseconds */
1314                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1315                 edata->tx_lpi_timer = buf;
1316         } else {
1317                 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1318                                        PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1319                 lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1320
1321                 edata->eee_enabled = false;
1322                 edata->eee_active = false;
1323                 edata->supported = false;
1324                 edata->advertised = 0;
1325                 edata->lp_advertised = lpadv;
1326                 edata->tx_lpi_enabled = false;
1327                 edata->tx_lpi_timer = 0;
1328         }
1329
1330         usb_autopm_put_interface(dev->intf);
1331
1332         return 0;
1333 }
1334
1335 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1336 {
1337         struct lan78xx_net *dev = netdev_priv(net);
1338         int ret;
1339         u32 buf;
1340
1341         ret = usb_autopm_get_interface(dev->intf);
1342         if (ret < 0)
1343                 return ret;
1344
1345         if (edata->eee_enabled) {
1346                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1347                 buf |= MAC_CR_EEE_EN_;
1348                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1349
1350                 buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
1351                 lan78xx_mmd_write(dev->net, dev->mii.phy_id,
1352                                   PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
1353         } else {
1354                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1355                 buf &= ~MAC_CR_EEE_EN_;
1356                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1357         }
1358
1359         usb_autopm_put_interface(dev->intf);
1360
1361         return 0;
1362 }
1363
1364 static u32 lan78xx_get_link(struct net_device *net)
1365 {
1366         struct lan78xx_net *dev = netdev_priv(net);
1367
1368         return mii_link_ok(&dev->mii);
1369 }
1370
1371 int lan78xx_nway_reset(struct net_device *net)
1372 {
1373         struct lan78xx_net *dev = netdev_priv(net);
1374
1375         if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1376                 return -EOPNOTSUPP;
1377
1378         return mii_nway_restart(&dev->mii);
1379 }
1380
1381 static void lan78xx_get_drvinfo(struct net_device *net,
1382                                 struct ethtool_drvinfo *info)
1383 {
1384         struct lan78xx_net *dev = netdev_priv(net);
1385
1386         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1387         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1388         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1389 }
1390
1391 static u32 lan78xx_get_msglevel(struct net_device *net)
1392 {
1393         struct lan78xx_net *dev = netdev_priv(net);
1394
1395         return dev->msg_enable;
1396 }
1397
1398 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1399 {
1400         struct lan78xx_net *dev = netdev_priv(net);
1401
1402         dev->msg_enable = level;
1403 }
1404
1405 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1406 {
1407         struct lan78xx_net *dev = netdev_priv(net);
1408         struct mii_if_info *mii = &dev->mii;
1409         int ret;
1410         int buf;
1411
1412         if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1413                 return -EOPNOTSUPP;
1414
1415         ret = usb_autopm_get_interface(dev->intf);
1416         if (ret < 0)
1417                 return ret;
1418
1419         ret = mii_ethtool_gset(&dev->mii, cmd);
1420
1421         mii->mdio_write(mii->dev, mii->phy_id,
1422                         PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
1423         buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
1424         mii->mdio_write(mii->dev, mii->phy_id,
1425                         PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
1426
1427         buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
1428         if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
1429                 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1430                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1431         } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
1432                 cmd->eth_tp_mdix = ETH_TP_MDI;
1433                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1434         } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
1435                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1436                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1437         }
1438
1439         usb_autopm_put_interface(dev->intf);
1440
1441         return ret;
1442 }
1443
1444 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1445 {
1446         struct lan78xx_net *dev = netdev_priv(net);
1447         struct mii_if_info *mii = &dev->mii;
1448         int ret = 0;
1449         int temp;
1450
1451         if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1452                 return -EOPNOTSUPP;
1453
1454         ret = usb_autopm_get_interface(dev->intf);
1455         if (ret < 0)
1456                 return ret;
1457
1458         if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1459                 if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
1460                         mii->mdio_write(mii->dev, mii->phy_id,
1461                                         PHY_EXT_GPIO_PAGE,
1462                                         PHY_EXT_GPIO_PAGE_SPACE_1);
1463                         temp = mii->mdio_read(mii->dev, mii->phy_id,
1464                                         PHY_EXT_MODE_CTRL);
1465                         temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1466                         mii->mdio_write(mii->dev, mii->phy_id,
1467                                         PHY_EXT_MODE_CTRL,
1468                                         temp | PHY_EXT_MODE_CTRL_MDI_);
1469                         mii->mdio_write(mii->dev, mii->phy_id,
1470                                         PHY_EXT_GPIO_PAGE,
1471                                         PHY_EXT_GPIO_PAGE_SPACE_0);
1472                 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
1473                         mii->mdio_write(mii->dev, mii->phy_id,
1474                                         PHY_EXT_GPIO_PAGE,
1475                                         PHY_EXT_GPIO_PAGE_SPACE_1);
1476                         temp = mii->mdio_read(mii->dev, mii->phy_id,
1477                                         PHY_EXT_MODE_CTRL);
1478                         temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1479                         mii->mdio_write(mii->dev, mii->phy_id,
1480                                         PHY_EXT_MODE_CTRL,
1481                                         temp | PHY_EXT_MODE_CTRL_MDI_X_);
1482                         mii->mdio_write(mii->dev, mii->phy_id,
1483                                         PHY_EXT_GPIO_PAGE,
1484                                         PHY_EXT_GPIO_PAGE_SPACE_0);
1485                 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
1486                         mii->mdio_write(mii->dev, mii->phy_id,
1487                                         PHY_EXT_GPIO_PAGE,
1488                                         PHY_EXT_GPIO_PAGE_SPACE_1);
1489                         temp = mii->mdio_read(mii->dev, mii->phy_id,
1490                                                         PHY_EXT_MODE_CTRL);
1491                         temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1492                         mii->mdio_write(mii->dev, mii->phy_id,
1493                                         PHY_EXT_MODE_CTRL,
1494                                         temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1495                         mii->mdio_write(mii->dev, mii->phy_id,
1496                                         PHY_EXT_GPIO_PAGE,
1497                                         PHY_EXT_GPIO_PAGE_SPACE_0);
1498                 }
1499         }
1500
1501         /* change speed & duplex */
1502         ret = mii_ethtool_sset(&dev->mii, cmd);
1503
1504         if (!cmd->autoneg) {
1505                 /* force link down */
1506                 temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
1507                 mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
1508                                 temp | BMCR_LOOPBACK);
1509                 mdelay(1);
1510                 mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
1511         }
1512
1513         usb_autopm_put_interface(dev->intf);
1514
1515         return ret;
1516 }
1517
1518 static const struct ethtool_ops lan78xx_ethtool_ops = {
1519         .get_link       = lan78xx_get_link,
1520         .nway_reset     = lan78xx_nway_reset,
1521         .get_drvinfo    = lan78xx_get_drvinfo,
1522         .get_msglevel   = lan78xx_get_msglevel,
1523         .set_msglevel   = lan78xx_set_msglevel,
1524         .get_settings   = lan78xx_get_settings,
1525         .set_settings   = lan78xx_set_settings,
1526         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1527         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1528         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1529         .get_ethtool_stats = lan78xx_get_stats,
1530         .get_sset_count = lan78xx_get_sset_count,
1531         .get_strings    = lan78xx_get_strings,
1532         .get_wol        = lan78xx_get_wol,
1533         .set_wol        = lan78xx_set_wol,
1534         .get_eee        = lan78xx_get_eee,
1535         .set_eee        = lan78xx_set_eee,
1536 };
1537
1538 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1539 {
1540         struct lan78xx_net *dev = netdev_priv(netdev);
1541
1542         if (!netif_running(netdev))
1543                 return -EINVAL;
1544
1545         return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
1546 }
1547
1548 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1549 {
1550         u32 addr_lo, addr_hi;
1551         int ret;
1552         u8 addr[6];
1553
1554         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1555         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1556
1557         addr[0] = addr_lo & 0xFF;
1558         addr[1] = (addr_lo >> 8) & 0xFF;
1559         addr[2] = (addr_lo >> 16) & 0xFF;
1560         addr[3] = (addr_lo >> 24) & 0xFF;
1561         addr[4] = addr_hi & 0xFF;
1562         addr[5] = (addr_hi >> 8) & 0xFF;
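             /* RX_ADDRL/RX_ADDRH hold the station address in little-endian
              * byte order, e.g. RX_ADDRL = 0x120f8000 and RX_ADDRH = 0x5634
              * decode to the MAC address 00:80:0f:12:34:56
              */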
1563
1564         if (!is_valid_ether_addr(addr)) {
1565                 /* reading mac address from EEPROM or OTP */
1566                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1567                                          addr) == 0) ||
1568                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1569                                       addr) == 0)) {
1570                         if (is_valid_ether_addr(addr)) {
1571                                 /* eeprom values are valid so use them */
1572                                 netif_dbg(dev, ifup, dev->net,
1573                                           "MAC address read from EEPROM");
1574                         } else {
1575                                 /* generate random MAC */
1576                                 random_ether_addr(addr);
1577                                 netif_dbg(dev, ifup, dev->net,
1578                                           "MAC address set to random addr");
1579                         }
1580
1581                         addr_lo = addr[0] | (addr[1] << 8) |
1582                                   (addr[2] << 16) | (addr[3] << 24);
1583                         addr_hi = addr[4] | (addr[5] << 8);
1584
1585                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1586                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1587                 } else {
1588                         /* generate random MAC */
1589                         random_ether_addr(addr);
1590                         netif_dbg(dev, ifup, dev->net,
1591                                   "MAC address set to random addr");

                             /* keep addr_lo/addr_hi in sync with the random
                              * address so the MAF/RX_ADDR writes below do not
                              * program the stale, invalid value read earlier
                              */
                             addr_lo = addr[0] | (addr[1] << 8) |
                                       (addr[2] << 16) | (addr[3] << 24);
                             addr_hi = addr[4] | (addr[5] << 8);

                             ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
                             ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1592                 }
1593         }
1594
1595         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1596         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1597
1598         ether_addr_copy(dev->net->dev_addr, addr);
1599 }
1600
1601 static void lan78xx_mii_init(struct lan78xx_net *dev)
1602 {
1603         /* Initialize MII structure */
1604         dev->mii.dev = dev->net;
1605         dev->mii.mdio_read = lan78xx_mdio_read;
1606         dev->mii.mdio_write = lan78xx_mdio_write;
1607         dev->mii.phy_id_mask = 0x1f;
1608         dev->mii.reg_num_mask = 0x1f;
1609         dev->mii.phy_id = INTERNAL_PHY_ID;
1610         dev->mii.supports_gmii = true;
1611 }
1612
1613 static int lan78xx_phy_init(struct lan78xx_net *dev)
1614 {
1615         int temp;
1616         struct mii_if_info *mii = &dev->mii;
1617
1618         if ((!mii->mdio_write) || (!mii->mdio_read))
1619                 return -EOPNOTSUPP;
1620
1621         temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
1622         temp |= ADVERTISE_ALL;
1623         mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
1624                         temp | ADVERTISE_CSMA |
1625                         ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1626
1627         /* set to AUTOMDIX */
1628         mii->mdio_write(mii->dev, mii->phy_id,
1629                         PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
1630         temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
1631         temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1632         mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
1633                         temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1634         mii->mdio_write(mii->dev, mii->phy_id,
1635                         PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
1636         dev->mdix_ctrl = ETH_TP_MDI_AUTO;
1637
1638         /* MAC doesn't support 1000HD */
1639         temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
1640         mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
1641                         temp & ~ADVERTISE_1000HALF);
1642
1643         /* clear pending interrupt status and enable link-change interrupts */
1644         mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
1645         mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
1646                         PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
1647                         PHY_VTSE_INT_MASK_LINK_CHANGE_);
1648
1649         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1650
1651         return 0;
1652 }
1653
1654 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1655 {
1656         int ret = 0;
1657         u32 buf;
1658         bool rxenabled;
1659
1660         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1661
1662         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1663
1664         if (rxenabled) {
1665                 buf &= ~MAC_RX_RXEN_;
1666                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1667         }
1668
1669         /* add 4 to size for FCS */
1670         buf &= ~MAC_RX_MAX_SIZE_MASK_;
1671         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1672
1673         ret = lan78xx_write_reg(dev, MAC_RX, buf);
1674
1675         if (rxenabled) {
1676                 buf |= MAC_RX_RXEN_;
1677                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1678         }
1679
1680         return 0;
1681 }
1682
1683 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1684 {
1685         struct sk_buff *skb;
1686         unsigned long flags;
1687         int count = 0;
1688
1689         spin_lock_irqsave(&q->lock, flags);
1690         while (!skb_queue_empty(q)) {
1691                 struct skb_data *entry;
1692                 struct urb *urb;
1693                 int ret;
1694
1695                 skb_queue_walk(q, skb) {
1696                         entry = (struct skb_data *)skb->cb;
1697                         if (entry->state != unlink_start)
1698                                 goto found;
1699                 }
1700                 break;
1701 found:
1702                 entry->state = unlink_start;
1703                 urb = entry->urb;
1704
1705                 /* Take a reference on the URB so it cannot be freed
1706                  * while usb_unlink_urb() runs; usb_unlink_urb() always
1707                  * races with the .complete handler (including defer_bh),
1708                  * and dropping the last reference in that race would
1709                  * trigger a use-after-free inside usb_unlink_urb().
1710                  */
1711                 usb_get_urb(urb);
1712                 spin_unlock_irqrestore(&q->lock, flags);
1713                 /* during some PM-driven resume scenarios,
1714                  * these (async) unlinks complete immediately
1715                  */
1716                 ret = usb_unlink_urb(urb);
1717                 if (ret != -EINPROGRESS && ret != 0)
1718                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1719                 else
1720                         count++;
1721                 usb_put_urb(urb);
1722                 spin_lock_irqsave(&q->lock, flags);
1723         }
1724         spin_unlock_irqrestore(&q->lock, flags);
1725         return count;
1726 }
1727
1728 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1729 {
1730         struct lan78xx_net *dev = netdev_priv(netdev);
1731         int ll_mtu = new_mtu + netdev->hard_header_len;
1732         int old_hard_mtu = dev->hard_mtu;
1733         int old_rx_urb_size = dev->rx_urb_size;
1734         int ret;
1735
1736         if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1737                 return -EINVAL;
1738
1739         if (new_mtu <= 0)
1740                 return -EINVAL;
1741         /* no second zero-length packet read wanted after mtu-sized packets */
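             /* e.g. on a high-speed link with 512-byte bulk packets, an
              * ll_mtu that is a multiple of 512 would end every MTU-sized
              * transfer exactly on a packet boundary and require an extra
              * zero-length packet to terminate it
              */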
1742         if ((ll_mtu % dev->maxpacket) == 0)
1743                 return -EDOM;
1744
1745         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1746
1747         netdev->mtu = new_mtu;
1748
1749         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1750         if (dev->rx_urb_size == old_hard_mtu) {
1751                 dev->rx_urb_size = dev->hard_mtu;
1752                 if (dev->rx_urb_size > old_rx_urb_size) {
1753                         if (netif_running(dev->net)) {
1754                                 unlink_urbs(dev, &dev->rxq);
1755                                 tasklet_schedule(&dev->bh);
1756                         }
1757                 }
1758         }
1759
1760         return 0;
1761 }
1762
1763 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1764 {
1765         struct lan78xx_net *dev = netdev_priv(netdev);
1766         struct sockaddr *addr = p;
1767         u32 addr_lo, addr_hi;
1768         int ret;
1769
1770         if (netif_running(netdev))
1771                 return -EBUSY;
1772
1773         if (!is_valid_ether_addr(addr->sa_data))
1774                 return -EADDRNOTAVAIL;
1775
1776         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1777
1778         addr_lo = netdev->dev_addr[0] |
1779                   netdev->dev_addr[1] << 8 |
1780                   netdev->dev_addr[2] << 16 |
1781                   netdev->dev_addr[3] << 24;
1782         addr_hi = netdev->dev_addr[4] |
1783                   netdev->dev_addr[5] << 8;
1784
1785         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1786         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1787
1788         return 0;
1789 }
1790
1791 /* Enable or disable Rx checksum offload engine and VLAN filtering */
1792 static int lan78xx_set_features(struct net_device *netdev,
1793                                 netdev_features_t features)
1794 {
1795         struct lan78xx_net *dev = netdev_priv(netdev);
1796         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1797         unsigned long flags;
1798         int ret;
1799
1800         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1801
1802         if (features & NETIF_F_RXCSUM) {
1803                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1804                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1805         } else {
1806                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1807                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1808         }
1809
1810         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1811                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1812         else
1813                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1814
1815         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1816
1817         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1818
1819         return 0;
1820 }
1821
1822 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1823 {
1824         struct lan78xx_priv *pdata =
1825                         container_of(param, struct lan78xx_priv, set_vlan);
1826         struct lan78xx_net *dev = pdata->dev;
1827
1828         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1829                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1830 }
1831
1832 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1833                                    __be16 proto, u16 vid)
1834 {
1835         struct lan78xx_net *dev = netdev_priv(netdev);
1836         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1837         u16 vid_bit_index;
1838         u16 vid_dword_index;
1839
1840         vid_dword_index = (vid >> 5) & 0x7F;
1841         vid_bit_index = vid & 0x1F;
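                     /* e.g. VID 291 (0x123): dword index 291 >> 5 = 9 and
                      * bit index 291 & 0x1F = 3, so bit 3 of vlan_table[9]
                      * represents that VLAN in the 4096-bit filter table
                      */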
1842
1843         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1844
1845         /* defer register writes to a sleepable context */
1846         schedule_work(&pdata->set_vlan);
1847
1848         return 0;
1849 }
1850
1851 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1852                                     __be16 proto, u16 vid)
1853 {
1854         struct lan78xx_net *dev = netdev_priv(netdev);
1855         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1856         u16 vid_bit_index;
1857         u16 vid_dword_index;
1858
1859         vid_dword_index = (vid >> 5) & 0x7F;
1860         vid_bit_index = vid & 0x1F;
1861
1862         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1863
1864         /* defer register writes to a sleepable context */
1865         schedule_work(&pdata->set_vlan);
1866
1867         return 0;
1868 }
1869
1870 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1871 {
1872         int ret;
1873         u32 buf;
1874         u32 regs[6] = { 0 };
1875
1876         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1877         if (buf & USB_CFG1_LTM_ENABLE_) {
1878                 u8 temp[2];
1879                 /* Get values from EEPROM first */
1880                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1881                         if (temp[0] == 24) {
1882                                 ret = lan78xx_read_raw_eeprom(dev,
1883                                                               temp[1] * 2,
1884                                                               24,
1885                                                               (u8 *)regs);
1886                                 if (ret < 0)
1887                                         return;
1888                         }
1889                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1890                         if (temp[0] == 24) {
1891                                 ret = lan78xx_read_raw_otp(dev,
1892                                                            temp[1] * 2,
1893                                                            24,
1894                                                            (u8 *)regs);
1895                                 if (ret < 0)
1896                                         return;
1897                         }
1898                 }
1899         }
1900
1901         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1902         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1903         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1904         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1905         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1906         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1907 }
1908
1909 static int lan78xx_reset(struct lan78xx_net *dev)
1910 {
1911         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1912         u32 buf;
1913         int ret = 0;
1914         unsigned long timeout;
1915
1916         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1917         buf |= HW_CFG_LRST_;
1918         ret = lan78xx_write_reg(dev, HW_CFG, buf);
1919
1920         timeout = jiffies + HZ;
1921         do {
1922                 mdelay(1);
1923                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1924                 if (time_after(jiffies, timeout)) {
1925                         netdev_warn(dev->net,
1926                                     "timeout on completion of LiteReset");
1927                         return -EIO;
1928                 }
1929         } while (buf & HW_CFG_LRST_);
1930
1931         lan78xx_init_mac_address(dev);
1932
1933         /* Respond to an IN token with a NAK when there is no RX data */
1934         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1935         buf |= USB_CFG_BIR_;
1936         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1937
1938         /* Init LTM */
1939         lan78xx_init_ltm(dev);
1940
1941         dev->net->hard_header_len += TX_OVERHEAD;
1942         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1943
1944         if (dev->udev->speed == USB_SPEED_SUPER) {
1945                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
1946                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1947                 dev->rx_qlen = 4;
1948                 dev->tx_qlen = 4;
1949         } else if (dev->udev->speed == USB_SPEED_HIGH) {
1950                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
1951                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1952                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
1953                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
1954         } else {
1955                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1956                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1957                 dev->rx_qlen = 4;
                     /* full-speed also needs a non-zero tx_qlen, otherwise
                      * the TX queue is stopped after the first URB and is
                      * never woken again in lan78xx_rx_bh()
                      */
                     dev->tx_qlen = 4;
1958         }
1959
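             /* buf was computed above as the burst size divided by the bulk
              * packet size for the negotiated speed, i.e. BURST_CAP is
              * programmed in units of USB packets
              */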
1960         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
1961         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
1962
1963         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1964         buf |= HW_CFG_MEF_;
1965         ret = lan78xx_write_reg(dev, HW_CFG, buf);
1966
1967         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1968         buf |= USB_CFG_BCE_;
1969         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1970
1971         /* set FIFO sizes */
1972         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
1973         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
1974
1975         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
1976         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
1977
1978         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
1979         ret = lan78xx_write_reg(dev, FLOW, 0);
1980         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
1981
1982         /* Don't need rfe_ctl_lock during initialisation */
1983         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1984         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
1985         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1986
1987         /* Enable or disable checksum offload engines */
1988         lan78xx_set_features(dev->net, dev->net->features);
1989
1990         lan78xx_set_multicast(dev->net);
1991
1992         /* reset PHY */
1993         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1994         buf |= PMT_CTL_PHY_RST_;
1995         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
1996
1997         timeout = jiffies + HZ;
1998         do {
1999                 mdelay(1);
2000                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2001                 if (time_after(jiffies, timeout)) {
2002                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2003                         return -EIO;
2004                 }
2005         } while (buf & PMT_CTL_PHY_RST_);
2006
2007         lan78xx_mii_init(dev);
2008
2009         ret = lan78xx_phy_init(dev);
2010
2011         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2012
2013         buf |= MAC_CR_GMII_EN_;
2014         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2015
2016         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2017
2018         /* if EEE is enabled in the MAC, enable EEE advertisement on the PHY */
2019         if (buf & MAC_CR_EEE_EN_)
2020                 lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
2021
2022         /* enable PHY interrupts */
2023         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2024         buf |= INT_ENP_PHY_INT;
2025         ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2026
2027         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2028         buf |= MAC_TX_TXEN_;
2029         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2030
2031         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2032         buf |= FCT_TX_CTL_EN_;
2033         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2034
2035         ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2036
2037         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2038         buf |= MAC_RX_RXEN_;
2039         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2040
2041         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2042         buf |= FCT_RX_CTL_EN_;
2043         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2044
2045         if (!mii_nway_restart(&dev->mii))
2046                 netif_dbg(dev, link, dev->net, "autoneg initiated");
2047
2048         return 0;
2049 }
2050
2051 static int lan78xx_open(struct net_device *net)
2052 {
2053         struct lan78xx_net *dev = netdev_priv(net);
2054         int ret;
2055
2056         ret = usb_autopm_get_interface(dev->intf);
2057         if (ret < 0)
2058                 goto out;
2059
2060         ret = lan78xx_reset(dev);
2061         if (ret < 0)
2062                 goto done;
2063
2064         /* submit the interrupt URB so link changes are reported */
2065         if (dev->urb_intr) {
2066                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2067                 if (ret < 0) {
2068                         netif_err(dev, ifup, dev->net,
2069                                   "intr submit %d\n", ret);
2070                         goto done;
2071                 }
2072         }
2073
2074         set_bit(EVENT_DEV_OPEN, &dev->flags);
2075
2076         netif_start_queue(net);
2077
2078         dev->link_on = false;
2079
2080         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2081 done:
2082         usb_autopm_put_interface(dev->intf);
2083
2084 out:
2085         return ret;
2086 }
2087
2088 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2089 {
2090         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2091         DECLARE_WAITQUEUE(wait, current);
2092         int temp;
2093
2094         /* ensure there are no more active urbs */
2095         add_wait_queue(&unlink_wakeup, &wait);
2096         set_current_state(TASK_UNINTERRUPTIBLE);
2097         dev->wait = &unlink_wakeup;
2098         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2099
2100         /* maybe wait for deletions to finish. */
2101         while (!skb_queue_empty(&dev->rxq) &&
2102                !skb_queue_empty(&dev->txq) &&
2103                !skb_queue_empty(&dev->done)) {
2104                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2105                 set_current_state(TASK_UNINTERRUPTIBLE);
2106                 netif_dbg(dev, ifdown, dev->net,
2107                           "waited for %d urb completions\n", temp);
2108         }
2109         set_current_state(TASK_RUNNING);
2110         dev->wait = NULL;
2111         remove_wait_queue(&unlink_wakeup, &wait);
2112 }
2113
2114 int lan78xx_stop(struct net_device *net)
2115 {
2116         struct lan78xx_net              *dev = netdev_priv(net);
2117
2118         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2119         netif_stop_queue(net);
2120
2121         netif_info(dev, ifdown, dev->net,
2122                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2123                    net->stats.rx_packets, net->stats.tx_packets,
2124                    net->stats.rx_errors, net->stats.tx_errors);
2125
2126         lan78xx_terminate_urbs(dev);
2127
2128         usb_kill_urb(dev->urb_intr);
2129
2130         skb_queue_purge(&dev->rxq_pause);
2131
2132         /* deferred work (task, timer, softirq) must also stop.
2133          * can't flush_scheduled_work() until we drop rtnl (later),
2134          * else workers could deadlock; so make workers a NOP.
2135          */
2136         dev->flags = 0;
2137         cancel_delayed_work_sync(&dev->wq);
2138         tasklet_kill(&dev->bh);
2139
2140         usb_autopm_put_interface(dev->intf);
2141
2142         return 0;
2143 }
2144
2145 static int lan78xx_linearize(struct sk_buff *skb)
2146 {
2147         return skb_linearize(skb);
2148 }
2149
2150 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2151                                        struct sk_buff *skb, gfp_t flags)
2152 {
2153         u32 tx_cmd_a, tx_cmd_b;
2154
2155         if (skb_headroom(skb) < TX_OVERHEAD) {
2156                 struct sk_buff *skb2;
2157
2158                 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2159                 dev_kfree_skb_any(skb);
2160                 skb = skb2;
2161                 if (!skb)
2162                         return NULL;
2163         }
2164
2165         if (lan78xx_linearize(skb) < 0)
2166                 return NULL;
2167
2168         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2169
2170         if (skb->ip_summed == CHECKSUM_PARTIAL)
2171                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2172
2173         tx_cmd_b = 0;
2174         if (skb_is_gso(skb)) {
2175                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2176
2177                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2178
2179                 tx_cmd_a |= TX_CMD_A_LSO_;
2180         }
2181
2182         if (skb_vlan_tag_present(skb)) {
2183                 tx_cmd_a |= TX_CMD_A_IVTG_;
2184                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2185         }
2186
2187         skb_push(skb, 4);
2188         cpu_to_le32s(&tx_cmd_b);
2189         memcpy(skb->data, &tx_cmd_b, 4);
2190
2191         skb_push(skb, 4);
2192         cpu_to_le32s(&tx_cmd_a);
2193         memcpy(skb->data, &tx_cmd_a, 4);
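             /* the frame now carries an 8-byte command header: bytes 0-3
              * hold tx_cmd_a and bytes 4-7 hold tx_cmd_b, both little-endian,
              * immediately followed by the original packet data
              */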
2194
2195         return skb;
2196 }
2197
2198 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2199                                struct sk_buff_head *list, enum skb_state state)
2200 {
2201         unsigned long flags;
2202         enum skb_state old_state;
2203         struct skb_data *entry = (struct skb_data *)skb->cb;
2204
2205         spin_lock_irqsave(&list->lock, flags);
2206         old_state = entry->state;
2207         entry->state = state;
2208
2209         __skb_unlink(skb, list);
2210         spin_unlock(&list->lock);
2211         spin_lock(&dev->done.lock);
2212
2213         __skb_queue_tail(&dev->done, skb);
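             /* kick the tasklet only when the done list goes from empty to
              * non-empty; a single run of the tasklet drains everything
              * queued after that
              */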
2214         if (skb_queue_len(&dev->done) == 1)
2215                 tasklet_schedule(&dev->bh);
2216         spin_unlock_irqrestore(&dev->done.lock, flags);
2217
2218         return old_state;
2219 }
2220
2221 static void tx_complete(struct urb *urb)
2222 {
2223         struct sk_buff *skb = (struct sk_buff *)urb->context;
2224         struct skb_data *entry = (struct skb_data *)skb->cb;
2225         struct lan78xx_net *dev = entry->dev;
2226
2227         if (urb->status == 0) {
2228                 dev->net->stats.tx_packets++;
2229                 dev->net->stats.tx_bytes += entry->length;
2230         } else {
2231                 dev->net->stats.tx_errors++;
2232
2233                 switch (urb->status) {
2234                 case -EPIPE:
2235                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2236                         break;
2237
2238                 /* software-driven interface shutdown */
2239                 case -ECONNRESET:
2240                 case -ESHUTDOWN:
2241                         break;
2242
2243                 case -EPROTO:
2244                 case -ETIME:
2245                 case -EILSEQ:
2246                         netif_stop_queue(dev->net);
2247                         break;
2248                 default:
2249                         netif_dbg(dev, tx_err, dev->net,
2250                                   "tx err %d\n", entry->urb->status);
2251                         break;
2252                 }
2253         }
2254
2255         usb_autopm_put_interface_async(dev->intf);
2256
2257         defer_bh(dev, skb, &dev->txq, tx_done);
2258 }
2259
2260 static void lan78xx_queue_skb(struct sk_buff_head *list,
2261                               struct sk_buff *newsk, enum skb_state state)
2262 {
2263         struct skb_data *entry = (struct skb_data *)newsk->cb;
2264
2265         __skb_queue_tail(list, newsk);
2266         entry->state = state;
2267 }
2268
2269 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2270 {
2271         struct lan78xx_net *dev = netdev_priv(net);
2272         struct sk_buff *skb2 = NULL;
2273
2274         if (skb) {
2275                 skb_tx_timestamp(skb);
2276                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2277         }
2278
2279         if (skb2) {
2280                 skb_queue_tail(&dev->txq_pend, skb2);
2281
2282                 if (skb_queue_len(&dev->txq_pend) > 10)
2283                         netif_stop_queue(net);
2284         } else {
2285                 netif_dbg(dev, tx_err, dev->net,
2286                           "lan78xx_tx_prep return NULL\n");
2287                 dev->net->stats.tx_errors++;
2288                 dev->net->stats.tx_dropped++;
2289         }
2290
2291         tasklet_schedule(&dev->bh);
2292
2293         return NETDEV_TX_OK;
2294 }
2295
2296 int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2297 {
2298         int tmp;
2299         struct usb_host_interface *alt = NULL;
2300         struct usb_host_endpoint *in = NULL, *out = NULL;
2301         struct usb_host_endpoint *status = NULL;
2302
2303         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2304                 unsigned ep;
2305
2306                 in = NULL;
2307                 out = NULL;
2308                 status = NULL;
2309                 alt = intf->altsetting + tmp;
2310
2311                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2312                         struct usb_host_endpoint *e;
2313                         int intr = 0;
2314
2315                         e = alt->endpoint + ep;
2316                         switch (e->desc.bmAttributes) {
2317                         case USB_ENDPOINT_XFER_INT:
2318                                 if (!usb_endpoint_dir_in(&e->desc))
2319                                         continue;
2320                                 intr = 1;
2321                                 /* FALLTHROUGH */
2322                         case USB_ENDPOINT_XFER_BULK:
2323                                 break;
2324                         default:
2325                                 continue;
2326                         }
2327                         if (usb_endpoint_dir_in(&e->desc)) {
2328                                 if (!intr && !in)
2329                                         in = e;
2330                                 else if (intr && !status)
2331                                         status = e;
2332                         } else {
2333                                 if (!out)
2334                                         out = e;
2335                         }
2336                 }
2337                 if (in && out)
2338                         break;
2339         }
2340         if (!alt || !in || !out)
2341                 return -EINVAL;
2342
2343         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2344                                        in->desc.bEndpointAddress &
2345                                        USB_ENDPOINT_NUMBER_MASK);
2346         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2347                                         out->desc.bEndpointAddress &
2348                                         USB_ENDPOINT_NUMBER_MASK);
2349         dev->ep_intr = status;
2350
2351         return 0;
2352 }
2353
2354 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2355 {
2356         struct lan78xx_priv *pdata = NULL;
2357         int ret;
2358         int i;
2359
2360         ret = lan78xx_get_endpoints(dev, intf);
2361
2362         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2363
2364         pdata = (struct lan78xx_priv *)(dev->data[0]);
2365         if (!pdata) {
2366                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2367                 return -ENOMEM;
2368         }
2369
2370         pdata->dev = dev;
2371
2372         spin_lock_init(&pdata->rfe_ctl_lock);
2373         mutex_init(&pdata->dataport_mutex);
2374
2375         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2376
2377         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2378                 pdata->vlan_table[i] = 0;
2379
2380         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2381
2382         dev->net->features = 0;
2383
2384         if (DEFAULT_TX_CSUM_ENABLE)
2385                 dev->net->features |= NETIF_F_HW_CSUM;
2386
2387         if (DEFAULT_RX_CSUM_ENABLE)
2388                 dev->net->features |= NETIF_F_RXCSUM;
2389
2390         if (DEFAULT_TSO_CSUM_ENABLE)
2391                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2392
2393         dev->net->hw_features = dev->net->features;
2394
2395         /* Init all registers */
2396         ret = lan78xx_reset(dev);
2397
2398         dev->net->flags |= IFF_MULTICAST;
2399
2400         pdata->wol = WAKE_MAGIC;
2401
2402         return 0;
2403 }
2404
2405 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2406 {
2407         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2408
2409         if (pdata) {
2410                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2411                 kfree(pdata);
2412                 pdata = NULL;
2413                 dev->data[0] = 0;
2414         }
2415 }
2416
2417 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2418                                     struct sk_buff *skb,
2419                                     u32 rx_cmd_a, u32 rx_cmd_b)
2420 {
2421         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2422             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2423                 skb->ip_summed = CHECKSUM_NONE;
2424         } else {
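                     /* the device reports the checksum it computed over the
                      * received frame in the upper 16 bits of rx_cmd_b; pass
                      * it up as CHECKSUM_COMPLETE so the stack verifies it
                      */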
2425                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2426                 skb->ip_summed = CHECKSUM_COMPLETE;
2427         }
2428 }
2429
2430 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2431 {
2432         int             status;
2433
2434         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2435                 skb_queue_tail(&dev->rxq_pause, skb);
2436                 return;
2437         }
2438
2439         skb->protocol = eth_type_trans(skb, dev->net);
2440         dev->net->stats.rx_packets++;
2441         dev->net->stats.rx_bytes += skb->len;
2442
2443         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2444                   skb->len + sizeof(struct ethhdr), skb->protocol);
2445         memset(skb->cb, 0, sizeof(struct skb_data));
2446
2447         if (skb_defer_rx_timestamp(skb))
2448                 return;
2449
2450         status = netif_rx(skb);
2451         if (status != NET_RX_SUCCESS)
2452                 netif_dbg(dev, rx_err, dev->net,
2453                           "netif_rx status %d\n", status);
2454 }
2455
2456 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2457 {
2458         if (skb->len < dev->net->hard_header_len)
2459                 return 0;
2460
2461         while (skb->len > 0) {
2462                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2463                 u16 rx_cmd_c;
2464                 struct sk_buff *skb2;
2465                 unsigned char *packet;
2466
2467                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2468                 le32_to_cpus(&rx_cmd_a);
2469                 skb_pull(skb, sizeof(rx_cmd_a));
2470
2471                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2472                 le32_to_cpus(&rx_cmd_b);
2473                 skb_pull(skb, sizeof(rx_cmd_b));
2474
2475                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2476                 le16_to_cpus(&rx_cmd_c);
2477                 skb_pull(skb, sizeof(rx_cmd_c));
2478
2479                 packet = skb->data;
2480
2481                 /* get the packet length */
2482                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2483                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
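                     /* frames are padded so the next set of RX command words
                      * starts on a 4-byte boundary, e.g. size 60 with
                      * RXW_PADDING of 2 gives align_count = (4 - (62 % 4)) % 4 = 2
                      */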
2484
2485                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2486                         netif_dbg(dev, rx_err, dev->net,
2487                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2488                 } else {
2489                         /* last frame in this batch */
2490                         if (skb->len == size) {
2491                                 lan78xx_rx_csum_offload(dev, skb,
2492                                                         rx_cmd_a, rx_cmd_b);
2493
2494                                 skb_trim(skb, skb->len - 4); /* remove fcs */
2495                                 skb->truesize = size + sizeof(struct sk_buff);
2496
2497                                 return 1;
2498                         }
2499
2500                         skb2 = skb_clone(skb, GFP_ATOMIC);
2501                         if (unlikely(!skb2)) {
2502                                 netdev_warn(dev->net, "Error allocating skb");
2503                                 return 0;
2504                         }
2505
2506                         skb2->len = size;
2507                         skb2->data = packet;
2508                         skb_set_tail_pointer(skb2, size);
2509
2510                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2511
2512                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
2513                         skb2->truesize = size + sizeof(struct sk_buff);
2514
2515                         lan78xx_skb_return(dev, skb2);
2516                 }
2517
2518                 skb_pull(skb, size);
2519
2520                 /* padding bytes before the next frame starts */
2521                 if (skb->len)
2522                         skb_pull(skb, align_count);
2523         }
2524
2525         if (unlikely(skb->len < 0)) {
2526                 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
2527                 return 0;
2528         }
2529
2530         return 1;
2531 }
2532
2533 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2534 {
2535         if (!lan78xx_rx(dev, skb)) {
2536                 dev->net->stats.rx_errors++;
2537                 goto done;
2538         }
2539
2540         if (skb->len) {
2541                 lan78xx_skb_return(dev, skb);
2542                 return;
2543         }
2544
2545         netif_dbg(dev, rx_err, dev->net, "drop\n");
2546         dev->net->stats.rx_errors++;
2547 done:
2548         skb_queue_tail(&dev->done, skb);
2549 }
2550
2551 static void rx_complete(struct urb *urb);
2552
2553 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2554 {
2555         struct sk_buff *skb;
2556         struct skb_data *entry;
2557         unsigned long lockflags;
2558         size_t size = dev->rx_urb_size;
2559         int ret = 0;
2560
2561         skb = netdev_alloc_skb_ip_align(dev->net, size);
2562         if (!skb) {
2563                 usb_free_urb(urb);
2564                 return -ENOMEM;
2565         }
2566
2567         entry = (struct skb_data *)skb->cb;
2568         entry->urb = urb;
2569         entry->dev = dev;
2570         entry->length = 0;
2571
2572         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2573                           skb->data, size, rx_complete, skb);
2574
2575         spin_lock_irqsave(&dev->rxq.lock, lockflags);
2576
2577         if (netif_device_present(dev->net) &&
2578             netif_running(dev->net) &&
2579             !test_bit(EVENT_RX_HALT, &dev->flags) &&
2580             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2581                 ret = usb_submit_urb(urb, GFP_ATOMIC);
2582                 switch (ret) {
2583                 case 0:
2584                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2585                         break;
2586                 case -EPIPE:
2587                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2588                         break;
2589                 case -ENODEV:
2590                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
2591                         netif_device_detach(dev->net);
2592                         break;
2593                 case -EHOSTUNREACH:
2594                         ret = -ENOLINK;
2595                         break;
2596                 default:
2597                         netif_dbg(dev, rx_err, dev->net,
2598                                   "rx submit, %d\n", ret);
2599                         tasklet_schedule(&dev->bh);
2600                 }
2601         } else {
2602                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2603                 ret = -ENOLINK;
2604         }
2605         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2606         if (ret) {
2607                 dev_kfree_skb_any(skb);
2608                 usb_free_urb(urb);
2609         }
2610         return ret;
2611 }
2612
2613 static void rx_complete(struct urb *urb)
2614 {
2615         struct sk_buff  *skb = (struct sk_buff *)urb->context;
2616         struct skb_data *entry = (struct skb_data *)skb->cb;
2617         struct lan78xx_net *dev = entry->dev;
2618         int urb_status = urb->status;
2619         enum skb_state state;
2620
2621         skb_put(skb, urb->actual_length);
2622         state = rx_done;
2623         entry->urb = NULL;
2624
2625         switch (urb_status) {
2626         case 0:
2627                 if (skb->len < dev->net->hard_header_len) {
2628                         state = rx_cleanup;
2629                         dev->net->stats.rx_errors++;
2630                         dev->net->stats.rx_length_errors++;
2631                         netif_dbg(dev, rx_err, dev->net,
2632                                   "rx length %d\n", skb->len);
2633                 }
2634                 usb_mark_last_busy(dev->udev);
2635                 break;
2636         case -EPIPE:
2637                 dev->net->stats.rx_errors++;
2638                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2639                 /* FALLTHROUGH */
2640         case -ECONNRESET:                               /* async unlink */
2641         case -ESHUTDOWN:                                /* hardware gone */
2642                 netif_dbg(dev, ifdown, dev->net,
2643                           "rx shutdown, code %d\n", urb_status);
2644                 state = rx_cleanup;
2645                 entry->urb = urb;
2646                 urb = NULL;
2647                 break;
2648         case -EPROTO:
2649         case -ETIME:
2650         case -EILSEQ:
2651                 dev->net->stats.rx_errors++;
2652                 state = rx_cleanup;
2653                 entry->urb = urb;
2654                 urb = NULL;
2655                 break;
2656
2657         /* data overrun ... flush fifo? */
2658         case -EOVERFLOW:
2659                 dev->net->stats.rx_over_errors++;
2660                 /* FALLTHROUGH */
2661
2662         default:
2663                 state = rx_cleanup;
2664                 dev->net->stats.rx_errors++;
2665                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2666                 break;
2667         }
2668
2669         state = defer_bh(dev, skb, &dev->rxq, state);
2670
2671         if (urb) {
2672                 if (netif_running(dev->net) &&
2673                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
2674                     state != unlink_start) {
2675                         rx_submit(dev, urb, GFP_ATOMIC);
2676                         return;
2677                 }
2678                 usb_free_urb(urb);
2679         }
2680         netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2681 }
2682
2683 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2684 {
2685         int length;
2686         struct urb *urb = NULL;
2687         struct skb_data *entry;
2688         unsigned long flags;
2689         struct sk_buff_head *tqp = &dev->txq_pend;
2690         struct sk_buff *skb, *skb2;
2691         int ret;
2692         int count, pos;
2693         int skb_totallen, pkt_cnt;
2694
2695         skb_totallen = 0;
2696         pkt_cnt = 0;
2697         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2698                 if (skb_is_gso(skb)) {
2699                         if (pkt_cnt) {
2700                                 /* handle previous packets first */
2701                                 break;
2702                         }
2703                         length = skb->len;
2704                         skb2 = skb_dequeue(tqp);
2705                         goto gso_skb;
2706                 }
2707
2708                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2709                         break;
2710                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
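                     /* frames are packed at 4-byte-aligned offsets, e.g.
                      * queued lengths of 61 and 100 bytes land at offsets 0
                      * and 64 for a total buffer length of 164 bytes
                      */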
2711                 pkt_cnt++;
2712         }
2713
2714         /* copy to a single skb */
2715         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2716         if (!skb)
2717                 goto drop;
2718
2719         skb_put(skb, skb_totallen);
2720
2721         for (count = pos = 0; count < pkt_cnt; count++) {
2722                 skb2 = skb_dequeue(tqp);
2723                 if (skb2) {
2724                         memcpy(skb->data + pos, skb2->data, skb2->len);
2725                         pos += roundup(skb2->len, sizeof(u32));
2726                         dev_kfree_skb(skb2);
2727                 }
2728         }
2729
2730         length = skb_totallen;
2731
2732 gso_skb:
2733         urb = usb_alloc_urb(0, GFP_ATOMIC);
2734         if (!urb) {
2735                 netif_dbg(dev, tx_err, dev->net, "no urb\n");
2736                 goto drop;
2737         }
2738
2739         entry = (struct skb_data *)skb->cb;
2740         entry->urb = urb;
2741         entry->dev = dev;
2742         entry->length = length;
2743
2744         spin_lock_irqsave(&dev->txq.lock, flags);
2745         ret = usb_autopm_get_interface_async(dev->intf);
2746         if (ret < 0) {
2747                 spin_unlock_irqrestore(&dev->txq.lock, flags);
2748                 goto drop;
2749         }
2750
2751         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2752                           skb->data, skb->len, tx_complete, skb);
2753
2754         if (length % dev->maxpacket == 0) {
2755                 /* send USB_ZERO_PACKET */
2756                 urb->transfer_flags |= URB_ZERO_PACKET;
2757         }
2758
2759 #ifdef CONFIG_PM
2760         /* if this triggers, the device is still asleep */
2761         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2762                 /* transmission will be done in resume */
2763                 usb_anchor_urb(urb, &dev->deferred);
2764                 /* no use to process more packets */
2765                 netif_stop_queue(dev->net);
2766                 usb_put_urb(urb);
2767                 spin_unlock_irqrestore(&dev->txq.lock, flags);
2768                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2769                 return;
2770         }
2771 #endif
2772
2773         ret = usb_submit_urb(urb, GFP_ATOMIC);
2774         switch (ret) {
2775         case 0:
2776                 dev->net->trans_start = jiffies;
2777                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
2778                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2779                         netif_stop_queue(dev->net);
2780                 break;
2781         case -EPIPE:
2782                 netif_stop_queue(dev->net);
2783                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2784                 usb_autopm_put_interface_async(dev->intf);
2785                 break;
2786         default:
2787                 usb_autopm_put_interface_async(dev->intf);
2788                 netif_dbg(dev, tx_err, dev->net,
2789                           "tx: submit urb err %d\n", ret);
2790                 break;
2791         }
2792
2793         spin_unlock_irqrestore(&dev->txq.lock, flags);
2794
2795         if (ret) {
2796                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2797 drop:
2798                 dev->net->stats.tx_dropped++;
2799                 if (skb)
2800                         dev_kfree_skb_any(skb);
2801                 usb_free_urb(urb);
2802         } else
2803                 netif_dbg(dev, tx_queued, dev->net,
2804                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
2805 }
2806
2807 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2808 {
2809         struct urb *urb;
2810         int i;
2811
2812         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2813                 for (i = 0; i < 10; i++) {
2814                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2815                                 break;
2816                         urb = usb_alloc_urb(0, GFP_ATOMIC);
2817                         if (urb)
2818                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2819                                         return;
2820                 }
2821
2822                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2823                         tasklet_schedule(&dev->bh);
2824         }
2825         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2826                 netif_wake_queue(dev->net);
2827 }
2828
2829 static void lan78xx_bh(unsigned long param)
2830 {
2831         struct lan78xx_net *dev = (struct lan78xx_net *)param;
2832         struct sk_buff *skb;
2833         struct skb_data *entry;
2834
2835         while ((skb = skb_dequeue(&dev->done))) {
2836                 entry = (struct skb_data *)(skb->cb);
2837                 switch (entry->state) {
2838                 case rx_done:
2839                         entry->state = rx_cleanup;
2840                         rx_process(dev, skb);
2841                         continue;
2842                 case tx_done:
2843                         usb_free_urb(entry->urb);
2844                         dev_kfree_skb(skb);
2845                         continue;
2846                 case rx_cleanup:
2847                         usb_free_urb(entry->urb);
2848                         dev_kfree_skb(skb);
2849                         continue;
2850                 default:
2851                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
2852                         return;
2853                 }
2854         }
2855
2856         if (netif_device_present(dev->net) && netif_running(dev->net)) {
2857                 if (!skb_queue_empty(&dev->txq_pend))
2858                         lan78xx_tx_bh(dev);
2859
2860                 if (!timer_pending(&dev->delay) &&
2861                     !test_bit(EVENT_RX_HALT, &dev->flags))
2862                         lan78xx_rx_bh(dev);
2863         }
2864 }
2865
2866 static void lan78xx_delayedwork(struct work_struct *work)
2867 {
2868         int status;
2869         struct lan78xx_net *dev;
2870
2871         dev = container_of(work, struct lan78xx_net, wq.work);
2872
2873         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2874                 unlink_urbs(dev, &dev->txq);
2875                 status = usb_autopm_get_interface(dev->intf);
2876                 if (status < 0)
2877                         goto fail_pipe;
2878                 status = usb_clear_halt(dev->udev, dev->pipe_out);
2879                 usb_autopm_put_interface(dev->intf);
2880                 if (status < 0 &&
2881                     status != -EPIPE &&
2882                     status != -ESHUTDOWN) {
2883                         if (netif_msg_tx_err(dev))
2884 fail_pipe:
2885                                 netdev_err(dev->net,
2886                                            "can't clear tx halt, status %d\n",
2887                                            status);
2888                 } else {
2889                         clear_bit(EVENT_TX_HALT, &dev->flags);
2890                         if (status != -ESHUTDOWN)
2891                                 netif_wake_queue(dev->net);
2892                 }
2893         }
2894         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2895                 unlink_urbs(dev, &dev->rxq);
2896                 status = usb_autopm_get_interface(dev->intf);
2897                 if (status < 0)
2898                         goto fail_halt;
2899                 status = usb_clear_halt(dev->udev, dev->pipe_in);
2900                 usb_autopm_put_interface(dev->intf);
2901                 if (status < 0 &&
2902                     status != -EPIPE &&
2903                     status != -ESHUTDOWN) {
2904                         if (netif_msg_rx_err(dev))
2905 fail_halt:
2906                                 netdev_err(dev->net,
2907                                            "can't clear rx halt, status %d\n",
2908                                            status);
2909                 } else {
2910                         clear_bit(EVENT_RX_HALT, &dev->flags);
2911                         tasklet_schedule(&dev->bh);
2912                 }
2913         }
2914
2915         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2916                 int ret = 0;
2917
2918                 clear_bit(EVENT_LINK_RESET, &dev->flags);
2919                 status = usb_autopm_get_interface(dev->intf);
2920                 if (status < 0)
2921                         goto skip_reset;
2922                 ret = lan78xx_link_reset(dev);
                     if (ret < 0) {
2923                         usb_autopm_put_interface(dev->intf);
2924 skip_reset:
2925                         netdev_info(dev->net, "link reset failed (%d)\n",
2926                                     ret);
2927                 } else {
2928                         usb_autopm_put_interface(dev->intf);
2929                 }
2930         }
2931 }
2932
2933 static void intr_complete(struct urb *urb)
2934 {
2935         struct lan78xx_net *dev = urb->context;
2936         int status = urb->status;
2937
2938         switch (status) {
2939         /* success */
2940         case 0:
2941                 lan78xx_status(dev, urb);
2942                 break;
2943
2944         /* software-driven interface shutdown */
2945         case -ENOENT:                   /* urb killed */
2946         case -ESHUTDOWN:                /* hardware gone */
2947                 netif_dbg(dev, ifdown, dev->net,
2948                           "intr shutdown, code %d\n", status);
2949                 return;
2950
2951         /* NOTE:  not throttling like RX/TX, since this endpoint
2952          * already polls infrequently
2953          */
2954         default:
2955                 netdev_dbg(dev->net, "intr status %d\n", status);
2956                 break;
2957         }
2958
2959         if (!netif_running(dev->net))
2960                 return;
2961
2962         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2963         status = usb_submit_urb(urb, GFP_ATOMIC);
2964         if (status != 0)
2965                 netif_err(dev, timer, dev->net,
2966                           "intr resubmit --> %d\n", status);
2967 }
2968
2969 static void lan78xx_disconnect(struct usb_interface *intf)
2970 {
2971         struct lan78xx_net              *dev;
2972         struct usb_device               *udev;
2973         struct net_device               *net;
2974
2975         dev = usb_get_intfdata(intf);
2976         usb_set_intfdata(intf, NULL);
2977         if (!dev)
2978                 return;
2979
2980         udev = interface_to_usbdev(intf);
2981
2982         net = dev->net;
2983         unregister_netdev(net);
2984
2985         cancel_delayed_work_sync(&dev->wq);
2986
2987         usb_scuttle_anchored_urbs(&dev->deferred);
2988
2989         lan78xx_unbind(dev, intf);
2990
2991         usb_kill_urb(dev->urb_intr);
2992         usb_free_urb(dev->urb_intr);
2993
2994         free_netdev(net);
2995         usb_put_dev(udev);
2996 }
2997
2998 void lan78xx_tx_timeout(struct net_device *net)
2999 {
3000         struct lan78xx_net *dev = netdev_priv(net);
3001
3002         unlink_urbs(dev, &dev->txq);
3003         tasklet_schedule(&dev->bh);
3004 }
3005
3006 static const struct net_device_ops lan78xx_netdev_ops = {
3007         .ndo_open               = lan78xx_open,
3008         .ndo_stop               = lan78xx_stop,
3009         .ndo_start_xmit         = lan78xx_start_xmit,
3010         .ndo_tx_timeout         = lan78xx_tx_timeout,
3011         .ndo_change_mtu         = lan78xx_change_mtu,
3012         .ndo_set_mac_address    = lan78xx_set_mac_addr,
3013         .ndo_validate_addr      = eth_validate_addr,
3014         .ndo_do_ioctl           = lan78xx_ioctl,
3015         .ndo_set_rx_mode        = lan78xx_set_multicast,
3016         .ndo_set_features       = lan78xx_set_features,
3017         .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3018         .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3019 };
3020
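/* Probe a newly detected LAN78xx interface: take a reference on the USB
 * device, allocate and initialize the netdev with its queues, tasklet and
 * delayed work, bind to the hardware, set up the bulk-in/bulk-out pipes and
 * the interrupt URB, then register the netdev and relax the runtime-PM
 * autosuspend delay.
 */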
3021 static int lan78xx_probe(struct usb_interface *intf,
3022                          const struct usb_device_id *id)
3023 {
3024         struct lan78xx_net *dev;
3025         struct net_device *netdev;
3026         struct usb_device *udev;
3027         int ret;
3028         unsigned int maxp;
3029         unsigned int period;
3030         u8 *buf = NULL;
3031
3032         udev = interface_to_usbdev(intf);
3033         udev = usb_get_dev(udev);
3034
3035         ret = -ENOMEM;
3036         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3037         if (!netdev) {
3038                 dev_err(&intf->dev, "Error: OOM\n");
3039                 goto out1;
3040         }
3041
3042         /* netdev_printk() needs this */
3043         SET_NETDEV_DEV(netdev, &intf->dev);
3044
3045         dev = netdev_priv(netdev);
3046         dev->udev = udev;
3047         dev->intf = intf;
3048         dev->net = netdev;
3049         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3050                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3051
3052         skb_queue_head_init(&dev->rxq);
3053         skb_queue_head_init(&dev->txq);
3054         skb_queue_head_init(&dev->done);
3055         skb_queue_head_init(&dev->rxq_pause);
3056         skb_queue_head_init(&dev->txq_pend);
3057         mutex_init(&dev->phy_mutex);
3058
3059         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3060         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3061         init_usb_anchor(&dev->deferred);
3062
3063         netdev->netdev_ops = &lan78xx_netdev_ops;
3064         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3065         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3066
3067         ret = lan78xx_bind(dev, intf);
3068         if (ret < 0)
3069                 goto out2;
3070         strcpy(netdev->name, "eth%d");
3071
3072         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3073                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3074
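        /* NOTE: the current altsetting's endpoint array is indexed directly
         * here, on the assumption that the device exposes at least three
         * endpoints laid out as bulk-in, bulk-out, interrupt; no validation
         * of that layout is performed.
         */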
3075         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3076         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3077         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3078
3079         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3080         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3081
3082         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3083                                         dev->ep_intr->desc.bEndpointAddress &
3084                                         USB_ENDPOINT_NUMBER_MASK);
3085         period = dev->ep_intr->desc.bInterval;
3086
3087         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3088         buf = kmalloc(maxp, GFP_KERNEL);
3089         if (buf) {
3090                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3091                 if (!dev->urb_intr) {
3092                         kfree(buf);
3093                         goto out3;
3094                 } else {
3095                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3096                                          dev->pipe_intr, buf, maxp,
3097                                          intr_complete, dev, period);
3098                 }
3099         }
3100
3101         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3102
3103         /* driver requires remote-wakeup capability during autosuspend. */
3104         intf->needs_remote_wakeup = 1;
3105
3106         ret = register_netdev(netdev);
3107         if (ret != 0) {
3108                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3109                 goto out3;
3110         }
3111
3112         usb_set_intfdata(intf, dev);
3113
3114         ret = device_set_wakeup_enable(&udev->dev, true);
3115
3116         /* The default autosuspend delay of 2 seconds has more overhead
3117          * than benefit; use 10 seconds instead.
3118          */
3119         pm_runtime_set_autosuspend_delay(&udev->dev,
3120                                          DEFAULT_AUTOSUSPEND_DELAY);
3121
3122         return 0;
3123
3124 out3:
3125         lan78xx_unbind(dev, intf);
3126 out2:
3127         free_netdev(netdev);
3128 out1:
3129         usb_put_dev(udev);
3130
3131         return ret;
3132 }
3133
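/* Compute the CRC16 used by the wake-up frame filters (WUF_CFGx).  The CRC
 * is seeded with 0xFFFF and clocked bit by bit, LSB first, with polynomial
 * 0x8005; whenever the polynomial is applied the low bit is also forced to
 * 1, which presumably mirrors the CRC the filter hardware computes over
 * incoming frames.
 */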
3134 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3135 {
3136         const u16 crc16poly = 0x8005;
3137         int i;
3138         u16 bit, crc, msb;
3139         u8 data;
3140
3141         crc = 0xFFFF;
3142         for (i = 0; i < len; i++) {
3143                 data = *buf++;
3144                 for (bit = 0; bit < 8; bit++) {
3145                         msb = crc >> 15;
3146                         crc <<= 1;
3147
3148                         if (msb ^ (u16)(data & 1)) {
3149                                 crc ^= crc16poly;
3150                                 crc |= (u16)0x0001U;
3151                         }
3152                         data >>= 1;
3153                 }
3154         }
3155
3156         return crc;
3157 }
3158
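/* Arm wake-on-LAN before the device is suspended.  The MAC transmitter and
 * receiver are stopped and the wake-up status registers cleared, then each
 * requested WAKE_* option sets the matching WUCSR enable bit; WAKE_MCAST and
 * WAKE_ARP additionally install wake-up frame filters keyed by a CRC16 over
 * the interesting header bytes.  Finally PMT_CTL selects the suspend mode,
 * pending wake-up status is cleared and the receiver is re-enabled.
 */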
3159 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3160 {
3161         u32 buf;
3162         int ret;
3163         int mask_index;
3164         u16 crc;
3165         u32 temp_wucsr;
3166         u32 temp_pmt_ctl;
3167         const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3168         const u8 ipv6_multicast[2] = { 0x33, 0x33 };
3169         const u8 arp_type[2] = { 0x08, 0x06 };
3170
3171         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3172         buf &= ~MAC_TX_TXEN_;
3173         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3174         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3175         buf &= ~MAC_RX_RXEN_;
3176         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3177
3178         ret = lan78xx_write_reg(dev, WUCSR, 0);
3179         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3180         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3181
3182         temp_wucsr = 0;
3183
3184         temp_pmt_ctl = 0;
3185         ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3186         temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3187         temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3188
3189         for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3190                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3191
3192         mask_index = 0;
3193         if (wol & WAKE_PHY) {
3194                 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3195
3196                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3197                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3198                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3199         }
3200         if (wol & WAKE_MAGIC) {
3201                 temp_wucsr |= WUCSR_MPEN_;
3202
3203                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3204                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3205                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3206         }
3207         if (wol & WAKE_BCAST) {
3208                 temp_wucsr |= WUCSR_BCST_EN_;
3209
3210                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3211                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3212                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3213         }
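        /* For the filters below, each bit set in WUF_MASKn selects one byte
         * offset of the frame to feed into the CRC16 above: 0x7 covers the
         * first three bytes of the destination MAC (01:00:5E for IPv4
         * multicast), 0x3 the 33:33 IPv6 multicast prefix, and 0x3000 the
         * EtherType at offsets 12-13 used to match ARP.
         */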
3214         if (wol & WAKE_MCAST) {
3215                 temp_wucsr |= WUCSR_WAKE_EN_;
3216
3217                 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3218                 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3219                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3220                                         WUF_CFGX_EN_ |
3221                                         WUF_CFGX_TYPE_MCAST_ |
3222                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3223                                         (crc & WUF_CFGX_CRC16_MASK_));
3224
3225                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3226                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3227                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3228                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3229                 mask_index++;
3230
3231                 /* for IPv6 Multicast */
3232                 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3233                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3234                                         WUF_CFGX_EN_ |
3235                                         WUF_CFGX_TYPE_MCAST_ |
3236                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3237                                         (crc & WUF_CFGX_CRC16_MASK_));
3238
3239                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3240                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3241                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3242                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3243                 mask_index++;
3244
3245                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3246                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3247                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3248         }
3249         if (wol & WAKE_UCAST) {
3250                 temp_wucsr |= WUCSR_PFDA_EN_;
3251
3252                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3253                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3254                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3255         }
3256         if (wol & WAKE_ARP) {
3257                 temp_wucsr |= WUCSR_WAKE_EN_;
3258
3259                 /* set WUF_CFG & WUF_MASK
3260                  * for packettype (offset 12,13) = ARP (0x0806)
3261                  */
3262                 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3263                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3264                                         WUF_CFGX_EN_ |
3265                                         WUF_CFGX_TYPE_ALL_ |
3266                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3267                                         (crc & WUF_CFGX_CRC16_MASK_));
3268
3269                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3270                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3271                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3272                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3273                 mask_index++;
3274
3275                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3276                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3277                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3278         }
3279
3280         ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3281
3282         /* when multiple WOL bits are set */
3283         if (hweight_long((unsigned long)wol) > 1) {
3284                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3285                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3286                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3287         }
3288         ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3289
3290         /* clear WUPS */
3291         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3292         buf |= PMT_CTL_WUPS_MASK_;
3293         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3294
3295         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3296         buf |= MAC_RX_RXEN_;
3297         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3298
3299         return 0;
3300 }
3301
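/* PM suspend callback, used for both system sleep and USB autosuspend.  On
 * the first suspend the MAC transmitter/receiver are stopped, the netdev is
 * detached and in-flight URBs are killed; autosuspend is refused with
 * -EBUSY while TX frames are still queued.  The device is then armed either
 * for "good frame" wake-up (autosuspend) or according to the user's WoL
 * configuration via lan78xx_set_suspend().
 */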
3302 int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3303 {
3304         struct lan78xx_net *dev = usb_get_intfdata(intf);
3305         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3306         u32 buf;
3307         int ret;
3308         int event;
3309
3310         ret = 0;
3311         event = message.event;
3312
3313         if (!dev->suspend_count++) {
3314                 spin_lock_irq(&dev->txq.lock);
3315                 /* don't autosuspend while transmitting */
3316                 if ((skb_queue_len(&dev->txq) ||
3317                      skb_queue_len(&dev->txq_pend)) &&
3318                     PMSG_IS_AUTO(message)) {
3319                         spin_unlock_irq(&dev->txq.lock);
3320                         ret = -EBUSY;
3321                         goto out;
3322                 } else {
3323                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3324                         spin_unlock_irq(&dev->txq.lock);
3325                 }
3326
3327                 /* stop TX & RX */
3328                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3329                 buf &= ~MAC_TX_TXEN_;
3330                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3331                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3332                 buf &= ~MAC_RX_RXEN_;
3333                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3334
3335                 /* empty out the Rx and Tx queues */
3336                 netif_device_detach(dev->net);
3337                 lan78xx_terminate_urbs(dev);
3338                 usb_kill_urb(dev->urb_intr);
3339
3340                 /* reattach */
3341                 netif_device_attach(dev->net);
3342         }
3343
3344         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3345                 if (PMSG_IS_AUTO(message)) {
3346                         /* auto suspend (selective suspend) */
3347                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3348                         buf &= ~MAC_TX_TXEN_;
3349                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3350                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3351                         buf &= ~MAC_RX_RXEN_;
3352                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3353
3354                         ret = lan78xx_write_reg(dev, WUCSR, 0);
3355                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3356                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3357
3358                         /* set goodframe wakeup */
3359                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
3360
3361                         buf |= WUCSR_RFE_WAKE_EN_;
3362                         buf |= WUCSR_STORE_WAKE_;
3363
3364                         ret = lan78xx_write_reg(dev, WUCSR, buf);
3365
3366                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3367
3368                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3369                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
3370
3371                         buf |= PMT_CTL_PHY_WAKE_EN_;
3372                         buf |= PMT_CTL_WOL_EN_;
3373                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
3374                         buf |= PMT_CTL_SUS_MODE_3_;
3375
3376                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3377
3378                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3379
3380                         buf |= PMT_CTL_WUPS_MASK_;
3381
3382                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3383
3384                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3385                         buf |= MAC_RX_RXEN_;
3386                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3387                 } else {
3388                         lan78xx_set_suspend(dev, pdata->wol);
3389                 }
3390         }
3391
3392 out:
3393         return ret;
3394 }
3395
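/* PM resume callback.  Once the last suspend reference is dropped, the
 * interrupt URB is resubmitted, TX URBs deferred while asleep are either
 * resubmitted or dropped, and the queue and tasklet are restarted.  The
 * wake-up source register and the latched WUCSR/WUCSR2 status bits are then
 * cleared (they appear to be write-one-to-clear) and the MAC transmitter is
 * re-enabled.
 */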
3396 int lan78xx_resume(struct usb_interface *intf)
3397 {
3398         struct lan78xx_net *dev = usb_get_intfdata(intf);
3399         struct sk_buff *skb;
3400         struct urb *res;
3401         int ret;
3402         u32 buf;
3403
3404         if (!--dev->suspend_count) {
3405                 /* resume interrupt URBs */
3406                 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3407                         usb_submit_urb(dev->urb_intr, GFP_NOIO);
3408
3409                 spin_lock_irq(&dev->txq.lock);
3410                 while ((res = usb_get_from_anchor(&dev->deferred))) {
3411                         skb = (struct sk_buff *)res->context;
3412                         ret = usb_submit_urb(res, GFP_ATOMIC);
3413                         if (ret < 0) {
3414                                 dev_kfree_skb_any(skb);
3415                                 usb_free_urb(res);
3416                                 usb_autopm_put_interface_async(dev->intf);
3417                         } else {
3418                                 dev->net->trans_start = jiffies;
3419                                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3420                         }
3421                 }
3422
3423                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3424                 spin_unlock_irq(&dev->txq.lock);
3425
3426                 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3427                         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3428                                 netif_start_queue(dev->net);
3429                         tasklet_schedule(&dev->bh);
3430                 }
3431         }
3432
3433         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3434         ret = lan78xx_write_reg(dev, WUCSR, 0);
3435         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3436
3437         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3438                                              WUCSR2_ARP_RCD_ |
3439                                              WUCSR2_IPV6_TCPSYN_RCD_ |
3440                                              WUCSR2_IPV4_TCPSYN_RCD_);
3441
3442         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3443                                             WUCSR_EEE_RX_WAKE_ |
3444                                             WUCSR_PFDA_FR_ |
3445                                             WUCSR_RFE_WAKE_FR_ |
3446                                             WUCSR_WUFR_ |
3447                                             WUCSR_MPR_ |
3448                                             WUCSR_BCST_FR_);
3449
3450         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3451         buf |= MAC_TX_TXEN_;
3452         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3453
3454         return 0;
3455 }
3456
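/* Resume after a USB bus reset: the hardware state can no longer be
 * trusted, so run a full lan78xx_reset() before the normal resume path.
 */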
3457 int lan78xx_reset_resume(struct usb_interface *intf)
3458 {
3459         struct lan78xx_net *dev = usb_get_intfdata(intf);
3460
3461         lan78xx_reset(dev);
3462         return lan78xx_resume(intf);
3463 }
3464
3465 static const struct usb_device_id products[] = {
3466         {
3467         /* LAN7800 USB Gigabit Ethernet Device */
3468         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3469         },
3470         {
3471         /* LAN7850 USB Gigabit Ethernet Device */
3472         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3473         },
3474         {},
3475 };
3476 MODULE_DEVICE_TABLE(usb, products);
3477
3478 static struct usb_driver lan78xx_driver = {
3479         .name                   = DRIVER_NAME,
3480         .id_table               = products,
3481         .probe                  = lan78xx_probe,
3482         .disconnect             = lan78xx_disconnect,
3483         .suspend                = lan78xx_suspend,
3484         .resume                 = lan78xx_resume,
3485         .reset_resume           = lan78xx_reset_resume,
3486         .supports_autosuspend   = 1,
3487         .disable_hub_initiated_lpm = 1,
3488 };
3489
3490 module_usb_driver(lan78xx_driver);
3491
3492 MODULE_AUTHOR(DRIVER_AUTHOR);
3493 MODULE_DESCRIPTION(DRIVER_DESC);
3494 MODULE_LICENSE("GPL");