]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/usb/lan78xx.c
dce55636bc9b63128c550b2ed4f1f6d0a532f2f2
[karo-tx-linux.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35
36 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME     "lan78xx"
39 #define DRIVER_VERSION  "1.0.1"
40
41 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
42 #define THROTTLE_JIFFIES                (HZ / 8)
43 #define UNLINK_TIMEOUT_MS               3
44
45 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
46
47 #define SS_USB_PKT_SIZE                 (1024)
48 #define HS_USB_PKT_SIZE                 (512)
49 #define FS_USB_PKT_SIZE                 (64)
50
51 #define MAX_RX_FIFO_SIZE                (12 * 1024)
52 #define MAX_TX_FIFO_SIZE                (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY           (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE          (9000)
56 #define DEFAULT_TX_CSUM_ENABLE          (true)
57 #define DEFAULT_RX_CSUM_ENABLE          (true)
58 #define DEFAULT_TSO_CSUM_ENABLE         (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
60 #define TX_OVERHEAD                     (8)
61 #define RXW_PADDING                     2
62
63 #define LAN78XX_USB_VENDOR_ID           (0x0424)
64 #define LAN7800_USB_PRODUCT_ID          (0x7800)
65 #define LAN7850_USB_PRODUCT_ID          (0x7850)
66 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
67 #define LAN78XX_OTP_MAGIC               (0x78F3)
68
69 #define MII_READ                        1
70 #define MII_WRITE                       0
71
72 #define EEPROM_INDICATOR                (0xA5)
73 #define EEPROM_MAC_OFFSET               (0x01)
74 #define MAX_EEPROM_SIZE                 512
75 #define OTP_INDICATOR_1                 (0xF3)
76 #define OTP_INDICATOR_2                 (0xF7)
77
78 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
79                                          WAKE_MCAST | WAKE_BCAST | \
80                                          WAKE_ARP | WAKE_MAGIC)
81
82 /* USB related defines */
83 #define BULK_IN_PIPE                    1
84 #define BULK_OUT_PIPE                   2
85
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
88
/* ethtool statistics names, reported via ETH_SS_STATS.
 *
 * The order here is the userspace-visible ABI and must match the field
 * order of struct lan78xx_statstage, which in turn mirrors the layout
 * returned by the USB_VENDOR_REQUEST_GET_STATS control transfer.
 * Do not reorder or rename entries.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
        "RX FCS Errors",
        "RX Alignment Errors",
        "Rx Fragment Errors",
        "RX Jabber Errors",
        "RX Undersize Frame Errors",
        "RX Oversize Frame Errors",
        "RX Dropped Frames",
        "RX Unicast Byte Count",
        "RX Broadcast Byte Count",
        "RX Multicast Byte Count",
        "RX Unicast Frames",
        "RX Broadcast Frames",
        "RX Multicast Frames",
        "RX Pause Frames",
        "RX 64 Byte Frames",
        "RX 65 - 127 Byte Frames",
        "RX 128 - 255 Byte Frames",
        "RX 256 - 511 Bytes Frames",
        "RX 512 - 1023 Byte Frames",
        "RX 1024 - 1518 Byte Frames",
        "RX Greater 1518 Byte Frames",
        "EEE RX LPI Transitions",
        "EEE RX LPI Time",
        "TX FCS Errors",
        "TX Excess Deferral Errors",
        "TX Carrier Errors",
        "TX Bad Byte Count",
        "TX Single Collisions",
        "TX Multiple Collisions",
        "TX Excessive Collision",
        "TX Late Collisions",
        "TX Unicast Byte Count",
        "TX Broadcast Byte Count",
        "TX Multicast Byte Count",
        "TX Unicast Frames",
        "TX Broadcast Frames",
        "TX Multicast Frames",
        "TX Pause Frames",
        "TX 64 Byte Frames",
        "TX 65 - 127 Byte Frames",
        "TX 128 - 255 Byte Frames",
        "TX 256 - 511 Bytes Frames",
        "TX 512 - 1023 Byte Frames",
        "TX 1024 - 1518 Byte Frames",
        "TX Greater 1518 Byte Frames",
        "EEE TX LPI Transitions",
        "EEE TX LPI Time",
};
138
/* Hardware statistics block, fetched in one piece by lan78xx_read_stats()
 * via USB_VENDOR_REQUEST_GET_STATS.  Each counter is a little-endian u32
 * on the wire; the field order must match lan78xx_gstrings.
 */
struct lan78xx_statstage {
        u32 rx_fcs_errors;
        u32 rx_alignment_errors;
        u32 rx_fragment_errors;
        u32 rx_jabber_errors;
        u32 rx_undersize_frame_errors;
        u32 rx_oversize_frame_errors;
        u32 rx_dropped_frames;
        u32 rx_unicast_byte_count;
        u32 rx_broadcast_byte_count;
        u32 rx_multicast_byte_count;
        u32 rx_unicast_frames;
        u32 rx_broadcast_frames;
        u32 rx_multicast_frames;
        u32 rx_pause_frames;
        u32 rx_64_byte_frames;
        u32 rx_65_127_byte_frames;
        u32 rx_128_255_byte_frames;
        u32 rx_256_511_bytes_frames;
        u32 rx_512_1023_byte_frames;
        u32 rx_1024_1518_byte_frames;
        u32 rx_greater_1518_byte_frames;
        u32 eee_rx_lpi_transitions;
        u32 eee_rx_lpi_time;
        u32 tx_fcs_errors;
        u32 tx_excess_deferral_errors;
        u32 tx_carrier_errors;
        u32 tx_bad_byte_count;
        u32 tx_single_collisions;
        u32 tx_multiple_collisions;
        u32 tx_excessive_collision;
        u32 tx_late_collisions;
        u32 tx_unicast_byte_count;
        u32 tx_broadcast_byte_count;
        u32 tx_multicast_byte_count;
        u32 tx_unicast_frames;
        u32 tx_broadcast_frames;
        u32 tx_multicast_frames;
        u32 tx_pause_frames;
        u32 tx_64_byte_frames;
        u32 tx_65_127_byte_frames;
        u32 tx_128_255_byte_frames;
        u32 tx_256_511_bytes_frames;
        u32 tx_512_1023_byte_frames;
        u32 tx_1024_1518_byte_frames;
        u32 tx_greater_1518_byte_frames;
        u32 eee_tx_lpi_transitions;
        u32 eee_tx_lpi_time;
};
188
189 struct lan78xx_net;
190
/* Driver-private state hung off lan78xx_net (stored in dev->data[0]).
 * Holds cached copies of the receive-filter configuration that the
 * deferred work items (set_multicast / set_vlan) flush to hardware.
 */
struct lan78xx_priv {
        struct lan78xx_net *dev;
        u32 rfe_ctl;                    /* cached RFE_CTL register value */
        u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
        u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
        u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
        struct mutex dataport_mutex; /* for dataport access */
        spinlock_t rfe_ctl_lock; /* for rfe register access */
        struct work_struct set_multicast; /* defers filter writes to sleepable ctx */
        struct work_struct set_vlan;
        u32 wol;                        /* Wake-on-LAN option flags */
};
203
/* Lifecycle state of an skb queued for USB transfer (see struct skb_data).
 * illegal is deliberately 0 so a zeroed cb area is detectably invalid.
 */
enum skb_state {
        illegal = 0,
        tx_start,
        tx_done,
        rx_start,
        rx_done,
        rx_cleanup,
        unlink_start
};
213
struct skb_data {               /* skb->cb is one of these */
        struct urb *urb;        /* URB carrying this skb's payload */
        struct lan78xx_net *dev;
        enum skb_state state;   /* where the skb is in its tx/rx lifecycle */
        size_t length;          /* payload length for accounting */
};
220
/* Context for asynchronous vendor control requests. */
struct usb_context {
        struct usb_ctrlrequest req;
        struct lan78xx_net *dev;
};
225
226 #define EVENT_TX_HALT                   0
227 #define EVENT_RX_HALT                   1
228 #define EVENT_RX_MEMORY                 2
229 #define EVENT_STS_SPLIT                 3
230 #define EVENT_LINK_RESET                4
231 #define EVENT_RX_PAUSED                 5
232 #define EVENT_DEV_WAKING                6
233 #define EVENT_DEV_ASLEEP                7
234 #define EVENT_DEV_OPEN                  8
235
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
        struct net_device       *net;
        struct usb_device       *udev;
        struct usb_interface    *intf;
        void                    *driver_priv;   /* points at struct lan78xx_priv */

        int                     rx_qlen;        /* target depth of rxq */
        int                     tx_qlen;        /* target depth of txq */
        struct sk_buff_head     rxq;            /* rx URBs in flight */
        struct sk_buff_head     txq;            /* tx URBs in flight */
        struct sk_buff_head     done;           /* completed, awaiting bh */
        struct sk_buff_head     rxq_pause;      /* rx skbs held while paused */
        struct sk_buff_head     txq_pend;       /* tx skbs awaiting an URB */

        struct tasklet_struct   bh;             /* completion bottom half */
        struct delayed_work     wq;             /* kevent work, see EVENT_* bits */

        struct usb_host_endpoint *ep_blkin;
        struct usb_host_endpoint *ep_blkout;
        struct usb_host_endpoint *ep_intr;

        int                     msg_enable;     /* netif_msg verbosity mask */

        struct urb              *urb_intr;      /* interrupt-endpoint status URB */
        struct usb_anchor       deferred;       /* URBs deferred during suspend */

        struct mutex            phy_mutex; /* for phy access */
        unsigned                pipe_in, pipe_out, pipe_intr;

        u32                     hard_mtu;       /* count any extra framing */
        size_t                  rx_urb_size;    /* size for rx urbs */

        unsigned long           flags;          /* EVENT_* bits for deferred work */

        wait_queue_head_t       *wait;
        unsigned char           suspend_count;

        unsigned                maxpacket;
        struct timer_list       delay;          /* throttling / retry timer */

        unsigned long           data[5];        /* data[0] holds lan78xx_priv */

        int                     link_on;        /* last link state we acted on */
        u8                      mdix_ctrl;

        u32                     devid;          /* chip ID/revision register */
        struct mii_bus          *mdiobus;
};
284
/* use ethtool to change the level for any given device */
/* -1 means "use the per-device default" (see netif_msg_init callers). */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
289
290 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
291 {
292         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
293         int ret;
294
295         if (!buf)
296                 return -ENOMEM;
297
298         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
299                               USB_VENDOR_REQUEST_READ_REGISTER,
300                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
301                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
302         if (likely(ret >= 0)) {
303                 le32_to_cpus(buf);
304                 *data = *buf;
305         } else {
306                 netdev_warn(dev->net,
307                             "Failed to read register index 0x%08x. ret = %d",
308                             index, ret);
309         }
310
311         kfree(buf);
312
313         return ret;
314 }
315
316 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
317 {
318         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
319         int ret;
320
321         if (!buf)
322                 return -ENOMEM;
323
324         *buf = data;
325         cpu_to_le32s(buf);
326
327         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
328                               USB_VENDOR_REQUEST_WRITE_REGISTER,
329                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
330                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
331         if (unlikely(ret < 0)) {
332                 netdev_warn(dev->net,
333                             "Failed to write register index 0x%08x. ret = %d",
334                             index, ret);
335         }
336
337         kfree(buf);
338
339         return ret;
340 }
341
/* Fetch the full hardware statistics block into @data.
 *
 * The device returns the counters as little-endian u32s; they are
 * byte-swapped word-by-word into the caller's struct.  Returns a
 * negative errno on failure, otherwise the usb_control_msg() result.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
                              struct lan78xx_statstage *data)
{
        int ret = 0;
        int i;
        struct lan78xx_statstage *stats;
        u32 *src;
        u32 *dst;

        /* bounce buffer: USB transfer buffers must be DMA-able */
        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return -ENOMEM;

        ret = usb_control_msg(dev->udev,
                              usb_rcvctrlpipe(dev->udev, 0),
                              USB_VENDOR_REQUEST_GET_STATS,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0,
                              0,
                              (void *)stats,
                              sizeof(*stats),
                              USB_CTRL_SET_TIMEOUT);
                              /* NOTE(review): SET timeout on an IN transfer;
                               * USB_CTRL_GET_TIMEOUT was likely intended —
                               * confirm (values are typically identical). */
        if (likely(ret >= 0)) {
                /* convert each LE32 counter in place, then copy out */
                src = (u32 *)stats;
                dst = (u32 *)data;
                for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
                        le32_to_cpus(&src[i]);
                        dst[i] = src[i];
                }
        } else {
                netdev_warn(dev->net,
                            "Failed to read stat ret = 0x%x", ret);
        }

        kfree(stats);

        return ret;
}
380
/* Poll MII_ACC until the MII busy bit clears, with a ~1 second timeout.
 * Must be called with phy_mutex held.  Returns 0 when the bus is idle,
 * -EIO on register-read failure or timeout.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
        unsigned long start_time = jiffies;
        u32 val;
        int ret;

        do {
                ret = lan78xx_read_reg(dev, MII_ACC, &val);
                if (unlikely(ret < 0))
                        return -EIO;

                if (!(val & MII_ACC_MII_BUSY_))
                        return 0;
        } while (!time_after(jiffies, start_time + HZ));

        return -EIO;
}
399
400 static inline u32 mii_access(int id, int index, int read)
401 {
402         u32 ret;
403
404         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
405         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406         if (read)
407                 ret |= MII_ACC_MII_READ_;
408         else
409                 ret |= MII_ACC_MII_WRITE_;
410         ret |= MII_ACC_MII_BUSY_;
411
412         return ret;
413 }
414
415 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
416 {
417         unsigned long start_time = jiffies;
418         u32 val;
419         int ret;
420
421         do {
422                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
423                 if (unlikely(ret < 0))
424                         return -EIO;
425
426                 if (!(val & E2P_CMD_EPC_BUSY_) ||
427                     (val & E2P_CMD_EPC_TIMEOUT_))
428                         break;
429                 usleep_range(40, 100);
430         } while (!time_after(jiffies, start_time + HZ));
431
432         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
433                 netdev_warn(dev->net, "EEPROM read operation timeout");
434                 return -EIO;
435         }
436
437         return 0;
438 }
439
440 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
441 {
442         unsigned long start_time = jiffies;
443         u32 val;
444         int ret;
445
446         do {
447                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
448                 if (unlikely(ret < 0))
449                         return -EIO;
450
451                 if (!(val & E2P_CMD_EPC_BUSY_))
452                         return 0;
453
454                 usleep_range(40, 100);
455         } while (!time_after(jiffies, start_time + HZ));
456
457         netdev_warn(dev->net, "EEPROM is busy");
458         return -EIO;
459 }
460
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per READ command.  Does not check for a valid EEPROM
 * signature; see lan78xx_read_eeprom() for that.
 * Returns 0 on success, negative errno on failure.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                   u32 length, u8 *data)
{
        u32 val;
        int i, ret;

        ret = lan78xx_eeprom_confirm_not_busy(dev);
        if (ret)
                return ret;

        for (i = 0; i < length; i++) {
                /* issue a single-byte READ at the current offset */
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
                if (unlikely(ret < 0))
                        return -EIO;

                ret = lan78xx_wait_eeprom(dev);
                if (ret < 0)
                        return ret;

                /* low byte of E2P_DATA holds the byte just read */
                ret = lan78xx_read_reg(dev, E2P_DATA, &val);
                if (unlikely(ret < 0))
                        return -EIO;

                data[i] = val & 0xFF;
                offset++;
        }

        return 0;
}
492
493 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
494                                u32 length, u8 *data)
495 {
496         u8 sig;
497         int ret;
498
499         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
500         if ((ret == 0) && (sig == EEPROM_INDICATOR))
501                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
502         else
503                 ret = -EINVAL;
504
505         return ret;
506 }
507
/* Write @length bytes from @data to the EEPROM starting at @offset,
 * one byte per WRITE command, after issuing a write/erase-enable.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): no write-disable (EWDS) is issued afterwards, so the
 * EEPROM is left write-enabled — confirm this is intentional.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                    u32 length, u8 *data)
{
        u32 val;
        int i, ret;

        ret = lan78xx_eeprom_confirm_not_busy(dev);
        if (ret)
                return ret;

        /* Issue write/erase enable command */
        val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
        ret = lan78xx_write_reg(dev, E2P_CMD, val);
        if (unlikely(ret < 0))
                return -EIO;

        ret = lan78xx_wait_eeprom(dev);
        if (ret < 0)
                return ret;

        for (i = 0; i < length; i++) {
                /* Fill data register */
                val = data[i];
                ret = lan78xx_write_reg(dev, E2P_DATA, val);
                if (ret < 0)
                        return ret;

                /* Send "write" command */
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
                if (ret < 0)
                        return ret;

                ret = lan78xx_wait_eeprom(dev);
                if (ret < 0)
                        return ret;

                offset++;
        }

        return 0;
}
551
/* Read @length bytes from the OTP array starting at @offset into @data.
 * Powers the OTP block up first if it is in power-down, then issues one
 * READ command per byte, polling OTP_STATUS between commands.
 * Returns 0 on success, -EIO on timeout.
 * NOTE(review): the return values of most register reads/writes in this
 * function are assigned to ret but never checked; in particular the
 * first read may fail and leave buf uninitialized — worth hardening.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
                                u32 length, u8 *data)
{
        int i;
        int ret;
        u32 buf;
        unsigned long timeout;

        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

        if (buf & OTP_PWR_DN_PWRDN_N_) {
                /* clear it and wait to be cleared */
                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

                timeout = jiffies + HZ;
                do {
                        usleep_range(1, 10);
                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_PWR_DN");
                                return -EIO;
                        }
                } while (buf & OTP_PWR_DN_PWRDN_N_);
        }

        for (i = 0; i < length; i++) {
                /* split the byte address across the two OTP address regs */
                ret = lan78xx_write_reg(dev, OTP_ADDR1,
                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
                ret = lan78xx_write_reg(dev, OTP_ADDR2,
                                        ((offset + i) & OTP_ADDR2_10_3));

                /* select READ and trigger the command */
                ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

                timeout = jiffies + HZ;
                do {
                        udelay(1);
                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_STATUS");
                                return -EIO;
                        }
                } while (buf & OTP_STATUS_BUSY_);

                ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

                data[i] = (u8)(buf & 0xFF);
        }

        return 0;
}
605
606 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
607                             u32 length, u8 *data)
608 {
609         u8 sig;
610         int ret;
611
612         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
613
614         if (ret == 0) {
615                 if (sig == OTP_INDICATOR_1)
616                         offset = offset;
617                 else if (sig == OTP_INDICATOR_2)
618                         offset += 0x100;
619                 else
620                         ret = -EINVAL;
621                 ret = lan78xx_read_raw_otp(dev, offset, length, data);
622         }
623
624         return ret;
625 }
626
627 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
628 {
629         int i, ret;
630
631         for (i = 0; i < 100; i++) {
632                 u32 dp_sel;
633
634                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
635                 if (unlikely(ret < 0))
636                         return -EIO;
637
638                 if (dp_sel & DP_SEL_DPRDY_)
639                         return 0;
640
641                 usleep_range(40, 100);
642         }
643
644         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
645
646         return -EIO;
647 }
648
/* Write @length words from @buf into internal RAM selected by
 * @ram_select, starting at word address @addr, via the DP_* data port.
 * Serialized by pdata->dataport_mutex.
 * NOTE(review): returns 0 (success) when autopm resume fails, and the
 * return codes of the DP register writes inside the loop are ignored —
 * both look like silent-failure paths worth confirming.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
                                  u32 addr, u32 length, u32 *buf)
{
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 dp_sel;
        int i, ret;

        if (usb_autopm_get_interface(dev->intf) < 0)
                        return 0;

        mutex_lock(&pdata->dataport_mutex);

        ret = lan78xx_dataport_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* point the data port at the requested RAM bank */
        ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

        dp_sel &= ~DP_SEL_RSEL_MASK_;
        dp_sel |= ram_select;
        ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

        for (i = 0; i < length; i++) {
                ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

                ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

                ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

                /* each word write must complete before the next */
                ret = lan78xx_dataport_wait_not_busy(dev);
                if (ret < 0)
                        goto done;
        }

done:
        mutex_unlock(&pdata->dataport_mutex);
        usb_autopm_put_interface(dev->intf);

        return ret;
}
689
690 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
691                                     int index, u8 addr[ETH_ALEN])
692 {
693         u32     temp;
694
695         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
696                 temp = addr[3];
697                 temp = addr[2] | (temp << 8);
698                 temp = addr[1] | (temp << 8);
699                 temp = addr[0] | (temp << 8);
700                 pdata->pfilter_table[index][1] = temp;
701                 temp = addr[5];
702                 temp = addr[4] | (temp << 8);
703                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
704                 pdata->pfilter_table[index][0] = temp;
705         }
706 }
707
/* returns hash bit number for given MAC address */
/* Bits 31..23 of the Ethernet CRC pick one of 512 multicast hash bits. */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
        return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
713
/* Work-queue handler: flush the filter state cached by
 * lan78xx_set_multicast() (hash table, perfect filters, RFE_CTL) to the
 * hardware.  Runs in process context because the register and dataport
 * writes may sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
        struct lan78xx_priv *pdata =
                        container_of(param, struct lan78xx_priv, set_multicast);
        struct lan78xx_net *dev = pdata->dev;
        int i;
        int ret;    /* NOTE(review): assigned but never checked below */

        netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
                  pdata->rfe_ctl);

        /* push the 512-bit multicast hash table into internal RAM */
        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
                               DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

        for (i = 1; i < NUM_OF_MAF; i++) {
                /* clear MAF_HI first so the entry is never half-valid */
                ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
                ret = lan78xx_write_reg(dev, MAF_LO(i),
                                        pdata->pfilter_table[i][1]);
                ret = lan78xx_write_reg(dev, MAF_HI(i),
                                        pdata->pfilter_table[i][0]);
        }

        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
738
/* ndo_set_rx_mode handler: recompute the cached receive-filter state
 * (RFE_CTL flags, perfect filters, multicast hash) under the rfe_ctl
 * spinlock, then defer the actual register writes to a work item since
 * this callback may run in atomic context.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

        /* start from a clean slate: drop all unicast/multicast modes */
        pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
                            RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

        for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
                        pdata->mchash_table[i] = 0;
        /* pfilter_table[0] has own HW address */
        for (i = 1; i < NUM_OF_MAF; i++) {
                        pdata->pfilter_table[i][0] =
                        pdata->pfilter_table[i][1] = 0;
        }

        /* broadcast is always accepted */
        pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

        if (dev->net->flags & IFF_PROMISC) {
                netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
                pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
        } else {
                if (dev->net->flags & IFF_ALLMULTI) {
                        netif_dbg(dev, drv, dev->net,
                                  "receive all multicast enabled");
                        pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
                }
        }

        if (netdev_mc_count(dev->net)) {
                struct netdev_hw_addr *ha;
                int i;   /* NOTE: intentionally shadows the outer i */

                netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

                pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

                i = 1;
                netdev_for_each_mc_addr(ha, netdev) {
                        /* set first 32 into Perfect Filter */
                        if (i < 33) {
                                lan78xx_set_addr_filter(pdata, i, ha->addr);
                        } else {
                                /* overflow into the 512-bit hash filter */
                                u32 bitnum = lan78xx_hash(ha->addr);

                                pdata->mchash_table[bitnum / 32] |=
                                                        (1 << (bitnum % 32));
                                pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
                        }
                        i++;
                }
        }

        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

        /* defer register writes to a sleepable context */
        schedule_work(&pdata->set_multicast);
}
801
/* Program MAC flow control from the resolved autonegotiation result.
 *
 * @duplex:  currently unused (flow control is resolved from the
 *           advertisement words alone) — kept for interface stability.
 * @lcladv:  local advertisement (MII_ADVERTISE).
 * @rmtadv:  link partner advertisement (MII_LPA).
 *
 * Always returns 0; register-write failures are ignored.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
                                      u16 lcladv, u16 rmtadv)
{
        u32 flow = 0, fct_flow = 0;
        int ret;

        u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

        if (cap & FLOW_CTRL_TX)
                flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);  /* max pause time */

        if (cap & FLOW_CTRL_RX)
                flow |= FLOW_CR_RX_FCEN_;

        /* FIFO thresholds depend on USB link speed
         * (presumably vendor-tuned values — no public derivation)
         */
        if (dev->udev->speed == USB_SPEED_SUPER)
                fct_flow = 0x817;
        else if (dev->udev->speed == USB_SPEED_HIGH)
                fct_flow = 0x211;

        netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
                  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
                  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

        ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

        /* threshold value should be set before enabling flow */
        ret = lan78xx_write_reg(dev, FLOW, flow);

        return 0;
}
832
/* Handle a PHY link-state change (scheduled via EVENT_LINK_RESET).
 *
 * Clears PHY and controller interrupt status, then either resets the
 * MAC on link-down or, on link-up, tunes USB3 U1/U2 power states to the
 * negotiated speed, reprograms flow control, and raises the carrier.
 *
 * Returns 0 or a negative errno on register/PHY access failure.
 * NOTE(review): when the link state is unchanged, the value returned is
 * the leftover ret from the INT_STS write (possibly positive) — callers
 * presumably only test for ret < 0; confirm.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        int ladv, radv, ret;
        u32 buf;

        /* clear PHY interrupt status */
        ret = phy_read(phydev, LAN88XX_INT_STS);
        if (unlikely(ret < 0))
                return -EIO;

        /* clear LAN78xx interrupt status */
        ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
        if (unlikely(ret < 0))
                return -EIO;

        phy_read_status(phydev);

        if (!phydev->link && dev->link_on) {
                dev->link_on = false;
                netif_carrier_off(dev->net);

                /* reset MAC */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                if (unlikely(ret < 0))
                        return -EIO;
                buf |= MAC_CR_RST_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;
        } else if (phydev->link && !dev->link_on) {
                dev->link_on = true;

                phy_ethtool_gset(phydev, &ecmd);

                /* re-clear PHY interrupt status (read-to-clear) */
                ret = phy_read(phydev, LAN88XX_INT_STS);

                if (dev->udev->speed == USB_SPEED_SUPER) {
                        if (ethtool_cmd_speed(&ecmd) == 1000) {
                                /* disable U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                                /* enable U1 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        } else {
                                /* enable U1 & U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U2_INIT_EN_;
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        }
                }

                ladv = phy_read(phydev, MII_ADVERTISE);
                if (ladv < 0)
                        return ladv;

                radv = phy_read(phydev, MII_LPA);
                if (radv < 0)
                        return radv;

                netif_dbg(dev, link, dev->net,
                          "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
                          ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

                ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
                netif_carrier_on(dev->net);
        }

        return ret;
}
908
/* Some work cannot be done in tasklet (softirq) context, so we defer it
 * to keventd via a workqueue.
 *
 * NOTE: annoying asymmetry: if the work item is already queued,
 * schedule_work() fails, but tasklet_schedule() doesn't. Hope the
 * failure is rare.
 */
914 void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
915 {
916         set_bit(work, &dev->flags);
917         if (!schedule_delayed_work(&dev->wq, 0))
918                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
919 }
920
921 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
922 {
923         u32 intdata;
924
925         if (urb->actual_length != 4) {
926                 netdev_warn(dev->net,
927                             "unexpected urb length %d", urb->actual_length);
928                 return;
929         }
930
931         memcpy(&intdata, urb->transfer_buffer, 4);
932         le32_to_cpus(&intdata);
933
934         if (intdata & INT_ENP_PHY_INT) {
935                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
936                           lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
937         } else
938                 netdev_warn(dev->net,
939                             "unexpected interrupt: 0x%08x\n", intdata);
940 }
941
/* ethtool get_eeprom_len: size of the EEPROM exposed to userspace */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
        return MAX_EEPROM_SIZE;
}
946
947 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
948                                       struct ethtool_eeprom *ee, u8 *data)
949 {
950         struct lan78xx_net *dev = netdev_priv(netdev);
951
952         ee->magic = LAN78XX_EEPROM_MAGIC;
953
954         return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
955 }
956
/* ethtool set_eeprom: program the external EEPROM (or OTP image).
 * Only a full-image update is accepted: offset 0, 512 bytes, and a
 * valid indicator byte at the start of the image.
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
                                      struct ethtool_eeprom *ee, u8 *data)
{
        struct lan78xx_net *dev = netdev_priv(netdev);

        /* Allow entire eeprom update only */
        if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
            (ee->offset == 0) &&
            (ee->len == 512) &&
            (data[0] == EEPROM_INDICATOR))
                return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
        else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
                 (ee->offset == 0) &&
                 (ee->len == 512) &&
                 (data[0] == OTP_INDICATOR_1))
                /* NOTE(review): the OTP branch calls the EEPROM write
                 * routine.  OTP programming presumably needs a dedicated
                 * OTP write helper — verify; mainline later switched
                 * this call to an OTP-specific routine.
                 */
                return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);

        return -EINVAL;
}
976
977 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
978                                 u8 *data)
979 {
980         if (stringset == ETH_SS_STATS)
981                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
982 }
983
984 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
985 {
986         if (sset == ETH_SS_STATS)
987                 return ARRAY_SIZE(lan78xx_gstrings);
988         else
989                 return -EOPNOTSUPP;
990 }
991
992 static void lan78xx_get_stats(struct net_device *netdev,
993                               struct ethtool_stats *stats, u64 *data)
994 {
995         struct lan78xx_net *dev = netdev_priv(netdev);
996         struct lan78xx_statstage lan78xx_stat;
997         u32 *p;
998         int i;
999
1000         if (usb_autopm_get_interface(dev->intf) < 0)
1001                 return;
1002
1003         if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1004                 p = (u32 *)&lan78xx_stat;
1005                 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1006                         data[i] = p[i];
1007         }
1008
1009         usb_autopm_put_interface(dev->intf);
1010 }
1011
1012 static void lan78xx_get_wol(struct net_device *netdev,
1013                             struct ethtool_wolinfo *wol)
1014 {
1015         struct lan78xx_net *dev = netdev_priv(netdev);
1016         int ret;
1017         u32 buf;
1018         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1019
1020         if (usb_autopm_get_interface(dev->intf) < 0)
1021                         return;
1022
1023         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1024         if (unlikely(ret < 0)) {
1025                 wol->supported = 0;
1026                 wol->wolopts = 0;
1027         } else {
1028                 if (buf & USB_CFG_RMT_WKP_) {
1029                         wol->supported = WAKE_ALL;
1030                         wol->wolopts = pdata->wol;
1031                 } else {
1032                         wol->supported = 0;
1033                         wol->wolopts = 0;
1034                 }
1035         }
1036
1037         usb_autopm_put_interface(dev->intf);
1038 }
1039
1040 static int lan78xx_set_wol(struct net_device *netdev,
1041                            struct ethtool_wolinfo *wol)
1042 {
1043         struct lan78xx_net *dev = netdev_priv(netdev);
1044         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1045         int ret;
1046
1047         ret = usb_autopm_get_interface(dev->intf);
1048         if (ret < 0)
1049                 return ret;
1050
1051         pdata->wol = 0;
1052         if (wol->wolopts & WAKE_UCAST)
1053                 pdata->wol |= WAKE_UCAST;
1054         if (wol->wolopts & WAKE_MCAST)
1055                 pdata->wol |= WAKE_MCAST;
1056         if (wol->wolopts & WAKE_BCAST)
1057                 pdata->wol |= WAKE_BCAST;
1058         if (wol->wolopts & WAKE_MAGIC)
1059                 pdata->wol |= WAKE_MAGIC;
1060         if (wol->wolopts & WAKE_PHY)
1061                 pdata->wol |= WAKE_PHY;
1062         if (wol->wolopts & WAKE_ARP)
1063                 pdata->wol |= WAKE_ARP;
1064
1065         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1066
1067         phy_ethtool_set_wol(netdev->phydev, wol);
1068
1069         usb_autopm_put_interface(dev->intf);
1070
1071         return ret;
1072 }
1073
/* ethtool get_eee: report Energy-Efficient Ethernet state.
 * Combines the PHY's EEE advertisement (phy_ethtool_get_eee) with the
 * MAC's EEE enable bit and the TX LPI request delay register.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        ret = phy_ethtool_get_eee(phydev, edata);
        if (ret < 0)
                goto exit;

        /* NOTE(review): result of this register read is not checked */
        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
        if (buf & MAC_CR_EEE_EN_) {
                edata->eee_enabled = true;
                /* active only when both link partners advertise EEE */
                edata->eee_active = !!(edata->advertised &
                                       edata->lp_advertised);
                edata->tx_lpi_enabled = true;
                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
                edata->tx_lpi_timer = buf;
        } else {
                edata->eee_enabled = false;
                edata->eee_active = false;
                edata->tx_lpi_enabled = false;
                edata->tx_lpi_timer = 0;
        }

        ret = 0;
exit:
        usb_autopm_put_interface(dev->intf);

        return ret;
}
1111
/* ethtool set_eee: enable or disable Energy-Efficient Ethernet.
 * Enabling sets the MAC EEE enable bit, pushes the advertisement to the
 * PHY and programs the TX LPI request delay; disabling only clears the
 * MAC enable bit.  Always returns 0 (register I/O errors are ignored).
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (edata->eee_enabled) {
                /* NOTE(review): register read/write results are ignored */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf |= MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);

                phy_ethtool_set_eee(net->phydev, edata);

                /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same
                 * microsecond unit (see lan78xx_get_eee)
                 */
                buf = (u32)edata->tx_lpi_timer;
                ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
        } else {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf &= ~MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
        }

        usb_autopm_put_interface(dev->intf);

        return 0;
}
1141
1142 static u32 lan78xx_get_link(struct net_device *net)
1143 {
1144         phy_read_status(net->phydev);
1145
1146         return net->phydev->link;
1147 }
1148
/* ethtool nway_reset: restart autonegotiation on the attached PHY */
int lan78xx_nway_reset(struct net_device *net)
{
        return phy_start_aneg(net->phydev);
}
1153
1154 static void lan78xx_get_drvinfo(struct net_device *net,
1155                                 struct ethtool_drvinfo *info)
1156 {
1157         struct lan78xx_net *dev = netdev_priv(net);
1158
1159         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1160         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1161         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1162 }
1163
/* ethtool get_msglevel: return the driver's netif message-enable mask */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
        struct lan78xx_net *dev = netdev_priv(net);

        return dev->msg_enable;
}
1170
/* ethtool set_msglevel: set the driver's netif message-enable mask */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
        struct lan78xx_net *dev = netdev_priv(net);

        dev->msg_enable = level;
}
1177
/* Read the PHY's extended mode control register (MDI/MDI-X bits live
 * there) by switching to extended register page 1 and back to page 0.
 * Returns the raw register value, or a negative phy_read() error.
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
        struct phy_device *phydev = net->phydev;
        int buf;

        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
        buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

        return buf;
}
1189
1190 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1191 {
1192         struct lan78xx_net *dev = netdev_priv(net);
1193         struct phy_device *phydev = net->phydev;
1194         int buf;
1195
1196         if (mdix_ctrl == ETH_TP_MDI) {
1197                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1198                           LAN88XX_EXT_PAGE_SPACE_1);
1199                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1200                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1201                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1202                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1203                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1204                           LAN88XX_EXT_PAGE_SPACE_0);
1205         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1206                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1207                           LAN88XX_EXT_PAGE_SPACE_1);
1208                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1209                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1210                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1211                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1212                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1213                           LAN88XX_EXT_PAGE_SPACE_0);
1214         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1215                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1216                           LAN88XX_EXT_PAGE_SPACE_1);
1217                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1218                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1219                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1220                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1221                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1222                           LAN88XX_EXT_PAGE_SPACE_0);
1223         }
1224         dev->mdix_ctrl = mdix_ctrl;
1225 }
1226
/* ethtool get_settings: fetch link settings from the PHY and augment
 * them with the current MDI/MDI-X mode read from the PHY's extended
 * mode control register.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        int buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        ret = phy_ethtool_gset(phydev, cmd);

        buf = lan78xx_get_mdix_status(net);

        /* translate hardware MDI-X bits into ethtool constants */
        buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
        if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
                cmd->eth_tp_mdix = ETH_TP_MDI;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1258
/* ethtool set_settings: apply MDI-X control if it changed, then push
 * speed/duplex/autoneg to the PHY.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret = 0;
        int temp;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
                lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
        }

        /* change speed & duplex */
        ret = phy_ethtool_sset(phydev, cmd);

        if (!cmd->autoneg) {
                /* force link down by momentarily setting the BMCR
                 * loopback bit (presumably so the forced speed/duplex
                 * takes effect on a fresh link-up) — TODO confirm
                 */
                temp = phy_read(phydev, MII_BMCR);
                phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
                mdelay(1);
                phy_write(phydev, MII_BMCR, temp);
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1289
/* ethtool operations implemented by this driver; hooked up to the net
 * device at probe time.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
        .get_link       = lan78xx_get_link,
        .nway_reset     = lan78xx_nway_reset,
        .get_drvinfo    = lan78xx_get_drvinfo,
        .get_msglevel   = lan78xx_get_msglevel,
        .set_msglevel   = lan78xx_set_msglevel,
        .get_settings   = lan78xx_get_settings,
        .set_settings   = lan78xx_set_settings,
        .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
        .get_eeprom     = lan78xx_ethtool_get_eeprom,
        .set_eeprom     = lan78xx_ethtool_set_eeprom,
        .get_ethtool_stats = lan78xx_get_stats,
        .get_sset_count = lan78xx_get_sset_count,
        .get_strings    = lan78xx_get_strings,
        .get_wol        = lan78xx_get_wol,
        .set_wol        = lan78xx_set_wol,
        .get_eee        = lan78xx_get_eee,
        .set_eee        = lan78xx_set_eee,
};
1309
1310 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1311 {
1312         if (!netif_running(netdev))
1313                 return -EINVAL;
1314
1315         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1316 }
1317
1318 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1319 {
1320         u32 addr_lo, addr_hi;
1321         int ret;
1322         u8 addr[6];
1323
1324         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1325         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1326
1327         addr[0] = addr_lo & 0xFF;
1328         addr[1] = (addr_lo >> 8) & 0xFF;
1329         addr[2] = (addr_lo >> 16) & 0xFF;
1330         addr[3] = (addr_lo >> 24) & 0xFF;
1331         addr[4] = addr_hi & 0xFF;
1332         addr[5] = (addr_hi >> 8) & 0xFF;
1333
1334         if (!is_valid_ether_addr(addr)) {
1335                 /* reading mac address from EEPROM or OTP */
1336                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1337                                          addr) == 0) ||
1338                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1339                                       addr) == 0)) {
1340                         if (is_valid_ether_addr(addr)) {
1341                                 /* eeprom values are valid so use them */
1342                                 netif_dbg(dev, ifup, dev->net,
1343                                           "MAC address read from EEPROM");
1344                         } else {
1345                                 /* generate random MAC */
1346                                 random_ether_addr(addr);
1347                                 netif_dbg(dev, ifup, dev->net,
1348                                           "MAC address set to random addr");
1349                         }
1350
1351                         addr_lo = addr[0] | (addr[1] << 8) |
1352                                   (addr[2] << 16) | (addr[3] << 24);
1353                         addr_hi = addr[4] | (addr[5] << 8);
1354
1355                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1356                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1357                 } else {
1358                         /* generate random MAC */
1359                         random_ether_addr(addr);
1360                         netif_dbg(dev, ifup, dev->net,
1361                                   "MAC address set to random addr");
1362                 }
1363         }
1364
1365         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1366         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1367
1368         ether_addr_copy(dev->net->dev_addr, addr);
1369 }
1370
/* MDIO read and write wrappers for phylib */

/* Read PHY register @idx of @phy_id through the chip's MII_ACC/MII_DATA
 * interface.  Serialized by dev->phy_mutex and run with the USB
 * interface resumed.  Returns the 16-bit value or a negative error.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
        struct lan78xx_net *dev = bus->priv;
        u32 val, addr;
        int ret;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        mutex_lock(&dev->phy_mutex);

        /* confirm MII not busy */
        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* set the address, index & direction (read from PHY) */
        addr = mii_access(phy_id, idx, MII_READ);
        ret = lan78xx_write_reg(dev, MII_ACC, addr);

        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* NOTE(review): ret from this register read is immediately
         * overwritten below, so a failed data read goes unnoticed
         */
        ret = lan78xx_read_reg(dev, MII_DATA, &val);

        ret = (int)(val & 0xFFFF);

done:
        mutex_unlock(&dev->phy_mutex);
        usb_autopm_put_interface(dev->intf);
        return ret;
}
1406
/* Write @regval to PHY register @idx of @phy_id through the chip's
 * MII_DATA/MII_ACC interface.  Serialized by dev->phy_mutex and run
 * with the USB interface resumed.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
                                 u16 regval)
{
        struct lan78xx_net *dev = bus->priv;
        u32 val, addr;
        int ret;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        mutex_lock(&dev->phy_mutex);

        /* confirm MII not busy */
        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        val = (u32)regval;
        ret = lan78xx_write_reg(dev, MII_DATA, val);

        /* set the address, index & direction (write to PHY) */
        addr = mii_access(phy_id, idx, MII_WRITE);
        ret = lan78xx_write_reg(dev, MII_ACC, addr);

        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* NOTE(review): returns 0 unconditionally; intermediate errors
         * (including the wait failure above) are not propagated
         */
done:
        mutex_unlock(&dev->phy_mutex);
        usb_autopm_put_interface(dev->intf);
        return 0;
}
1441
1442 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1443 {
1444         int ret;
1445         int i;
1446
1447         dev->mdiobus = mdiobus_alloc();
1448         if (!dev->mdiobus) {
1449                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1450                 return -ENOMEM;
1451         }
1452
1453         dev->mdiobus->priv = (void *)dev;
1454         dev->mdiobus->read = lan78xx_mdiobus_read;
1455         dev->mdiobus->write = lan78xx_mdiobus_write;
1456         dev->mdiobus->name = "lan78xx-mdiobus";
1457
1458         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1459                  dev->udev->bus->busnum, dev->udev->devnum);
1460
1461         dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1462         if (!dev->mdiobus->irq) {
1463                 ret = -ENOMEM;
1464                 goto exit1;
1465         }
1466
1467         /* handle our own interrupt */
1468         for (i = 0; i < PHY_MAX_ADDR; i++)
1469                 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1470
1471         switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1472         case 0x78000000:
1473         case 0x78500000:
1474                 /* set to internal PHY id */
1475                 dev->mdiobus->phy_mask = ~(1 << 1);
1476                 break;
1477         }
1478
1479         ret = mdiobus_register(dev->mdiobus);
1480         if (ret) {
1481                 netdev_err(dev->net, "can't register MDIO bus\n");
1482                 goto exit2;
1483         }
1484
1485         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1486         return 0;
1487 exit2:
1488         kfree(dev->mdiobus->irq);
1489 exit1:
1490         mdiobus_free(dev->mdiobus);
1491         return ret;
1492 }
1493
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
        mdiobus_unregister(dev->mdiobus);
        kfree(dev->mdiobus->irq);
        mdiobus_free(dev->mdiobus);
}
1500
/* phylib link-change callback.  Link events are handled by the driver's
 * own deferred work (EVENT_LINK_RESET, see lan78xx_status()), so this
 * callback intentionally does nothing.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
        /* nothing to do */
}
1505
1506 static int lan78xx_phy_init(struct lan78xx_net *dev)
1507 {
1508         int ret;
1509         struct phy_device *phydev = dev->net->phydev;
1510
1511         phydev = phy_find_first(dev->mdiobus);
1512         if (!phydev) {
1513                 netdev_err(dev->net, "no PHY found\n");
1514                 return -EIO;
1515         }
1516
1517         ret = phy_connect_direct(dev->net, phydev,
1518                                  lan78xx_link_status_change,
1519                                  PHY_INTERFACE_MODE_GMII);
1520         if (ret) {
1521                 netdev_err(dev->net, "can't attach PHY to %s\n",
1522                            dev->mdiobus->id);
1523                 return -EIO;
1524         }
1525
1526         /* set to AUTOMDIX */
1527         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1528
1529         /* MAC doesn't support 1000T Half */
1530         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1531         phydev->supported |= (SUPPORTED_10baseT_Half |
1532                               SUPPORTED_10baseT_Full |
1533                               SUPPORTED_100baseT_Half |
1534                               SUPPORTED_100baseT_Full |
1535                               SUPPORTED_1000baseT_Full |
1536                               SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1537         genphy_config_aneg(phydev);
1538
1539         /* Workaround to enable PHY interrupt.
1540          * phy_start_interrupts() is API for requesting and enabling
1541          * PHY interrupt. However, USB-to-Ethernet device can't use
1542          * request_irq() called in phy_start_interrupts().
1543          * Set PHY to PHY_HALTED and call phy_start()
1544          * to make a call to phy_enable_interrupts()
1545          */
1546         phy_stop(phydev);
1547         phy_start(phydev);
1548
1549         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1550
1551         return 0;
1552 }
1553
/* Program the MAC's maximum RX frame size.  @size excludes the 4-byte
 * FCS, which is added here.  The receiver is paused around the update
 * when it is running (presumably the size field must not be changed
 * while RX is enabled — TODO confirm against the datasheet).  Always
 * returns 0; register I/O errors are not propagated.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
        int ret = 0;
        u32 buf;
        bool rxenabled;

        ret = lan78xx_read_reg(dev, MAC_RX, &buf);

        rxenabled = ((buf & MAC_RX_RXEN_) != 0);

        if (rxenabled) {
                buf &= ~MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);
        }

        /* add 4 to size for FCS */
        buf &= ~MAC_RX_MAX_SIZE_MASK_;
        buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

        ret = lan78xx_write_reg(dev, MAC_RX, buf);

        if (rxenabled) {
                buf |= MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);
        }

        return 0;
}
1582
/* Unlink (cancel) every URB on queue @q that is not already in the
 * unlink_start state.  Returns the number of URBs for which an unlink
 * was successfully issued.  The queue lock is dropped around
 * usb_unlink_urb() since the unlink may complete synchronously and the
 * completion path needs the same lock.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned long flags;
        int count = 0;

        spin_lock_irqsave(&q->lock, flags);
        while (!skb_queue_empty(q)) {
                struct skb_data *entry;
                struct urb *urb;
                int ret;

                /* find the next entry not yet being unlinked */
                skb_queue_walk(q, skb) {
                        entry = (struct skb_data *)skb->cb;
                        if (entry->state != unlink_start)
                                goto found;
                }
                break;
found:
                entry->state = unlink_start;
                urb = entry->urb;

                /* Get reference count of the URB to avoid it to be
                 * freed during usb_unlink_urb, which may trigger
                 * use-after-free problem inside usb_unlink_urb since
                 * usb_unlink_urb is always racing with .complete
                 * handler(include defer_bh).
                 */
                usb_get_urb(urb);
                spin_unlock_irqrestore(&q->lock, flags);
                /* during some PM-driven resume scenarios,
                 * these (async) unlinks complete immediately
                 */
                ret = usb_unlink_urb(urb);
                if (ret != -EINPROGRESS && ret != 0)
                        netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
                else
                        count++;
                usb_put_urb(urb);
                spin_lock_irqsave(&q->lock, flags);
        }
        spin_unlock_irqrestore(&q->lock, flags);
        return count;
}
1627
/* ndo_change_mtu: validate the requested MTU, program the MAC's max RX
 * frame length, and grow the RX URB size when the new hard MTU exceeds
 * the current URB size.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        int ll_mtu = new_mtu + netdev->hard_header_len;
        int old_hard_mtu = dev->hard_mtu;
        int old_rx_urb_size = dev->rx_urb_size;
        int ret;

        if (new_mtu > MAX_SINGLE_PACKET_SIZE)
                return -EINVAL;

        if (new_mtu <= 0)
                return -EINVAL;
        /* no second zero-length packet read wanted after mtu-sized packets */
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;

        /* NOTE(review): error from the register update is ignored */
        ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

        netdev->mtu = new_mtu;

        dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
        if (dev->rx_urb_size == old_hard_mtu) {
                dev->rx_urb_size = dev->hard_mtu;
                if (dev->rx_urb_size > old_rx_urb_size) {
                        /* in-flight RX URBs are now too small; unlink
                         * them so they are resubmitted at the new size
                         */
                        if (netif_running(dev->net)) {
                                unlink_urbs(dev, &dev->rxq);
                                tasklet_schedule(&dev->bh);
                        }
                }
        }

        return 0;
}
1662
1663 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1664 {
1665         struct lan78xx_net *dev = netdev_priv(netdev);
1666         struct sockaddr *addr = p;
1667         u32 addr_lo, addr_hi;
1668         int ret;
1669
1670         if (netif_running(netdev))
1671                 return -EBUSY;
1672
1673         if (!is_valid_ether_addr(addr->sa_data))
1674                 return -EADDRNOTAVAIL;
1675
1676         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1677
1678         addr_lo = netdev->dev_addr[0] |
1679                   netdev->dev_addr[1] << 8 |
1680                   netdev->dev_addr[2] << 16 |
1681                   netdev->dev_addr[3] << 24;
1682         addr_hi = netdev->dev_addr[4] |
1683                   netdev->dev_addr[5] << 8;
1684
1685         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1686         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1687
1688         return 0;
1689 }
1690
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
                                netdev_features_t features)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        unsigned long flags;
        int ret;

        /* rfe_ctl is shared state; update the cached value under the lock */
        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

        if (features & NETIF_F_RXCSUM) {
                pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
                pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
        } else {
                pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
                pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
        }

        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

        /* USB register write happens outside the spinlock (may sleep) */
        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

        return 0;
}
1721
1722 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1723 {
1724         struct lan78xx_priv *pdata =
1725                         container_of(param, struct lan78xx_priv, set_vlan);
1726         struct lan78xx_net *dev = pdata->dev;
1727
1728         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1729                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1730 }
1731
1732 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1733                                    __be16 proto, u16 vid)
1734 {
1735         struct lan78xx_net *dev = netdev_priv(netdev);
1736         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1737         u16 vid_bit_index;
1738         u16 vid_dword_index;
1739
1740         vid_dword_index = (vid >> 5) & 0x7F;
1741         vid_bit_index = vid & 0x1F;
1742
1743         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1744
1745         /* defer register writes to a sleepable context */
1746         schedule_work(&pdata->set_vlan);
1747
1748         return 0;
1749 }
1750
1751 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1752                                     __be16 proto, u16 vid)
1753 {
1754         struct lan78xx_net *dev = netdev_priv(netdev);
1755         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1756         u16 vid_bit_index;
1757         u16 vid_dword_index;
1758
1759         vid_dword_index = (vid >> 5) & 0x7F;
1760         vid_bit_index = vid & 0x1F;
1761
1762         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1763
1764         /* defer register writes to a sleepable context */
1765         schedule_work(&pdata->set_vlan);
1766
1767         return 0;
1768 }
1769
1770 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1771 {
1772         int ret;
1773         u32 buf;
1774         u32 regs[6] = { 0 };
1775
1776         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1777         if (buf & USB_CFG1_LTM_ENABLE_) {
1778                 u8 temp[2];
1779                 /* Get values from EEPROM first */
1780                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1781                         if (temp[0] == 24) {
1782                                 ret = lan78xx_read_raw_eeprom(dev,
1783                                                               temp[1] * 2,
1784                                                               24,
1785                                                               (u8 *)regs);
1786                                 if (ret < 0)
1787                                         return;
1788                         }
1789                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1790                         if (temp[0] == 24) {
1791                                 ret = lan78xx_read_raw_otp(dev,
1792                                                            temp[1] * 2,
1793                                                            24,
1794                                                            (u8 *)regs);
1795                                 if (ret < 0)
1796                                         return;
1797                         }
1798                 }
1799         }
1800
1801         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1802         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1803         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1804         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1805         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1806         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1807 }
1808
/* lan78xx_reset - bring the chip into a known running state.
 *
 * Sequence: LiteReset with 1 s poll, MAC address setup, burst-cap and
 * queue sizing by USB speed, FIFO sizing, receive-filter defaults, PHY
 * reset with 1 s poll, then MAC/FCT TX and RX enables.
 *
 * Returns 0 on success or -EIO on a reset/PHY poll timeout.  Most
 * individual register accesses assign @ret but do not check it.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a LiteReset and poll until the device clears the bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* reserve headroom for the TX command words that
	 * lan78xx_tx_prep() pushes in front of each frame
	 */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size the bulk-in burst cap and URB queue depths by link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		/* NOTE(review): the full-speed branch leaves tx_qlen
		 * untouched, unlike the other two -- confirm intentional.
		 */
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear all pending interrupts, disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and wait for both reset-done and device-ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* gigabit MII with speed/duplex taken from PHY autonegotiation */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	buf |= MAC_CR_GMII_EN_;
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable MAC then FIFO-controller transmit */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC then FIFO-controller receive */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1943
/* lan78xx_open - ndo_open: reset the chip, bring up the PHY, submit the
 * link-status interrupt URB and start the TX queue.
 *
 * Returns 0 on success or a negative errno from the autopm wakeup,
 * reset, PHY init or interrupt-URB submission.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* wake the device for the register-heavy init sequence */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* have the kevent worker re-evaluate link state */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* NOTE(review): the success path falls through here too, so the
	 * autopm reference taken above is always dropped, while
	 * lan78xx_stop() drops another -- verify the PM usage count
	 * stays balanced across open/stop.
	 */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
1984
1985 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
1986 {
1987         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1988         DECLARE_WAITQUEUE(wait, current);
1989         int temp;
1990
1991         /* ensure there are no more active urbs */
1992         add_wait_queue(&unlink_wakeup, &wait);
1993         set_current_state(TASK_UNINTERRUPTIBLE);
1994         dev->wait = &unlink_wakeup;
1995         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
1996
1997         /* maybe wait for deletions to finish. */
1998         while (!skb_queue_empty(&dev->rxq) &&
1999                !skb_queue_empty(&dev->txq) &&
2000                !skb_queue_empty(&dev->done)) {
2001                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2002                 set_current_state(TASK_UNINTERRUPTIBLE);
2003                 netif_dbg(dev, ifdown, dev->net,
2004                           "waited for %d urb completions\n", temp);
2005         }
2006         set_current_state(TASK_RUNNING);
2007         dev->wait = NULL;
2008         remove_wait_queue(&unlink_wakeup, &wait);
2009 }
2010
/* lan78xx_stop - ndo_stop: detach the PHY, reap all URBs and quiesce
 * deferred work.  Always returns 0.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop and detach the PHY first so no link events race teardown */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and wait out all in-flight bulk URBs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* frames parked while RX was paused are no longer deliverable */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* NOTE(review): pairs with an autopm get taken elsewhere -- see
	 * the balance question noted at lan78xx_open().
	 */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2045
/* Thin wrapper: collapse a possibly fragmented skb into one linear
 * buffer.  Returns 0 on success or the negative errno from
 * skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int ret = skb_linearize(skb);

	return ret;
}
2050
2051 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2052                                        struct sk_buff *skb, gfp_t flags)
2053 {
2054         u32 tx_cmd_a, tx_cmd_b;
2055
2056         if (skb_headroom(skb) < TX_OVERHEAD) {
2057                 struct sk_buff *skb2;
2058
2059                 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2060                 dev_kfree_skb_any(skb);
2061                 skb = skb2;
2062                 if (!skb)
2063                         return NULL;
2064         }
2065
2066         if (lan78xx_linearize(skb) < 0)
2067                 return NULL;
2068
2069         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2070
2071         if (skb->ip_summed == CHECKSUM_PARTIAL)
2072                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2073
2074         tx_cmd_b = 0;
2075         if (skb_is_gso(skb)) {
2076                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2077
2078                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2079
2080                 tx_cmd_a |= TX_CMD_A_LSO_;
2081         }
2082
2083         if (skb_vlan_tag_present(skb)) {
2084                 tx_cmd_a |= TX_CMD_A_IVTG_;
2085                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2086         }
2087
2088         skb_push(skb, 4);
2089         cpu_to_le32s(&tx_cmd_b);
2090         memcpy(skb->data, &tx_cmd_b, 4);
2091
2092         skb_push(skb, 4);
2093         cpu_to_le32s(&tx_cmd_a);
2094         memcpy(skb->data, &tx_cmd_a, 4);
2095
2096         return skb;
2097 }
2098
/* defer_bh - move @skb from @list to dev->done and kick the tasklet.
 *
 * Records @state in the skb's control block and returns the previous
 * state so callers (e.g. rx_complete) can detect an in-progress unlink.
 * Called from URB completion context.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	/* irqsave on the first lock and irqrestore on the last unlock:
	 * interrupts stay disabled across both critical sections while
	 * the skb moves between queues.
	 */
	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the 0->1 transition needs a schedule; the tasklet drains
	 * the whole done queue in one pass
	 */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2121
/* tx_complete - bulk-out URB completion handler (interrupt context).
 *
 * Updates TX statistics, reacts to fatal USB errors, drops the async
 * autopm reference taken at submit time and hands the skb to the
 * bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear the halt from process
			 * context via the kevent worker
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* low-level bus problems: stop feeding the device */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2160
2161 static void lan78xx_queue_skb(struct sk_buff_head *list,
2162                               struct sk_buff *newsk, enum skb_state state)
2163 {
2164         struct skb_data *entry = (struct skb_data *)newsk->cb;
2165
2166         __skb_queue_tail(list, newsk);
2167         entry->state = state;
2168 }
2169
/* lan78xx_start_xmit - ndo_start_xmit: prep the skb and park it on
 * txq_pend; the actual USB submission happens in the tasklet.
 *
 * Always returns NETDEV_TX_OK -- a failed prep is accounted as a drop.
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* backpressure threshold of 10 pending frames --
		 * presumably tuned empirically, confirm before changing
		 */
		if (skb_queue_len(&dev->txq_pend) > 10)
			netif_stop_queue(net);
	} else {
		/* NOTE(review): this path assumes lan78xx_tx_prep()
		 * consumed the skb on failure -- verify, otherwise the
		 * original skb leaks here.
		 */
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2196
/* lan78xx_get_endpoints - locate bulk-in, bulk-out and interrupt-in
 * endpoints across the interface's altsettings.
 *
 * Fills dev->pipe_in / dev->pipe_out and records the interrupt-in
 * endpoint in dev->ep_intr (which may remain NULL).  Returns 0 on
 * success or -EINVAL when no altsetting provides both bulk endpoints.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first matching endpoint of each kind */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			/* NOTE(review): bmAttributes is compared without
			 * masking USB_ENDPOINT_XFERTYPE_MASK -- verify no
			 * relevant devices set the upper bits here.
			 */
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* stop at the first altsetting with both bulk endpoints */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2254
2255 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2256 {
2257         struct lan78xx_priv *pdata = NULL;
2258         int ret;
2259         int i;
2260
2261         ret = lan78xx_get_endpoints(dev, intf);
2262
2263         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2264
2265         pdata = (struct lan78xx_priv *)(dev->data[0]);
2266         if (!pdata) {
2267                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2268                 return -ENOMEM;
2269         }
2270
2271         pdata->dev = dev;
2272
2273         spin_lock_init(&pdata->rfe_ctl_lock);
2274         mutex_init(&pdata->dataport_mutex);
2275
2276         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2277
2278         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2279                 pdata->vlan_table[i] = 0;
2280
2281         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2282
2283         dev->net->features = 0;
2284
2285         if (DEFAULT_TX_CSUM_ENABLE)
2286                 dev->net->features |= NETIF_F_HW_CSUM;
2287
2288         if (DEFAULT_RX_CSUM_ENABLE)
2289                 dev->net->features |= NETIF_F_RXCSUM;
2290
2291         if (DEFAULT_TSO_CSUM_ENABLE)
2292                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2293
2294         dev->net->hw_features = dev->net->features;
2295
2296         /* Init all registers */
2297         ret = lan78xx_reset(dev);
2298
2299         lan78xx_mdio_init(dev);
2300
2301         dev->net->flags |= IFF_MULTICAST;
2302
2303         pdata->wol = WAKE_MAGIC;
2304
2305         return 0;
2306 }
2307
2308 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2309 {
2310         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2311
2312         lan78xx_remove_mdio(dev);
2313
2314         if (pdata) {
2315                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2316                 kfree(pdata);
2317                 pdata = NULL;
2318                 dev->data[0] = 0;
2319         }
2320 }
2321
2322 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2323                                     struct sk_buff *skb,
2324                                     u32 rx_cmd_a, u32 rx_cmd_b)
2325 {
2326         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2327             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2328                 skb->ip_summed = CHECKSUM_NONE;
2329         } else {
2330                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2331                 skb->ip_summed = CHECKSUM_COMPLETE;
2332         }
2333 }
2334
2335 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2336 {
2337         int             status;
2338
2339         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2340                 skb_queue_tail(&dev->rxq_pause, skb);
2341                 return;
2342         }
2343
2344         skb->protocol = eth_type_trans(skb, dev->net);
2345         dev->net->stats.rx_packets++;
2346         dev->net->stats.rx_bytes += skb->len;
2347
2348         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2349                   skb->len + sizeof(struct ethhdr), skb->protocol);
2350         memset(skb->cb, 0, sizeof(struct skb_data));
2351
2352         if (skb_defer_rx_timestamp(skb))
2353                 return;
2354
2355         status = netif_rx(skb);
2356         if (status != NET_RX_SUCCESS)
2357                 netif_dbg(dev, rx_err, dev->net,
2358                           "netif_rx status %d\n", status);
2359 }
2360
2361 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2362 {
2363         if (skb->len < dev->net->hard_header_len)
2364                 return 0;
2365
2366         while (skb->len > 0) {
2367                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2368                 u16 rx_cmd_c;
2369                 struct sk_buff *skb2;
2370                 unsigned char *packet;
2371
2372                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2373                 le32_to_cpus(&rx_cmd_a);
2374                 skb_pull(skb, sizeof(rx_cmd_a));
2375
2376                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2377                 le32_to_cpus(&rx_cmd_b);
2378                 skb_pull(skb, sizeof(rx_cmd_b));
2379
2380                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2381                 le16_to_cpus(&rx_cmd_c);
2382                 skb_pull(skb, sizeof(rx_cmd_c));
2383
2384                 packet = skb->data;
2385
2386                 /* get the packet length */
2387                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2388                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2389
2390                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2391                         netif_dbg(dev, rx_err, dev->net,
2392                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2393                 } else {
2394                         /* last frame in this batch */
2395                         if (skb->len == size) {
2396                                 lan78xx_rx_csum_offload(dev, skb,
2397                                                         rx_cmd_a, rx_cmd_b);
2398
2399                                 skb_trim(skb, skb->len - 4); /* remove fcs */
2400                                 skb->truesize = size + sizeof(struct sk_buff);
2401
2402                                 return 1;
2403                         }
2404
2405                         skb2 = skb_clone(skb, GFP_ATOMIC);
2406                         if (unlikely(!skb2)) {
2407                                 netdev_warn(dev->net, "Error allocating skb");
2408                                 return 0;
2409                         }
2410
2411                         skb2->len = size;
2412                         skb2->data = packet;
2413                         skb_set_tail_pointer(skb2, size);
2414
2415                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2416
2417                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
2418                         skb2->truesize = size + sizeof(struct sk_buff);
2419
2420                         lan78xx_skb_return(dev, skb2);
2421                 }
2422
2423                 skb_pull(skb, size);
2424
2425                 /* padding bytes before the next frame starts */
2426                 if (skb->len)
2427                         skb_pull(skb, align_count);
2428         }
2429
2430         if (unlikely(skb->len < 0)) {
2431                 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
2432                 return 0;
2433         }
2434
2435         return 1;
2436 }
2437
2438 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2439 {
2440         if (!lan78xx_rx(dev, skb)) {
2441                 dev->net->stats.rx_errors++;
2442                 goto done;
2443         }
2444
2445         if (skb->len) {
2446                 lan78xx_skb_return(dev, skb);
2447                 return;
2448         }
2449
2450         netif_dbg(dev, rx_err, dev->net, "drop\n");
2451         dev->net->stats.rx_errors++;
2452 done:
2453         skb_queue_tail(&dev->done, skb);
2454 }
2455
2456 static void rx_complete(struct urb *urb);
2457
/* rx_submit - allocate a receive skb for @urb and submit it on the
 * bulk-in pipe.
 *
 * Takes rxq.lock to serialise against ifdown/halt.  Consumes both the
 * skb and the urb on any failure.  Returns 0 or a negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, the interface is up,
	 * and RX is neither halted nor suspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the tasklet retry the submission later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2517
/* rx_complete - bulk-in URB completion handler (interrupt context).
 *
 * Classifies the URB status, hands the skb to the bottom half via
 * defer_bh() and, when the device is still healthy, resubmits the same
 * urb for the next buffer.  On fatal errors the urb is stashed in the
 * skb's control block so the cleanup path frees it instead.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt buffers can't even hold the RX command words */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* transfer urb ownership to the cleanup path */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() returns the prior state so an unlink in progress
	 * can suppress the resubmit below
	 */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2587
/* Bottom-half TX path: drains dev->txq_pend, coalescing queued packets into
 * one bulk-out URB (each packet except the last padded to a 4-byte boundary),
 * then submits it.  A GSO skb is always sent on its own URB, uncopied.
 * Runs in tasklet context; uses GFP_ATOMIC throughout.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	/* first pass: walk the pending queue WITHOUT dequeuing to decide how
	 * many packets fit into a single aggregate buffer
	 */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO skb is transmitted as-is, no copy */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* pad the running total to a u32 boundary before adding the
		 * next packet, so each packet starts 4-byte aligned
		 */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue the counted packets and pack them */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	/* per-skb bookkeeping lives in skb->cb for the completion handler */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* endpoint stalled: the kevent worker will clear the halt */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	/* note: the drop label is also reached from above with urb == NULL
	 * (alloc_skb failure) or before submission; usb_free_urb(NULL) is safe
	 */
	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2711
2712 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2713 {
2714         struct urb *urb;
2715         int i;
2716
2717         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2718                 for (i = 0; i < 10; i++) {
2719                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2720                                 break;
2721                         urb = usb_alloc_urb(0, GFP_ATOMIC);
2722                         if (urb)
2723                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2724                                         return;
2725                 }
2726
2727                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2728                         tasklet_schedule(&dev->bh);
2729         }
2730         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2731                 netif_wake_queue(dev->net);
2732 }
2733
2734 static void lan78xx_bh(unsigned long param)
2735 {
2736         struct lan78xx_net *dev = (struct lan78xx_net *)param;
2737         struct sk_buff *skb;
2738         struct skb_data *entry;
2739
2740         while ((skb = skb_dequeue(&dev->done))) {
2741                 entry = (struct skb_data *)(skb->cb);
2742                 switch (entry->state) {
2743                 case rx_done:
2744                         entry->state = rx_cleanup;
2745                         rx_process(dev, skb);
2746                         continue;
2747                 case tx_done:
2748                         usb_free_urb(entry->urb);
2749                         dev_kfree_skb(skb);
2750                         continue;
2751                 case rx_cleanup:
2752                         usb_free_urb(entry->urb);
2753                         dev_kfree_skb(skb);
2754                         continue;
2755                 default:
2756                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
2757                         return;
2758                 }
2759         }
2760
2761         if (netif_device_present(dev->net) && netif_running(dev->net)) {
2762                 if (!skb_queue_empty(&dev->txq_pend))
2763                         lan78xx_tx_bh(dev);
2764
2765                 if (!timer_pending(&dev->delay) &&
2766                     !test_bit(EVENT_RX_HALT, &dev->flags))
2767                         lan78xx_rx_bh(dev);
2768         }
2769 }
2770
2771 static void lan78xx_delayedwork(struct work_struct *work)
2772 {
2773         int status;
2774         struct lan78xx_net *dev;
2775
2776         dev = container_of(work, struct lan78xx_net, wq.work);
2777
2778         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2779                 unlink_urbs(dev, &dev->txq);
2780                 status = usb_autopm_get_interface(dev->intf);
2781                 if (status < 0)
2782                         goto fail_pipe;
2783                 status = usb_clear_halt(dev->udev, dev->pipe_out);
2784                 usb_autopm_put_interface(dev->intf);
2785                 if (status < 0 &&
2786                     status != -EPIPE &&
2787                     status != -ESHUTDOWN) {
2788                         if (netif_msg_tx_err(dev))
2789 fail_pipe:
2790                                 netdev_err(dev->net,
2791                                            "can't clear tx halt, status %d\n",
2792                                            status);
2793                 } else {
2794                         clear_bit(EVENT_TX_HALT, &dev->flags);
2795                         if (status != -ESHUTDOWN)
2796                                 netif_wake_queue(dev->net);
2797                 }
2798         }
2799         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2800                 unlink_urbs(dev, &dev->rxq);
2801                 status = usb_autopm_get_interface(dev->intf);
2802                 if (status < 0)
2803                                 goto fail_halt;
2804                 status = usb_clear_halt(dev->udev, dev->pipe_in);
2805                 usb_autopm_put_interface(dev->intf);
2806                 if (status < 0 &&
2807                     status != -EPIPE &&
2808                     status != -ESHUTDOWN) {
2809                         if (netif_msg_rx_err(dev))
2810 fail_halt:
2811                                 netdev_err(dev->net,
2812                                            "can't clear rx halt, status %d\n",
2813                                            status);
2814                 } else {
2815                         clear_bit(EVENT_RX_HALT, &dev->flags);
2816                         tasklet_schedule(&dev->bh);
2817                 }
2818         }
2819
2820         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2821                 int ret = 0;
2822
2823                 clear_bit(EVENT_LINK_RESET, &dev->flags);
2824                 status = usb_autopm_get_interface(dev->intf);
2825                 if (status < 0)
2826                         goto skip_reset;
2827                 if (lan78xx_link_reset(dev) < 0) {
2828                         usb_autopm_put_interface(dev->intf);
2829 skip_reset:
2830                         netdev_info(dev->net, "link reset failed (%d)\n",
2831                                     ret);
2832                 } else {
2833                         usb_autopm_put_interface(dev->intf);
2834                 }
2835         }
2836 }
2837
2838 static void intr_complete(struct urb *urb)
2839 {
2840         struct lan78xx_net *dev = urb->context;
2841         int status = urb->status;
2842
2843         switch (status) {
2844         /* success */
2845         case 0:
2846                 lan78xx_status(dev, urb);
2847                 break;
2848
2849         /* software-driven interface shutdown */
2850         case -ENOENT:                   /* urb killed */
2851         case -ESHUTDOWN:                /* hardware gone */
2852                 netif_dbg(dev, ifdown, dev->net,
2853                           "intr shutdown, code %d\n", status);
2854                 return;
2855
2856         /* NOTE:  not throttling like RX/TX, since this endpoint
2857          * already polls infrequently
2858          */
2859         default:
2860                 netdev_dbg(dev->net, "intr status %d\n", status);
2861                 break;
2862         }
2863
2864         if (!netif_running(dev->net))
2865                 return;
2866
2867         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2868         status = usb_submit_urb(urb, GFP_ATOMIC);
2869         if (status != 0)
2870                 netif_err(dev, timer, dev->net,
2871                           "intr resubmit --> %d\n", status);
2872 }
2873
2874 static void lan78xx_disconnect(struct usb_interface *intf)
2875 {
2876         struct lan78xx_net              *dev;
2877         struct usb_device               *udev;
2878         struct net_device               *net;
2879
2880         dev = usb_get_intfdata(intf);
2881         usb_set_intfdata(intf, NULL);
2882         if (!dev)
2883                 return;
2884
2885         udev = interface_to_usbdev(intf);
2886
2887         net = dev->net;
2888         unregister_netdev(net);
2889
2890         cancel_delayed_work_sync(&dev->wq);
2891
2892         usb_scuttle_anchored_urbs(&dev->deferred);
2893
2894         lan78xx_unbind(dev, intf);
2895
2896         usb_kill_urb(dev->urb_intr);
2897         usb_free_urb(dev->urb_intr);
2898
2899         free_netdev(net);
2900         usb_put_dev(udev);
2901 }
2902
2903 void lan78xx_tx_timeout(struct net_device *net)
2904 {
2905         struct lan78xx_net *dev = netdev_priv(net);
2906
2907         unlink_urbs(dev, &dev->txq);
2908         tasklet_schedule(&dev->bh);
2909 }
2910
/* net_device operations table wired into the netdev in lan78xx_probe() */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
2925
2926 static int lan78xx_probe(struct usb_interface *intf,
2927                          const struct usb_device_id *id)
2928 {
2929         struct lan78xx_net *dev;
2930         struct net_device *netdev;
2931         struct usb_device *udev;
2932         int ret;
2933         unsigned maxp;
2934         unsigned period;
2935         u8 *buf = NULL;
2936
2937         udev = interface_to_usbdev(intf);
2938         udev = usb_get_dev(udev);
2939
2940         ret = -ENOMEM;
2941         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2942         if (!netdev) {
2943                         dev_err(&intf->dev, "Error: OOM\n");
2944                         goto out1;
2945         }
2946
2947         /* netdev_printk() needs this */
2948         SET_NETDEV_DEV(netdev, &intf->dev);
2949
2950         dev = netdev_priv(netdev);
2951         dev->udev = udev;
2952         dev->intf = intf;
2953         dev->net = netdev;
2954         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2955                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2956
2957         skb_queue_head_init(&dev->rxq);
2958         skb_queue_head_init(&dev->txq);
2959         skb_queue_head_init(&dev->done);
2960         skb_queue_head_init(&dev->rxq_pause);
2961         skb_queue_head_init(&dev->txq_pend);
2962         mutex_init(&dev->phy_mutex);
2963
2964         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2965         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2966         init_usb_anchor(&dev->deferred);
2967
2968         netdev->netdev_ops = &lan78xx_netdev_ops;
2969         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2970         netdev->ethtool_ops = &lan78xx_ethtool_ops;
2971
2972         ret = lan78xx_bind(dev, intf);
2973         if (ret < 0)
2974                 goto out2;
2975         strcpy(netdev->name, "eth%d");
2976
2977         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2978                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2979
2980         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
2981         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
2982         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
2983
2984         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2985         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2986
2987         dev->pipe_intr = usb_rcvintpipe(dev->udev,
2988                                         dev->ep_intr->desc.bEndpointAddress &
2989                                         USB_ENDPOINT_NUMBER_MASK);
2990         period = dev->ep_intr->desc.bInterval;
2991
2992         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2993         buf = kmalloc(maxp, GFP_KERNEL);
2994         if (buf) {
2995                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2996                 if (!dev->urb_intr) {
2997                         kfree(buf);
2998                         goto out3;
2999                 } else {
3000                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3001                                          dev->pipe_intr, buf, maxp,
3002                                          intr_complete, dev, period);
3003                 }
3004         }
3005
3006         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3007
3008         /* driver requires remote-wakeup capability during autosuspend. */
3009         intf->needs_remote_wakeup = 1;
3010
3011         ret = register_netdev(netdev);
3012         if (ret != 0) {
3013                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3014                 goto out2;
3015         }
3016
3017         usb_set_intfdata(intf, dev);
3018
3019         ret = device_set_wakeup_enable(&udev->dev, true);
3020
3021          /* Default delay of 2sec has more overhead than advantage.
3022           * Set to 10sec as default.
3023           */
3024         pm_runtime_set_autosuspend_delay(&udev->dev,
3025                                          DEFAULT_AUTOSUSPEND_DELAY);
3026
3027         return 0;
3028
3029 out3:
3030         lan78xx_unbind(dev, intf);
3031 out2:
3032         free_netdev(netdev);
3033 out1:
3034         usb_put_dev(udev);
3035
3036         return ret;
3037 }
3038
3039 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3040 {
3041         const u16 crc16poly = 0x8005;
3042         int i;
3043         u16 bit, crc, msb;
3044         u8 data;
3045
3046         crc = 0xFFFF;
3047         for (i = 0; i < len; i++) {
3048                 data = *buf++;
3049                 for (bit = 0; bit < 8; bit++) {
3050                         msb = crc >> 15;
3051                         crc <<= 1;
3052
3053                         if (msb ^ (u16)(data & 1)) {
3054                                 crc ^= crc16poly;
3055                                 crc |= (u16)0x0001U;
3056                         }
3057                         data >>= 1;
3058                 }
3059         }
3060
3061         return crc;
3062 }
3063
/* Program Wake-on-LAN hardware filters and power-management control for a
 * full (non-auto) suspend, according to the @wol bitmask (WAKE_PHY,
 * WAKE_MAGIC, WAKE_BCAST, WAKE_MCAST, WAKE_UCAST, WAKE_ARP).
 * Disables the MAC, clears old wake state, installs wakeup-frame filters,
 * then re-enables the receiver so wake packets can be seen.
 * Always returns 0; individual register-access results are collected in
 * @ret but not checked (NOTE(review): errors are silently ignored).
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* destination-MAC prefixes used for multicast wakeup-frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	/* Ethertype 0x0806 = ARP, matched at frame offset 12 */
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear stale wake control/status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before installing new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic packet uses suspend mode 3 (deeper than mode 0) */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 7 = match the first 3 bytes of the destination MAC */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 3 = match the first 2 bytes (33:33 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000 = bytes 12-13 of the frame (Ethertype) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake packets are detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3206
/* USB suspend callback for both autosuspend (PMSG_IS_AUTO) and system
 * suspend.  On the first suspend (suspend_count 0 -> 1) the MAC is stopped
 * and all URBs torn down; an autosuspend request is refused with -EBUSY
 * while TX work is pending.  Then the hardware is armed for wakeup:
 * good-frame wake for autosuspend, or the user's WoL settings for system
 * suspend.
 * NOTE(review): most register read/write results are assigned to @ret but
 * not checked.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	ret = 0;
	event = message.event;

	/* only quiesce the hardware on the first (outermost) suspend */
	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* tx_bh will now anchor new URBs on dev->deferred */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* reset wake control/status before re-arming */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear wakeup status bits (write-1-to-clear) */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* receiver back on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: use the user-configured WoL mask */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

out:
	return ret;
}
3300
/* USB resume callback: on the last (outermost) resume, restart the
 * interrupt URB, flush TX URBs that lan78xx_tx_bh() anchored on
 * dev->deferred while asleep, and restart the queues.  Then clear all
 * wake-up state and re-enable the transmitter.
 * NOTE(review): suspend_count is decremented unconditionally; a resume
 * without a matching successful suspend would underflow it — assumed the
 * USB core balances these calls.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* submit the URBs deferred by tx_bh while the device slept */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the packet and release the autopm
				 * reference taken when it was queued
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake control/status left over from suspend */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the wake-reason status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3361
/* USB reset_resume callback: after a reset the chip has lost its state,
 * so fully re-initialize the hardware and PHY before running the normal
 * resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);
	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3372
/* USB IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3385
/* USB driver registration; autosuspend is supported and hub-initiated LPM
 * is disabled for this device.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");