]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/usb/lan78xx.c
lan78xx: Remove not defined MAC_CR_GMII_EN_ bit from MAC_CR.
[karo-tx-linux.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35
/* Module identification strings reported via MODULE_* macros. */
#define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME     "lan78xx"
#define DRIVER_VERSION  "1.0.1"

/* Netdev watchdog and deferred-work timing. */
#define TX_TIMEOUT_JIFFIES              (5 * HZ)
#define THROTTLE_JIFFIES                (HZ / 8)
#define UNLINK_TIMEOUT_MS               3

/* Upper bound on queued RX memory (~60 max-size Ethernet frames). */
#define RX_MAX_QUEUE_MEMORY             (60 * 1518)

/* USB bulk endpoint max packet size per bus speed (Super/High/Full). */
#define SS_USB_PKT_SIZE                 (1024)
#define HS_USB_PKT_SIZE                 (512)
#define FS_USB_PKT_SIZE                 (64)

/* Device FIFO sizing, burst/delay tuning and default feature enables. */
#define MAX_RX_FIFO_SIZE                (12 * 1024)
#define MAX_TX_FIFO_SIZE                (12 * 1024)
#define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY           (0x0800)
#define MAX_SINGLE_PACKET_SIZE          (9000)
#define DEFAULT_TX_CSUM_ENABLE          (true)
#define DEFAULT_RX_CSUM_ENABLE          (true)
#define DEFAULT_TSO_CSUM_ENABLE         (true)
#define DEFAULT_VLAN_FILTER_ENABLE      (true)
#define TX_OVERHEAD                     (8)
#define RXW_PADDING                     2

/* USB vendor/product IDs and EEPROM/OTP magic values. */
#define LAN78XX_USB_VENDOR_ID           (0x0424)
#define LAN7800_USB_PRODUCT_ID          (0x7800)
#define LAN7850_USB_PRODUCT_ID          (0x7850)
#define LAN78XX_EEPROM_MAGIC            (0x78A5)
#define LAN78XX_OTP_MAGIC               (0x78F3)

/* Direction selector for MII accesses (see mii_access()). */
#define MII_READ                        1
#define MII_WRITE                       0

/* Non-volatile storage layout markers and limits. */
#define EEPROM_INDICATOR                (0xA5)
#define EEPROM_MAC_OFFSET               (0x01)
#define MAX_EEPROM_SIZE                 512
#define OTP_INDICATOR_1                 (0xF3)
#define OTP_INDICATOR_2                 (0xF7)

/* All Wake-on-LAN trigger types supported by the device. */
#define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
                                         WAKE_MCAST | WAKE_BCAST | \
                                         WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE                    1
#define BULK_OUT_PIPE                   2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
88
/* ethtool statistics names; index order must match the field order of
 * struct lan78xx_statstage below.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
138
/* Hardware statistics block as returned by the vendor GET_STATS control
 * request (see lan78xx_read_stats): one little-endian u32 per counter,
 * copied word-by-word.  Field order must match lan78xx_gstrings above.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
188
struct lan78xx_net;

/* Driver-private receive-filter state, reachable via dev->data[0].
 * Filter configuration is staged under rfe_ctl_lock in atomic context
 * and flushed to the hardware from the work items, which may sleep.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table; [i][0]=MAF_HI, [i][1]=MAF_LO */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast register writes */
	struct work_struct set_vlan;	/* deferred VLAN table writes */
	u32 wol;			/* Wake-on-LAN option flags (WAKE_*) */
};
203
/* Lifecycle states of an skb/URB pair, tracked in skb->cb via
 * struct skb_data.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
213
/* Per-skb bookkeeping; must fit inside skb->cb. */
struct skb_data {               /* skb->cb is one of these */
	struct urb *urb;		/* URB carrying this skb */
	struct lan78xx_net *dev;	/* owning device */
	enum skb_state state;		/* current lifecycle stage */
	size_t length;			/* transfer length for accounting */
};
220
/* Context carried by asynchronous vendor control requests. */
struct usb_context {
	struct usb_ctrlrequest req;	/* the control setup packet */
	struct lan78xx_net *dev;	/* device the request belongs to */
};
225
/* Bit positions in lan78xx_net.flags; set via lan78xx_defer_kevent()
 * to request deferred handling in process context.
 */
#define EVENT_TX_HALT                   0
#define EVENT_RX_HALT                   1
#define EVENT_RX_MEMORY                 2
#define EVENT_STS_SPLIT                 3
#define EVENT_LINK_RESET                4
#define EVENT_RX_PAUSED                 5
#define EVENT_DEV_WAKING                6
#define EVENT_DEV_ASLEEP                7
#define EVENT_DEV_OPEN                  8
235
/* Per-device driver state (netdev_priv() of the net_device). */
struct lan78xx_net {
	struct net_device	*net;		/* associated network interface */
	struct usb_device	*udev;		/* underlying USB device */
	struct usb_interface	*intf;		/* bound USB interface */
	void			*driver_priv;	/* driver private data -- presumably
						 * same object as data[0]; confirm */

	int			rx_qlen;	/* target rx queue depth */
	int			tx_qlen;	/* target tx queue depth */
	struct sk_buff_head	rxq;		/* rx skbs with URBs in flight */
	struct sk_buff_head	txq;		/* tx skbs with URBs in flight */
	struct sk_buff_head	done;		/* completed skbs awaiting the tasklet */
	struct sk_buff_head	rxq_pause;	/* rx skbs held while paused */
	struct sk_buff_head	txq_pend;	/* tx skbs queued for submission */

	struct tasklet_struct	bh;		/* bottom-half completion processing */
	struct delayed_work	wq;		/* keventd work (see EVENT_* bits) */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif_msg_* verbosity bitmap */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;	/* URBs deferred during suspend */

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;		/* waiters for pending URB drain */
	unsigned char		suspend_count;	/* nested suspend depth */

	unsigned		maxpacket;	/* endpoint max packet size */
	struct timer_list	delay;		/* deferred-resubmit timer */

	unsigned long		data[5];	/* data[0]: struct lan78xx_priv * */

	int			link_on;	/* last observed link state */
	u8			mdix_ctrl;	/* MDI-X control setting */

	u32			devid;		/* device/chip identifier */
	struct mii_bus		*mdiobus;	/* MDIO bus for PHY access */
};
284
/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1: no override, keep the default level */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
289
290 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
291 {
292         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
293         int ret;
294
295         if (!buf)
296                 return -ENOMEM;
297
298         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
299                               USB_VENDOR_REQUEST_READ_REGISTER,
300                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
301                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
302         if (likely(ret >= 0)) {
303                 le32_to_cpus(buf);
304                 *data = *buf;
305         } else {
306                 netdev_warn(dev->net,
307                             "Failed to read register index 0x%08x. ret = %d",
308                             index, ret);
309         }
310
311         kfree(buf);
312
313         return ret;
314 }
315
316 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
317 {
318         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
319         int ret;
320
321         if (!buf)
322                 return -ENOMEM;
323
324         *buf = data;
325         cpu_to_le32s(buf);
326
327         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
328                               USB_VENDOR_REQUEST_WRITE_REGISTER,
329                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
330                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
331         if (unlikely(ret < 0)) {
332                 netdev_warn(dev->net,
333                             "Failed to write register index 0x%08x. ret = %d",
334                             index, ret);
335         }
336
337         kfree(buf);
338
339         return ret;
340 }
341
342 static int lan78xx_read_stats(struct lan78xx_net *dev,
343                               struct lan78xx_statstage *data)
344 {
345         int ret = 0;
346         int i;
347         struct lan78xx_statstage *stats;
348         u32 *src;
349         u32 *dst;
350
351         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
352         if (!stats)
353                 return -ENOMEM;
354
355         ret = usb_control_msg(dev->udev,
356                               usb_rcvctrlpipe(dev->udev, 0),
357                               USB_VENDOR_REQUEST_GET_STATS,
358                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
359                               0,
360                               0,
361                               (void *)stats,
362                               sizeof(*stats),
363                               USB_CTRL_SET_TIMEOUT);
364         if (likely(ret >= 0)) {
365                 src = (u32 *)stats;
366                 dst = (u32 *)data;
367                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
368                         le32_to_cpus(&src[i]);
369                         dst[i] = src[i];
370                 }
371         } else {
372                 netdev_warn(dev->net,
373                             "Failed to read stat ret = 0x%x", ret);
374         }
375
376         kfree(stats);
377
378         return ret;
379 }
380
381 /* Loop until the read is completed with timeout called with phy_mutex held */
382 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
383 {
384         unsigned long start_time = jiffies;
385         u32 val;
386         int ret;
387
388         do {
389                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
390                 if (unlikely(ret < 0))
391                         return -EIO;
392
393                 if (!(val & MII_ACC_MII_BUSY_))
394                         return 0;
395         } while (!time_after(jiffies, start_time + HZ));
396
397         return -EIO;
398 }
399
400 static inline u32 mii_access(int id, int index, int read)
401 {
402         u32 ret;
403
404         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
405         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406         if (read)
407                 ret |= MII_ACC_MII_READ_;
408         else
409                 ret |= MII_ACC_MII_WRITE_;
410         ret |= MII_ACC_MII_BUSY_;
411
412         return ret;
413 }
414
415 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
416 {
417         unsigned long start_time = jiffies;
418         u32 val;
419         int ret;
420
421         do {
422                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
423                 if (unlikely(ret < 0))
424                         return -EIO;
425
426                 if (!(val & E2P_CMD_EPC_BUSY_) ||
427                     (val & E2P_CMD_EPC_TIMEOUT_))
428                         break;
429                 usleep_range(40, 100);
430         } while (!time_after(jiffies, start_time + HZ));
431
432         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
433                 netdev_warn(dev->net, "EEPROM read operation timeout");
434                 return -EIO;
435         }
436
437         return 0;
438 }
439
440 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
441 {
442         unsigned long start_time = jiffies;
443         u32 val;
444         int ret;
445
446         do {
447                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
448                 if (unlikely(ret < 0))
449                         return -EIO;
450
451                 if (!(val & E2P_CMD_EPC_BUSY_))
452                         return 0;
453
454                 usleep_range(40, 100);
455         } while (!time_after(jiffies, start_time + HZ));
456
457         netdev_warn(dev->net, "EEPROM is busy");
458         return -EIO;
459 }
460
461 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
462                                    u32 length, u8 *data)
463 {
464         u32 val;
465         int i, ret;
466
467         ret = lan78xx_eeprom_confirm_not_busy(dev);
468         if (ret)
469                 return ret;
470
471         for (i = 0; i < length; i++) {
472                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
473                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
474                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
475                 if (unlikely(ret < 0))
476                         return -EIO;
477
478                 ret = lan78xx_wait_eeprom(dev);
479                 if (ret < 0)
480                         return ret;
481
482                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
483                 if (unlikely(ret < 0))
484                         return -EIO;
485
486                 data[i] = val & 0xFF;
487                 offset++;
488         }
489
490         return 0;
491 }
492
493 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
494                                u32 length, u8 *data)
495 {
496         u8 sig;
497         int ret;
498
499         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
500         if ((ret == 0) && (sig == EEPROM_INDICATOR))
501                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
502         else
503                 ret = -EINVAL;
504
505         return ret;
506 }
507
508 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
509                                     u32 length, u8 *data)
510 {
511         u32 val;
512         int i, ret;
513
514         ret = lan78xx_eeprom_confirm_not_busy(dev);
515         if (ret)
516                 return ret;
517
518         /* Issue write/erase enable command */
519         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
520         ret = lan78xx_write_reg(dev, E2P_CMD, val);
521         if (unlikely(ret < 0))
522                 return -EIO;
523
524         ret = lan78xx_wait_eeprom(dev);
525         if (ret < 0)
526                 return ret;
527
528         for (i = 0; i < length; i++) {
529                 /* Fill data register */
530                 val = data[i];
531                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
532                 if (ret < 0)
533                         return ret;
534
535                 /* Send "write" command */
536                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
537                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
538                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
539                 if (ret < 0)
540                         return ret;
541
542                 ret = lan78xx_wait_eeprom(dev);
543                 if (ret < 0)
544                         return ret;
545
546                 offset++;
547         }
548
549         return 0;
550 }
551
/* Read @length bytes from OTP memory starting at @offset into @data.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set) it is
 * powered up first, polling until the bit clears.  Each byte is then
 * fetched with an ADDR1/ADDR2 + READ + GO register sequence, polling
 * OTP_STATUS until the access completes.  Returns 0 on success or
 * -EIO on a poll timeout.
 *
 * NOTE(review): the individual register-access return values are
 * assigned to ret but never checked; a failed USB transfer is only
 * caught indirectly through the 1-second poll timeouts.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* latch the byte address: high bits in ADDR1, low in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* select the READ function and kick off the access */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
605
606 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
607                             u32 length, u8 *data)
608 {
609         u8 sig;
610         int ret;
611
612         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
613
614         if (ret == 0) {
615                 if (sig == OTP_INDICATOR_1)
616                         offset = offset;
617                 else if (sig == OTP_INDICATOR_2)
618                         offset += 0x100;
619                 else
620                         ret = -EINVAL;
621                 ret = lan78xx_read_raw_otp(dev, offset, length, data);
622         }
623
624         return ret;
625 }
626
627 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
628 {
629         int i, ret;
630
631         for (i = 0; i < 100; i++) {
632                 u32 dp_sel;
633
634                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
635                 if (unlikely(ret < 0))
636                         return -EIO;
637
638                 if (dp_sel & DP_SEL_DPRDY_)
639                         return 0;
640
641                 usleep_range(40, 100);
642         }
643
644         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
645
646         return -EIO;
647 }
648
649 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
650                                   u32 addr, u32 length, u32 *buf)
651 {
652         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
653         u32 dp_sel;
654         int i, ret;
655
656         if (usb_autopm_get_interface(dev->intf) < 0)
657                         return 0;
658
659         mutex_lock(&pdata->dataport_mutex);
660
661         ret = lan78xx_dataport_wait_not_busy(dev);
662         if (ret < 0)
663                 goto done;
664
665         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
666
667         dp_sel &= ~DP_SEL_RSEL_MASK_;
668         dp_sel |= ram_select;
669         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
670
671         for (i = 0; i < length; i++) {
672                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
673
674                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
675
676                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
677
678                 ret = lan78xx_dataport_wait_not_busy(dev);
679                 if (ret < 0)
680                         goto done;
681         }
682
683 done:
684         mutex_unlock(&pdata->dataport_mutex);
685         usb_autopm_put_interface(dev->intf);
686
687         return ret;
688 }
689
690 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
691                                     int index, u8 addr[ETH_ALEN])
692 {
693         u32     temp;
694
695         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
696                 temp = addr[3];
697                 temp = addr[2] | (temp << 8);
698                 temp = addr[1] | (temp << 8);
699                 temp = addr[0] | (temp << 8);
700                 pdata->pfilter_table[index][1] = temp;
701                 temp = addr[5];
702                 temp = addr[4] | (temp << 8);
703                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
704                 pdata->pfilter_table[index][0] = temp;
705         }
706 }
707
708 /* returns hash bit number for given MAC address */
709 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
710 {
711         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
712 }
713
714 static void lan78xx_deferred_multicast_write(struct work_struct *param)
715 {
716         struct lan78xx_priv *pdata =
717                         container_of(param, struct lan78xx_priv, set_multicast);
718         struct lan78xx_net *dev = pdata->dev;
719         int i;
720         int ret;
721
722         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
723                   pdata->rfe_ctl);
724
725         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
726                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
727
728         for (i = 1; i < NUM_OF_MAF; i++) {
729                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
730                 ret = lan78xx_write_reg(dev, MAF_LO(i),
731                                         pdata->pfilter_table[i][1]);
732                 ret = lan78xx_write_reg(dev, MAF_HI(i),
733                                         pdata->pfilter_table[i][0]);
734         }
735
736         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
737 }
738
/* Rebuild the receive-filter shadow state (promiscuous / allmulti /
 * perfect filters / multicast hash) under the rfe_ctl spinlock, then
 * defer the actual register writes to a work item, since USB register
 * access sleeps and this callback may not.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: disable all filtering modes */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast reception is always enabled */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses go to the 512-bit hash */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
801
802 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
803                                       u16 lcladv, u16 rmtadv)
804 {
805         u32 flow = 0, fct_flow = 0;
806         int ret;
807
808         u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
809
810         if (cap & FLOW_CTRL_TX)
811                 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
812
813         if (cap & FLOW_CTRL_RX)
814                 flow |= FLOW_CR_RX_FCEN_;
815
816         if (dev->udev->speed == USB_SPEED_SUPER)
817                 fct_flow = 0x817;
818         else if (dev->udev->speed == USB_SPEED_HIGH)
819                 fct_flow = 0x211;
820
821         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
822                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
823                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
824
825         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
826
827         /* threshold value should be set before enabling flow */
828         ret = lan78xx_write_reg(dev, FLOW, flow);
829
830         return 0;
831 }
832
/* Handle a PHY link-state change (scheduled via EVENT_LINK_RESET):
 * clear the PHY and device interrupt status, then on link-down reset
 * the MAC, and on link-up tune USB LPM (U1/U2) for the negotiated
 * speed, update flow control and report the carrier to the stack.
 * Returns 0 or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		/* re-read to acknowledge any latched PHY interrupt */
		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);
	}

	return ret;
}
908
/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
        /* Record the pending event in dev->flags, then kick the shared
         * delayed work.  If the work item is already queued,
         * schedule_delayed_work() returns false; the flag bit is still
         * set so the queued run can pick the event up, but log it in
         * case the event is lost.
         */
        set_bit(work, &dev->flags);
        if (!schedule_delayed_work(&dev->wq, 0))
                netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
920
921 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
922 {
923         u32 intdata;
924
925         if (urb->actual_length != 4) {
926                 netdev_warn(dev->net,
927                             "unexpected urb length %d", urb->actual_length);
928                 return;
929         }
930
931         memcpy(&intdata, urb->transfer_buffer, 4);
932         le32_to_cpus(&intdata);
933
934         if (intdata & INT_ENP_PHY_INT) {
935                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
936                           lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
937         } else
938                 netdev_warn(dev->net,
939                             "unexpected interrupt: 0x%08x\n", intdata);
940 }
941
/* ethtool: report the size of the device EEPROM, in bytes. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
        return MAX_EEPROM_SIZE;
}
946
/* ethtool: read ee->len bytes of raw EEPROM starting at ee->offset. */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
                                      struct ethtool_eeprom *ee, u8 *data)
{
        struct lan78xx_net *dev = netdev_priv(netdev);

        /* Magic lets userspace confirm it is talking to this driver. */
        ee->magic = LAN78XX_EEPROM_MAGIC;

        return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
956
957 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
958                                       struct ethtool_eeprom *ee, u8 *data)
959 {
960         struct lan78xx_net *dev = netdev_priv(netdev);
961
962         /* Allow entire eeprom update only */
963         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
964             (ee->offset == 0) &&
965             (ee->len == 512) &&
966             (data[0] == EEPROM_INDICATOR))
967                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
968         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
969                  (ee->offset == 0) &&
970                  (ee->len == 512) &&
971                  (data[0] == OTP_INDICATOR_1))
972                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
973
974         return -EINVAL;
975 }
976
977 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
978                                 u8 *data)
979 {
980         if (stringset == ETH_SS_STATS)
981                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
982 }
983
984 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
985 {
986         if (sset == ETH_SS_STATS)
987                 return ARRAY_SIZE(lan78xx_gstrings);
988         else
989                 return -EOPNOTSUPP;
990 }
991
/* ethtool: dump hardware statistics counters into @data.
 * The statistics block read from the device is copied out one u32 per
 * slot; the layout is assumed to match lan78xx_gstrings — TODO confirm.
 */
static void lan78xx_get_stats(struct net_device *netdev,
                              struct ethtool_stats *stats, u64 *data)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        struct lan78xx_statstage lan78xx_stat;
        u32 *p;
        int i;

        /* Wake the USB device; bail out silently if resume fails. */
        if (usb_autopm_get_interface(dev->intf) < 0)
                return;

        if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
                p = (u32 *)&lan78xx_stat;
                for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
                        data[i] = p[i];
        }

        usb_autopm_put_interface(dev->intf);
}
1011
1012 static void lan78xx_get_wol(struct net_device *netdev,
1013                             struct ethtool_wolinfo *wol)
1014 {
1015         struct lan78xx_net *dev = netdev_priv(netdev);
1016         int ret;
1017         u32 buf;
1018         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1019
1020         if (usb_autopm_get_interface(dev->intf) < 0)
1021                         return;
1022
1023         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1024         if (unlikely(ret < 0)) {
1025                 wol->supported = 0;
1026                 wol->wolopts = 0;
1027         } else {
1028                 if (buf & USB_CFG_RMT_WKP_) {
1029                         wol->supported = WAKE_ALL;
1030                         wol->wolopts = pdata->wol;
1031                 } else {
1032                         wol->supported = 0;
1033                         wol->wolopts = 0;
1034                 }
1035         }
1036
1037         usb_autopm_put_interface(dev->intf);
1038 }
1039
1040 static int lan78xx_set_wol(struct net_device *netdev,
1041                            struct ethtool_wolinfo *wol)
1042 {
1043         struct lan78xx_net *dev = netdev_priv(netdev);
1044         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1045         int ret;
1046
1047         ret = usb_autopm_get_interface(dev->intf);
1048         if (ret < 0)
1049                 return ret;
1050
1051         pdata->wol = 0;
1052         if (wol->wolopts & WAKE_UCAST)
1053                 pdata->wol |= WAKE_UCAST;
1054         if (wol->wolopts & WAKE_MCAST)
1055                 pdata->wol |= WAKE_MCAST;
1056         if (wol->wolopts & WAKE_BCAST)
1057                 pdata->wol |= WAKE_BCAST;
1058         if (wol->wolopts & WAKE_MAGIC)
1059                 pdata->wol |= WAKE_MAGIC;
1060         if (wol->wolopts & WAKE_PHY)
1061                 pdata->wol |= WAKE_PHY;
1062         if (wol->wolopts & WAKE_ARP)
1063                 pdata->wol |= WAKE_ARP;
1064
1065         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1066
1067         phy_ethtool_set_wol(netdev->phydev, wol);
1068
1069         usb_autopm_put_interface(dev->intf);
1070
1071         return ret;
1072 }
1073
/* ethtool: report Energy Efficient Ethernet state.
 * PHY-side EEE data comes from phy_ethtool_get_eee(); the MAC-side
 * enable bit and TX LPI delay are read from MAC_CR/EEE_TX_LPI_REQ_DLY.
 * NOTE(review): the lan78xx_read_reg() return codes are not checked
 * before using buf — matches surrounding code, but errors are silent.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        ret = phy_ethtool_get_eee(phydev, edata);
        if (ret < 0)
                goto exit;

        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
        if (buf & MAC_CR_EEE_EN_) {
                edata->eee_enabled = true;
                /* EEE is active only if both ends advertise it. */
                edata->eee_active = !!(edata->advertised &
                                       edata->lp_advertised);
                edata->tx_lpi_enabled = true;
                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
                edata->tx_lpi_timer = buf;
        } else {
                edata->eee_enabled = false;
                edata->eee_active = false;
                edata->tx_lpi_enabled = false;
                edata->tx_lpi_timer = 0;
        }

        ret = 0;
exit:
        usb_autopm_put_interface(dev->intf);

        return ret;
}
1111
/* ethtool: enable or disable Energy Efficient Ethernet.
 * Sets/clears MAC_CR_EEE_EN_ and, when enabling, programs the PHY
 * advertisement and the TX LPI request delay (microsecond units).
 * NOTE(review): register-access return codes are discarded and the
 * function always returns 0 — errors are silent by design here.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (edata->eee_enabled) {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf |= MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);

                phy_ethtool_set_eee(net->phydev, edata);

                /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share units. */
                buf = (u32)edata->tx_lpi_timer;
                ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
        } else {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf &= ~MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
        }

        usb_autopm_put_interface(dev->intf);

        return 0;
}
1141
/* ethtool: refresh PHY status and return the current link state. */
static u32 lan78xx_get_link(struct net_device *net)
{
        phy_read_status(net->phydev);

        return net->phydev->link;
}
1148
/* ethtool: restart autonegotiation on the attached PHY. */
int lan78xx_nway_reset(struct net_device *net)
{
        return phy_start_aneg(net->phydev);
}
1153
1154 static void lan78xx_get_drvinfo(struct net_device *net,
1155                                 struct ethtool_drvinfo *info)
1156 {
1157         struct lan78xx_net *dev = netdev_priv(net);
1158
1159         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1160         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1161         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1162 }
1163
/* ethtool: return the current netif message-level bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
        struct lan78xx_net *dev = netdev_priv(net);

        return dev->msg_enable;
}
1170
/* ethtool: set the netif message-level bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
        struct lan78xx_net *dev = netdev_priv(net);

        dev->msg_enable = level;
}
1177
/* Read the MDI/MDI-X control register from extended page 1 of the
 * LAN88xx PHY, restoring the page select to page 0 before returning.
 * Returns the raw register value (negative on phy_read() failure).
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
        struct phy_device *phydev = net->phydev;
        int buf;

        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
        buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

        return buf;
}
1189
1190 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1191 {
1192         struct lan78xx_net *dev = netdev_priv(net);
1193         struct phy_device *phydev = net->phydev;
1194         int buf;
1195
1196         if (mdix_ctrl == ETH_TP_MDI) {
1197                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1198                           LAN88XX_EXT_PAGE_SPACE_1);
1199                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1200                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1201                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1202                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1203                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1204                           LAN88XX_EXT_PAGE_SPACE_0);
1205         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1206                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1207                           LAN88XX_EXT_PAGE_SPACE_1);
1208                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1209                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1210                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1211                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1212                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1213                           LAN88XX_EXT_PAGE_SPACE_0);
1214         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1215                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1216                           LAN88XX_EXT_PAGE_SPACE_1);
1217                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1218                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1219                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1220                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1221                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1222                           LAN88XX_EXT_PAGE_SPACE_0);
1223         }
1224         dev->mdix_ctrl = mdix_ctrl;
1225 }
1226
/* ethtool: report link settings from the PHY, augmented with the
 * MDI/MDI-X state read from the LAN88xx extended registers.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        int buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        ret = phy_ethtool_gset(phydev, cmd);

        /* Translate the raw MDIX field into the ethtool constants. */
        buf = lan78xx_get_mdix_status(net);

        buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
        if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
                cmd->eth_tp_mdix = ETH_TP_MDI;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1258
/* ethtool: apply link settings (MDI/MDI-X, speed, duplex, autoneg). */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret = 0;
        int temp;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
                lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
        }

        /* change speed & duplex */
        ret = phy_ethtool_sset(phydev, cmd);

        if (!cmd->autoneg) {
                /* force link down */
                /* NOTE(review): BMCR_LOOPBACK is pulsed for 1 ms,
                 * presumably to bounce the link so the newly forced
                 * speed/duplex takes effect — confirm against the
                 * LAN88xx PHY datasheet.
                 */
                temp = phy_read(phydev, MII_BMCR);
                phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
                mdelay(1);
                phy_write(phydev, MII_BMCR, temp);
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1289
/* ethtool operations table for lan78xx network devices. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
        .get_link       = lan78xx_get_link,
        .nway_reset     = lan78xx_nway_reset,
        .get_drvinfo    = lan78xx_get_drvinfo,
        .get_msglevel   = lan78xx_get_msglevel,
        .set_msglevel   = lan78xx_set_msglevel,
        .get_settings   = lan78xx_get_settings,
        .set_settings   = lan78xx_set_settings,
        .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
        .get_eeprom     = lan78xx_ethtool_get_eeprom,
        .set_eeprom     = lan78xx_ethtool_set_eeprom,
        .get_ethtool_stats = lan78xx_get_stats,
        .get_sset_count = lan78xx_get_sset_count,
        .get_strings    = lan78xx_get_strings,
        .get_wol        = lan78xx_get_wol,
        .set_wol        = lan78xx_set_wol,
        .get_eee        = lan78xx_get_eee,
        .set_eee        = lan78xx_set_eee,
};
1309
1310 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1311 {
1312         if (!netif_running(netdev))
1313                 return -EINVAL;
1314
1315         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1316 }
1317
1318 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1319 {
1320         u32 addr_lo, addr_hi;
1321         int ret;
1322         u8 addr[6];
1323
1324         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1325         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1326
1327         addr[0] = addr_lo & 0xFF;
1328         addr[1] = (addr_lo >> 8) & 0xFF;
1329         addr[2] = (addr_lo >> 16) & 0xFF;
1330         addr[3] = (addr_lo >> 24) & 0xFF;
1331         addr[4] = addr_hi & 0xFF;
1332         addr[5] = (addr_hi >> 8) & 0xFF;
1333
1334         if (!is_valid_ether_addr(addr)) {
1335                 /* reading mac address from EEPROM or OTP */
1336                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1337                                          addr) == 0) ||
1338                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1339                                       addr) == 0)) {
1340                         if (is_valid_ether_addr(addr)) {
1341                                 /* eeprom values are valid so use them */
1342                                 netif_dbg(dev, ifup, dev->net,
1343                                           "MAC address read from EEPROM");
1344                         } else {
1345                                 /* generate random MAC */
1346                                 random_ether_addr(addr);
1347                                 netif_dbg(dev, ifup, dev->net,
1348                                           "MAC address set to random addr");
1349                         }
1350
1351                         addr_lo = addr[0] | (addr[1] << 8) |
1352                                   (addr[2] << 16) | (addr[3] << 24);
1353                         addr_hi = addr[4] | (addr[5] << 8);
1354
1355                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1356                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1357                 } else {
1358                         /* generate random MAC */
1359                         random_ether_addr(addr);
1360                         netif_dbg(dev, ifup, dev->net,
1361                                   "MAC address set to random addr");
1362                 }
1363         }
1364
1365         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1366         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1367
1368         ether_addr_copy(dev->net->dev_addr, addr);
1369 }
1370
/* MDIO read and write wrappers for phylib */

/* MDIO bus read callback: read 16-bit PHY register @idx on @phy_id.
 * Serialized by phy_mutex; holds a USB autopm reference for the
 * duration.  Returns the register value or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
        struct lan78xx_net *dev = bus->priv;
        u32 val, addr;
        int ret;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        mutex_lock(&dev->phy_mutex);

        /* confirm MII not busy */
        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* set the address, index & direction (read from PHY) */
        addr = mii_access(phy_id, idx, MII_READ);
        ret = lan78xx_write_reg(dev, MII_ACC, addr);

        /* wait for the hardware to complete the transaction */
        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        ret = lan78xx_read_reg(dev, MII_DATA, &val);

        /* only the low 16 bits hold the register value */
        ret = (int)(val & 0xFFFF);

done:
        mutex_unlock(&dev->phy_mutex);
        usb_autopm_put_interface(dev->intf);
        return ret;
}
1406
/* MDIO bus write callback: write @regval to PHY register @idx on
 * @phy_id.  Serialized by phy_mutex; holds a USB autopm reference.
 * NOTE(review): always returns 0, even when the busy-wait or register
 * writes fail — errors in the write path are silent.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
                                 u16 regval)
{
        struct lan78xx_net *dev = bus->priv;
        u32 val, addr;
        int ret;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        mutex_lock(&dev->phy_mutex);

        /* confirm MII not busy */
        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* stage the data word before triggering the transaction */
        val = (u32)regval;
        ret = lan78xx_write_reg(dev, MII_DATA, val);

        /* set the address, index & direction (write to PHY) */
        addr = mii_access(phy_id, idx, MII_WRITE);
        ret = lan78xx_write_reg(dev, MII_ACC, addr);

        /* wait for the hardware to complete the transaction */
        ret = lan78xx_phy_wait_not_busy(dev);
        if (ret < 0)
                goto done;

done:
        mutex_unlock(&dev->phy_mutex);
        usb_autopm_put_interface(dev->intf);
        return 0;
}
1441
1442 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1443 {
1444         int ret;
1445         int i;
1446
1447         dev->mdiobus = mdiobus_alloc();
1448         if (!dev->mdiobus) {
1449                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1450                 return -ENOMEM;
1451         }
1452
1453         dev->mdiobus->priv = (void *)dev;
1454         dev->mdiobus->read = lan78xx_mdiobus_read;
1455         dev->mdiobus->write = lan78xx_mdiobus_write;
1456         dev->mdiobus->name = "lan78xx-mdiobus";
1457
1458         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1459                  dev->udev->bus->busnum, dev->udev->devnum);
1460
1461         dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1462         if (!dev->mdiobus->irq) {
1463                 ret = -ENOMEM;
1464                 goto exit1;
1465         }
1466
1467         /* handle our own interrupt */
1468         for (i = 0; i < PHY_MAX_ADDR; i++)
1469                 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1470
1471         switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1472         case 0x78000000:
1473         case 0x78500000:
1474                 /* set to internal PHY id */
1475                 dev->mdiobus->phy_mask = ~(1 << 1);
1476                 break;
1477         }
1478
1479         ret = mdiobus_register(dev->mdiobus);
1480         if (ret) {
1481                 netdev_err(dev->net, "can't register MDIO bus\n");
1482                 goto exit2;
1483         }
1484
1485         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1486         return 0;
1487 exit2:
1488         kfree(dev->mdiobus->irq);
1489 exit1:
1490         mdiobus_free(dev->mdiobus);
1491         return ret;
1492 }
1493
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
        mdiobus_unregister(dev->mdiobus);
        kfree(dev->mdiobus->irq);
        mdiobus_free(dev->mdiobus);
}
1500
/* phylib link-change callback.  Intentionally empty: link changes are
 * handled via the EVENT_LINK_RESET kevent scheduled from the interrupt
 * endpoint (see lan78xx_status()).
 */
static void lan78xx_link_status_change(struct net_device *net)
{
        /* nothing to do */
}
1505
1506 static int lan78xx_phy_init(struct lan78xx_net *dev)
1507 {
1508         int ret;
1509         struct phy_device *phydev = dev->net->phydev;
1510
1511         phydev = phy_find_first(dev->mdiobus);
1512         if (!phydev) {
1513                 netdev_err(dev->net, "no PHY found\n");
1514                 return -EIO;
1515         }
1516
1517         ret = phy_connect_direct(dev->net, phydev,
1518                                  lan78xx_link_status_change,
1519                                  PHY_INTERFACE_MODE_GMII);
1520         if (ret) {
1521                 netdev_err(dev->net, "can't attach PHY to %s\n",
1522                            dev->mdiobus->id);
1523                 return -EIO;
1524         }
1525
1526         /* set to AUTOMDIX */
1527         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1528
1529         /* MAC doesn't support 1000T Half */
1530         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1531         phydev->supported |= (SUPPORTED_10baseT_Half |
1532                               SUPPORTED_10baseT_Full |
1533                               SUPPORTED_100baseT_Half |
1534                               SUPPORTED_100baseT_Full |
1535                               SUPPORTED_1000baseT_Full |
1536                               SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1537         genphy_config_aneg(phydev);
1538
1539         /* Workaround to enable PHY interrupt.
1540          * phy_start_interrupts() is API for requesting and enabling
1541          * PHY interrupt. However, USB-to-Ethernet device can't use
1542          * request_irq() called in phy_start_interrupts().
1543          * Set PHY to PHY_HALTED and call phy_start()
1544          * to make a call to phy_enable_interrupts()
1545          */
1546         phy_stop(phydev);
1547         phy_start(phydev);
1548
1549         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1550
1551         return 0;
1552 }
1553
/* Program the maximum RX frame length (@size excludes the 4-byte FCS).
 * The receiver is temporarily disabled around the update if it was
 * running.  NOTE(review): always returns 0; register-access failures
 * are ignored, matching surrounding code.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
        int ret = 0;
        u32 buf;
        bool rxenabled;

        ret = lan78xx_read_reg(dev, MAC_RX, &buf);

        rxenabled = ((buf & MAC_RX_RXEN_) != 0);

        /* pause reception while the max-size field is changed */
        if (rxenabled) {
                buf &= ~MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);
        }

        /* add 4 to size for FCS */
        buf &= ~MAC_RX_MAX_SIZE_MASK_;
        buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

        ret = lan78xx_write_reg(dev, MAC_RX, buf);

        /* re-enable reception if it was on */
        if (rxenabled) {
                buf |= MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);
        }

        return 0;
}
1582
/* Unlink every in-flight URB on queue @q, returning the number of URBs
 * whose unlink was successfully started.  Entries already marked
 * unlink_start are skipped.  The queue lock must be dropped around
 * usb_unlink_urb(), so the walk restarts from the head each iteration.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned long flags;
        int count = 0;

        spin_lock_irqsave(&q->lock, flags);
        while (!skb_queue_empty(q)) {
                struct skb_data *entry;
                struct urb *urb;
                int ret;

                /* find the next entry not yet being unlinked */
                skb_queue_walk(q, skb) {
                        entry = (struct skb_data *)skb->cb;
                        if (entry->state != unlink_start)
                                goto found;
                }
                break;
found:
                entry->state = unlink_start;
                urb = entry->urb;

                /* Get reference count of the URB to avoid it to be
                 * freed during usb_unlink_urb, which may trigger
                 * use-after-free problem inside usb_unlink_urb since
                 * usb_unlink_urb is always racing with .complete
                 * handler(include defer_bh).
                 */
                usb_get_urb(urb);
                spin_unlock_irqrestore(&q->lock, flags);
                /* during some PM-driven resume scenarios,
                 * these (async) unlinks complete immediately
                 */
                ret = usb_unlink_urb(urb);
                if (ret != -EINPROGRESS && ret != 0)
                        netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
                else
                        count++;
                usb_put_urb(urb);
                spin_lock_irqsave(&q->lock, flags);
        }
        spin_unlock_irqrestore(&q->lock, flags);
        return count;
}
1627
/* ndo_change_mtu: validate and apply a new MTU.
 * Rejects MTUs above MAX_SINGLE_PACKET_SIZE, non-positive MTUs, and
 * link-layer sizes that are an exact multiple of the USB max packet
 * size (which would require an extra zero-length packet per frame).
 * Grows the RX URB size and recycles in-flight RX URBs if needed.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        int ll_mtu = new_mtu + netdev->hard_header_len;
        int old_hard_mtu = dev->hard_mtu;
        int old_rx_urb_size = dev->rx_urb_size;
        int ret;

        if (new_mtu > MAX_SINGLE_PACKET_SIZE)
                return -EINVAL;

        if (new_mtu <= 0)
                return -EINVAL;
        /* no second zero-length packet read wanted after mtu-sized packets */
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;

        ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

        netdev->mtu = new_mtu;

        dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
        /* only resize URBs if rx_urb_size tracked the old hard_mtu */
        if (dev->rx_urb_size == old_hard_mtu) {
                dev->rx_urb_size = dev->hard_mtu;
                if (dev->rx_urb_size > old_rx_urb_size) {
                        if (netif_running(dev->net)) {
                                /* drop undersized RX URBs and refill */
                                unlink_urbs(dev, &dev->rxq);
                                tasklet_schedule(&dev->bh);
                        }
                }
        }

        return 0;
}
1662
/* ndo_set_mac_address: set a new station MAC address.
 * Only allowed while the interface is down; the address is written to
 * the RX_ADDRL/RX_ADDRH registers in little-endian byte order.
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        struct sockaddr *addr = p;
        u32 addr_lo, addr_hi;
        int ret;

        if (netif_running(netdev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        ether_addr_copy(netdev->dev_addr, addr->sa_data);

        /* pack the six address bytes into the two hardware registers */
        addr_lo = netdev->dev_addr[0] |
                  netdev->dev_addr[1] << 8 |
                  netdev->dev_addr[2] << 16 |
                  netdev->dev_addr[3] << 24;
        addr_hi = netdev->dev_addr[4] |
                  netdev->dev_addr[5] << 8;

        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

        return 0;
}
1690
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
                                netdev_features_t features)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        unsigned long flags;
        int ret;

        /* rfe_ctl is shared with other feature paths; update it under
         * its spinlock, then write the register outside the lock.
         */
        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

        if (features & NETIF_F_RXCSUM) {
                pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
                pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
        } else {
                pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
                pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
        }

        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

        return 0;
}
1721
/* Work handler: flush the cached VLAN filter bitmap to the device.
 * Runs in process context because the dataport write can sleep.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
        struct lan78xx_priv *pdata =
                        container_of(param, struct lan78xx_priv, set_vlan);
        struct lan78xx_net *dev = pdata->dev;

        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
                               DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
1731
1732 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1733                                    __be16 proto, u16 vid)
1734 {
1735         struct lan78xx_net *dev = netdev_priv(netdev);
1736         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1737         u16 vid_bit_index;
1738         u16 vid_dword_index;
1739
1740         vid_dword_index = (vid >> 5) & 0x7F;
1741         vid_bit_index = vid & 0x1F;
1742
1743         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1744
1745         /* defer register writes to a sleepable context */
1746         schedule_work(&pdata->set_vlan);
1747
1748         return 0;
1749 }
1750
1751 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1752                                     __be16 proto, u16 vid)
1753 {
1754         struct lan78xx_net *dev = netdev_priv(netdev);
1755         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1756         u16 vid_bit_index;
1757         u16 vid_dword_index;
1758
1759         vid_dword_index = (vid >> 5) & 0x7F;
1760         vid_bit_index = vid & 0x1F;
1761
1762         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1763
1764         /* defer register writes to a sleepable context */
1765         schedule_work(&pdata->set_vlan);
1766
1767         return 0;
1768 }
1769
1770 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1771 {
1772         int ret;
1773         u32 buf;
1774         u32 regs[6] = { 0 };
1775
1776         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1777         if (buf & USB_CFG1_LTM_ENABLE_) {
1778                 u8 temp[2];
1779                 /* Get values from EEPROM first */
1780                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1781                         if (temp[0] == 24) {
1782                                 ret = lan78xx_read_raw_eeprom(dev,
1783                                                               temp[1] * 2,
1784                                                               24,
1785                                                               (u8 *)regs);
1786                                 if (ret < 0)
1787                                         return;
1788                         }
1789                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1790                         if (temp[0] == 24) {
1791                                 ret = lan78xx_read_raw_otp(dev,
1792                                                            temp[1] * 2,
1793                                                            24,
1794                                                            (u8 *)regs);
1795                                 if (ret < 0)
1796                                         return;
1797                         }
1798                 }
1799         }
1800
1801         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1802         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1803         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1804         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1805         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1806         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1807 }
1808
/* Full device bring-up: soft reset (LiteReset) followed by programming
 * of MAC address, USB/burst configuration, FIFO sizes, receive filter,
 * PHY reset and TX/RX enable.  The register sequence below is
 * order-sensitive; do not reorder.
 *
 * Returns 0 on success or -EIO if the LiteReset or PHY reset does not
 * complete within one second.
 *
 * NOTE(review): the intermediate lan78xx_read_reg()/write_reg() return
 * values are assigned to `ret` but not checked; a failed transfer is
 * silently ignored except for the two polled resets.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger LiteReset and poll until the self-clearing bit drops */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size the bulk-in burst and urb queues by USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		/* full speed: tx_qlen is left at its previous value here */
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts, disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and wait for both reset completion and device ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* let the MAC track the PHY's negotiated speed/duplex */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable TX path: MAC transmitter then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable RX path: MAC receiver then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1940
/* ndo_open handler: resume the interface, reset and reprogram the
 * device, attach the PHY, start the interrupt (link-status) urb and
 * enable the TX queue.  A deferred EVENT_LINK_RESET brings the link up
 * once the PHY reports it.
 *
 * Returns 0 on success or a negative errno; on any failure after
 * autoresume the autopm reference is dropped again via `done`.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* wake the device before touching registers */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the link-state machine to re-evaluate */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
1981
1982 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
1983 {
1984         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1985         DECLARE_WAITQUEUE(wait, current);
1986         int temp;
1987
1988         /* ensure there are no more active urbs */
1989         add_wait_queue(&unlink_wakeup, &wait);
1990         set_current_state(TASK_UNINTERRUPTIBLE);
1991         dev->wait = &unlink_wakeup;
1992         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
1993
1994         /* maybe wait for deletions to finish. */
1995         while (!skb_queue_empty(&dev->rxq) &&
1996                !skb_queue_empty(&dev->txq) &&
1997                !skb_queue_empty(&dev->done)) {
1998                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
1999                 set_current_state(TASK_UNINTERRUPTIBLE);
2000                 netif_dbg(dev, ifdown, dev->net,
2001                           "waited for %d urb completions\n", temp);
2002         }
2003         set_current_state(TASK_RUNNING);
2004         dev->wait = NULL;
2005         remove_wait_queue(&unlink_wakeup, &wait);
2006 }
2007
/* ndo_stop handler: detach the PHY, quiesce all in-flight urbs and
 * deferred work, and drop the autopm reference taken in lan78xx_open().
 * Teardown order matters: PHY first, then queues/urbs, then workers.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* cancel and reap all bulk urbs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* drop any frames held back while RX was paused */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* balance usb_autopm_get_interface() from lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2042
/* Flatten any paged fragments so the TX data sits in one linear buffer.
 * Returns 0 on success or a negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc = skb_linearize(skb);

	return rc;
}
2047
2048 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2049                                        struct sk_buff *skb, gfp_t flags)
2050 {
2051         u32 tx_cmd_a, tx_cmd_b;
2052
2053         if (skb_headroom(skb) < TX_OVERHEAD) {
2054                 struct sk_buff *skb2;
2055
2056                 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2057                 dev_kfree_skb_any(skb);
2058                 skb = skb2;
2059                 if (!skb)
2060                         return NULL;
2061         }
2062
2063         if (lan78xx_linearize(skb) < 0)
2064                 return NULL;
2065
2066         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2067
2068         if (skb->ip_summed == CHECKSUM_PARTIAL)
2069                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2070
2071         tx_cmd_b = 0;
2072         if (skb_is_gso(skb)) {
2073                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2074
2075                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2076
2077                 tx_cmd_a |= TX_CMD_A_LSO_;
2078         }
2079
2080         if (skb_vlan_tag_present(skb)) {
2081                 tx_cmd_a |= TX_CMD_A_IVTG_;
2082                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2083         }
2084
2085         skb_push(skb, 4);
2086         cpu_to_le32s(&tx_cmd_b);
2087         memcpy(skb->data, &tx_cmd_b, 4);
2088
2089         skb_push(skb, 4);
2090         cpu_to_le32s(&tx_cmd_a);
2091         memcpy(skb->data, &tx_cmd_a, 4);
2092
2093         return skb;
2094 }
2095
/* Move @skb from @list (rxq or txq) to the done list and kick the bh
 * tasklet if the done list was previously empty.  Records @state in the
 * skb's control block and returns the state it had before, so callers
 * can detect a concurrent unlink.
 *
 * Locking: irqs are disabled once via spin_lock_irqsave() on the source
 * list, the lock is then swapped for dev->done.lock with plain
 * spin_lock/spin_unlock, and irqs are restored only with the final
 * spin_unlock_irqrestore() — the asymmetric pairing is deliberate and
 * keeps irqs off across both critical sections.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* first entry on done: schedule the bh to start draining it */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2118
2119 static void tx_complete(struct urb *urb)
2120 {
2121         struct sk_buff *skb = (struct sk_buff *)urb->context;
2122         struct skb_data *entry = (struct skb_data *)skb->cb;
2123         struct lan78xx_net *dev = entry->dev;
2124
2125         if (urb->status == 0) {
2126                 dev->net->stats.tx_packets++;
2127                 dev->net->stats.tx_bytes += entry->length;
2128         } else {
2129                 dev->net->stats.tx_errors++;
2130
2131                 switch (urb->status) {
2132                 case -EPIPE:
2133                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2134                         break;
2135
2136                 /* software-driven interface shutdown */
2137                 case -ECONNRESET:
2138                 case -ESHUTDOWN:
2139                         break;
2140
2141                 case -EPROTO:
2142                 case -ETIME:
2143                 case -EILSEQ:
2144                         netif_stop_queue(dev->net);
2145                         break;
2146                 default:
2147                         netif_dbg(dev, tx_err, dev->net,
2148                                   "tx err %d\n", entry->urb->status);
2149                         break;
2150                 }
2151         }
2152
2153         usb_autopm_put_interface_async(dev->intf);
2154
2155         defer_bh(dev, skb, &dev->txq, tx_done);
2156 }
2157
2158 static void lan78xx_queue_skb(struct sk_buff_head *list,
2159                               struct sk_buff *newsk, enum skb_state state)
2160 {
2161         struct skb_data *entry = (struct skb_data *)newsk->cb;
2162
2163         __skb_queue_tail(list, newsk);
2164         entry->state = state;
2165 }
2166
2167 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2168 {
2169         struct lan78xx_net *dev = netdev_priv(net);
2170         struct sk_buff *skb2 = NULL;
2171
2172         if (skb) {
2173                 skb_tx_timestamp(skb);
2174                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2175         }
2176
2177         if (skb2) {
2178                 skb_queue_tail(&dev->txq_pend, skb2);
2179
2180                 if (skb_queue_len(&dev->txq_pend) > 10)
2181                         netif_stop_queue(net);
2182         } else {
2183                 netif_dbg(dev, tx_err, dev->net,
2184                           "lan78xx_tx_prep return NULL\n");
2185                 dev->net->stats.tx_errors++;
2186                 dev->net->stats.tx_dropped++;
2187         }
2188
2189         tasklet_schedule(&dev->bh);
2190
2191         return NETDEV_TX_OK;
2192 }
2193
/* Scan the interface's altsettings for the endpoints this driver needs:
 * one bulk-in, one bulk-out and (optionally) one interrupt-in for link
 * status.  Fills dev->pipe_in, dev->pipe_out and dev->ep_intr.
 *
 * Returns 0 on success or -EINVAL if no altsetting provides both bulk
 * endpoints.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN is interesting */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* skip iso/control endpoints */
				continue;
			}
			/* remember the first endpoint of each role */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2251
2252 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2253 {
2254         struct lan78xx_priv *pdata = NULL;
2255         int ret;
2256         int i;
2257
2258         ret = lan78xx_get_endpoints(dev, intf);
2259
2260         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2261
2262         pdata = (struct lan78xx_priv *)(dev->data[0]);
2263         if (!pdata) {
2264                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2265                 return -ENOMEM;
2266         }
2267
2268         pdata->dev = dev;
2269
2270         spin_lock_init(&pdata->rfe_ctl_lock);
2271         mutex_init(&pdata->dataport_mutex);
2272
2273         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2274
2275         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2276                 pdata->vlan_table[i] = 0;
2277
2278         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2279
2280         dev->net->features = 0;
2281
2282         if (DEFAULT_TX_CSUM_ENABLE)
2283                 dev->net->features |= NETIF_F_HW_CSUM;
2284
2285         if (DEFAULT_RX_CSUM_ENABLE)
2286                 dev->net->features |= NETIF_F_RXCSUM;
2287
2288         if (DEFAULT_TSO_CSUM_ENABLE)
2289                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2290
2291         dev->net->hw_features = dev->net->features;
2292
2293         /* Init all registers */
2294         ret = lan78xx_reset(dev);
2295
2296         lan78xx_mdio_init(dev);
2297
2298         dev->net->flags |= IFF_MULTICAST;
2299
2300         pdata->wol = WAKE_MAGIC;
2301
2302         return 0;
2303 }
2304
2305 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2306 {
2307         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2308
2309         lan78xx_remove_mdio(dev);
2310
2311         if (pdata) {
2312                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2313                 kfree(pdata);
2314                 pdata = NULL;
2315                 dev->data[0] = 0;
2316         }
2317 }
2318
2319 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2320                                     struct sk_buff *skb,
2321                                     u32 rx_cmd_a, u32 rx_cmd_b)
2322 {
2323         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2324             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2325                 skb->ip_summed = CHECKSUM_NONE;
2326         } else {
2327                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2328                 skb->ip_summed = CHECKSUM_COMPLETE;
2329         }
2330 }
2331
2332 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2333 {
2334         int             status;
2335
2336         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2337                 skb_queue_tail(&dev->rxq_pause, skb);
2338                 return;
2339         }
2340
2341         skb->protocol = eth_type_trans(skb, dev->net);
2342         dev->net->stats.rx_packets++;
2343         dev->net->stats.rx_bytes += skb->len;
2344
2345         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2346                   skb->len + sizeof(struct ethhdr), skb->protocol);
2347         memset(skb->cb, 0, sizeof(struct skb_data));
2348
2349         if (skb_defer_rx_timestamp(skb))
2350                 return;
2351
2352         status = netif_rx(skb);
2353         if (status != NET_RX_SUCCESS)
2354                 netif_dbg(dev, rx_err, dev->net,
2355                           "netif_rx status %d\n", status);
2356 }
2357
2358 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2359 {
2360         if (skb->len < dev->net->hard_header_len)
2361                 return 0;
2362
2363         while (skb->len > 0) {
2364                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2365                 u16 rx_cmd_c;
2366                 struct sk_buff *skb2;
2367                 unsigned char *packet;
2368
2369                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2370                 le32_to_cpus(&rx_cmd_a);
2371                 skb_pull(skb, sizeof(rx_cmd_a));
2372
2373                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2374                 le32_to_cpus(&rx_cmd_b);
2375                 skb_pull(skb, sizeof(rx_cmd_b));
2376
2377                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2378                 le16_to_cpus(&rx_cmd_c);
2379                 skb_pull(skb, sizeof(rx_cmd_c));
2380
2381                 packet = skb->data;
2382
2383                 /* get the packet length */
2384                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2385                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2386
2387                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2388                         netif_dbg(dev, rx_err, dev->net,
2389                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2390                 } else {
2391                         /* last frame in this batch */
2392                         if (skb->len == size) {
2393                                 lan78xx_rx_csum_offload(dev, skb,
2394                                                         rx_cmd_a, rx_cmd_b);
2395
2396                                 skb_trim(skb, skb->len - 4); /* remove fcs */
2397                                 skb->truesize = size + sizeof(struct sk_buff);
2398
2399                                 return 1;
2400                         }
2401
2402                         skb2 = skb_clone(skb, GFP_ATOMIC);
2403                         if (unlikely(!skb2)) {
2404                                 netdev_warn(dev->net, "Error allocating skb");
2405                                 return 0;
2406                         }
2407
2408                         skb2->len = size;
2409                         skb2->data = packet;
2410                         skb_set_tail_pointer(skb2, size);
2411
2412                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2413
2414                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
2415                         skb2->truesize = size + sizeof(struct sk_buff);
2416
2417                         lan78xx_skb_return(dev, skb2);
2418                 }
2419
2420                 skb_pull(skb, size);
2421
2422                 /* padding bytes before the next frame starts */
2423                 if (skb->len)
2424                         skb_pull(skb, align_count);
2425         }
2426
2427         if (unlikely(skb->len < 0)) {
2428                 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
2429                 return 0;
2430         }
2431
2432         return 1;
2433 }
2434
2435 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2436 {
2437         if (!lan78xx_rx(dev, skb)) {
2438                 dev->net->stats.rx_errors++;
2439                 goto done;
2440         }
2441
2442         if (skb->len) {
2443                 lan78xx_skb_return(dev, skb);
2444                 return;
2445         }
2446
2447         netif_dbg(dev, rx_err, dev->net, "drop\n");
2448         dev->net->stats.rx_errors++;
2449 done:
2450         skb_queue_tail(&dev->done, skb);
2451 }
2452
2453 static void rx_complete(struct urb *urb);
2454
/* Allocate a receive skb and submit @urb on the bulk-in pipe.
 *
 * Takes ownership of @urb: on any failure (allocation, device gone,
 * stopped, halted endpoint) both the skb and the urb are freed and a
 * negative errno is returned; on success the skb is queued on dev->rxq
 * with state rx_start and 0 is returned.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* skb->cb carries the urb back-pointer for the completion path */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq lock also serialises the running/halt/asleep checks against
	 * teardown
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bh retry filling the rx queue */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2514
/* Bulk-in urb completion handler (interrupt context).
 *
 * Classifies the completion status into an skb_state, hands the skb to
 * the bh tasklet via defer_bh(), and — for transient statuses — resubmits
 * the urb immediately.  For terminal statuses the urb's ownership is
 * transferred back into entry->urb (and the local pointer NULLed) so
 * the bh cleanup path frees it instead of this function resubmitting.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but runt transfers are counted as errors */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stalled endpoint: recover from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* hand the urb to the cleanup path; do not resubmit */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* transient status and device still up: recycle the urb now */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2584
/* Tasklet-context TX path.
 *
 * Coalesces small pending skbs from dev->txq_pend back-to-back (each packet
 * aligned to a 32-bit boundary) into one bulk-OUT URB, or sends a GSO skb on
 * its own, then submits the URB.  Holds dev->txq.lock across autopm get,
 * URB fill and submit so the deferred-URB path in suspend/resume stays
 * consistent.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* First pass: walk the pending queue without dequeuing to decide how
	 * many packets fit into one MAX_SINGLE_PACKET_SIZE aggregate.
	 */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO skb goes out alone; pkt_cnt == 0 means it is
			 * the queue head, so the dequeue below pops it.
			 */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* next packet starts on a 32-bit boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* Second pass: dequeue the counted packets and pack them, 32-bit
	 * aligned, into the aggregate buffer.
	 */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	/* bookkeeping lives in skb->cb; tx_complete() reads it back */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle the queue once enough URBs are in flight */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* endpoint stalled: keventd clears the halt later */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		/* NOTE(review): skb may already have completed and been
		 * freed by tx_complete() at this point; reading
		 * skb->protocol here looks racy — confirm.
		 */
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2708
2709 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2710 {
2711         struct urb *urb;
2712         int i;
2713
2714         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2715                 for (i = 0; i < 10; i++) {
2716                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2717                                 break;
2718                         urb = usb_alloc_urb(0, GFP_ATOMIC);
2719                         if (urb)
2720                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2721                                         return;
2722                 }
2723
2724                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2725                         tasklet_schedule(&dev->bh);
2726         }
2727         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2728                 netif_wake_queue(dev->net);
2729 }
2730
2731 static void lan78xx_bh(unsigned long param)
2732 {
2733         struct lan78xx_net *dev = (struct lan78xx_net *)param;
2734         struct sk_buff *skb;
2735         struct skb_data *entry;
2736
2737         while ((skb = skb_dequeue(&dev->done))) {
2738                 entry = (struct skb_data *)(skb->cb);
2739                 switch (entry->state) {
2740                 case rx_done:
2741                         entry->state = rx_cleanup;
2742                         rx_process(dev, skb);
2743                         continue;
2744                 case tx_done:
2745                         usb_free_urb(entry->urb);
2746                         dev_kfree_skb(skb);
2747                         continue;
2748                 case rx_cleanup:
2749                         usb_free_urb(entry->urb);
2750                         dev_kfree_skb(skb);
2751                         continue;
2752                 default:
2753                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
2754                         return;
2755                 }
2756         }
2757
2758         if (netif_device_present(dev->net) && netif_running(dev->net)) {
2759                 if (!skb_queue_empty(&dev->txq_pend))
2760                         lan78xx_tx_bh(dev);
2761
2762                 if (!timer_pending(&dev->delay) &&
2763                     !test_bit(EVENT_RX_HALT, &dev->flags))
2764                         lan78xx_rx_bh(dev);
2765         }
2766 }
2767
2768 static void lan78xx_delayedwork(struct work_struct *work)
2769 {
2770         int status;
2771         struct lan78xx_net *dev;
2772
2773         dev = container_of(work, struct lan78xx_net, wq.work);
2774
2775         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2776                 unlink_urbs(dev, &dev->txq);
2777                 status = usb_autopm_get_interface(dev->intf);
2778                 if (status < 0)
2779                         goto fail_pipe;
2780                 status = usb_clear_halt(dev->udev, dev->pipe_out);
2781                 usb_autopm_put_interface(dev->intf);
2782                 if (status < 0 &&
2783                     status != -EPIPE &&
2784                     status != -ESHUTDOWN) {
2785                         if (netif_msg_tx_err(dev))
2786 fail_pipe:
2787                                 netdev_err(dev->net,
2788                                            "can't clear tx halt, status %d\n",
2789                                            status);
2790                 } else {
2791                         clear_bit(EVENT_TX_HALT, &dev->flags);
2792                         if (status != -ESHUTDOWN)
2793                                 netif_wake_queue(dev->net);
2794                 }
2795         }
2796         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2797                 unlink_urbs(dev, &dev->rxq);
2798                 status = usb_autopm_get_interface(dev->intf);
2799                 if (status < 0)
2800                                 goto fail_halt;
2801                 status = usb_clear_halt(dev->udev, dev->pipe_in);
2802                 usb_autopm_put_interface(dev->intf);
2803                 if (status < 0 &&
2804                     status != -EPIPE &&
2805                     status != -ESHUTDOWN) {
2806                         if (netif_msg_rx_err(dev))
2807 fail_halt:
2808                                 netdev_err(dev->net,
2809                                            "can't clear rx halt, status %d\n",
2810                                            status);
2811                 } else {
2812                         clear_bit(EVENT_RX_HALT, &dev->flags);
2813                         tasklet_schedule(&dev->bh);
2814                 }
2815         }
2816
2817         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2818                 int ret = 0;
2819
2820                 clear_bit(EVENT_LINK_RESET, &dev->flags);
2821                 status = usb_autopm_get_interface(dev->intf);
2822                 if (status < 0)
2823                         goto skip_reset;
2824                 if (lan78xx_link_reset(dev) < 0) {
2825                         usb_autopm_put_interface(dev->intf);
2826 skip_reset:
2827                         netdev_info(dev->net, "link reset failed (%d)\n",
2828                                     ret);
2829                 } else {
2830                         usb_autopm_put_interface(dev->intf);
2831                 }
2832         }
2833 }
2834
2835 static void intr_complete(struct urb *urb)
2836 {
2837         struct lan78xx_net *dev = urb->context;
2838         int status = urb->status;
2839
2840         switch (status) {
2841         /* success */
2842         case 0:
2843                 lan78xx_status(dev, urb);
2844                 break;
2845
2846         /* software-driven interface shutdown */
2847         case -ENOENT:                   /* urb killed */
2848         case -ESHUTDOWN:                /* hardware gone */
2849                 netif_dbg(dev, ifdown, dev->net,
2850                           "intr shutdown, code %d\n", status);
2851                 return;
2852
2853         /* NOTE:  not throttling like RX/TX, since this endpoint
2854          * already polls infrequently
2855          */
2856         default:
2857                 netdev_dbg(dev->net, "intr status %d\n", status);
2858                 break;
2859         }
2860
2861         if (!netif_running(dev->net))
2862                 return;
2863
2864         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2865         status = usb_submit_urb(urb, GFP_ATOMIC);
2866         if (status != 0)
2867                 netif_err(dev, timer, dev->net,
2868                           "intr resubmit --> %d\n", status);
2869 }
2870
/* USB disconnect callback: tear the device down in the reverse order of
 * probe — unregister the netdev first so no new I/O starts, then cancel
 * deferred work/URBs, unbind driver state, and drop the refs probe took.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	/* stops open/stop/xmit entry points before freeing anything */
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs parked during autosuspend */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	/* balances usb_get_dev() in probe */
	usb_put_dev(udev);
}
2899
/* ndo_tx_timeout: the netdev watchdog fired (TX_TIMEOUT_JIFFIES without
 * progress).  Unlink all in-flight TX URBs and kick the tasklet so
 * transmission restarts from the pending queue.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2907
/* net_device callback table wired into the netdev in lan78xx_probe() */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
2922
2923 static int lan78xx_probe(struct usb_interface *intf,
2924                          const struct usb_device_id *id)
2925 {
2926         struct lan78xx_net *dev;
2927         struct net_device *netdev;
2928         struct usb_device *udev;
2929         int ret;
2930         unsigned maxp;
2931         unsigned period;
2932         u8 *buf = NULL;
2933
2934         udev = interface_to_usbdev(intf);
2935         udev = usb_get_dev(udev);
2936
2937         ret = -ENOMEM;
2938         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2939         if (!netdev) {
2940                         dev_err(&intf->dev, "Error: OOM\n");
2941                         goto out1;
2942         }
2943
2944         /* netdev_printk() needs this */
2945         SET_NETDEV_DEV(netdev, &intf->dev);
2946
2947         dev = netdev_priv(netdev);
2948         dev->udev = udev;
2949         dev->intf = intf;
2950         dev->net = netdev;
2951         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2952                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2953
2954         skb_queue_head_init(&dev->rxq);
2955         skb_queue_head_init(&dev->txq);
2956         skb_queue_head_init(&dev->done);
2957         skb_queue_head_init(&dev->rxq_pause);
2958         skb_queue_head_init(&dev->txq_pend);
2959         mutex_init(&dev->phy_mutex);
2960
2961         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2962         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2963         init_usb_anchor(&dev->deferred);
2964
2965         netdev->netdev_ops = &lan78xx_netdev_ops;
2966         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2967         netdev->ethtool_ops = &lan78xx_ethtool_ops;
2968
2969         ret = lan78xx_bind(dev, intf);
2970         if (ret < 0)
2971                 goto out2;
2972         strcpy(netdev->name, "eth%d");
2973
2974         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2975                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2976
2977         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
2978         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
2979         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
2980
2981         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2982         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2983
2984         dev->pipe_intr = usb_rcvintpipe(dev->udev,
2985                                         dev->ep_intr->desc.bEndpointAddress &
2986                                         USB_ENDPOINT_NUMBER_MASK);
2987         period = dev->ep_intr->desc.bInterval;
2988
2989         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2990         buf = kmalloc(maxp, GFP_KERNEL);
2991         if (buf) {
2992                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2993                 if (!dev->urb_intr) {
2994                         kfree(buf);
2995                         goto out3;
2996                 } else {
2997                         usb_fill_int_urb(dev->urb_intr, dev->udev,
2998                                          dev->pipe_intr, buf, maxp,
2999                                          intr_complete, dev, period);
3000                 }
3001         }
3002
3003         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3004
3005         /* driver requires remote-wakeup capability during autosuspend. */
3006         intf->needs_remote_wakeup = 1;
3007
3008         ret = register_netdev(netdev);
3009         if (ret != 0) {
3010                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3011                 goto out2;
3012         }
3013
3014         usb_set_intfdata(intf, dev);
3015
3016         ret = device_set_wakeup_enable(&udev->dev, true);
3017
3018          /* Default delay of 2sec has more overhead than advantage.
3019           * Set to 10sec as default.
3020           */
3021         pm_runtime_set_autosuspend_delay(&udev->dev,
3022                                          DEFAULT_AUTOSUSPEND_DELAY);
3023
3024         return 0;
3025
3026 out3:
3027         lan78xx_unbind(dev, intf);
3028 out2:
3029         free_netdev(netdev);
3030 out1:
3031         usb_put_dev(udev);
3032
3033         return ret;
3034 }
3035
3036 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3037 {
3038         const u16 crc16poly = 0x8005;
3039         int i;
3040         u16 bit, crc, msb;
3041         u8 data;
3042
3043         crc = 0xFFFF;
3044         for (i = 0; i < len; i++) {
3045                 data = *buf++;
3046                 for (bit = 0; bit < 8; bit++) {
3047                         msb = crc >> 15;
3048                         crc <<= 1;
3049
3050                         if (msb ^ (u16)(data & 1)) {
3051                                 crc ^= crc16poly;
3052                                 crc |= (u16)0x0001U;
3053                         }
3054                         data >>= 1;
3055                 }
3056         }
3057
3058         return crc;
3059 }
3060
/* Program Wake-on-LAN hardware state for a system (non-auto) suspend.
 *
 * @wol: bitmask of WAKE_* flags from ethtool.
 *
 * Disables TX/RX, clears stale wake status, then builds up WUCSR and
 * PMT_CTL values per requested wake source, programming wake-frame filters
 * (WUF_CFG/WUF_MASK) for multicast and ARP patterns.  Register write order
 * follows the hardware's documented suspend entry sequence.
 *
 * NOTE(review): all lan78xx_read_reg/write_reg return codes are assigned
 * to `ret` but never checked, and the function always returns 0 — confirm
 * whether failures here should be propagated.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* OUI prefixes used to match multicast destination addresses */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	/* EtherType for ARP (0x0806), matched at packet offset 12/13 */
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC before reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear any stale wake configuration and status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* wipe all wake-frame filter slots before installing new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic packet only: deepest suspend mode */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7 = match first three bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3 = match first two bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12-13 = EtherType field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames can be received while suspended */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3203
/* USB suspend callback (both autosuspend and system suspend).
 *
 * On the first suspend (suspend_count 0 -> 1): refuse autosuspend while TX
 * work is queued, otherwise mark the device asleep, stop the MAC, and kill
 * all in-flight URBs.  Then program wake sources: goodframe/PHY wake for
 * autosuspend, or the user's ethtool WoL settings for system suspend.
 *
 * NOTE(review): register read/write return codes are assigned to `ret` but
 * not checked on the register-programming paths — confirm intended.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	ret = 0;
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* lan78xx_tx_bh() anchors new URBs to dev->deferred
			 * once this bit is set
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* reset wake logic and clear stale wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear pending wake status (write-1-to-clear) */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep RX on so incoming frames can wake us */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honor ethtool WoL configuration */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

out:
	return ret;
}
3297
/* USB resume callback: on the last nested resume, restart the interrupt
 * URB, resubmit TX URBs deferred while asleep, then clear wake state in
 * the chip and re-enable the transmitter.
 *
 * NOTE(review): dev->suspend_count is decremented unconditionally — a
 * resume without a matching suspend would underflow; confirm the USB core
 * guarantees pairing here.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		/* NOTE(review): submit result is ignored here — status
		 * polling silently stops if it fails; confirm acceptable.
		 */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* resubmit TX URBs parked by lan78xx_tx_bh() while asleep */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* drop the autopm ref taken at queue time */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake configuration and acknowledge all wake-source bits */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3358
/* USB reset_resume callback: the device was reset while suspended, so all
 * register state is lost.  Re-run the full chip reset and PHY init before
 * the normal resume path restores TX/RX.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3369
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3382
/* USB driver glue; supports autosuspend with remote wakeup (see probe) */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3394
/* standard module registration boilerplate */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");