drivers/net/usb/lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
40 #include "lan78xx.h"
41
42 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
43 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
44 #define DRIVER_NAME     "lan78xx"
45 #define DRIVER_VERSION  "1.0.6"
46
47 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
48 #define THROTTLE_JIFFIES                (HZ / 8)
49 #define UNLINK_TIMEOUT_MS               3
50
51 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
52
53 #define SS_USB_PKT_SIZE                 (1024)
54 #define HS_USB_PKT_SIZE                 (512)
55 #define FS_USB_PKT_SIZE                 (64)
56
57 #define MAX_RX_FIFO_SIZE                (12 * 1024)
58 #define MAX_TX_FIFO_SIZE                (12 * 1024)
59 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
60 #define DEFAULT_BULK_IN_DELAY           (0x0800)
61 #define MAX_SINGLE_PACKET_SIZE          (9000)
62 #define DEFAULT_TX_CSUM_ENABLE          (true)
63 #define DEFAULT_RX_CSUM_ENABLE          (true)
64 #define DEFAULT_TSO_CSUM_ENABLE         (true)
65 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
66 #define TX_OVERHEAD                     (8)
67 #define RXW_PADDING                     2
68
69 #define LAN78XX_USB_VENDOR_ID           (0x0424)
70 #define LAN7800_USB_PRODUCT_ID          (0x7800)
71 #define LAN7850_USB_PRODUCT_ID          (0x7850)
72 #define LAN7801_USB_PRODUCT_ID          (0x7801)
73 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
74 #define LAN78XX_OTP_MAGIC               (0x78F3)
75
76 #define MII_READ                        1
77 #define MII_WRITE                       0
78
79 #define EEPROM_INDICATOR                (0xA5)
80 #define EEPROM_MAC_OFFSET               (0x01)
81 #define MAX_EEPROM_SIZE                 512
82 #define OTP_INDICATOR_1                 (0xF3)
83 #define OTP_INDICATOR_2                 (0xF7)
84
85 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
86                                          WAKE_MCAST | WAKE_BCAST | \
87                                          WAKE_ARP | WAKE_MAGIC)
88
89 /* USB related defines */
90 #define BULK_IN_PIPE                    1
91 #define BULK_OUT_PIPE                   2
92
93 /* default autosuspend delay (msec) */
94 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
95
96 /* statistic update interval (mSec) */
97 #define STAT_UPDATE_TIMER               (1 * 1000)
98
99 /* defines interrupts from interrupt EP */
100 #define MAX_INT_EP                      (32)
101 #define INT_EP_INTEP                    (31)
102 #define INT_EP_OTP_WR_DONE              (28)
103 #define INT_EP_EEE_TX_LPI_START         (26)
104 #define INT_EP_EEE_TX_LPI_STOP          (25)
105 #define INT_EP_EEE_RX_LPI               (24)
106 #define INT_EP_MAC_RESET_TIMEOUT        (23)
107 #define INT_EP_RDFO                     (22)
108 #define INT_EP_TXE                      (21)
109 #define INT_EP_USB_STATUS               (20)
110 #define INT_EP_TX_DIS                   (19)
111 #define INT_EP_RX_DIS                   (18)
112 #define INT_EP_PHY                      (17)
113 #define INT_EP_DP                       (16)
114 #define INT_EP_MAC_ERR                  (15)
115 #define INT_EP_TDFU                     (14)
116 #define INT_EP_TDFO                     (13)
117 #define INT_EP_UTX                      (12)
118 #define INT_EP_GPIO_11                  (11)
119 #define INT_EP_GPIO_10                  (10)
120 #define INT_EP_GPIO_9                   (9)
121 #define INT_EP_GPIO_8                   (8)
122 #define INT_EP_GPIO_7                   (7)
123 #define INT_EP_GPIO_6                   (6)
124 #define INT_EP_GPIO_5                   (5)
125 #define INT_EP_GPIO_4                   (4)
126 #define INT_EP_GPIO_3                   (3)
127 #define INT_EP_GPIO_2                   (2)
128 #define INT_EP_GPIO_1                   (1)
129 #define INT_EP_GPIO_0                   (0)
130
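/* ethtool statistics strings; the order must match the member layout of
 * struct lan78xx_statstage / lan78xx_statstage64 below, since
 * lan78xx_get_stats() copies the counters out positionally.
 */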
131 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
132         "RX FCS Errors",
133         "RX Alignment Errors",
134         "Rx Fragment Errors",
135         "RX Jabber Errors",
136         "RX Undersize Frame Errors",
137         "RX Oversize Frame Errors",
138         "RX Dropped Frames",
139         "RX Unicast Byte Count",
140         "RX Broadcast Byte Count",
141         "RX Multicast Byte Count",
142         "RX Unicast Frames",
143         "RX Broadcast Frames",
144         "RX Multicast Frames",
145         "RX Pause Frames",
146         "RX 64 Byte Frames",
147         "RX 65 - 127 Byte Frames",
148         "RX 128 - 255 Byte Frames",
149         "RX 256 - 511 Bytes Frames",
150         "RX 512 - 1023 Byte Frames",
151         "RX 1024 - 1518 Byte Frames",
152         "RX Greater 1518 Byte Frames",
153         "EEE RX LPI Transitions",
154         "EEE RX LPI Time",
155         "TX FCS Errors",
156         "TX Excess Deferral Errors",
157         "TX Carrier Errors",
158         "TX Bad Byte Count",
159         "TX Single Collisions",
160         "TX Multiple Collisions",
161         "TX Excessive Collision",
162         "TX Late Collisions",
163         "TX Unicast Byte Count",
164         "TX Broadcast Byte Count",
165         "TX Multicast Byte Count",
166         "TX Unicast Frames",
167         "TX Broadcast Frames",
168         "TX Multicast Frames",
169         "TX Pause Frames",
170         "TX 64 Byte Frames",
171         "TX 65 - 127 Byte Frames",
172         "TX 128 - 255 Byte Frames",
173         "TX 256 - 511 Bytes Frames",
174         "TX 512 - 1023 Byte Frames",
175         "TX 1024 - 1518 Byte Frames",
176         "TX Greater 1518 Byte Frames",
177         "EEE TX LPI Transitions",
178         "EEE TX LPI Time",
179 };
180
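/* Raw hardware counters are 32 bit (struct lan78xx_statstage); the driver
 * accumulates them into 64-bit totals (struct lan78xx_statstage64) so that
 * counter rollovers are not lost between statistics reads.
 */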
181 struct lan78xx_statstage {
182         u32 rx_fcs_errors;
183         u32 rx_alignment_errors;
184         u32 rx_fragment_errors;
185         u32 rx_jabber_errors;
186         u32 rx_undersize_frame_errors;
187         u32 rx_oversize_frame_errors;
188         u32 rx_dropped_frames;
189         u32 rx_unicast_byte_count;
190         u32 rx_broadcast_byte_count;
191         u32 rx_multicast_byte_count;
192         u32 rx_unicast_frames;
193         u32 rx_broadcast_frames;
194         u32 rx_multicast_frames;
195         u32 rx_pause_frames;
196         u32 rx_64_byte_frames;
197         u32 rx_65_127_byte_frames;
198         u32 rx_128_255_byte_frames;
199         u32 rx_256_511_bytes_frames;
200         u32 rx_512_1023_byte_frames;
201         u32 rx_1024_1518_byte_frames;
202         u32 rx_greater_1518_byte_frames;
203         u32 eee_rx_lpi_transitions;
204         u32 eee_rx_lpi_time;
205         u32 tx_fcs_errors;
206         u32 tx_excess_deferral_errors;
207         u32 tx_carrier_errors;
208         u32 tx_bad_byte_count;
209         u32 tx_single_collisions;
210         u32 tx_multiple_collisions;
211         u32 tx_excessive_collision;
212         u32 tx_late_collisions;
213         u32 tx_unicast_byte_count;
214         u32 tx_broadcast_byte_count;
215         u32 tx_multicast_byte_count;
216         u32 tx_unicast_frames;
217         u32 tx_broadcast_frames;
218         u32 tx_multicast_frames;
219         u32 tx_pause_frames;
220         u32 tx_64_byte_frames;
221         u32 tx_65_127_byte_frames;
222         u32 tx_128_255_byte_frames;
223         u32 tx_256_511_bytes_frames;
224         u32 tx_512_1023_byte_frames;
225         u32 tx_1024_1518_byte_frames;
226         u32 tx_greater_1518_byte_frames;
227         u32 eee_tx_lpi_transitions;
228         u32 eee_tx_lpi_time;
229 };
230
231 struct lan78xx_statstage64 {
232         u64 rx_fcs_errors;
233         u64 rx_alignment_errors;
234         u64 rx_fragment_errors;
235         u64 rx_jabber_errors;
236         u64 rx_undersize_frame_errors;
237         u64 rx_oversize_frame_errors;
238         u64 rx_dropped_frames;
239         u64 rx_unicast_byte_count;
240         u64 rx_broadcast_byte_count;
241         u64 rx_multicast_byte_count;
242         u64 rx_unicast_frames;
243         u64 rx_broadcast_frames;
244         u64 rx_multicast_frames;
245         u64 rx_pause_frames;
246         u64 rx_64_byte_frames;
247         u64 rx_65_127_byte_frames;
248         u64 rx_128_255_byte_frames;
249         u64 rx_256_511_bytes_frames;
250         u64 rx_512_1023_byte_frames;
251         u64 rx_1024_1518_byte_frames;
252         u64 rx_greater_1518_byte_frames;
253         u64 eee_rx_lpi_transitions;
254         u64 eee_rx_lpi_time;
255         u64 tx_fcs_errors;
256         u64 tx_excess_deferral_errors;
257         u64 tx_carrier_errors;
258         u64 tx_bad_byte_count;
259         u64 tx_single_collisions;
260         u64 tx_multiple_collisions;
261         u64 tx_excessive_collision;
262         u64 tx_late_collisions;
263         u64 tx_unicast_byte_count;
264         u64 tx_broadcast_byte_count;
265         u64 tx_multicast_byte_count;
266         u64 tx_unicast_frames;
267         u64 tx_broadcast_frames;
268         u64 tx_multicast_frames;
269         u64 tx_pause_frames;
270         u64 tx_64_byte_frames;
271         u64 tx_65_127_byte_frames;
272         u64 tx_128_255_byte_frames;
273         u64 tx_256_511_bytes_frames;
274         u64 tx_512_1023_byte_frames;
275         u64 tx_1024_1518_byte_frames;
276         u64 tx_greater_1518_byte_frames;
277         u64 eee_tx_lpi_transitions;
278         u64 eee_tx_lpi_time;
279 };
280
281 struct lan78xx_net;
282
283 struct lan78xx_priv {
284         struct lan78xx_net *dev;
285         u32 rfe_ctl;
286         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
287         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
288         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
289         struct mutex dataport_mutex; /* for dataport access */
290         spinlock_t rfe_ctl_lock; /* for rfe register access */
291         struct work_struct set_multicast;
292         struct work_struct set_vlan;
293         u32 wol;
294 };
295
296 enum skb_state {
297         illegal = 0,
298         tx_start,
299         tx_done,
300         rx_start,
301         rx_done,
302         rx_cleanup,
303         unlink_start
304 };
305
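/* skb_data is stored in skb->cb, so it must fit in the 48-byte cb area */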
306 struct skb_data {               /* skb->cb is one of these */
307         struct urb *urb;
308         struct lan78xx_net *dev;
309         enum skb_state state;
310         size_t length;
311         int num_of_packet;
312 };
313
314 struct usb_context {
315         struct usb_ctrlrequest req;
316         struct lan78xx_net *dev;
317 };
318
319 #define EVENT_TX_HALT                   0
320 #define EVENT_RX_HALT                   1
321 #define EVENT_RX_MEMORY                 2
322 #define EVENT_STS_SPLIT                 3
323 #define EVENT_LINK_RESET                4
324 #define EVENT_RX_PAUSED                 5
325 #define EVENT_DEV_WAKING                6
326 #define EVENT_DEV_ASLEEP                7
327 #define EVENT_DEV_OPEN                  8
328 #define EVENT_STAT_UPDATE               9
329
330 struct statstage {
331         struct mutex                    access_lock;    /* for stats access */
332         struct lan78xx_statstage        saved;
333         struct lan78xx_statstage        rollover_count;
334         struct lan78xx_statstage        rollover_max;
335         struct lan78xx_statstage64      curr_stat;
336 };
337
338 struct irq_domain_data {
339         struct irq_domain       *irqdomain;
340         unsigned int            phyirq;
341         struct irq_chip         *irqchip;
342         irq_flow_handler_t      irq_handler;
343         u32                     irqenable;
344         struct mutex            irq_lock;               /* for irq bus access */
345 };
346
347 struct lan78xx_net {
348         struct net_device       *net;
349         struct usb_device       *udev;
350         struct usb_interface    *intf;
351         void                    *driver_priv;
352
353         int                     rx_qlen;
354         int                     tx_qlen;
355         struct sk_buff_head     rxq;
356         struct sk_buff_head     txq;
357         struct sk_buff_head     done;
358         struct sk_buff_head     rxq_pause;
359         struct sk_buff_head     txq_pend;
360
361         struct tasklet_struct   bh;
362         struct delayed_work     wq;
363
364         struct usb_host_endpoint *ep_blkin;
365         struct usb_host_endpoint *ep_blkout;
366         struct usb_host_endpoint *ep_intr;
367
368         int                     msg_enable;
369
370         struct urb              *urb_intr;
371         struct usb_anchor       deferred;
372
373         struct mutex            phy_mutex; /* for phy access */
374         unsigned                pipe_in, pipe_out, pipe_intr;
375
376         u32                     hard_mtu;       /* count any extra framing */
377         size_t                  rx_urb_size;    /* size for rx urbs */
378
379         unsigned long           flags;
380
381         wait_queue_head_t       *wait;
382         unsigned char           suspend_count;
383
384         unsigned                maxpacket;
385         struct timer_list       delay;
386         struct timer_list       stat_monitor;
387
388         unsigned long           data[5];
389
390         int                     link_on;
391         u8                      mdix_ctrl;
392
393         u32                     chipid;
394         u32                     chiprev;
395         struct mii_bus          *mdiobus;
396         phy_interface_t         interface;
397
398         int                     fc_autoneg;
399         u8                      fc_request_control;
400
401         int                     delta;
402         struct statstage        stats;
403
404         struct irq_domain_data  domain_data;
405 };
406
407 /* define external phy id */
408 #define PHY_LAN8835                     (0x0007C130)
409 #define PHY_KSZ9031RNX                  (0x00221620)
410
411 /* use ethtool to change the level for any given device */
412 static int msg_level = -1;
413 module_param(msg_level, int, 0);
414 MODULE_PARM_DESC(msg_level, "Override default message level");
415
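/* Register access helpers: device registers are read and written with USB
 * vendor control transfers.  The one-word buffer is kmalloc'ed because
 * usb_control_msg() requires DMA-capable memory (no stack buffers).
 */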
416 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
417 {
418         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
419         int ret;
420
421         if (!buf)
422                 return -ENOMEM;
423
424         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
425                               USB_VENDOR_REQUEST_READ_REGISTER,
426                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
427                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
428         if (likely(ret >= 0)) {
429                 le32_to_cpus(buf);
430                 *data = *buf;
431         } else {
432                 netdev_warn(dev->net,
433                             "Failed to read register index 0x%08x. ret = %d",
434                             index, ret);
435         }
436
437         kfree(buf);
438
439         return ret;
440 }
441
442 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
443 {
444         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445         int ret;
446
447         if (!buf)
448                 return -ENOMEM;
449
450         *buf = data;
451         cpu_to_le32s(buf);
452
453         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
454                               USB_VENDOR_REQUEST_WRITE_REGISTER,
455                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
456                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
457         if (unlikely(ret < 0)) {
458                 netdev_warn(dev->net,
459                             "Failed to write register index 0x%08x. ret = %d",
460                             index, ret);
461         }
462
463         kfree(buf);
464
465         return ret;
466 }
467
468 static int lan78xx_read_stats(struct lan78xx_net *dev,
469                               struct lan78xx_statstage *data)
470 {
471         int ret = 0;
472         int i;
473         struct lan78xx_statstage *stats;
474         u32 *src;
475         u32 *dst;
476
477         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
478         if (!stats)
479                 return -ENOMEM;
480
481         ret = usb_control_msg(dev->udev,
482                               usb_rcvctrlpipe(dev->udev, 0),
483                               USB_VENDOR_REQUEST_GET_STATS,
484                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
485                               0,
486                               0,
487                               (void *)stats,
488                               sizeof(*stats),
489                               USB_CTRL_GET_TIMEOUT);
490         if (likely(ret >= 0)) {
491                 src = (u32 *)stats;
492                 dst = (u32 *)data;
493                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
494                         le32_to_cpus(&src[i]);
495                         dst[i] = src[i];
496                 }
497         } else {
498                 netdev_warn(dev->net,
499                             "Failed to read stat ret = 0x%x", ret);
500         }
501
502         kfree(stats);
503
504         return ret;
505 }
506
507 #define check_counter_rollover(struct1, dev_stats, member) do { \
508         if (struct1->member < dev_stats.saved.member)           \
509                 dev_stats.rollover_count.member++;              \
510         } while (0)
511
512 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
513                                         struct lan78xx_statstage *stats)
514 {
515         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
516         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
517         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
518         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
519         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
520         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
521         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
522         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
523         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
524         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
525         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
526         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
527         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
528         check_counter_rollover(stats, dev->stats, rx_pause_frames);
529         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
530         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
531         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
532         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
533         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
534         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
535         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
536         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
537         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
538         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
539         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
540         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
541         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
542         check_counter_rollover(stats, dev->stats, tx_single_collisions);
543         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
544         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
545         check_counter_rollover(stats, dev->stats, tx_late_collisions);
546         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
547         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
548         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
549         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
550         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
551         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
552         check_counter_rollover(stats, dev->stats, tx_pause_frames);
553         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
554         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
555         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
556         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
557         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
558         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
559         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
560         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
561         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
562
563         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
564 }
565
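/* Fold the latest 32-bit hardware snapshot into the 64-bit totals:
 * curr_stat[i] = snapshot[i] + rollover_count[i] * (rollover_max[i] + 1),
 * walking the stat structures as flat u32/u64 arrays.
 */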
566 static void lan78xx_update_stats(struct lan78xx_net *dev)
567 {
568         u32 *p, *count, *max;
569         u64 *data;
570         int i;
571         struct lan78xx_statstage lan78xx_stats;
572
573         if (usb_autopm_get_interface(dev->intf) < 0)
574                 return;
575
576         p = (u32 *)&lan78xx_stats;
577         count = (u32 *)&dev->stats.rollover_count;
578         max = (u32 *)&dev->stats.rollover_max;
579         data = (u64 *)&dev->stats.curr_stat;
580
581         mutex_lock(&dev->stats.access_lock);
582
583         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
584                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
585
586         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
587                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
588
589         mutex_unlock(&dev->stats.access_lock);
590
591         usb_autopm_put_interface(dev->intf);
592 }
593
594 /* Loop until the read is completed, with timeout; called with phy_mutex held */
595 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
596 {
597         unsigned long start_time = jiffies;
598         u32 val;
599         int ret;
600
601         do {
602                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
603                 if (unlikely(ret < 0))
604                         return -EIO;
605
606                 if (!(val & MII_ACC_MII_BUSY_))
607                         return 0;
608         } while (!time_after(jiffies, start_time + HZ));
609
610         return -EIO;
611 }
612
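/* Build an MII_ACC register value: PHY address and register index in their
 * fields, the read/write direction flag, and the BUSY bit set to start the
 * MII transaction.
 */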
613 static inline u32 mii_access(int id, int index, int read)
614 {
615         u32 ret;
616
617         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
618         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
619         if (read)
620                 ret |= MII_ACC_MII_READ_;
621         else
622                 ret |= MII_ACC_MII_WRITE_;
623         ret |= MII_ACC_MII_BUSY_;
624
625         return ret;
626 }
627
628 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
629 {
630         unsigned long start_time = jiffies;
631         u32 val;
632         int ret;
633
634         do {
635                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
636                 if (unlikely(ret < 0))
637                         return -EIO;
638
639                 if (!(val & E2P_CMD_EPC_BUSY_) ||
640                     (val & E2P_CMD_EPC_TIMEOUT_))
641                         break;
642                 usleep_range(40, 100);
643         } while (!time_after(jiffies, start_time + HZ));
644
645         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
646                 netdev_warn(dev->net, "EEPROM operation timeout");
647                 return -EIO;
648         }
649
650         return 0;
651 }
652
653 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
654 {
655         unsigned long start_time = jiffies;
656         u32 val;
657         int ret;
658
659         do {
660                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661                 if (unlikely(ret < 0))
662                         return -EIO;
663
664                 if (!(val & E2P_CMD_EPC_BUSY_))
665                         return 0;
666
667                 usleep_range(40, 100);
668         } while (!time_after(jiffies, start_time + HZ));
669
670         netdev_warn(dev->net, "EEPROM is busy");
671         return -EIO;
672 }
673
674 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
675                                    u32 length, u8 *data)
676 {
677         u32 val;
678         u32 saved;
679         int i, ret;
680         int retval;
681
682         /* On some chips the EEPROM pins are muxed with the LED function;
683          * disable the LEDs while accessing the EEPROM, then restore them.
684          */
685         ret = lan78xx_read_reg(dev, HW_CFG, &val);
686         saved = val;
687         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
688                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
689                 ret = lan78xx_write_reg(dev, HW_CFG, val);
690         }
691
692         retval = lan78xx_eeprom_confirm_not_busy(dev);
693         if (retval)
694                 goto exit;
695
696         for (i = 0; i < length; i++) {
697                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
698                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
699                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
700                 if (unlikely(ret < 0)) {
701                         retval = -EIO;
702                         goto exit;
703                 }
704
705                 retval = lan78xx_wait_eeprom(dev);
706                 if (retval < 0)
707                         goto exit;
708
709                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
710                 if (unlikely(ret < 0)) {
711                         retval = -EIO;
712                         goto exit;
713                 }
714
715                 data[i] = val & 0xFF;
716                 offset++;
717         }
718
719         retval = 0;
720 exit:
721         if (dev->chipid == ID_REV_CHIP_ID_7800_)
722                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
723
724         return retval;
725 }
726
727 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
728                                u32 length, u8 *data)
729 {
730         u8 sig;
731         int ret;
732
733         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
734         if ((ret == 0) && (sig == EEPROM_INDICATOR))
735                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
736         else
737                 ret = -EINVAL;
738
739         return ret;
740 }
741
742 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
743                                     u32 length, u8 *data)
744 {
745         u32 val;
746         u32 saved;
747         int i, ret;
748         int retval;
749
750         /* On some chips the EEPROM pins are muxed with the LED function;
751          * disable the LEDs while accessing the EEPROM, then restore them.
752          */
753         ret = lan78xx_read_reg(dev, HW_CFG, &val);
754         saved = val;
755         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
756                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
757                 ret = lan78xx_write_reg(dev, HW_CFG, val);
758         }
759
760         retval = lan78xx_eeprom_confirm_not_busy(dev);
761         if (retval)
762                 goto exit;
763
764         /* Issue write/erase enable command */
765         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
766         ret = lan78xx_write_reg(dev, E2P_CMD, val);
767         if (unlikely(ret < 0)) {
768                 retval = -EIO;
769                 goto exit;
770         }
771
772         retval = lan78xx_wait_eeprom(dev);
773         if (retval < 0)
774                 goto exit;
775
776         for (i = 0; i < length; i++) {
777                 /* Fill data register */
778                 val = data[i];
779                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
780                 if (ret < 0) {
781                         retval = -EIO;
782                         goto exit;
783                 }
784
785                 /* Send "write" command */
786                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
787                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
788                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
789                 if (ret < 0) {
790                         retval = -EIO;
791                         goto exit;
792                 }
793
794                 retval = lan78xx_wait_eeprom(dev);
795                 if (retval < 0)
796                         goto exit;
797
798                 offset++;
799         }
800
801         retval = 0;
802 exit:
803         if (dev->chipid == ID_REV_CHIP_ID_7800_)
804                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
805
806         return retval;
807 }
808
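/* Read OTP one byte at a time: make sure the OTP block is powered up
 * (OTP_PWR_DN cleared), then for each byte program OTP_ADDR1/OTP_ADDR2,
 * issue a read via OTP_FUNC_CMD/OTP_CMD_GO and poll OTP_STATUS until the
 * controller is no longer busy.
 */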
809 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
810                                 u32 length, u8 *data)
811 {
812         int i;
813         int ret;
814         u32 buf;
815         unsigned long timeout;
816
817         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
818
819         if (buf & OTP_PWR_DN_PWRDN_N_) {
820                 /* clear it and wait to be cleared */
821                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
822
823                 timeout = jiffies + HZ;
824                 do {
825                         usleep_range(1, 10);
826                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
827                         if (time_after(jiffies, timeout)) {
828                                 netdev_warn(dev->net,
829                                             "timeout on OTP_PWR_DN");
830                                 return -EIO;
831                         }
832                 } while (buf & OTP_PWR_DN_PWRDN_N_);
833         }
834
835         for (i = 0; i < length; i++) {
836                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
837                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
838                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
839                                         ((offset + i) & OTP_ADDR2_10_3));
840
841                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
842                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
843
844                 timeout = jiffies + HZ;
845                 do {
846                         udelay(1);
847                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
848                         if (time_after(jiffies, timeout)) {
849                                 netdev_warn(dev->net,
850                                             "timeout on OTP_STATUS");
851                                 return -EIO;
852                         }
853                 } while (buf & OTP_STATUS_BUSY_);
854
855                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
856
857                 data[i] = (u8)(buf & 0xFF);
858         }
859
860         return 0;
861 }
862
863 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
864                                  u32 length, u8 *data)
865 {
866         int i;
867         int ret;
868         u32 buf;
869         unsigned long timeout;
870
871         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
872
873         if (buf & OTP_PWR_DN_PWRDN_N_) {
874                 /* clear it and wait to be cleared */
875                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
876
877                 timeout = jiffies + HZ;
878                 do {
879                         udelay(1);
880                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
881                         if (time_after(jiffies, timeout)) {
882                                 netdev_warn(dev->net,
883                                             "timeout on OTP_PWR_DN completion");
884                                 return -EIO;
885                         }
886                 } while (buf & OTP_PWR_DN_PWRDN_N_);
887         }
888
889         /* set to BYTE program mode */
890         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
891
892         for (i = 0; i < length; i++) {
893                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
894                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
895                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
896                                         ((offset + i) & OTP_ADDR2_10_3));
897                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
898                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
899                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
900
901                 timeout = jiffies + HZ;
902                 do {
903                         udelay(1);
904                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
905                         if (time_after(jiffies, timeout)) {
906                                 netdev_warn(dev->net,
907                                             "Timeout on OTP_STATUS completion");
908                                 return -EIO;
909                         }
910                 } while (buf & OTP_STATUS_BUSY_);
911         }
912
913         return 0;
914 }
915
916 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
917                             u32 length, u8 *data)
918 {
919         u8 sig;
920         int ret;
921
922         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
923
924         if (ret == 0) {
925                 if (sig == OTP_INDICATOR_2)
926                         offset += 0x100;
927                 else if (sig != OTP_INDICATOR_1)
928                         /* unrecognized OTP signature byte */
929                         return -EINVAL;
930
931                 ret = lan78xx_read_raw_otp(dev, offset, length, data);
932         }
933
934         return ret;
935 }
936
937 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
938 {
939         int i, ret;
940
941         for (i = 0; i < 100; i++) {
942                 u32 dp_sel;
943
944                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
945                 if (unlikely(ret < 0))
946                         return -EIO;
947
948                 if (dp_sel & DP_SEL_DPRDY_)
949                         return 0;
950
951                 usleep_range(40, 100);
952         }
953
954         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
955
956         return -EIO;
957 }
958
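/* Write a block of words into one of the internal data-port RAMs (e.g. the
 * VLAN/DA hash table): select the RAM via DP_SEL, then for each word set
 * DP_ADDR and DP_DATA, trigger DP_CMD_WRITE_ and wait for DP_SEL_DPRDY_.
 * Serialized by dataport_mutex.
 */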
959 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
960                                   u32 addr, u32 length, u32 *buf)
961 {
962         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
963         u32 dp_sel;
964         int i, ret;
965
966         if (usb_autopm_get_interface(dev->intf) < 0)
967                 return 0;
968
969         mutex_lock(&pdata->dataport_mutex);
970
971         ret = lan78xx_dataport_wait_not_busy(dev);
972         if (ret < 0)
973                 goto done;
974
975         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
976
977         dp_sel &= ~DP_SEL_RSEL_MASK_;
978         dp_sel |= ram_select;
979         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
980
981         for (i = 0; i < length; i++) {
982                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
983
984                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
985
986                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
987
988                 ret = lan78xx_dataport_wait_not_busy(dev);
989                 if (ret < 0)
990                         goto done;
991         }
992
993 done:
994         mutex_unlock(&pdata->dataport_mutex);
995         usb_autopm_put_interface(dev->intf);
996
997         return ret;
998 }
999
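/* Program one perfect-filter (MAF) slot in the shadow table: the low word
 * holds MAC bytes 0-3 (addr[0] in bits 7:0), the high word holds bytes 4-5
 * plus the VALID and destination-type flags.  Slot 0 is reserved for the
 * device's own address.
 */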
1000 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1001                                     int index, u8 addr[ETH_ALEN])
1002 {
1003         u32     temp;
1004
1005         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1006                 temp = addr[3];
1007                 temp = addr[2] | (temp << 8);
1008                 temp = addr[1] | (temp << 8);
1009                 temp = addr[0] | (temp << 8);
1010                 pdata->pfilter_table[index][1] = temp;
1011                 temp = addr[5];
1012                 temp = addr[4] | (temp << 8);
1013                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1014                 pdata->pfilter_table[index][0] = temp;
1015         }
1016 }
1017
1018 /* returns hash bit number for given MAC address */
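/* (the upper 9 bits of the Ethernet CRC-32 select one of 512 hash bits) */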
1019 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1020 {
1021         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1022 }
1023
1024 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1025 {
1026         struct lan78xx_priv *pdata =
1027                         container_of(param, struct lan78xx_priv, set_multicast);
1028         struct lan78xx_net *dev = pdata->dev;
1029         int i;
1030         int ret;
1031
1032         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1033                   pdata->rfe_ctl);
1034
1035         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1036                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1037
1038         for (i = 1; i < NUM_OF_MAF; i++) {
1039                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1040                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1041                                         pdata->pfilter_table[i][1]);
1042                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1043                                         pdata->pfilter_table[i][0]);
1044         }
1045
1046         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1047 }
1048
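/* Called with the netdev address-list lock held (atomic context), so only
 * the shadow filter tables are updated here under rfe_ctl_lock; the actual
 * USB register/dataport writes are deferred to the set_multicast work item.
 */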
1049 static void lan78xx_set_multicast(struct net_device *netdev)
1050 {
1051         struct lan78xx_net *dev = netdev_priv(netdev);
1052         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1053         unsigned long flags;
1054         int i;
1055
1056         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1057
1058         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1059                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1060
1061         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1062                 pdata->mchash_table[i] = 0;
1063         /* pfilter_table[0] has own HW address */
1064         for (i = 1; i < NUM_OF_MAF; i++) {
1065                 pdata->pfilter_table[i][0] = 0;
1066                 pdata->pfilter_table[i][1] = 0;
1067         }
1068
1069         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1070
1071         if (dev->net->flags & IFF_PROMISC) {
1072                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1073                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1074         } else {
1075                 if (dev->net->flags & IFF_ALLMULTI) {
1076                         netif_dbg(dev, drv, dev->net,
1077                                   "receive all multicast enabled");
1078                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1079                 }
1080         }
1081
1082         if (netdev_mc_count(dev->net)) {
1083                 struct netdev_hw_addr *ha;
1084                 int i;
1085
1086                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1087
1088                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1089
1090                 i = 1;
1091                 netdev_for_each_mc_addr(ha, netdev) {
1092                         /* set first 32 into Perfect Filter */
1093                         if (i < 33) {
1094                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1095                         } else {
1096                                 u32 bitnum = lan78xx_hash(ha->addr);
1097
1098                                 pdata->mchash_table[bitnum / 32] |=
1099                                                         (1 << (bitnum % 32));
1100                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1101                         }
1102                         i++;
1103                 }
1104         }
1105
1106         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1107
1108         /* defer register writes to a sleepable context */
1109         schedule_work(&pdata->set_multicast);
1110 }
1111
1112 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1113                                       u16 lcladv, u16 rmtadv)
1114 {
1115         u32 flow = 0, fct_flow = 0;
1116         int ret;
1117         u8 cap;
1118
1119         if (dev->fc_autoneg)
1120                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1121         else
1122                 cap = dev->fc_request_control;
1123
1124         if (cap & FLOW_CTRL_TX)
1125                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1126
1127         if (cap & FLOW_CTRL_RX)
1128                 flow |= FLOW_CR_RX_FCEN_;
1129
1130         if (dev->udev->speed == USB_SPEED_SUPER)
1131                 fct_flow = 0x817;
1132         else if (dev->udev->speed == USB_SPEED_HIGH)
1133                 fct_flow = 0x211;
1134
1135         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1136                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1137                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1138
1139         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1140
1141         /* threshold value should be set before enabling flow */
1142         ret = lan78xx_write_reg(dev, FLOW, flow);
1143
1144         return 0;
1145 }
1146
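/* Handle a PHY link change (scheduled via EVENT_LINK_RESET): on link loss
 * reset the MAC and stop the statistics timer; on link up pick U1/U2 LPM
 * settings for SuperSpeed hosts, refresh flow control from the autoneg
 * results and restart the statistics monitor.
 */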
1147 static int lan78xx_link_reset(struct lan78xx_net *dev)
1148 {
1149         struct phy_device *phydev = dev->net->phydev;
1150         struct ethtool_link_ksettings ecmd;
1151         int ladv, radv, ret;
1152         u32 buf;
1153
1154         /* clear LAN78xx interrupt status */
1155         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1156         if (unlikely(ret < 0))
1157                 return -EIO;
1158
1159         phy_read_status(phydev);
1160
1161         if (!phydev->link && dev->link_on) {
1162                 dev->link_on = false;
1163
1164                 /* reset MAC */
1165                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1166                 if (unlikely(ret < 0))
1167                         return -EIO;
1168                 buf |= MAC_CR_RST_;
1169                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1170                 if (unlikely(ret < 0))
1171                         return -EIO;
1172
1173                 del_timer(&dev->stat_monitor);
1174         } else if (phydev->link && !dev->link_on) {
1175                 dev->link_on = true;
1176
1177                 phy_ethtool_ksettings_get(phydev, &ecmd);
1178
1179                 if (dev->udev->speed == USB_SPEED_SUPER) {
1180                         if (ecmd.base.speed == 1000) {
1181                                 /* disable U2 */
1182                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1183                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1184                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1185                                 /* enable U1 */
1186                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1187                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1188                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1189                         } else {
1190                                 /* enable U1 & U2 */
1191                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1192                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1193                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1194                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1195                         }
1196                 }
1197
1198                 ladv = phy_read(phydev, MII_ADVERTISE);
1199                 if (ladv < 0)
1200                         return ladv;
1201
1202                 radv = phy_read(phydev, MII_LPA);
1203                 if (radv < 0)
1204                         return radv;
1205
1206                 netif_dbg(dev, link, dev->net,
1207                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1208                           ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1209
1210                 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1211                                                  radv);
1212
1213                 if (!timer_pending(&dev->stat_monitor)) {
1214                         dev->delta = 1;
1215                         mod_timer(&dev->stat_monitor,
1216                                   jiffies + STAT_UPDATE_TIMER);
1217                 }
1218         }
1219
1220         return ret;
1221 }
1222
1223 /* some work can't be done in tasklets, so we use keventd
1224  *
1225  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1226  * but tasklet_schedule() doesn't.      hope the failure is rare.
1227  */
1228 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1229 {
1230         set_bit(work, &dev->flags);
1231         if (!schedule_delayed_work(&dev->wq, 0))
1232                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1233 }
1234
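/* Interrupt endpoint completion: the URB carries a 4-byte little-endian
 * status bitmap (INT_EP_* bits).  A PHY interrupt schedules EVENT_LINK_RESET
 * and is forwarded to the PHY's mapped interrupt, if one is registered.
 */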
1235 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1236 {
1237         u32 intdata;
1238
1239         if (urb->actual_length != 4) {
1240                 netdev_warn(dev->net,
1241                             "unexpected urb length %d", urb->actual_length);
1242                 return;
1243         }
1244
1245         memcpy(&intdata, urb->transfer_buffer, 4);
1246         le32_to_cpus(&intdata);
1247
1248         if (intdata & INT_ENP_PHY_INT) {
1249                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1250                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1251
1252                 if (dev->domain_data.phyirq > 0)
1253                         generic_handle_irq(dev->domain_data.phyirq);
1254         } else
1255                 netdev_warn(dev->net,
1256                             "unexpected interrupt: 0x%08x\n", intdata);
1257 }
1258
1259 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1260 {
1261         return MAX_EEPROM_SIZE;
1262 }
1263
1264 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1265                                       struct ethtool_eeprom *ee, u8 *data)
1266 {
1267         struct lan78xx_net *dev = netdev_priv(netdev);
1268
1269         ee->magic = LAN78XX_EEPROM_MAGIC;
1270
1271         return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1272 }
1273
1274 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1275                                       struct ethtool_eeprom *ee, u8 *data)
1276 {
1277         struct lan78xx_net *dev = netdev_priv(netdev);
1278
1279         /* Allow entire eeprom update only */
1280         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1281             (ee->offset == 0) &&
1282             (ee->len == 512) &&
1283             (data[0] == EEPROM_INDICATOR))
1284                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1285         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1286                  (ee->offset == 0) &&
1287                  (ee->len == 512) &&
1288                  (data[0] == OTP_INDICATOR_1))
1289                 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1290
1291         return -EINVAL;
1292 }
1293
1294 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1295                                 u8 *data)
1296 {
1297         if (stringset == ETH_SS_STATS)
1298                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1299 }
1300
1301 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1302 {
1303         if (sset == ETH_SS_STATS)
1304                 return ARRAY_SIZE(lan78xx_gstrings);
1305         else
1306                 return -EOPNOTSUPP;
1307 }
1308
1309 static void lan78xx_get_stats(struct net_device *netdev,
1310                               struct ethtool_stats *stats, u64 *data)
1311 {
1312         struct lan78xx_net *dev = netdev_priv(netdev);
1313
1314         lan78xx_update_stats(dev);
1315
1316         mutex_lock(&dev->stats.access_lock);
1317         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1318         mutex_unlock(&dev->stats.access_lock);
1319 }
1320
1321 static void lan78xx_get_wol(struct net_device *netdev,
1322                             struct ethtool_wolinfo *wol)
1323 {
1324         struct lan78xx_net *dev = netdev_priv(netdev);
1325         int ret;
1326         u32 buf;
1327         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1328
1329         if (usb_autopm_get_interface(dev->intf) < 0)
1330                 return;
1331
1332         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1333         if (unlikely(ret < 0)) {
1334                 wol->supported = 0;
1335                 wol->wolopts = 0;
1336         } else {
1337                 if (buf & USB_CFG_RMT_WKP_) {
1338                         wol->supported = WAKE_ALL;
1339                         wol->wolopts = pdata->wol;
1340                 } else {
1341                         wol->supported = 0;
1342                         wol->wolopts = 0;
1343                 }
1344         }
1345
1346         usb_autopm_put_interface(dev->intf);
1347 }
1348
1349 static int lan78xx_set_wol(struct net_device *netdev,
1350                            struct ethtool_wolinfo *wol)
1351 {
1352         struct lan78xx_net *dev = netdev_priv(netdev);
1353         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1354         int ret;
1355
1356         ret = usb_autopm_get_interface(dev->intf);
1357         if (ret < 0)
1358                 return ret;
1359
1360         pdata->wol = 0;
1361         if (wol->wolopts & WAKE_UCAST)
1362                 pdata->wol |= WAKE_UCAST;
1363         if (wol->wolopts & WAKE_MCAST)
1364                 pdata->wol |= WAKE_MCAST;
1365         if (wol->wolopts & WAKE_BCAST)
1366                 pdata->wol |= WAKE_BCAST;
1367         if (wol->wolopts & WAKE_MAGIC)
1368                 pdata->wol |= WAKE_MAGIC;
1369         if (wol->wolopts & WAKE_PHY)
1370                 pdata->wol |= WAKE_PHY;
1371         if (wol->wolopts & WAKE_ARP)
1372                 pdata->wol |= WAKE_ARP;
1373
1374         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1375
1376         phy_ethtool_set_wol(netdev->phydev, wol);
1377
1378         usb_autopm_put_interface(dev->intf);
1379
1380         return ret;
1381 }
1382
1383 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1384 {
1385         struct lan78xx_net *dev = netdev_priv(net);
1386         struct phy_device *phydev = net->phydev;
1387         int ret;
1388         u32 buf;
1389
1390         ret = usb_autopm_get_interface(dev->intf);
1391         if (ret < 0)
1392                 return ret;
1393
1394         ret = phy_ethtool_get_eee(phydev, edata);
1395         if (ret < 0)
1396                 goto exit;
1397
1398         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1399         if (buf & MAC_CR_EEE_EN_) {
1400                 edata->eee_enabled = true;
1401                 edata->eee_active = !!(edata->advertised &
1402                                        edata->lp_advertised);
1403                 edata->tx_lpi_enabled = true;
1404                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same usec unit */
1405                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1406                 edata->tx_lpi_timer = buf;
1407         } else {
1408                 edata->eee_enabled = false;
1409                 edata->eee_active = false;
1410                 edata->tx_lpi_enabled = false;
1411                 edata->tx_lpi_timer = 0;
1412         }
1413
1414         ret = 0;
1415 exit:
1416         usb_autopm_put_interface(dev->intf);
1417
1418         return ret;
1419 }
1420
1421 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1422 {
1423         struct lan78xx_net *dev = netdev_priv(net);
1424         int ret;
1425         u32 buf;
1426
1427         ret = usb_autopm_get_interface(dev->intf);
1428         if (ret < 0)
1429                 return ret;
1430
1431         if (edata->eee_enabled) {
1432                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1433                 buf |= MAC_CR_EEE_EN_;
1434                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1435
1436                 phy_ethtool_set_eee(net->phydev, edata);
1437
1438                 buf = (u32)edata->tx_lpi_timer;
1439                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1440         } else {
1441                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1442                 buf &= ~MAC_CR_EEE_EN_;
1443                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1444         }
1445
1446         usb_autopm_put_interface(dev->intf);
1447
1448         return 0;
1449 }
1450
1451 static u32 lan78xx_get_link(struct net_device *net)
1452 {
1453         phy_read_status(net->phydev);
1454
1455         return net->phydev->link;
1456 }
1457
1458 static void lan78xx_get_drvinfo(struct net_device *net,
1459                                 struct ethtool_drvinfo *info)
1460 {
1461         struct lan78xx_net *dev = netdev_priv(net);
1462
1463         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1464         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1465         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1466 }
1467
1468 static u32 lan78xx_get_msglevel(struct net_device *net)
1469 {
1470         struct lan78xx_net *dev = netdev_priv(net);
1471
1472         return dev->msg_enable;
1473 }
1474
1475 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1476 {
1477         struct lan78xx_net *dev = netdev_priv(net);
1478
1479         dev->msg_enable = level;
1480 }
1481
1482 static int lan78xx_get_link_ksettings(struct net_device *net,
1483                                       struct ethtool_link_ksettings *cmd)
1484 {
1485         struct lan78xx_net *dev = netdev_priv(net);
1486         struct phy_device *phydev = net->phydev;
1487         int ret;
1488
1489         ret = usb_autopm_get_interface(dev->intf);
1490         if (ret < 0)
1491                 return ret;
1492
1493         phy_ethtool_ksettings_get(phydev, cmd);
1494
1495         usb_autopm_put_interface(dev->intf);
1496
1497         return ret;
1498 }
1499
1500 static int lan78xx_set_link_ksettings(struct net_device *net,
1501                                       const struct ethtool_link_ksettings *cmd)
1502 {
1503         struct lan78xx_net *dev = netdev_priv(net);
1504         struct phy_device *phydev = net->phydev;
1505         int ret = 0;
1506         int temp;
1507
1508         ret = usb_autopm_get_interface(dev->intf);
1509         if (ret < 0)
1510                 return ret;
1511
1512         /* change speed & duplex */
1513         ret = phy_ethtool_ksettings_set(phydev, cmd);
1514
1515         if (!cmd->base.autoneg) {
1516                 /* force link down */
1517                 temp = phy_read(phydev, MII_BMCR);
1518                 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1519                 mdelay(1);
1520                 phy_write(phydev, MII_BMCR, temp);
1521         }
1522
1523         usb_autopm_put_interface(dev->intf);
1524
1525         return ret;
1526 }
1527
1528 static void lan78xx_get_pause(struct net_device *net,
1529                               struct ethtool_pauseparam *pause)
1530 {
1531         struct lan78xx_net *dev = netdev_priv(net);
1532         struct phy_device *phydev = net->phydev;
1533         struct ethtool_link_ksettings ecmd;
1534
1535         phy_ethtool_ksettings_get(phydev, &ecmd);
1536
1537         pause->autoneg = dev->fc_autoneg;
1538
1539         if (dev->fc_request_control & FLOW_CTRL_TX)
1540                 pause->tx_pause = 1;
1541
1542         if (dev->fc_request_control & FLOW_CTRL_RX)
1543                 pause->rx_pause = 1;
1544 }
1545
1546 static int lan78xx_set_pause(struct net_device *net,
1547                              struct ethtool_pauseparam *pause)
1548 {
1549         struct lan78xx_net *dev = netdev_priv(net);
1550         struct phy_device *phydev = net->phydev;
1551         struct ethtool_link_ksettings ecmd;
1552         int ret;
1553
1554         phy_ethtool_ksettings_get(phydev, &ecmd);
1555
1556         if (pause->autoneg && !ecmd.base.autoneg) {
1557                 ret = -EINVAL;
1558                 goto exit;
1559         }
1560
1561         dev->fc_request_control = 0;
1562         if (pause->rx_pause)
1563                 dev->fc_request_control |= FLOW_CTRL_RX;
1564
1565         if (pause->tx_pause)
1566                 dev->fc_request_control |= FLOW_CTRL_TX;
1567
1568         if (ecmd.base.autoneg) {
1569                 u32 mii_adv;
1570                 u32 advertising;
1571
1572                 ethtool_convert_link_mode_to_legacy_u32(
1573                         &advertising, ecmd.link_modes.advertising);
1574
1575                 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1576                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1577                 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1578
1579                 ethtool_convert_legacy_u32_to_link_mode(
1580                         ecmd.link_modes.advertising, advertising);
1581
1582                 phy_ethtool_ksettings_set(phydev, &ecmd);
1583         }
1584
1585         dev->fc_autoneg = pause->autoneg;
1586
1587         ret = 0;
1588 exit:
1589         return ret;
1590 }
1591
1592 static const struct ethtool_ops lan78xx_ethtool_ops = {
1593         .get_link       = lan78xx_get_link,
1594         .nway_reset     = phy_ethtool_nway_reset,
1595         .get_drvinfo    = lan78xx_get_drvinfo,
1596         .get_msglevel   = lan78xx_get_msglevel,
1597         .set_msglevel   = lan78xx_set_msglevel,
1598         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1599         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1600         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1601         .get_ethtool_stats = lan78xx_get_stats,
1602         .get_sset_count = lan78xx_get_sset_count,
1603         .get_strings    = lan78xx_get_strings,
1604         .get_wol        = lan78xx_get_wol,
1605         .set_wol        = lan78xx_set_wol,
1606         .get_eee        = lan78xx_get_eee,
1607         .set_eee        = lan78xx_set_eee,
1608         .get_pauseparam = lan78xx_get_pause,
1609         .set_pauseparam = lan78xx_set_pause,
1610         .get_link_ksettings = lan78xx_get_link_ksettings,
1611         .set_link_ksettings = lan78xx_set_link_ksettings,
1612 };
1613
1614 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1615 {
1616         if (!netif_running(netdev))
1617                 return -EINVAL;
1618
1619         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1620 }
1621
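     /* Determine the MAC address: keep the address already in RX_ADDRL/RX_ADDRH if
      * it is valid, otherwise try EEPROM/OTP, and finally fall back to a random
      * address.
      */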
1622 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1623 {
1624         u32 addr_lo, addr_hi;
1625         int ret;
1626         u8 addr[6];
1627
1628         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1629         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1630
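             /* RX_ADDRL holds MAC bytes 0-3 (least significant first), RX_ADDRH bytes 4-5 */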
1631         addr[0] = addr_lo & 0xFF;
1632         addr[1] = (addr_lo >> 8) & 0xFF;
1633         addr[2] = (addr_lo >> 16) & 0xFF;
1634         addr[3] = (addr_lo >> 24) & 0xFF;
1635         addr[4] = addr_hi & 0xFF;
1636         addr[5] = (addr_hi >> 8) & 0xFF;
1637
1638         if (!is_valid_ether_addr(addr)) {
1639                 /* read the MAC address from EEPROM or OTP */
1640                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1641                                          addr) == 0) ||
1642                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1643                                       addr) == 0)) {
1644                         if (is_valid_ether_addr(addr)) {
1645                                 /* eeprom values are valid so use them */
1646                                 netif_dbg(dev, ifup, dev->net,
1647                                           "MAC address read from EEPROM");
1648                         } else {
1649                                 /* generate random MAC */
1650                                 random_ether_addr(addr);
1651                                 netif_dbg(dev, ifup, dev->net,
1652                                           "MAC address set to random addr");
1653                         }
1654
1655                         addr_lo = addr[0] | (addr[1] << 8) |
1656                                   (addr[2] << 16) | (addr[3] << 24);
1657                         addr_hi = addr[4] | (addr[5] << 8);
1658
1659                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1660                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1661                 } else {
1662                         /* generate random MAC */
1663                         random_ether_addr(addr);
1664                         netif_dbg(dev, ifup, dev->net,
1665                                   "MAC address set to random addr");
1666                 }
1667         }
1668
1669         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1670         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1671
1672         ether_addr_copy(dev->net->dev_addr, addr);
1673 }
1674
1675 /* MDIO read and write wrappers for phylib */
1676 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1677 {
1678         struct lan78xx_net *dev = bus->priv;
1679         u32 val, addr;
1680         int ret;
1681
1682         ret = usb_autopm_get_interface(dev->intf);
1683         if (ret < 0)
1684                 return ret;
1685
1686         mutex_lock(&dev->phy_mutex);
1687
1688         /* confirm MII not busy */
1689         ret = lan78xx_phy_wait_not_busy(dev);
1690         if (ret < 0)
1691                 goto done;
1692
1693         /* set the address, index & direction (read from PHY) */
1694         addr = mii_access(phy_id, idx, MII_READ);
1695         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1696
1697         ret = lan78xx_phy_wait_not_busy(dev);
1698         if (ret < 0)
1699                 goto done;
1700
1701         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1702
1703         ret = (int)(val & 0xFFFF);
1704
1705 done:
1706         mutex_unlock(&dev->phy_mutex);
1707         usb_autopm_put_interface(dev->intf);
1708
1709         return ret;
1710 }
1711
1712 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1713                                  u16 regval)
1714 {
1715         struct lan78xx_net *dev = bus->priv;
1716         u32 val, addr;
1717         int ret;
1718
1719         ret = usb_autopm_get_interface(dev->intf);
1720         if (ret < 0)
1721                 return ret;
1722
1723         mutex_lock(&dev->phy_mutex);
1724
1725         /* confirm MII not busy */
1726         ret = lan78xx_phy_wait_not_busy(dev);
1727         if (ret < 0)
1728                 goto done;
1729
1730         val = (u32)regval;
1731         ret = lan78xx_write_reg(dev, MII_DATA, val);
1732
1733         /* set the address, index & direction (write to PHY) */
1734         addr = mii_access(phy_id, idx, MII_WRITE);
1735         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1736
1737         ret = lan78xx_phy_wait_not_busy(dev);
1738         if (ret < 0)
1739                 goto done;
1740
1741 done:
1742         mutex_unlock(&dev->phy_mutex);
1743         usb_autopm_put_interface(dev->intf);
1744         return 0;
1745 }
1746
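     /* Allocate and register an MDIO bus backed by the USB register interface.
      * LAN7800/LAN7850 expose only the internal PHY at address 1; LAN7801 scans
      * the external PHY addresses selected by PHYAD[2..0].
      */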
1747 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1748 {
1749         int ret;
1750
1751         dev->mdiobus = mdiobus_alloc();
1752         if (!dev->mdiobus) {
1753                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1754                 return -ENOMEM;
1755         }
1756
1757         dev->mdiobus->priv = (void *)dev;
1758         dev->mdiobus->read = lan78xx_mdiobus_read;
1759         dev->mdiobus->write = lan78xx_mdiobus_write;
1760         dev->mdiobus->name = "lan78xx-mdiobus";
1761
1762         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1763                  dev->udev->bus->busnum, dev->udev->devnum);
1764
1765         switch (dev->chipid) {
1766         case ID_REV_CHIP_ID_7800_:
1767         case ID_REV_CHIP_ID_7850_:
1768                 /* set to internal PHY id */
1769                 dev->mdiobus->phy_mask = ~(1 << 1);
1770                 break;
1771         case ID_REV_CHIP_ID_7801_:
1772                 /* scan through PHYAD[2..0] */
1773                 dev->mdiobus->phy_mask = ~(0xFF);
1774                 break;
1775         }
1776
1777         ret = mdiobus_register(dev->mdiobus);
1778         if (ret) {
1779                 netdev_err(dev->net, "can't register MDIO bus\n");
1780                 goto exit1;
1781         }
1782
1783         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1784         return 0;
1785 exit1:
1786         mdiobus_free(dev->mdiobus);
1787         return ret;
1788 }
1789
1790 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1791 {
1792         mdiobus_unregister(dev->mdiobus);
1793         mdiobus_free(dev->mdiobus);
1794 }
1795
1796 static void lan78xx_link_status_change(struct net_device *net)
1797 {
1798         struct phy_device *phydev = net->phydev;
1799         int ret, temp;
1800
1801         /* In forced 100 Full/Half duplex mode, the chip may fail to set the
1802          * speed correctly when the cable is swapped between a long (~50 m+)
1803          * and a short one. As a workaround, set the speed to 10 first and
1804          * then back to 100 while in forced 100 F/H mode.
1805          */
1806         if (!phydev->autoneg && (phydev->speed == 100)) {
1807                 /* disable phy interrupt */
1808                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1809                 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1810                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1811
1812                 temp = phy_read(phydev, MII_BMCR);
1813                 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1814                 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1815                 temp |= BMCR_SPEED100;
1816                 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1817
1818                 /* clear any interrupt left pending by the workaround */
1819                 temp = phy_read(phydev, LAN88XX_INT_STS);
1820
1821                 /* re-enable the PHY interrupt */
1822                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1823                 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1824                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1825         }
1826 }
1827
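     /* A small IRQ domain exposes the chip's interrupt-endpoint status bits as
      * Linux interrupts so the PHY interrupt can be consumed by phylib.
      */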
1828 static int irq_map(struct irq_domain *d, unsigned int irq,
1829                    irq_hw_number_t hwirq)
1830 {
1831         struct irq_domain_data *data = d->host_data;
1832
1833         irq_set_chip_data(irq, data);
1834         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1835         irq_set_noprobe(irq);
1836
1837         return 0;
1838 }
1839
1840 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1841 {
1842         irq_set_chip_and_handler(irq, NULL, NULL);
1843         irq_set_chip_data(irq, NULL);
1844 }
1845
1846 static const struct irq_domain_ops chip_domain_ops = {
1847         .map    = irq_map,
1848         .unmap  = irq_unmap,
1849 };
1850
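     /* mask/unmask only update the cached enable bits; INT_EP_CTL itself is
      * written from lan78xx_irq_bus_sync_unlock(), which runs in a sleepable
      * context.
      */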
1851 static void lan78xx_irq_mask(struct irq_data *irqd)
1852 {
1853         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1854
1855         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1856 }
1857
1858 static void lan78xx_irq_unmask(struct irq_data *irqd)
1859 {
1860         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1861
1862         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1863 }
1864
1865 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1866 {
1867         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1868
1869         mutex_lock(&data->irq_lock);
1870 }
1871
1872 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1873 {
1874         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1875         struct lan78xx_net *dev =
1876                         container_of(data, struct lan78xx_net, domain_data);
1877         u32 buf;
1878         int ret;
1879
1880         /* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
1881          * are the only two callbacks executed in a non-atomic context.
1882          */
1883         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1884         if (buf != data->irqenable)
1885                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1886
1887         mutex_unlock(&data->irq_lock);
1888 }
1889
1890 static struct irq_chip lan78xx_irqchip = {
1891         .name                   = "lan78xx-irqs",
1892         .irq_mask               = lan78xx_irq_mask,
1893         .irq_unmask             = lan78xx_irq_unmask,
1894         .irq_bus_lock           = lan78xx_irq_bus_lock,
1895         .irq_bus_sync_unlock    = lan78xx_irq_bus_sync_unlock,
1896 };
1897
1898 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1899 {
1900         struct device_node *of_node;
1901         struct irq_domain *irqdomain;
1902         unsigned int irqmap = 0;
1903         u32 buf;
1904         int ret = 0;
1905
1906         of_node = dev->udev->dev.parent->of_node;
1907
1908         mutex_init(&dev->domain_data.irq_lock);
1909
1910         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1911         dev->domain_data.irqenable = buf;
1912
1913         dev->domain_data.irqchip = &lan78xx_irqchip;
1914         dev->domain_data.irq_handler = handle_simple_irq;
1915
1916         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1917                                           &chip_domain_ops, &dev->domain_data);
1918         if (irqdomain) {
1919                 /* create mapping for PHY interrupt */
1920                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1921                 if (!irqmap) {
1922                         irq_domain_remove(irqdomain);
1923
1924                         irqdomain = NULL;
1925                         ret = -EINVAL;
1926                 }
1927         } else {
1928                 ret = -EINVAL;
1929         }
1930
1931         dev->domain_data.irqdomain = irqdomain;
1932         dev->domain_data.phyirq = irqmap;
1933
1934         return ret;
1935 }
1936
1937 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1938 {
1939         if (dev->domain_data.phyirq > 0) {
1940                 irq_dispose_mapping(dev->domain_data.phyirq);
1941
1942                 if (dev->domain_data.irqdomain)
1943                         irq_domain_remove(dev->domain_data.irqdomain);
1944         }
1945         dev->domain_data.phyirq = 0;
1946         dev->domain_data.irqdomain = NULL;
1947 }
1948
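     /* PHY fixups for external PHYs behind the LAN7801: set up RGMII clock delay /
      * pad skew and record the resulting RGMII interface mode in dev->interface.
      */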
1949 static int lan8835_fixup(struct phy_device *phydev)
1950 {
1951         int buf;
1952         int ret;
1953         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1954
1955         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1956         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1957         buf &= ~0x1800;
1958         buf |= 0x0800;
1959         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1960
1961         /* RGMII MAC TXC Delay Enable */
1962         ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1963                                 MAC_RGMII_ID_TXC_DELAY_EN_);
1964
1965         /* RGMII TX DLL Tune Adjust */
1966         ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1967
1968         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1969
1970         return 1;
1971 }
1972
1973 static int ksz9031rnx_fixup(struct phy_device *phydev)
1974 {
1975         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1976
1977         /* Micrel KSZ9031RNX PHY configuration */
1978         /* RGMII Control Signal Pad Skew */
1979         phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1980         /* RGMII RX Data Pad Skew */
1981         phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1982         /* RGMII RX Clock Pad Skew */
1983         phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1984
1985         dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
1986
1987         return 1;
1988 }
1989
1990 static int lan78xx_phy_init(struct lan78xx_net *dev)
1991 {
1992         int ret;
1993         u32 mii_adv;
1994         struct phy_device *phydev = dev->net->phydev;
1995
1996         phydev = phy_find_first(dev->mdiobus);
1997         if (!phydev) {
1998                 netdev_err(dev->net, "no PHY found\n");
1999                 return -EIO;
2000         }
2001
2002         if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2003             (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2004                 phydev->is_internal = true;
2005                 dev->interface = PHY_INTERFACE_MODE_GMII;
2006
2007         } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2008                 if (!phydev->drv) {
2009                         netdev_err(dev->net, "no PHY driver found\n");
2010                         return -EIO;
2011                 }
2012
2013                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2014
2015                 /* external PHY fixup for KSZ9031RNX */
2016                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2017                                                  ksz9031rnx_fixup);
2018                 if (ret < 0) {
2019                         netdev_err(dev->net, "fail to register fixup\n");
2020                         return ret;
2021                 }
2022                 /* external PHY fixup for LAN8835 */
2023                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2024                                                  lan8835_fixup);
2025                 if (ret < 0) {
2026                         netdev_err(dev->net, "fail to register fixup\n");
2027                         return ret;
2028                 }
2029                 /* add more external PHY fixup here if needed */
2030
2031                 phydev->is_internal = false;
2032         } else {
2033                 netdev_err(dev->net, "unknown ID found\n");
2034                 ret = -EIO;
2035                 goto error;
2036         }
2037
2038         /* if phyirq is not set, use polling mode in phylib */
2039         if (dev->domain_data.phyirq > 0)
2040                 phydev->irq = dev->domain_data.phyirq;
2041         else
2042                 phydev->irq = 0;
2043         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2044
2045         /* set to AUTOMDIX */
2046         phydev->mdix = ETH_TP_MDI_AUTO;
2047
2048         ret = phy_connect_direct(dev->net, phydev,
2049                                  lan78xx_link_status_change,
2050                                  dev->interface);
2051         if (ret) {
2052                 netdev_err(dev->net, "can't attach PHY to %s\n",
2053                            dev->mdiobus->id);
2054                 return -EIO;
2055         }
2056
2057         /* MAC doesn't support 1000T Half */
2058         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2059
2060         /* support both flow controls */
2061         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2062         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2063         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2064         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2065
2066         genphy_config_aneg(phydev);
2067
2068         dev->fc_autoneg = phydev->autoneg;
2069
2070         phy_start(phydev);
2071
2072         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2073
2074         return 0;
2075
2076 error:
2077         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2078         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2079
2080         return ret;
2081 }
2082
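     /* Program the MAC RX maximum frame length (frame size plus 4 bytes of FCS);
      * the receiver is briefly disabled around the update if it was running.
      */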
2083 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2084 {
2085         int ret = 0;
2086         u32 buf;
2087         bool rxenabled;
2088
2089         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2090
2091         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2092
2093         if (rxenabled) {
2094                 buf &= ~MAC_RX_RXEN_;
2095                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2096         }
2097
2098         /* add 4 to size for FCS */
2099         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2100         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2101
2102         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2103
2104         if (rxenabled) {
2105                 buf |= MAC_RX_RXEN_;
2106                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2107         }
2108
2109         return 0;
2110 }
2111
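     /* Unlink every URB still pending on the given queue and return how many
      * asynchronous unlinks were started.
      */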
2112 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2113 {
2114         struct sk_buff *skb;
2115         unsigned long flags;
2116         int count = 0;
2117
2118         spin_lock_irqsave(&q->lock, flags);
2119         while (!skb_queue_empty(q)) {
2120                 struct skb_data *entry;
2121                 struct urb *urb;
2122                 int ret;
2123
2124                 skb_queue_walk(q, skb) {
2125                         entry = (struct skb_data *)skb->cb;
2126                         if (entry->state != unlink_start)
2127                                 goto found;
2128                 }
2129                 break;
2130 found:
2131                 entry->state = unlink_start;
2132                 urb = entry->urb;
2133
2134                 /* Take a reference on the URB so that it cannot be freed
2135                  * while usb_unlink_urb() runs; usb_unlink_urb() always
2136                  * races with the .complete handler (including defer_bh),
2137                  * which could otherwise trigger a use-after-free inside
2138                  * usb_unlink_urb().
2139                  */
2140                 usb_get_urb(urb);
2141                 spin_unlock_irqrestore(&q->lock, flags);
2142                 /* during some PM-driven resume scenarios,
2143                  * these (async) unlinks complete immediately
2144                  */
2145                 ret = usb_unlink_urb(urb);
2146                 if (ret != -EINPROGRESS && ret != 0)
2147                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2148                 else
2149                         count++;
2150                 usb_put_urb(urb);
2151                 spin_lock_irqsave(&q->lock, flags);
2152         }
2153         spin_unlock_irqrestore(&q->lock, flags);
2154         return count;
2155 }
2156
2157 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2158 {
2159         struct lan78xx_net *dev = netdev_priv(netdev);
2160         int ll_mtu = new_mtu + netdev->hard_header_len;
2161         int old_hard_mtu = dev->hard_mtu;
2162         int old_rx_urb_size = dev->rx_urb_size;
2163         int ret;
2164
2165         /* no second zero-length packet read wanted after mtu-sized packets */
2166         if ((ll_mtu % dev->maxpacket) == 0)
2167                 return -EDOM;
2168
2169         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2170
2171         netdev->mtu = new_mtu;
2172
2173         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2174         if (dev->rx_urb_size == old_hard_mtu) {
2175                 dev->rx_urb_size = dev->hard_mtu;
2176                 if (dev->rx_urb_size > old_rx_urb_size) {
2177                         if (netif_running(dev->net)) {
2178                                 unlink_urbs(dev, &dev->rxq);
2179                                 tasklet_schedule(&dev->bh);
2180                         }
2181                 }
2182         }
2183
2184         return 0;
2185 }
2186
2187 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2188 {
2189         struct lan78xx_net *dev = netdev_priv(netdev);
2190         struct sockaddr *addr = p;
2191         u32 addr_lo, addr_hi;
2192         int ret;
2193
2194         if (netif_running(netdev))
2195                 return -EBUSY;
2196
2197         if (!is_valid_ether_addr(addr->sa_data))
2198                 return -EADDRNOTAVAIL;
2199
2200         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2201
2202         addr_lo = netdev->dev_addr[0] |
2203                   netdev->dev_addr[1] << 8 |
2204                   netdev->dev_addr[2] << 16 |
2205                   netdev->dev_addr[3] << 24;
2206         addr_hi = netdev->dev_addr[4] |
2207                   netdev->dev_addr[5] << 8;
2208
2209         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2210         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2211
2212         return 0;
2213 }
2214
2215 /* Enable or disable Rx checksum offload engine */
2216 static int lan78xx_set_features(struct net_device *netdev,
2217                                 netdev_features_t features)
2218 {
2219         struct lan78xx_net *dev = netdev_priv(netdev);
2220         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2221         unsigned long flags;
2222         int ret;
2223
2224         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2225
2226         if (features & NETIF_F_RXCSUM) {
2227                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2228                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2229         } else {
2230                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2231                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2232         }
2233
2234         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2235                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2236         else
2237                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2238
2239         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2240
2241         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2242
2243         return 0;
2244 }
2245
2246 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2247 {
2248         struct lan78xx_priv *pdata =
2249                         container_of(param, struct lan78xx_priv, set_vlan);
2250         struct lan78xx_net *dev = pdata->dev;
2251
2252         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2253                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2254 }
2255
2256 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2257                                    __be16 proto, u16 vid)
2258 {
2259         struct lan78xx_net *dev = netdev_priv(netdev);
2260         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2261         u16 vid_bit_index;
2262         u16 vid_dword_index;
2263
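             /* The 4096-entry VLAN table is kept as 128 32-bit words, e.g. VID 100
              * lands in word 3 (100 >> 5) at bit 4 (100 & 0x1F).
              */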
2264         vid_dword_index = (vid >> 5) & 0x7F;
2265         vid_bit_index = vid & 0x1F;
2266
2267         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2268
2269         /* defer register writes to a sleepable context */
2270         schedule_work(&pdata->set_vlan);
2271
2272         return 0;
2273 }
2274
2275 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2276                                     __be16 proto, u16 vid)
2277 {
2278         struct lan78xx_net *dev = netdev_priv(netdev);
2279         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2280         u16 vid_bit_index;
2281         u16 vid_dword_index;
2282
2283         vid_dword_index = (vid >> 5) & 0x7F;
2284         vid_bit_index = vid & 0x1F;
2285
2286         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2287
2288         /* defer register writes to a sleepable context */
2289         schedule_work(&pdata->set_vlan);
2290
2291         return 0;
2292 }
2293
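     /* Program the USB LTM (Latency Tolerance Messaging) registers: if LTM is
      * enabled and EEPROM/OTP provides a valid 24-byte table, use it; otherwise
      * write zeros.
      */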
2294 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2295 {
2296         int ret;
2297         u32 buf;
2298         u32 regs[6] = { 0 };
2299
2300         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2301         if (buf & USB_CFG1_LTM_ENABLE_) {
2302                 u8 temp[2];
2303                 /* Get values from EEPROM first */
2304                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2305                         if (temp[0] == 24) {
2306                                 ret = lan78xx_read_raw_eeprom(dev,
2307                                                               temp[1] * 2,
2308                                                               24,
2309                                                               (u8 *)regs);
2310                                 if (ret < 0)
2311                                         return;
2312                         }
2313                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2314                         if (temp[0] == 24) {
2315                                 ret = lan78xx_read_raw_otp(dev,
2316                                                            temp[1] * 2,
2317                                                            24,
2318                                                            (u8 *)regs);
2319                                 if (ret < 0)
2320                                         return;
2321                         }
2322                 }
2323         }
2324
2325         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2326         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2327         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2328         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2329         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2330         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2331 }
2332
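     /* Lite-reset the chip and bring the MAC, USB, FIFOs, flow control and PHY
      * back up with run-time defaults; also caches the chip ID and revision.
      */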
2333 static int lan78xx_reset(struct lan78xx_net *dev)
2334 {
2335         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2336         u32 buf;
2337         int ret = 0;
2338         unsigned long timeout;
2339
2340         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2341         buf |= HW_CFG_LRST_;
2342         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2343
2344         timeout = jiffies + HZ;
2345         do {
2346                 mdelay(1);
2347                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2348                 if (time_after(jiffies, timeout)) {
2349                         netdev_warn(dev->net,
2350                                     "timeout on completion of LiteReset");
2351                         return -EIO;
2352                 }
2353         } while (buf & HW_CFG_LRST_);
2354
2355         lan78xx_init_mac_address(dev);
2356
2357         /* save DEVID for later usage */
2358         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2359         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2360         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2361
2362         /* Respond to the IN token with a NAK */
2363         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2364         buf |= USB_CFG_BIR_;
2365         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2366
2367         /* Init LTM */
2368         lan78xx_init_ltm(dev);
2369
2370         if (dev->udev->speed == USB_SPEED_SUPER) {
2371                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2372                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2373                 dev->rx_qlen = 4;
2374                 dev->tx_qlen = 4;
2375         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2376                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2377                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2378                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2379                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2380         } else {
2381                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2382                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2383                 dev->rx_qlen = 4;
2384         }
2385
2386         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2387         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2388
2389         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2390         buf |= HW_CFG_MEF_;
2391         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2392
2393         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2394         buf |= USB_CFG_BCE_;
2395         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2396
2397         /* set FIFO sizes */
2398         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2399         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2400
2401         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2402         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2403
2404         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2405         ret = lan78xx_write_reg(dev, FLOW, 0);
2406         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2407
2408         /* Don't need rfe_ctl_lock during initialisation */
2409         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2410         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2411         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2412
2413         /* Enable or disable checksum offload engines */
2414         lan78xx_set_features(dev->net, dev->net->features);
2415
2416         lan78xx_set_multicast(dev->net);
2417
2418         /* reset PHY */
2419         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2420         buf |= PMT_CTL_PHY_RST_;
2421         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2422
2423         timeout = jiffies + HZ;
2424         do {
2425                 mdelay(1);
2426                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2427                 if (time_after(jiffies, timeout)) {
2428                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2429                         return -EIO;
2430                 }
2431         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2432
2433         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2434         /* LAN7801 only has RGMII mode */
2435         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2436                 buf &= ~MAC_CR_GMII_EN_;
2437         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2438         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2439
2440         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2441         buf |= MAC_TX_TXEN_;
2442         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2443
2444         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2445         buf |= FCT_TX_CTL_EN_;
2446         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2447
2448         ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2449
2450         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2451         buf |= MAC_RX_RXEN_;
2452         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2453
2454         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2455         buf |= FCT_RX_CTL_EN_;
2456         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2457
2458         return 0;
2459 }
2460
2461 static void lan78xx_init_stats(struct lan78xx_net *dev)
2462 {
2463         u32 *p;
2464         int i;
2465
2466         /* initialize for stats update
2467          * some counters are 20 bits wide and some are 32 bits wide
2468          */
2469         p = (u32 *)&dev->stats.rollover_max;
2470         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2471                 p[i] = 0xFFFFF;
2472
2473         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2474         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2475         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2476         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2477         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2478         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2479         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2480         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2481         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2482         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2483
2484         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2485 }
2486
2487 static int lan78xx_open(struct net_device *net)
2488 {
2489         struct lan78xx_net *dev = netdev_priv(net);
2490         int ret;
2491
2492         ret = usb_autopm_get_interface(dev->intf);
2493         if (ret < 0)
2494                 goto out;
2495
2496         ret = lan78xx_reset(dev);
2497         if (ret < 0)
2498                 goto done;
2499
2500         ret = lan78xx_phy_init(dev);
2501         if (ret < 0)
2502                 goto done;
2503
2504         /* for Link Check */
2505         if (dev->urb_intr) {
2506                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2507                 if (ret < 0) {
2508                         netif_err(dev, ifup, dev->net,
2509                                   "intr submit %d\n", ret);
2510                         goto done;
2511                 }
2512         }
2513
2514         lan78xx_init_stats(dev);
2515
2516         set_bit(EVENT_DEV_OPEN, &dev->flags);
2517
2518         netif_start_queue(net);
2519
2520         dev->link_on = false;
2521
2522         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2523 done:
2524         usb_autopm_put_interface(dev->intf);
2525
2526 out:
2527         return ret;
2528 }
2529
2530 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2531 {
2532         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2533         DECLARE_WAITQUEUE(wait, current);
2534         int temp;
2535
2536         /* ensure there are no more active urbs */
2537         add_wait_queue(&unlink_wakeup, &wait);
2538         set_current_state(TASK_UNINTERRUPTIBLE);
2539         dev->wait = &unlink_wakeup;
2540         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2541
2542         /* maybe wait for deletions to finish. */
2543         while (!skb_queue_empty(&dev->rxq) &&
2544                !skb_queue_empty(&dev->txq) &&
2545                !skb_queue_empty(&dev->done)) {
2546                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2547                 set_current_state(TASK_UNINTERRUPTIBLE);
2548                 netif_dbg(dev, ifdown, dev->net,
2549                           "waited for %d urb completions\n", temp);
2550         }
2551         set_current_state(TASK_RUNNING);
2552         dev->wait = NULL;
2553         remove_wait_queue(&unlink_wakeup, &wait);
2554 }
2555
2556 static int lan78xx_stop(struct net_device *net)
2557 {
2558         struct lan78xx_net              *dev = netdev_priv(net);
2559
2560         if (timer_pending(&dev->stat_monitor))
2561                 del_timer_sync(&dev->stat_monitor);
2562
2563         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2564         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2565
2566         phy_stop(net->phydev);
2567         phy_disconnect(net->phydev);
2568
2569         net->phydev = NULL;
2570
2571         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2572         netif_stop_queue(net);
2573
2574         netif_info(dev, ifdown, dev->net,
2575                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2576                    net->stats.rx_packets, net->stats.tx_packets,
2577                    net->stats.rx_errors, net->stats.tx_errors);
2578
2579         lan78xx_terminate_urbs(dev);
2580
2581         usb_kill_urb(dev->urb_intr);
2582
2583         skb_queue_purge(&dev->rxq_pause);
2584
2585         /* deferred work (task, timer, softirq) must also stop.
2586          * can't flush_scheduled_work() until we drop rtnl (later),
2587          * else workers could deadlock; so make workers a NOP.
2588          */
2589         dev->flags = 0;
2590         cancel_delayed_work_sync(&dev->wq);
2591         tasklet_kill(&dev->bh);
2592
2593         usb_autopm_put_interface(dev->intf);
2594
2595         return 0;
2596 }
2597
2598 static int lan78xx_linearize(struct sk_buff *skb)
2599 {
2600         return skb_linearize(skb);
2601 }
2602
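     /* Prepend the two little-endian TX command words (TX_CMD_A/TX_CMD_B) that
      * carry the frame length, checksum/LSO flags, MSS and VLAN tag.
      */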
2603 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2604                                        struct sk_buff *skb, gfp_t flags)
2605 {
2606         u32 tx_cmd_a, tx_cmd_b;
2607
2608         if (skb_cow_head(skb, TX_OVERHEAD)) {
2609                 dev_kfree_skb_any(skb);
2610                 return NULL;
2611         }
2612
2613         if (lan78xx_linearize(skb) < 0)
2614                 return NULL;
2615
2616         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2617
2618         if (skb->ip_summed == CHECKSUM_PARTIAL)
2619                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2620
2621         tx_cmd_b = 0;
2622         if (skb_is_gso(skb)) {
2623                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2624
2625                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2626
2627                 tx_cmd_a |= TX_CMD_A_LSO_;
2628         }
2629
2630         if (skb_vlan_tag_present(skb)) {
2631                 tx_cmd_a |= TX_CMD_A_IVTG_;
2632                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2633         }
2634
2635         skb_push(skb, 4);
2636         cpu_to_le32s(&tx_cmd_b);
2637         memcpy(skb->data, &tx_cmd_b, 4);
2638
2639         skb_push(skb, 4);
2640         cpu_to_le32s(&tx_cmd_a);
2641         memcpy(skb->data, &tx_cmd_a, 4);
2642
2643         return skb;
2644 }
2645
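     /* Move the skb from its current queue to dev->done and kick the bottom-half
      * tasklet when the done list goes from empty to one entry.
      */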
2646 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2647                                struct sk_buff_head *list, enum skb_state state)
2648 {
2649         unsigned long flags;
2650         enum skb_state old_state;
2651         struct skb_data *entry = (struct skb_data *)skb->cb;
2652
2653         spin_lock_irqsave(&list->lock, flags);
2654         old_state = entry->state;
2655         entry->state = state;
2656
2657         __skb_unlink(skb, list);
2658         spin_unlock(&list->lock);
2659         spin_lock(&dev->done.lock);
2660
2661         __skb_queue_tail(&dev->done, skb);
2662         if (skb_queue_len(&dev->done) == 1)
2663                 tasklet_schedule(&dev->bh);
2664         spin_unlock_irqrestore(&dev->done.lock, flags);
2665
2666         return old_state;
2667 }
2668
2669 static void tx_complete(struct urb *urb)
2670 {
2671         struct sk_buff *skb = (struct sk_buff *)urb->context;
2672         struct skb_data *entry = (struct skb_data *)skb->cb;
2673         struct lan78xx_net *dev = entry->dev;
2674
2675         if (urb->status == 0) {
2676                 dev->net->stats.tx_packets += entry->num_of_packet;
2677                 dev->net->stats.tx_bytes += entry->length;
2678         } else {
2679                 dev->net->stats.tx_errors++;
2680
2681                 switch (urb->status) {
2682                 case -EPIPE:
2683                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2684                         break;
2685
2686                 /* software-driven interface shutdown */
2687                 case -ECONNRESET:
2688                 case -ESHUTDOWN:
2689                         break;
2690
2691                 case -EPROTO:
2692                 case -ETIME:
2693                 case -EILSEQ:
2694                         netif_stop_queue(dev->net);
2695                         break;
2696                 default:
2697                         netif_dbg(dev, tx_err, dev->net,
2698                                   "tx err %d\n", entry->urb->status);
2699                         break;
2700                 }
2701         }
2702
2703         usb_autopm_put_interface_async(dev->intf);
2704
2705         defer_bh(dev, skb, &dev->txq, tx_done);
2706 }
2707
2708 static void lan78xx_queue_skb(struct sk_buff_head *list,
2709                               struct sk_buff *newsk, enum skb_state state)
2710 {
2711         struct skb_data *entry = (struct skb_data *)newsk->cb;
2712
2713         __skb_queue_tail(list, newsk);
2714         entry->state = state;
2715 }
2716
2717 static netdev_tx_t
2718 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2719 {
2720         struct lan78xx_net *dev = netdev_priv(net);
2721         struct sk_buff *skb2 = NULL;
2722
2723         if (skb) {
2724                 skb_tx_timestamp(skb);
2725                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2726         }
2727
2728         if (skb2) {
2729                 skb_queue_tail(&dev->txq_pend, skb2);
2730
2731                 /* throttle the TX path at speeds slower than SuperSpeed USB */
2732                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2733                     (skb_queue_len(&dev->txq_pend) > 10))
2734                         netif_stop_queue(net);
2735         } else {
2736                 netif_dbg(dev, tx_err, dev->net,
2737                           "lan78xx_tx_prep return NULL\n");
2738                 dev->net->stats.tx_errors++;
2739                 dev->net->stats.tx_dropped++;
2740         }
2741
2742         tasklet_schedule(&dev->bh);
2743
2744         return NETDEV_TX_OK;
2745 }
2746
2747 static int
2748 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2749 {
2750         int tmp;
2751         struct usb_host_interface *alt = NULL;
2752         struct usb_host_endpoint *in = NULL, *out = NULL;
2753         struct usb_host_endpoint *status = NULL;
2754
2755         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2756                 unsigned ep;
2757
2758                 in = NULL;
2759                 out = NULL;
2760                 status = NULL;
2761                 alt = intf->altsetting + tmp;
2762
2763                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2764                         struct usb_host_endpoint *e;
2765                         int intr = 0;
2766
2767                         e = alt->endpoint + ep;
2768                         switch (e->desc.bmAttributes) {
2769                         case USB_ENDPOINT_XFER_INT:
2770                                 if (!usb_endpoint_dir_in(&e->desc))
2771                                         continue;
2772                                 intr = 1;
2773                                 /* FALLTHROUGH */
2774                         case USB_ENDPOINT_XFER_BULK:
2775                                 break;
2776                         default:
2777                                 continue;
2778                         }
2779                         if (usb_endpoint_dir_in(&e->desc)) {
2780                                 if (!intr && !in)
2781                                         in = e;
2782                                 else if (intr && !status)
2783                                         status = e;
2784                         } else {
2785                                 if (!out)
2786                                         out = e;
2787                         }
2788                 }
2789                 if (in && out)
2790                         break;
2791         }
2792         if (!alt || !in || !out)
2793                 return -EINVAL;
2794
2795         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2796                                        in->desc.bEndpointAddress &
2797                                        USB_ENDPOINT_NUMBER_MASK);
2798         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2799                                         out->desc.bEndpointAddress &
2800                                         USB_ENDPOINT_NUMBER_MASK);
2801         dev->ep_intr = status;
2802
2803         return 0;
2804 }
2805
2806 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2807 {
2808         struct lan78xx_priv *pdata = NULL;
2809         int ret;
2810         int i;
2811
2812         ret = lan78xx_get_endpoints(dev, intf);
2813
2814         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2815
2816         pdata = (struct lan78xx_priv *)(dev->data[0]);
2817         if (!pdata) {
2818                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2819                 return -ENOMEM;
2820         }
2821
2822         pdata->dev = dev;
2823
2824         spin_lock_init(&pdata->rfe_ctl_lock);
2825         mutex_init(&pdata->dataport_mutex);
2826
2827         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2828
2829         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2830                 pdata->vlan_table[i] = 0;
2831
2832         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2833
2834         dev->net->features = 0;
2835
2836         if (DEFAULT_TX_CSUM_ENABLE)
2837                 dev->net->features |= NETIF_F_HW_CSUM;
2838
2839         if (DEFAULT_RX_CSUM_ENABLE)
2840                 dev->net->features |= NETIF_F_RXCSUM;
2841
2842         if (DEFAULT_TSO_CSUM_ENABLE)
2843                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2844
2845         dev->net->hw_features = dev->net->features;
2846
2847         ret = lan78xx_setup_irq_domain(dev);
2848         if (ret < 0) {
2849                 netdev_warn(dev->net,
2850                             "lan78xx_setup_irq_domain() failed : %d", ret);
2851                 kfree(pdata);
2852                 return ret;
2853         }
2854
2855         dev->net->hard_header_len += TX_OVERHEAD;
2856         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2857
2858         /* Init all registers */
2859         ret = lan78xx_reset(dev);
2860
2861         ret = lan78xx_mdio_init(dev);
2862
2863         dev->net->flags |= IFF_MULTICAST;
2864
2865         pdata->wol = WAKE_MAGIC;
2866
2867         return ret;
2868 }
2869
2870 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2871 {
2872         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2873
2874         lan78xx_remove_irq_domain(dev);
2875
2876         lan78xx_remove_mdio(dev);
2877
2878         if (pdata) {
2879                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2880                 kfree(pdata);
2881                 pdata = NULL;
2882                 dev->data[0] = 0;
2883         }
2884 }
2885
2886 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2887                                     struct sk_buff *skb,
2888                                     u32 rx_cmd_a, u32 rx_cmd_b)
2889 {
2890         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2891             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2892                 skb->ip_summed = CHECKSUM_NONE;
2893         } else {
2894                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2895                 skb->ip_summed = CHECKSUM_COMPLETE;
2896         }
2897 }
2898
2899 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2900 {
2901         int             status;
2902
2903         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2904                 skb_queue_tail(&dev->rxq_pause, skb);
2905                 return;
2906         }
2907
2908         dev->net->stats.rx_packets++;
2909         dev->net->stats.rx_bytes += skb->len;
2910
2911         skb->protocol = eth_type_trans(skb, dev->net);
2912
2913         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2914                   skb->len + sizeof(struct ethhdr), skb->protocol);
2915         memset(skb->cb, 0, sizeof(struct skb_data));
2916
2917         if (skb_defer_rx_timestamp(skb))
2918                 return;
2919
2920         status = netif_rx(skb);
2921         if (status != NET_RX_SUCCESS)
2922                 netif_dbg(dev, rx_err, dev->net,
2923                           "netif_rx status %d\n", status);
2924 }
2925
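     /* Parse a bulk-in buffer: each frame is preceded by little-endian RX command
      * words A/B/C and padded so that the next frame starts on a 4-byte boundary.
      */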
2926 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2927 {
2928         if (skb->len < dev->net->hard_header_len)
2929                 return 0;
2930
2931         while (skb->len > 0) {
2932                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2933                 u16 rx_cmd_c;
2934                 struct sk_buff *skb2;
2935                 unsigned char *packet;
2936
2937                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2938                 le32_to_cpus(&rx_cmd_a);
2939                 skb_pull(skb, sizeof(rx_cmd_a));
2940
2941                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2942                 le32_to_cpus(&rx_cmd_b);
2943                 skb_pull(skb, sizeof(rx_cmd_b));
2944
2945                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2946                 le16_to_cpus(&rx_cmd_c);
2947                 skb_pull(skb, sizeof(rx_cmd_c));
2948
2949                 packet = skb->data;
2950
2951                 /* get the packet length */
2952                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2953                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2954
2955                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2956                         netif_dbg(dev, rx_err, dev->net,
2957                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2958                 } else {
2959                         /* last frame in this batch */
2960                         if (skb->len == size) {
2961                                 lan78xx_rx_csum_offload(dev, skb,
2962                                                         rx_cmd_a, rx_cmd_b);
2963
2964                                 skb_trim(skb, skb->len - 4); /* remove fcs */
2965                                 skb->truesize = size + sizeof(struct sk_buff);
2966
2967                                 return 1;
2968                         }
2969
2970                         skb2 = skb_clone(skb, GFP_ATOMIC);
2971                         if (unlikely(!skb2)) {
2972                                 netdev_warn(dev->net, "Error allocating skb");
2973                                 return 0;
2974                         }
2975
2976                         skb2->len = size;
2977                         skb2->data = packet;
2978                         skb_set_tail_pointer(skb2, size);
2979
2980                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2981
2982                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
2983                         skb2->truesize = size + sizeof(struct sk_buff);
2984
2985                         lan78xx_skb_return(dev, skb2);
2986                 }
2987
2988                 skb_pull(skb, size);
2989
2990                 /* padding bytes before the next frame starts */
2991                 if (skb->len)
2992                         skb_pull(skb, align_count);
2993         }
2994
2995         return 1;
2996 }
2997
2998 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2999 {
3000         if (!lan78xx_rx(dev, skb)) {
3001                 dev->net->stats.rx_errors++;
3002                 goto done;
3003         }
3004
3005         if (skb->len) {
3006                 lan78xx_skb_return(dev, skb);
3007                 return;
3008         }
3009
3010         netif_dbg(dev, rx_err, dev->net, "drop\n");
3011         dev->net->stats.rx_errors++;
3012 done:
3013         skb_queue_tail(&dev->done, skb);
3014 }
3015
3016 static void rx_complete(struct urb *urb);
3017
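/* rx_submit - allocate an skb for one bulk-in URB and submit it.  The URB
 * is only submitted while the interface is present, running, not halted
 * and not asleep; otherwise both the skb and the URB are freed and an
 * error is returned.
 */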
3018 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3019 {
3020         struct sk_buff *skb;
3021         struct skb_data *entry;
3022         unsigned long lockflags;
3023         size_t size = dev->rx_urb_size;
3024         int ret = 0;
3025
3026         skb = netdev_alloc_skb_ip_align(dev->net, size);
3027         if (!skb) {
3028                 usb_free_urb(urb);
3029                 return -ENOMEM;
3030         }
3031
3032         entry = (struct skb_data *)skb->cb;
3033         entry->urb = urb;
3034         entry->dev = dev;
3035         entry->length = 0;
3036
3037         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3038                           skb->data, size, rx_complete, skb);
3039
3040         spin_lock_irqsave(&dev->rxq.lock, lockflags);
3041
3042         if (netif_device_present(dev->net) &&
3043             netif_running(dev->net) &&
3044             !test_bit(EVENT_RX_HALT, &dev->flags) &&
3045             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3046                 ret = usb_submit_urb(urb, GFP_ATOMIC);
3047                 switch (ret) {
3048                 case 0:
3049                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3050                         break;
3051                 case -EPIPE:
3052                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3053                         break;
3054                 case -ENODEV:
3055                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
3056                         netif_device_detach(dev->net);
3057                         break;
3058                 case -EHOSTUNREACH:
3059                         ret = -ENOLINK;
3060                         break;
3061                 default:
3062                         netif_dbg(dev, rx_err, dev->net,
3063                                   "rx submit, %d\n", ret);
3064                         tasklet_schedule(&dev->bh);
3065                 }
3066         } else {
3067                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3068                 ret = -ENOLINK;
3069         }
3070         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3071         if (ret) {
3072                 dev_kfree_skb_any(skb);
3073                 usb_free_urb(urb);
3074         }
3075         return ret;
3076 }
3077
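/* rx_complete - bulk-in completion handler.  Classifies the URB status,
 * queues the skb for the bottom half via defer_bh() and, when possible,
 * resubmits the URB for the next receive.
 */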
3078 static void rx_complete(struct urb *urb)
3079 {
3080         struct sk_buff  *skb = (struct sk_buff *)urb->context;
3081         struct skb_data *entry = (struct skb_data *)skb->cb;
3082         struct lan78xx_net *dev = entry->dev;
3083         int urb_status = urb->status;
3084         enum skb_state state;
3085
3086         skb_put(skb, urb->actual_length);
3087         state = rx_done;
3088         entry->urb = NULL;
3089
3090         switch (urb_status) {
3091         case 0:
3092                 if (skb->len < dev->net->hard_header_len) {
3093                         state = rx_cleanup;
3094                         dev->net->stats.rx_errors++;
3095                         dev->net->stats.rx_length_errors++;
3096                         netif_dbg(dev, rx_err, dev->net,
3097                                   "rx length %d\n", skb->len);
3098                 }
3099                 usb_mark_last_busy(dev->udev);
3100                 break;
3101         case -EPIPE:
3102                 dev->net->stats.rx_errors++;
3103                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3104                 /* FALLTHROUGH */
3105         case -ECONNRESET:                               /* async unlink */
3106         case -ESHUTDOWN:                                /* hardware gone */
3107                 netif_dbg(dev, ifdown, dev->net,
3108                           "rx shutdown, code %d\n", urb_status);
3109                 state = rx_cleanup;
3110                 entry->urb = urb;
3111                 urb = NULL;
3112                 break;
3113         case -EPROTO:
3114         case -ETIME:
3115         case -EILSEQ:
3116                 dev->net->stats.rx_errors++;
3117                 state = rx_cleanup;
3118                 entry->urb = urb;
3119                 urb = NULL;
3120                 break;
3121
3122         /* data overrun ... flush fifo? */
3123         case -EOVERFLOW:
3124                 dev->net->stats.rx_over_errors++;
3125                 /* FALLTHROUGH */
3126
3127         default:
3128                 state = rx_cleanup;
3129                 dev->net->stats.rx_errors++;
3130                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3131                 break;
3132         }
3133
3134         state = defer_bh(dev, skb, &dev->rxq, state);
3135
3136         if (urb) {
3137                 if (netif_running(dev->net) &&
3138                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
3139                     state != unlink_start) {
3140                         rx_submit(dev, urb, GFP_ATOMIC);
3141                         return;
3142                 }
3143                 usb_free_urb(urb);
3144         }
3145         netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3146 }
3147
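/* lan78xx_tx_bh - transmit bottom half.  Either dequeues a single GSO skb
 * or coalesces several small pending skbs into one bulk-out buffer, then
 * submits the URB (TX_OVERHEAD bytes of per-packet command header are
 * excluded from the reported byte count).  While the device is
 * autosuspended the URB is parked on the deferred anchor and submitted
 * from lan78xx_resume() instead.
 */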
3148 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3149 {
3150         int length;
3151         struct urb *urb = NULL;
3152         struct skb_data *entry;
3153         unsigned long flags;
3154         struct sk_buff_head *tqp = &dev->txq_pend;
3155         struct sk_buff *skb, *skb2;
3156         int ret;
3157         int count, pos;
3158         int skb_totallen, pkt_cnt;
3159
3160         skb_totallen = 0;
3161         pkt_cnt = 0;
3162         count = 0;
3163         length = 0;
3164         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3165                 if (skb_is_gso(skb)) {
3166                         if (pkt_cnt) {
3167                                 /* handle previous packets first */
3168                                 break;
3169                         }
3170                         count = 1;
3171                         length = skb->len - TX_OVERHEAD;
3172                         skb2 = skb_dequeue(tqp);
3173                         goto gso_skb;
3174                 }
3175
3176                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3177                         break;
3178                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3179                 pkt_cnt++;
3180         }
3181
3182         /* copy to a single skb */
3183         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3184         if (!skb)
3185                 goto drop;
3186
3187         skb_put(skb, skb_totallen);
3188
3189         for (count = pos = 0; count < pkt_cnt; count++) {
3190                 skb2 = skb_dequeue(tqp);
3191                 if (skb2) {
3192                         length += (skb2->len - TX_OVERHEAD);
3193                         memcpy(skb->data + pos, skb2->data, skb2->len);
3194                         pos += roundup(skb2->len, sizeof(u32));
3195                         dev_kfree_skb(skb2);
3196                 }
3197         }
3198
3199 gso_skb:
3200         urb = usb_alloc_urb(0, GFP_ATOMIC);
3201         if (!urb)
3202                 goto drop;
3203
3204         entry = (struct skb_data *)skb->cb;
3205         entry->urb = urb;
3206         entry->dev = dev;
3207         entry->length = length;
3208         entry->num_of_packet = count;
3209
3210         spin_lock_irqsave(&dev->txq.lock, flags);
3211         ret = usb_autopm_get_interface_async(dev->intf);
3212         if (ret < 0) {
3213                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3214                 goto drop;
3215         }
3216
3217         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3218                           skb->data, skb->len, tx_complete, skb);
3219
3220         if (length % dev->maxpacket == 0) {
3221                 /* send USB_ZERO_PACKET */
3222                 urb->transfer_flags |= URB_ZERO_PACKET;
3223         }
3224
3225 #ifdef CONFIG_PM
3226         /* if this triggers, the device is still asleep */
3227         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3228                 /* transmission will be done in resume */
3229                 usb_anchor_urb(urb, &dev->deferred);
3230                 /* no point in processing more packets */
3231                 netif_stop_queue(dev->net);
3232                 usb_put_urb(urb);
3233                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3234                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3235                 return;
3236         }
3237 #endif
3238
3239         ret = usb_submit_urb(urb, GFP_ATOMIC);
3240         switch (ret) {
3241         case 0:
3242                 netif_trans_update(dev->net);
3243                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3244                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3245                         netif_stop_queue(dev->net);
3246                 break;
3247         case -EPIPE:
3248                 netif_stop_queue(dev->net);
3249                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3250                 usb_autopm_put_interface_async(dev->intf);
3251                 break;
3252         default:
3253                 usb_autopm_put_interface_async(dev->intf);
3254                 netif_dbg(dev, tx_err, dev->net,
3255                           "tx: submit urb err %d\n", ret);
3256                 break;
3257         }
3258
3259         spin_unlock_irqrestore(&dev->txq.lock, flags);
3260
3261         if (ret) {
3262                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3263 drop:
3264                 dev->net->stats.tx_dropped++;
3265                 if (skb)
3266                         dev_kfree_skb_any(skb);
3267                 usb_free_urb(urb);
3268         } else
3269                 netif_dbg(dev, tx_queued, dev->net,
3270                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
3271 }
3272
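/* lan78xx_rx_bh - receive bottom half.  Tops up the RX URB queue towards
 * rx_qlen (at most 10 new URBs per call) and wakes the TX queue when
 * there is room again.
 */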
3273 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3274 {
3275         struct urb *urb;
3276         int i;
3277
3278         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3279                 for (i = 0; i < 10; i++) {
3280                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3281                                 break;
3282                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3283                         if (urb)
3284                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3285                                         return;
3286                 }
3287
3288                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3289                         tasklet_schedule(&dev->bh);
3290         }
3291         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3292                 netif_wake_queue(dev->net);
3293 }
3294
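/* lan78xx_bh - main tasklet.  Drains the done list (completed RX buffers
 * are processed, completed TX and cleanup entries are freed), then, if the
 * device is up, resets the stats-update interval, flushes pending
 * transmits and refills the receive queue.
 */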
3295 static void lan78xx_bh(unsigned long param)
3296 {
3297         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3298         struct sk_buff *skb;
3299         struct skb_data *entry;
3300
3301         while ((skb = skb_dequeue(&dev->done))) {
3302                 entry = (struct skb_data *)(skb->cb);
3303                 switch (entry->state) {
3304                 case rx_done:
3305                         entry->state = rx_cleanup;
3306                         rx_process(dev, skb);
3307                         continue;
3308                 case tx_done:
3309                         usb_free_urb(entry->urb);
3310                         dev_kfree_skb(skb);
3311                         continue;
3312                 case rx_cleanup:
3313                         usb_free_urb(entry->urb);
3314                         dev_kfree_skb(skb);
3315                         continue;
3316                 default:
3317                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3318                         return;
3319                 }
3320         }
3321
3322         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3323                 /* reset update timer delta */
3324                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3325                         dev->delta = 1;
3326                         mod_timer(&dev->stat_monitor,
3327                                   jiffies + STAT_UPDATE_TIMER);
3328                 }
3329
3330                 if (!skb_queue_empty(&dev->txq_pend))
3331                         lan78xx_tx_bh(dev);
3332
3333                 if (!timer_pending(&dev->delay) &&
3334                     !test_bit(EVENT_RX_HALT, &dev->flags))
3335                         lan78xx_rx_bh(dev);
3336         }
3337 }
3338
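/* lan78xx_delayedwork - deferred work.  Depending on which EVENT_* bits
 * are set, clears TX/RX endpoint halts, performs a link reset and runs
 * the periodic statistics update.
 */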
3339 static void lan78xx_delayedwork(struct work_struct *work)
3340 {
3341         int status;
3342         struct lan78xx_net *dev;
3343
3344         dev = container_of(work, struct lan78xx_net, wq.work);
3345
3346         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3347                 unlink_urbs(dev, &dev->txq);
3348                 status = usb_autopm_get_interface(dev->intf);
3349                 if (status < 0)
3350                         goto fail_pipe;
3351                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3352                 usb_autopm_put_interface(dev->intf);
3353                 if (status < 0 &&
3354                     status != -EPIPE &&
3355                     status != -ESHUTDOWN) {
3356                         if (netif_msg_tx_err(dev))
3357 fail_pipe:
3358                                 netdev_err(dev->net,
3359                                            "can't clear tx halt, status %d\n",
3360                                            status);
3361                 } else {
3362                         clear_bit(EVENT_TX_HALT, &dev->flags);
3363                         if (status != -ESHUTDOWN)
3364                                 netif_wake_queue(dev->net);
3365                 }
3366         }
3367         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3368                 unlink_urbs(dev, &dev->rxq);
3369                 status = usb_autopm_get_interface(dev->intf);
3370                 if (status < 0)
3371                         goto fail_halt;
3372                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3373                 usb_autopm_put_interface(dev->intf);
3374                 if (status < 0 &&
3375                     status != -EPIPE &&
3376                     status != -ESHUTDOWN) {
3377                         if (netif_msg_rx_err(dev))
3378 fail_halt:
3379                                 netdev_err(dev->net,
3380                                            "can't clear rx halt, status %d\n",
3381                                            status);
3382                 } else {
3383                         clear_bit(EVENT_RX_HALT, &dev->flags);
3384                         tasklet_schedule(&dev->bh);
3385                 }
3386         }
3387
3388         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3389                 int ret = 0;
3390
3391                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3392                 status = usb_autopm_get_interface(dev->intf);
3393                 if (status < 0)
3394                         goto skip_reset;
3395                 ret = lan78xx_link_reset(dev);
3396                 if (ret < 0) {
3397                         usb_autopm_put_interface(dev->intf);
3398 skip_reset:
3399                         netdev_info(dev->net, "link reset failed (%d)\n", ret);
3400                 } else {
3401                         usb_autopm_put_interface(dev->intf);
3402                 }
3403         }
3404
3405         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3406                 lan78xx_update_stats(dev);
3407
3408                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3409
3410                 mod_timer(&dev->stat_monitor,
3411                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3412
3413                 dev->delta = min((dev->delta * 2), 50);
3414         }
3415 }
3416
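/* intr_complete - interrupt-endpoint completion handler.  Processes the
 * status word via lan78xx_status() and resubmits the URB while the
 * interface is running.
 */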
3417 static void intr_complete(struct urb *urb)
3418 {
3419         struct lan78xx_net *dev = urb->context;
3420         int status = urb->status;
3421
3422         switch (status) {
3423         /* success */
3424         case 0:
3425                 lan78xx_status(dev, urb);
3426                 break;
3427
3428         /* software-driven interface shutdown */
3429         case -ENOENT:                   /* urb killed */
3430         case -ESHUTDOWN:                /* hardware gone */
3431                 netif_dbg(dev, ifdown, dev->net,
3432                           "intr shutdown, code %d\n", status);
3433                 return;
3434
3435         /* NOTE:  not throttling like RX/TX, since this endpoint
3436          * already polls infrequently
3437          */
3438         default:
3439                 netdev_dbg(dev->net, "intr status %d\n", status);
3440                 break;
3441         }
3442
3443         if (!netif_running(dev->net))
3444                 return;
3445
3446         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3447         status = usb_submit_urb(urb, GFP_ATOMIC);
3448         if (status != 0)
3449                 netif_err(dev, timer, dev->net,
3450                           "intr resubmit --> %d\n", status);
3451 }
3452
3453 static void lan78xx_disconnect(struct usb_interface *intf)
3454 {
3455         struct lan78xx_net              *dev;
3456         struct usb_device               *udev;
3457         struct net_device               *net;
3458
3459         dev = usb_get_intfdata(intf);
3460         usb_set_intfdata(intf, NULL);
3461         if (!dev)
3462                 return;
3463
3464         udev = interface_to_usbdev(intf);
3465
3466         net = dev->net;
3467         unregister_netdev(net);
3468
3469         cancel_delayed_work_sync(&dev->wq);
3470
3471         usb_scuttle_anchored_urbs(&dev->deferred);
3472
3473         lan78xx_unbind(dev, intf);
3474
3475         usb_kill_urb(dev->urb_intr);
3476         usb_free_urb(dev->urb_intr);
3477
3478         free_netdev(net);
3479         usb_put_dev(udev);
3480 }
3481
3482 static void lan78xx_tx_timeout(struct net_device *net)
3483 {
3484         struct lan78xx_net *dev = netdev_priv(net);
3485
3486         unlink_urbs(dev, &dev->txq);
3487         tasklet_schedule(&dev->bh);
3488 }
3489
3490 static const struct net_device_ops lan78xx_netdev_ops = {
3491         .ndo_open               = lan78xx_open,
3492         .ndo_stop               = lan78xx_stop,
3493         .ndo_start_xmit         = lan78xx_start_xmit,
3494         .ndo_tx_timeout         = lan78xx_tx_timeout,
3495         .ndo_change_mtu         = lan78xx_change_mtu,
3496         .ndo_set_mac_address    = lan78xx_set_mac_addr,
3497         .ndo_validate_addr      = eth_validate_addr,
3498         .ndo_do_ioctl           = lan78xx_ioctl,
3499         .ndo_set_rx_mode        = lan78xx_set_multicast,
3500         .ndo_set_features       = lan78xx_set_features,
3501         .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3502         .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3503 };
3504
3505 static void lan78xx_stat_monitor(unsigned long param)
3506 {
3507         struct lan78xx_net *dev;
3508
3509         dev = (struct lan78xx_net *)param;
3510
3511         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3512 }
3513
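/* lan78xx_probe - USB probe.  Allocates the netdev and driver state, binds
 * the device, sets up the bulk-in/out and interrupt pipes, and registers
 * the network interface with remote wakeup enabled and a longer
 * autosuspend delay.
 */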
3514 static int lan78xx_probe(struct usb_interface *intf,
3515                          const struct usb_device_id *id)
3516 {
3517         struct lan78xx_net *dev;
3518         struct net_device *netdev;
3519         struct usb_device *udev;
3520         int ret;
3521         unsigned maxp;
3522         unsigned period;
3523         u8 *buf = NULL;
3524
3525         udev = interface_to_usbdev(intf);
3526         udev = usb_get_dev(udev);
3527
3528         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3529         if (!netdev) {
3530                 dev_err(&intf->dev, "Error: OOM\n");
3531                 ret = -ENOMEM;
3532                 goto out1;
3533         }
3534
3535         /* netdev_printk() needs this */
3536         SET_NETDEV_DEV(netdev, &intf->dev);
3537
3538         dev = netdev_priv(netdev);
3539         dev->udev = udev;
3540         dev->intf = intf;
3541         dev->net = netdev;
3542         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3543                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3544
3545         skb_queue_head_init(&dev->rxq);
3546         skb_queue_head_init(&dev->txq);
3547         skb_queue_head_init(&dev->done);
3548         skb_queue_head_init(&dev->rxq_pause);
3549         skb_queue_head_init(&dev->txq_pend);
3550         mutex_init(&dev->phy_mutex);
3551
3552         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3553         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3554         init_usb_anchor(&dev->deferred);
3555
3556         netdev->netdev_ops = &lan78xx_netdev_ops;
3557         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3558         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3559
3560         dev->stat_monitor.function = lan78xx_stat_monitor;
3561         dev->stat_monitor.data = (unsigned long)dev;
3562         dev->delta = 1;
3563         init_timer(&dev->stat_monitor);
3564
3565         mutex_init(&dev->stats.access_lock);
3566
3567         ret = lan78xx_bind(dev, intf);
3568         if (ret < 0)
3569                 goto out2;
3570         strcpy(netdev->name, "eth%d");
3571
3572         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3573                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3574
3575         /* MTU range: 68 - 9000 */
3576         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3577
3578         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3579         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3580         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3581
3582         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3583         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3584
3585         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3586                                         dev->ep_intr->desc.bEndpointAddress &
3587                                         USB_ENDPOINT_NUMBER_MASK);
3588         period = dev->ep_intr->desc.bInterval;
3589
3590         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3591         buf = kmalloc(maxp, GFP_KERNEL);
3592         if (buf) {
3593                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3594                 if (!dev->urb_intr) {
3595                         ret = -ENOMEM;
3596                         kfree(buf);
3597                         goto out3;
3598                 } else {
3599                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3600                                          dev->pipe_intr, buf, maxp,
3601                                          intr_complete, dev, period);
3602                 }
3603         }
3604
3605         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3606
3607         /* driver requires remote-wakeup capability during autosuspend. */
3608         intf->needs_remote_wakeup = 1;
3609
3610         ret = register_netdev(netdev);
3611         if (ret != 0) {
3612                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3613                 goto out3;
3614         }
3615
3616         usb_set_intfdata(intf, dev);
3617
3618         ret = device_set_wakeup_enable(&udev->dev, true);
3619
3620         /* The default autosuspend delay of 2 seconds adds more overhead
3621          * than benefit, so use 10 seconds instead.
3622          */
3623         pm_runtime_set_autosuspend_delay(&udev->dev,
3624                                          DEFAULT_AUTOSUSPEND_DELAY);
3625
3626         return 0;
3627
3628 out3:
3629         lan78xx_unbind(dev, intf);
3630 out2:
3631         free_netdev(netdev);
3632 out1:
3633         usb_put_dev(udev);
3634
3635         return ret;
3636 }
3637
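/* lan78xx_wakeframe_crc16 - compute the 16-bit CRC (polynomial 0x8005,
 * initial value 0xFFFF, data bits taken LSB first) over a wakeup-frame
 * byte pattern, in the form the WUF_CFGx filter registers expect in their
 * CRC16 field.
 */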
3638 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3639 {
3640         const u16 crc16poly = 0x8005;
3641         int i;
3642         u16 bit, crc, msb;
3643         u8 data;
3644
3645         crc = 0xFFFF;
3646         for (i = 0; i < len; i++) {
3647                 data = *buf++;
3648                 for (bit = 0; bit < 8; bit++) {
3649                         msb = crc >> 15;
3650                         crc <<= 1;
3651
3652                         if (msb ^ (u16)(data & 1)) {
3653                                 crc ^= crc16poly;
3654                                 crc |= (u16)0x0001U;
3655                         }
3656                         data >>= 1;
3657                 }
3658         }
3659
3660         return crc;
3661 }
3662
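/* lan78xx_set_suspend - program the requested Wake-on-LAN sources before
 * suspend: stop the MAC, clear stale wakeup status, enable PHY,
 * magic-packet, broadcast and perfect-DA (unicast) wake, set up
 * wakeup-frame filters for IPv4/IPv6 multicast and ARP, then re-enable
 * the receiver so wake frames can be detected.
 */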
3663 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3664 {
3665         u32 buf;
3666         int ret;
3667         int mask_index;
3668         u16 crc;
3669         u32 temp_wucsr;
3670         u32 temp_pmt_ctl;
3671         const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3672         const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3673         const u8 arp_type[2] = { 0x08, 0x06 };
3674
3675         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3676         buf &= ~MAC_TX_TXEN_;
3677         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3678         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3679         buf &= ~MAC_RX_RXEN_;
3680         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3681
3682         ret = lan78xx_write_reg(dev, WUCSR, 0);
3683         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3684         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3685
3686         temp_wucsr = 0;
3687
3688         temp_pmt_ctl = 0;
3689         ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3690         temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3691         temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3692
3693         for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3694                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3695
3696         mask_index = 0;
3697         if (wol & WAKE_PHY) {
3698                 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3699
3700                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3701                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3702                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3703         }
3704         if (wol & WAKE_MAGIC) {
3705                 temp_wucsr |= WUCSR_MPEN_;
3706
3707                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3708                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3709                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3710         }
3711         if (wol & WAKE_BCAST) {
3712                 temp_wucsr |= WUCSR_BCST_EN_;
3713
3714                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3715                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3716                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3717         }
3718         if (wol & WAKE_MCAST) {
3719                 temp_wucsr |= WUCSR_WAKE_EN_;
3720
3721                 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3722                 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3723                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3724                                         WUF_CFGX_EN_ |
3725                                         WUF_CFGX_TYPE_MCAST_ |
3726                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3727                                         (crc & WUF_CFGX_CRC16_MASK_));
3728
3729                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3730                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3731                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3732                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3733                 mask_index++;
3734
3735                 /* for IPv6 Multicast */
3736                 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3737                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3738                                         WUF_CFGX_EN_ |
3739                                         WUF_CFGX_TYPE_MCAST_ |
3740                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3741                                         (crc & WUF_CFGX_CRC16_MASK_));
3742
3743                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3744                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3745                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3746                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3747                 mask_index++;
3748
3749                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3750                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3751                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3752         }
3753         if (wol & WAKE_UCAST) {
3754                 temp_wucsr |= WUCSR_PFDA_EN_;
3755
3756                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3757                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3758                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3759         }
3760         if (wol & WAKE_ARP) {
3761                 temp_wucsr |= WUCSR_WAKE_EN_;
3762
3763                 /* set WUF_CFG & WUF_MASK
3764                  * for packettype (offset 12,13) = ARP (0x0806)
3765                  */
3766                 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3767                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3768                                         WUF_CFGX_EN_ |
3769                                         WUF_CFGX_TYPE_ALL_ |
3770                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3771                                         (crc & WUF_CFGX_CRC16_MASK_));
3772
3773                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3774                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3775                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3776                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3777                 mask_index++;
3778
3779                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3780                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3781                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3782         }
3783
3784         ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3785
3786         /* when multiple WOL bits are set */
3787         if (hweight_long((unsigned long)wol) > 1) {
3788                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3789                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3790                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3791         }
3792         ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3793
3794         /* clear WUPS */
3795         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3796         buf |= PMT_CTL_WUPS_MASK_;
3797         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3798
3799         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3800         buf |= MAC_RX_RXEN_;
3801         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3802
3803         return 0;
3804 }
3805
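/* lan78xx_suspend - USB suspend callback.  Refuses to autosuspend while
 * transmits are pending; otherwise stops the MAC, detaches the netdev and
 * kills outstanding URBs, then arms either good-frame/PHY wake (runtime
 * autosuspend) or the user-configured WoL sources (system suspend).
 */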
3806 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3807 {
3808         struct lan78xx_net *dev = usb_get_intfdata(intf);
3809         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3810         u32 buf;
3811         int ret;
3812         int event;
3813
3814         event = message.event;
3815
3816         if (!dev->suspend_count++) {
3817                 spin_lock_irq(&dev->txq.lock);
3818                 /* don't autosuspend while transmitting */
3819                 if ((skb_queue_len(&dev->txq) ||
3820                      skb_queue_len(&dev->txq_pend)) &&
3821                         PMSG_IS_AUTO(message)) {
3822                         spin_unlock_irq(&dev->txq.lock);
3823                         ret = -EBUSY;
3824                         goto out;
3825                 } else {
3826                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3827                         spin_unlock_irq(&dev->txq.lock);
3828                 }
3829
3830                 /* stop TX & RX */
3831                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3832                 buf &= ~MAC_TX_TXEN_;
3833                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3834                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3835                 buf &= ~MAC_RX_RXEN_;
3836                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3837
3838                 /* empty out the Rx and Tx queues */
3839                 netif_device_detach(dev->net);
3840                 lan78xx_terminate_urbs(dev);
3841                 usb_kill_urb(dev->urb_intr);
3842
3843                 /* reattach */
3844                 netif_device_attach(dev->net);
3845         }
3846
3847         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3848                 del_timer(&dev->stat_monitor);
3849
3850                 if (PMSG_IS_AUTO(message)) {
3851                         /* auto suspend (selective suspend) */
3852                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3853                         buf &= ~MAC_TX_TXEN_;
3854                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3855                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3856                         buf &= ~MAC_RX_RXEN_;
3857                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3858
3859                         ret = lan78xx_write_reg(dev, WUCSR, 0);
3860                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3861                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3862
3863                         /* set goodframe wakeup */
3864                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
3865
3866                         buf |= WUCSR_RFE_WAKE_EN_;
3867                         buf |= WUCSR_STORE_WAKE_;
3868
3869                         ret = lan78xx_write_reg(dev, WUCSR, buf);
3870
3871                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3872
3873                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3874                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
3875
3876                         buf |= PMT_CTL_PHY_WAKE_EN_;
3877                         buf |= PMT_CTL_WOL_EN_;
3878                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
3879                         buf |= PMT_CTL_SUS_MODE_3_;
3880
3881                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3882
3883                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3884
3885                         buf |= PMT_CTL_WUPS_MASK_;
3886
3887                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3888
3889                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3890                         buf |= MAC_RX_RXEN_;
3891                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3892                 } else {
3893                         lan78xx_set_suspend(dev, pdata->wol);
3894                 }
3895         }
3896
3897         ret = 0;
3898 out:
3899         return ret;
3900 }
3901
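/* lan78xx_resume - USB resume callback.  Restarts the stats timer,
 * resubmits the interrupt URB and any transmit URBs deferred while
 * suspended, clears the wakeup-source status registers and re-enables
 * the transmitter.
 */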
3902 static int lan78xx_resume(struct usb_interface *intf)
3903 {
3904         struct lan78xx_net *dev = usb_get_intfdata(intf);
3905         struct sk_buff *skb;
3906         struct urb *res;
3907         int ret;
3908         u32 buf;
3909
3910         if (!timer_pending(&dev->stat_monitor)) {
3911                 dev->delta = 1;
3912                 mod_timer(&dev->stat_monitor,
3913                           jiffies + STAT_UPDATE_TIMER);
3914         }
3915
3916         if (!--dev->suspend_count) {
3917                 /* resume interrupt URBs */
3918                 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3919                         usb_submit_urb(dev->urb_intr, GFP_NOIO);
3920
3921                 spin_lock_irq(&dev->txq.lock);
3922                 while ((res = usb_get_from_anchor(&dev->deferred))) {
3923                         skb = (struct sk_buff *)res->context;
3924                         ret = usb_submit_urb(res, GFP_ATOMIC);
3925                         if (ret < 0) {
3926                                 dev_kfree_skb_any(skb);
3927                                 usb_free_urb(res);
3928                                 usb_autopm_put_interface_async(dev->intf);
3929                         } else {
3930                                 netif_trans_update(dev->net);
3931                                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3932                         }
3933                 }
3934
3935                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3936                 spin_unlock_irq(&dev->txq.lock);
3937
3938                 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3939                         if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3940                                 netif_start_queue(dev->net);
3941                         tasklet_schedule(&dev->bh);
3942                 }
3943         }
3944
3945         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3946         ret = lan78xx_write_reg(dev, WUCSR, 0);
3947         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3948
3949         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3950                                              WUCSR2_ARP_RCD_ |
3951                                              WUCSR2_IPV6_TCPSYN_RCD_ |
3952                                              WUCSR2_IPV4_TCPSYN_RCD_);
3953
3954         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3955                                             WUCSR_EEE_RX_WAKE_ |
3956                                             WUCSR_PFDA_FR_ |
3957                                             WUCSR_RFE_WAKE_FR_ |
3958                                             WUCSR_WUFR_ |
3959                                             WUCSR_MPR_ |
3960                                             WUCSR_BCST_FR_);
3961
3962         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3963         buf |= MAC_TX_TXEN_;
3964         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3965
3966         return 0;
3967 }
3968
3969 static int lan78xx_reset_resume(struct usb_interface *intf)
3970 {
3971         struct lan78xx_net *dev = usb_get_intfdata(intf);
3972
3973         lan78xx_reset(dev);
3974
3975         lan78xx_phy_init(dev);
3976
3977         return lan78xx_resume(intf);
3978 }
3979
3980 static const struct usb_device_id products[] = {
3981         {
3982         /* LAN7800 USB Gigabit Ethernet Device */
3983         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3984         },
3985         {
3986         /* LAN7850 USB Gigabit Ethernet Device */
3987         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3988         },
3989         {
3990         /* LAN7801 USB Gigabit Ethernet Device */
3991         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
3992         },
3993         {},
3994 };
3995 MODULE_DEVICE_TABLE(usb, products);
3996
3997 static struct usb_driver lan78xx_driver = {
3998         .name                   = DRIVER_NAME,
3999         .id_table               = products,
4000         .probe                  = lan78xx_probe,
4001         .disconnect             = lan78xx_disconnect,
4002         .suspend                = lan78xx_suspend,
4003         .resume                 = lan78xx_resume,
4004         .reset_resume           = lan78xx_reset_resume,
4005         .supports_autosuspend   = 1,
4006         .disable_hub_initiated_lpm = 1,
4007 };
4008
4009 module_usb_driver(lan78xx_driver);
4010
4011 MODULE_AUTHOR(DRIVER_AUTHOR);
4012 MODULE_DESCRIPTION(DRIVER_DESC);
4013 MODULE_LICENSE("GPL");