/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

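/* Per-link device state.  One of these backs each "usb0"-style network
 * interface, joining the gadget's USB endpoints to a net_device.
 */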
struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

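/* defer_kevent() requests that eth_work() run in process context; the
 * todo bitmap marks which work is pending, so duplicate requests are
 * cheap no-ops.  Used when rx buffers can't be refilled in_irq.
 */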
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

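/* rx_submit() allocates an skb sized for the link's framing and queues
 * one OUT request; if anything fails, the request goes back on the
 * freelist and (for -ENOMEM) a refill is deferred to eth_work().
 */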
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > VLAN_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

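/* prealloc() resizes a request freelist to exactly n entries, allocating
 * new usb_requests or freeing extras as needed.  Called with
 * dev->req_lock held.
 */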
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

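/* alloc_requests() sizes both tx and rx freelists to the link's queue
 * depth; gether_disconnect() frees them when the link goes down.
 */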
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

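/* tx_complete() recycles the request onto the tx freelist, frees the
 * skb, and restarts the queue that eth_start_xmit() may have stopped.
 */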
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

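/* hard_start_xmit hook: apply the host-set CDC packet filter, wrap the
 * frame in any link framing (RNDIS, EEM, ...), and queue it to the IN
 * endpoint; tx_complete() returns the request to the freelist.
 */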
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

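/* get_ether_addr() parses a MAC given as twelve hex digits, optionally
 * separated by ':' or '.'; if the string is absent or invalid, a random
 * locally-administered address is used instead (and 1 is returned).
 */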
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

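/* this framework supports exactly one link instance per system */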
static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;
	SET_ETHTOOL_OPS(net, &ops);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	flush_work(&the_dev->work);
	free_netdev(the_dev->net);

	the_dev = NULL;
}

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
fail1:
		(void) usb_ep_disable(link->out_ep);
fail0:
		(void) usb_ep_disable(link->in_ep);
	}

	if (result == 0)
		return dev->net;

	/* caller is responsible for cleanup on error */
	return ERR_PTR(result);
}

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}