3 * Data transfer and URB enqueuing
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * How transfers work: get a buffer, break it up into segments (segment
24 * size is a multiple of the maxpacket size). For each segment, issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if the
26 * transfer is outbound, or nothing if inbound (all over the DTO endpoint).
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct wa_xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
33 * Sounds simple, it is a pain to implement.
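 *
 * Purely as an illustration (the numbers are hypothetical; the real
 * segment size comes from the rpipe descriptor, see
 * __wa_xfer_setup_sizes()): a 10000 byte outbound URB with a 3584 byte
 * segment size becomes DIV_ROUND_UP(10000, 3584) = 3 segment requests
 * on DTO -- two full 3584 byte segments plus a final 2832 byte one --
 * and we then expect one xfer result per segment back on DTI.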
40 * LIFE CYCLE / STATE DIAGRAM
44 * THIS CODE IS DISGUSTING
46 * Warned you are; it's my second try and still not happy with it.
52 * - Supports DMA xfers, control, bulk and maybe interrupt
54 * - Does not recycle unused rpipes
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
62 * There are two ways it could be done:
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when an xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * if none are found, go over the list, check each rpipe's
71 * endpoint and its activity record (no last-xfer-done-ts in
72 * the last x seconds) and take it
74 * However, because we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
77 * we are going to have to rebuild all this based on a scheduler,
78 * where we have a list of transactions to do and, based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/slab.h>
85 #include <linux/hash.h>
86 #include <linux/ratelimit.h>
87 #include <linux/export.h>
88 #include <linux/scatterlist.h>
109 static void wa_xfer_delayed_run(struct wa_rpipe *);
112 * Life cycle governed by 'struct urb' (the refcount of the struct is
113 * that of the 'struct urb' and usb_free_urb() would free the whole
117 struct urb tr_urb; /* transfer request urb. */
118 struct urb *dto_urb; /* for data output. */
119 struct list_head list_node; /* for rpipe->req_list */
120 struct wa_xfer *xfer; /* out xfer */
121 u8 index; /* which segment we are */
122 enum wa_seg_status status;
123 ssize_t result; /* bytes xfered or error */
124 struct wa_xfer_hdr xfer_hdr;
125 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
128 static inline void wa_seg_init(struct wa_seg *seg)
130 usb_init_urb(&seg->tr_urb);
132 /* set the remaining memory to 0. */
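	/* (tr_urb was just initialized by usb_init_urb(), including its
	 * refcount, so it is deliberately left out of the memset below.) */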
133 memset(((void *)seg) + sizeof(seg->tr_urb), 0,
134 sizeof(*seg) - sizeof(seg->tr_urb));
138 * Protected by xfer->lock
143 struct list_head list_node;
147 struct wahc *wa; /* Wire adapter we are plugged to */
148 struct usb_host_endpoint *ep;
149 struct urb *urb; /* URB we are transferring for */
150 struct wa_seg **seg; /* transfer segments */
151 u8 segs, segs_submitted, segs_done;
152 unsigned is_inbound:1;
157 gfp_t gfp; /* allocation mask */
159 struct wusb_dev *wusb_dev; /* for activity timestamps */
162 static inline void wa_xfer_init(struct wa_xfer *xfer)
164 kref_init(&xfer->refcnt);
165 INIT_LIST_HEAD(&xfer->list_node);
166 spin_lock_init(&xfer->lock);
170 * Destroy a transfer structure
172 * Note that freeing xfer->seg[cnt]->urb will free the containing
173 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
175 static void wa_xfer_destroy(struct kref *_xfer)
177 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
180 for (cnt = 0; cnt < xfer->segs; cnt++) {
181 if (xfer->seg[cnt]) {
182 if (xfer->seg[cnt]->dto_urb) {
183 kfree(xfer->seg[cnt]->dto_urb->sg);
184 usb_free_urb(xfer->seg[cnt]->dto_urb);
186 usb_free_urb(&xfer->seg[cnt]->tr_urb);
194 static void wa_xfer_get(struct wa_xfer *xfer)
196 kref_get(&xfer->refcnt);
199 static void wa_xfer_put(struct wa_xfer *xfer)
201 kref_put(&xfer->refcnt, wa_xfer_destroy);
207 * xfer->lock has to be unlocked
209 * We take xfer->lock for setting the result; this is a barrier
210 * against drivers/usb/core/hcd.c:unlink1() being called after we call
211 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
212 * reference to the transfer.
214 static void wa_xfer_giveback(struct wa_xfer *xfer)
218 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
219 list_del_init(&xfer->list_node);
220 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
221 /* FIXME: segmentation broken -- kills DWA */
222 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
230 * xfer->lock has to be unlocked
232 static void wa_xfer_completion(struct wa_xfer *xfer)
235 wusb_dev_put(xfer->wusb_dev);
236 rpipe_put(xfer->ep->hcpriv);
237 wa_xfer_giveback(xfer);
241 * If transfer is done, wrap it up and return true
243 * xfer->lock has to be locked
245 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
247 struct device *dev = &xfer->wa->usb_iface->dev;
248 unsigned result, cnt;
250 struct urb *urb = xfer->urb;
251 unsigned found_short = 0;
253 result = xfer->segs_done == xfer->segs_submitted;
256 urb->actual_length = 0;
257 for (cnt = 0; cnt < xfer->segs; cnt++) {
258 seg = xfer->seg[cnt];
259 switch (seg->status) {
261 if (found_short && seg->result > 0) {
262 dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
263 xfer, cnt, seg->result);
264 urb->status = -EINVAL;
267 urb->actual_length += seg->result;
268 if (seg->result < xfer->seg_size
269 && cnt != xfer->segs-1)
271 dev_dbg(dev, "xfer %p#%u: DONE short %d "
272 "result %zu urb->actual_length %d\n",
273 xfer, seg->index, found_short, seg->result,
277 xfer->result = seg->result;
278 dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
279 xfer, seg->index, seg->result);
282 dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
283 xfer, seg->index, urb->status);
284 xfer->result = urb->status;
287 dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
288 xfer, cnt, seg->status);
289 xfer->result = -EINVAL;
299 * Initialize a transfer's ID
301 * We need to use a sequential number; if we use the pointer or the
302 * hash of the pointer, it can repeat over sequential transfers and
303 * then it will confuse the HWA....wonder why in hell they put a 32
304 * bit handle in there then.
306 static void wa_xfer_id_init(struct wa_xfer *xfer)
308 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
312 * Return the xfer's ID associated with xfer
316 static u32 wa_xfer_id(struct wa_xfer *xfer)
322 * Search for a transfer list ID on the HCD's URB list
324 * The xfer IDs are sequential numbers assigned by wa_xfer_id_init(),
325 * so we simply compare against each transfer's ID.
327 * @returns NULL if not found.
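 *
 * Note a match bumps the transfer's refcount via wa_xfer_get(), so the
 * caller is responsible for dropping that reference with wa_xfer_put()
 * when done (see the lookup loop below).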
329 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
332 struct wa_xfer *xfer_itr;
333 spin_lock_irqsave(&wa->xfer_list_lock, flags);
334 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
335 if (id == xfer_itr->id) {
336 wa_xfer_get(xfer_itr);
342 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
346 struct wa_xfer_abort_buffer {
348 struct wa_xfer_abort cmd;
351 static void __wa_xfer_abort_cb(struct urb *urb)
353 struct wa_xfer_abort_buffer *b = urb->context;
354 usb_put_urb(&b->urb);
358 * Aborts an ongoing transaction
360 * Assumes the transfer is referenced and locked and in a submitted
361 * state (mainly that there is an endpoint/rpipe assigned).
363 * The callback (see above) does nothing but freeing up the data by
364 * putting the URB. Because the URB is allocated at the head of the
365 * struct, the whole space we allocated is kfreed.
367 * We'll get an 'aborted transaction' xfer result on DTI, which
368 * we'll politely ignore because at this point the transaction has
369 * already been marked as aborted.
371 static void __wa_xfer_abort(struct wa_xfer *xfer)
374 struct device *dev = &xfer->wa->usb_iface->dev;
375 struct wa_xfer_abort_buffer *b;
376 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
378 b = kmalloc(sizeof(*b), GFP_ATOMIC);
381 b->cmd.bLength = sizeof(b->cmd);
382 b->cmd.bRequestType = WA_XFER_ABORT;
383 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
384 b->cmd.dwTransferID = wa_xfer_id(xfer);
386 usb_init_urb(&b->urb);
387 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
388 usb_sndbulkpipe(xfer->wa->usb_dev,
389 xfer->wa->dto_epd->bEndpointAddress),
390 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
391 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
394 return; /* callback frees! */
398 if (printk_ratelimit())
399 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
409 * @returns < 0 on error, transfer segment request size if ok
411 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
412 enum wa_xfer_type *pxfer_type)
415 struct device *dev = &xfer->wa->usb_iface->dev;
417 struct urb *urb = xfer->urb;
418 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
420 switch (rpipe->descr.bmAttribute & 0x3) {
421 case USB_ENDPOINT_XFER_CONTROL:
422 *pxfer_type = WA_XFER_TYPE_CTL;
423 result = sizeof(struct wa_xfer_ctl);
425 case USB_ENDPOINT_XFER_INT:
426 case USB_ENDPOINT_XFER_BULK:
427 *pxfer_type = WA_XFER_TYPE_BI;
428 result = sizeof(struct wa_xfer_bi);
430 case USB_ENDPOINT_XFER_ISOC:
431 dev_err(dev, "FIXME: ISOC not implemented\n");
437 result = -EINVAL; /* shut gcc up */
439 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
440 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
441 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
442 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
443 /* Compute the segment size and make sure it is a multiple of
444 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
446 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
447 if (xfer->seg_size < maxpktsize) {
448 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
449 "%zu\n", xfer->seg_size, maxpktsize);
453 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
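	/*
	 * Illustration only, with hypothetical descriptor values: if
	 * bRPipeBlockSize is 7, a block is 1 << 6 = 64 bytes, so
	 * wBlocks = 56 yields seg_size = 3584; that is already a
	 * multiple of a 512 byte wMaxPacketSize, so the rounding above
	 * leaves it unchanged.
	 */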
454 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
455 if (xfer->segs >= WA_SEGS_MAX) {
456 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
457 (int)(urb->transfer_buffer_length / xfer->seg_size),
462 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
468 /* Fill in the common request header and xfer-type specific data. */
469 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
470 struct wa_xfer_hdr *xfer_hdr0,
471 enum wa_xfer_type xfer_type,
472 size_t xfer_hdr_size)
474 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
476 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
477 xfer_hdr0->bLength = xfer_hdr_size;
478 xfer_hdr0->bRequestType = xfer_type;
479 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
480 xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
481 xfer_hdr0->bTransferSegment = 0;
483 case WA_XFER_TYPE_CTL: {
484 struct wa_xfer_ctl *xfer_ctl =
485 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
486 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
487 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
488 sizeof(xfer_ctl->baSetupData));
491 case WA_XFER_TYPE_BI:
493 case WA_XFER_TYPE_ISO:
494 printk(KERN_ERR "FIXME: ISOC not implemented\n");
501 * Callback for the OUT data phase of the segment request
503 * Check wa_seg_tr_cb(); most comments also apply here because this
504 * function does almost the same thing and they work closely
507 * If the seg request has failed but this DTO phase has succeeded,
508 * wa_seg_tr_cb() has already failed the segment and moved the
509 * status to WA_SEG_ERROR, so this will go through 'case 0' and
510 * effectively do nothing.
512 static void wa_seg_dto_cb(struct urb *urb)
514 struct wa_seg *seg = urb->context;
515 struct wa_xfer *xfer = seg->xfer;
518 struct wa_rpipe *rpipe;
520 unsigned rpipe_ready = 0;
523 /* free the sg if it was used. */
527 switch (urb->status) {
529 spin_lock_irqsave(&xfer->lock, flags);
531 dev = &wa->usb_iface->dev;
532 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
533 xfer, seg->index, urb->actual_length);
534 if (seg->status < WA_SEG_PENDING)
535 seg->status = WA_SEG_PENDING;
536 seg->result = urb->actual_length;
537 spin_unlock_irqrestore(&xfer->lock, flags);
539 case -ECONNRESET: /* URB unlinked; no need to do anything */
540 case -ENOENT: /* as it was done by whoever unlinked us */
542 default: /* Other errors ... */
543 spin_lock_irqsave(&xfer->lock, flags);
545 dev = &wa->usb_iface->dev;
546 rpipe = xfer->ep->hcpriv;
547 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
548 xfer, seg->index, urb->status);
549 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
550 EDC_ERROR_TIMEFRAME)){
551 dev_err(dev, "DTO: URB max acceptable errors "
552 "exceeded, resetting device\n");
555 if (seg->status != WA_SEG_ERROR) {
556 seg->status = WA_SEG_ERROR;
557 seg->result = urb->status;
559 __wa_xfer_abort(xfer);
560 rpipe_ready = rpipe_avail_inc(rpipe);
561 done = __wa_xfer_is_done(xfer);
563 spin_unlock_irqrestore(&xfer->lock, flags);
565 wa_xfer_completion(xfer);
567 wa_xfer_delayed_run(rpipe);
572 * Callback for the segment request
574 * If successful, transition state (unless already transitioned or
575 * outbound transfer); otherwise, take note of the error, mark this
576 * segment done and try completion.
578 * Note we don't access until we are sure that the transfer hasn't
579 * been cancelled (ECONNRESET, ENOENT), which could mean that
580 * seg->xfer could be already gone.
582 * We have to check before setting the status to WA_SEG_PENDING
583 * because sometimes the xfer result callback arrives before this
584 * callback (geeeeeeze), so it might happen that we are already in
585 * another state. As well, we don't set it if the transfer is outbound,
586 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
589 static void wa_seg_tr_cb(struct urb *urb)
591 struct wa_seg *seg = urb->context;
592 struct wa_xfer *xfer = seg->xfer;
595 struct wa_rpipe *rpipe;
597 unsigned rpipe_ready;
600 switch (urb->status) {
602 spin_lock_irqsave(&xfer->lock, flags);
604 dev = &wa->usb_iface->dev;
605 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
606 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
607 seg->status = WA_SEG_PENDING;
608 spin_unlock_irqrestore(&xfer->lock, flags);
610 case -ECONNRESET: /* URB unlinked; no need to do anything */
611 case -ENOENT: /* as it was done by whoever unlinked us */
613 default: /* Other errors ... */
614 spin_lock_irqsave(&xfer->lock, flags);
616 dev = &wa->usb_iface->dev;
617 rpipe = xfer->ep->hcpriv;
618 if (printk_ratelimit())
619 dev_err(dev, "xfer %p#%u: request error %d\n",
620 xfer, seg->index, urb->status);
621 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
622 EDC_ERROR_TIMEFRAME)){
623 dev_err(dev, "DTO: URB max acceptable errors "
624 "exceeded, resetting device\n");
627 usb_unlink_urb(seg->dto_urb);
628 seg->status = WA_SEG_ERROR;
629 seg->result = urb->status;
631 __wa_xfer_abort(xfer);
632 rpipe_ready = rpipe_avail_inc(rpipe);
633 done = __wa_xfer_is_done(xfer);
634 spin_unlock_irqrestore(&xfer->lock, flags);
636 wa_xfer_completion(xfer);
638 wa_xfer_delayed_run(rpipe);
643 * Allocate an SG list to store bytes_to_transfer bytes and copy the
644 * subset of the in_sg that matches the buffer subset
645 * we are about to transfer.
647 static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
648 const unsigned int bytes_transferred,
649 const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
651 struct scatterlist *out_sg;
652 unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
654 struct scatterlist *current_xfer_sg = in_sg;
655 struct scatterlist *current_seg_sg, *last_seg_sg;
657 /* skip previously transferred pages. */
658 while ((current_xfer_sg) &&
659 (bytes_processed < bytes_transferred)) {
660 bytes_processed += current_xfer_sg->length;
662 /* advance the sg if current segment starts on or past the
664 if (bytes_processed <= bytes_transferred)
665 current_xfer_sg = sg_next(current_xfer_sg);
668 /* the data for the current segment starts in current_xfer_sg.
669 calculate the offset. */
670 if (bytes_processed > bytes_transferred) {
671 offset_into_current_page_data = current_xfer_sg->length -
672 (bytes_processed - bytes_transferred);
675 /* calculate the number of pages needed by this segment. */
676 nents = DIV_ROUND_UP((bytes_to_transfer +
677 offset_into_current_page_data +
678 current_xfer_sg->offset),
681 out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
683 sg_init_table(out_sg, nents);
685 /* copy the portion of the incoming SG that correlates to the
686 * data to be transferred by this segment to the segment SG. */
687 last_seg_sg = current_seg_sg = out_sg;
690 /* reset nents and calculate the actual number of sg entries
693 while ((bytes_processed < bytes_to_transfer) &&
694 current_seg_sg && current_xfer_sg) {
695 unsigned int page_len = min((current_xfer_sg->length -
696 offset_into_current_page_data),
697 (bytes_to_transfer - bytes_processed));
699 sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
701 current_xfer_sg->offset +
702 offset_into_current_page_data);
704 bytes_processed += page_len;
706 last_seg_sg = current_seg_sg;
707 current_seg_sg = sg_next(current_seg_sg);
708 current_xfer_sg = sg_next(current_xfer_sg);
710 /* only the first page may require additional offset. */
711 offset_into_current_page_data = 0;
715 /* update num_sgs and terminate the list since we may have
716 * concatenated pages. */
717 sg_mark_end(last_seg_sg);
718 *out_num_sgs = nents;
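	/*
	 * Worked example (hypothetical numbers): for an input SG list
	 * covering four 4096 byte pages, a segment with
	 * bytes_transferred = 6144 and bytes_to_transfer = 3584 starts
	 * 2048 bytes into the second page, so the subset list ends up
	 * with two entries: the last 2048 bytes of that page and the
	 * first 1536 bytes of the next one.
	 */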
725 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
727 static int __wa_populate_dto_urb(struct wa_xfer *xfer,
728 struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
733 seg->dto_urb->transfer_dma =
734 xfer->urb->transfer_dma + buf_itr_offset;
735 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
736 seg->dto_urb->sg = NULL;
737 seg->dto_urb->num_sgs = 0;
739 /* do buffer or SG processing. */
740 seg->dto_urb->transfer_flags &=
741 ~URB_NO_TRANSFER_DMA_MAP;
742 /* this should always be 0 before a resubmit. */
743 seg->dto_urb->num_mapped_sgs = 0;
745 if (xfer->urb->transfer_buffer) {
746 seg->dto_urb->transfer_buffer =
747 xfer->urb->transfer_buffer +
749 seg->dto_urb->sg = NULL;
750 seg->dto_urb->num_sgs = 0;
752 seg->dto_urb->transfer_buffer = NULL;
755 * allocate an SG list to store seg_size bytes
756 * and copy the subset of the xfer->urb->sg that
757 * matches the buffer subset we are about to
760 seg->dto_urb->sg = wa_xfer_create_subset_sg(
762 buf_itr_offset, buf_itr_size,
763 &(seg->dto_urb->num_sgs));
764 if (!(seg->dto_urb->sg))
768 seg->dto_urb->transfer_buffer_length = buf_itr_size;
774 * Allocate the segs array and initialize each of them
776 * The segments are freed by wa_xfer_destroy() when the xfer use count
777 * drops to zero; however, because each segment is given the same life
778 * cycle as the USB URB it contains, it is actually freed by
779 * usb_put_urb() on the contained USB URB (twisted, eh?).
781 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
784 size_t alloc_size = sizeof(*xfer->seg[0])
785 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
786 struct usb_device *usb_dev = xfer->wa->usb_dev;
787 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
789 size_t buf_itr, buf_size, buf_itr_size;
792 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
793 if (xfer->seg == NULL)
794 goto error_segs_kzalloc;
796 buf_size = xfer->urb->transfer_buffer_length;
797 for (cnt = 0; cnt < xfer->segs; cnt++) {
798 seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
800 goto error_seg_kmalloc;
804 usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
805 usb_sndbulkpipe(usb_dev,
806 dto_epd->bEndpointAddress),
807 &seg->xfer_hdr, xfer_hdr_size,
809 buf_itr_size = min(buf_size, xfer->seg_size);
810 if (xfer->is_inbound == 0 && buf_size > 0) {
812 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
813 if (seg->dto_urb == NULL)
814 goto error_dto_alloc;
816 seg->dto_urb, usb_dev,
817 usb_sndbulkpipe(usb_dev,
818 dto_epd->bEndpointAddress),
819 NULL, 0, wa_seg_dto_cb, seg);
821 /* fill in the xfer buffer information. */
822 result = __wa_populate_dto_urb(xfer, seg,
823 buf_itr, buf_itr_size);
826 goto error_seg_outbound_populate;
828 seg->status = WA_SEG_READY;
829 buf_itr += buf_itr_size;
830 buf_size -= buf_itr_size;
835 * Free the memory for the current segment which failed to init.
836 * Use the fact that cnt is left at where it failed. The remaining
837 * segments will be cleaned up by wa_xfer_destroy.
839 error_seg_outbound_populate:
840 usb_free_urb(xfer->seg[cnt]->dto_urb);
842 kfree(xfer->seg[cnt]);
843 xfer->seg[cnt] = NULL;
850 * Allocates all the stuff needed to submit a transfer
852 * Breaks the whole data buffer in a list of segments, each one has a
853 * structure allocated to it and linked in xfer->seg[index]
855 * FIXME: merge setup_segs() and the last part of this function, no
856 * need to do two for loops when we could run everything in a
859 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
862 struct device *dev = &xfer->wa->usb_iface->dev;
863 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
864 size_t xfer_hdr_size, cnt, transfer_size;
865 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
867 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
869 goto error_setup_sizes;
870 xfer_hdr_size = result;
871 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
873 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
874 xfer, xfer->segs, result);
875 goto error_setup_segs;
877 /* Fill the first header */
878 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
879 wa_xfer_id_init(xfer);
880 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
882 /* Fill remaining headers */
883 xfer_hdr = xfer_hdr0;
884 transfer_size = urb->transfer_buffer_length;
885 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
886 xfer->seg_size : transfer_size;
887 transfer_size -= xfer->seg_size;
888 for (cnt = 1; cnt < xfer->segs; cnt++) {
889 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
890 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
891 xfer_hdr->bTransferSegment = cnt;
892 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
893 cpu_to_le32(xfer->seg_size)
894 : cpu_to_le32(transfer_size);
895 xfer->seg[cnt]->status = WA_SEG_READY;
896 transfer_size -= xfer->seg_size;
898 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
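	/*
	 * Illustration (hypothetical sizes): a 10000 byte URB with a
	 * 3584 byte seg_size gets three headers with dwTransferLength
	 * 3584, 3584 and 2832, and only the last header carries the
	 * 0x80 'last segment' flag in bTransferSegment.
	 */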
908 * rpipe->seg_lock is held!
910 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
914 /* submit the transfer request. */
915 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
917 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
918 xfer, seg->index, result);
919 goto error_seg_submit;
921 /* submit the out data if this is an out request. */
923 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
925 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
926 xfer, seg->index, result);
927 goto error_dto_submit;
930 seg->status = WA_SEG_SUBMITTED;
931 rpipe_avail_dec(rpipe);
935 usb_unlink_urb(&seg->tr_urb);
937 seg->status = WA_SEG_ERROR;
938 seg->result = result;
943 * Execute more queued request segments until the maximum concurrent allowed is reached
945 * The ugly unlock/lock sequence on the error path is needed as the
946 * xfer->lock normally nests the seg_lock and not vice versa.
949 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
952 struct device *dev = &rpipe->wa->usb_iface->dev;
954 struct wa_xfer *xfer;
957 spin_lock_irqsave(&rpipe->seg_lock, flags);
958 while (atomic_read(&rpipe->segs_available) > 0
959 && !list_empty(&rpipe->seg_list)) {
960 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
962 list_del(&seg->list_node);
964 result = __wa_seg_submit(rpipe, xfer, seg);
965 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
966 xfer, seg->index, atomic_read(&rpipe->segs_available), result);
967 if (unlikely(result < 0)) {
968 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
969 spin_lock_irqsave(&xfer->lock, flags);
970 __wa_xfer_abort(xfer);
972 spin_unlock_irqrestore(&xfer->lock, flags);
973 spin_lock_irqsave(&rpipe->seg_lock, flags);
976 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
981 * xfer->lock is taken
983 * On failure submitting we just stop submitting and return error;
984 * wa_urb_enqueue_b() will execute the completion path
986 static int __wa_xfer_submit(struct wa_xfer *xfer)
989 struct wahc *wa = xfer->wa;
990 struct device *dev = &wa->usb_iface->dev;
994 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
995 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
999 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1000 list_add_tail(&xfer->list_node, &wa->xfer_list);
1001 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1003 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1005 spin_lock_irqsave(&rpipe->seg_lock, flags);
1006 for (cnt = 0; cnt < xfer->segs; cnt++) {
1007 available = atomic_read(&rpipe->segs_available);
1008 empty = list_empty(&rpipe->seg_list);
1009 seg = xfer->seg[cnt];
1010 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
1011 xfer, cnt, available, empty,
1012 available == 0 || !empty ? "delayed" : "submitted");
1013 if (available == 0 || !empty) {
1014 dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
1015 seg->status = WA_SEG_DELAYED;
1016 list_add_tail(&seg->list_node, &rpipe->seg_list);
1018 result = __wa_seg_submit(rpipe, xfer, seg);
1020 __wa_xfer_abort(xfer);
1021 goto error_seg_submit;
1024 xfer->segs_submitted++;
1027 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1032 * Second part of a URB/transfer enqueue operation
1034 * Assumes this comes from wa_urb_enqueue() [maybe through
1035 * wa_urb_enqueue_run()]. At this point:
1037 * xfer->wa filled and refcounted
1038 * xfer->ep filled with rpipe refcounted if
1040 * xfer->urb filled and refcounted (this is the case when called
1041 * from wa_urb_enqueue() as we come from usb_submit_urb()
1042 * and when called by wa_urb_enqueue_run(), as we took an
1043 * extra ref dropped by _run() after we return).
1046 * If we fail at __wa_xfer_submit(), then we just check if we are done
1047 * and if so, we run the completion procedure. However, if we are not
1048 * yet done, we do nothing and wait for the completion handlers from
1049 * the submitted URBs or from the xfer-result path to kick in. If xfer
1050 * result never kicks in, the xfer will timeout from the USB code and
1051 * dequeue() will be called.
1053 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
1056 unsigned long flags;
1057 struct urb *urb = xfer->urb;
1058 struct wahc *wa = xfer->wa;
1059 struct wusbhc *wusbhc = wa->wusb;
1060 struct wusb_dev *wusb_dev;
1063 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1065 goto error_rpipe_get;
1067 /* FIXME: segmentation broken -- kills DWA */
1068 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
1069 if (urb->dev == NULL) {
1070 mutex_unlock(&wusbhc->mutex);
1071 goto error_dev_gone;
1073 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1074 if (wusb_dev == NULL) {
1075 mutex_unlock(&wusbhc->mutex);
1076 goto error_dev_gone;
1078 mutex_unlock(&wusbhc->mutex);
1080 spin_lock_irqsave(&xfer->lock, flags);
1081 xfer->wusb_dev = wusb_dev;
1082 result = urb->status;
1083 if (urb->status != -EINPROGRESS)
1084 goto error_dequeued;
1086 result = __wa_xfer_setup(xfer, urb);
1088 goto error_xfer_setup;
1089 result = __wa_xfer_submit(xfer);
1091 goto error_xfer_submit;
1092 spin_unlock_irqrestore(&xfer->lock, flags);
1095 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback()
1096 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
1101 spin_unlock_irqrestore(&xfer->lock, flags);
1102 /* FIXME: segmentation broken, kills DWA */
1104 wusb_dev_put(wusb_dev);
1106 rpipe_put(xfer->ep->hcpriv);
1108 xfer->result = result;
1109 wa_xfer_giveback(xfer);
1113 done = __wa_xfer_is_done(xfer);
1114 xfer->result = result;
1115 spin_unlock_irqrestore(&xfer->lock, flags);
1117 wa_xfer_completion(xfer);
1121 * Execute the delayed transfers in the Wire Adapter @wa
1123 * We need to be careful here, as dequeue() could be called in the
1124 * middle. That's why we do the whole thing under the
1125 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1126 * and then checks the list -- so as we would be acquiring in inverse
1127 * order, we move the delayed list to a separate list while locked and then
1128 * submit them without the list lock held.
1130 void wa_urb_enqueue_run(struct work_struct *ws)
1132 struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1133 struct wa_xfer *xfer, *next;
1135 LIST_HEAD(tmp_list);
1137 /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1138 spin_lock_irq(&wa->xfer_list_lock);
1139 list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1140 wa->xfer_delayed_list.prev);
1141 spin_unlock_irq(&wa->xfer_list_lock);
1144 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1145 * can take xfer->lock as well as lock mutexes.
1147 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1148 list_del_init(&xfer->list_node);
1151 wa_urb_enqueue_b(xfer);
1152 usb_put_urb(urb); /* taken when queuing */
1155 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1158 * Process the errored transfers on the Wire Adapter outside of interrupt.
1160 void wa_process_errored_transfers_run(struct work_struct *ws)
1162 struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1163 struct wa_xfer *xfer, *next;
1164 LIST_HEAD(tmp_list);
1166 pr_info("%s: Run delayed STALL processing.\n", __func__);
1168 /* Create a copy of the wa->xfer_errored_list while holding the lock */
1169 spin_lock_irq(&wa->xfer_list_lock);
1170 list_cut_position(&tmp_list, &wa->xfer_errored_list,
1171 wa->xfer_errored_list.prev);
1172 spin_unlock_irq(&wa->xfer_list_lock);
1175 * run rpipe_clear_feature_stalled from temp list without list lock
1178 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1179 struct usb_host_endpoint *ep;
1180 unsigned long flags;
1181 struct wa_rpipe *rpipe;
1183 spin_lock_irqsave(&xfer->lock, flags);
1186 spin_unlock_irqrestore(&xfer->lock, flags);
1188 /* clear RPIPE feature stalled without holding a lock. */
1189 rpipe_clear_feature_stalled(wa, ep);
1191 /* complete the xfer. This removes it from the tmp list. */
1192 wa_xfer_completion(xfer);
1194 /* check for work. */
1195 wa_xfer_delayed_run(rpipe);
1198 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1201 * Submit a transfer to the Wire Adapter in a delayed way
1203 * The process of enqueuing involves possible sleeps() [see
1204 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1205 * in an atomic section, we defer the enqueue_b() call--else we call it directly.
1207 * @urb: We own a reference to it, taken by the Linux USB HCD stack,
1208 * that will be given up by calling usb_hcd_giveback_urb() or by
1209 * returning error from this function -> ergo we don't have to
1212 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1213 struct urb *urb, gfp_t gfp)
1216 struct device *dev = &wa->usb_iface->dev;
1217 struct wa_xfer *xfer;
1218 unsigned long my_flags;
1219 unsigned cant_sleep = irqs_disabled() | in_atomic();
1221 if ((urb->transfer_buffer == NULL)
1222 && (urb->sg == NULL)
1223 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1224 && urb->transfer_buffer_length != 0) {
1225 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1230 xfer = kzalloc(sizeof(*xfer), gfp);
1235 if (urb->status != -EINPROGRESS) /* cancelled */
1236 goto error_dequeued; /* before starting? */
1238 xfer->wa = wa_get(wa);
1244 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1245 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1246 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1247 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1248 cant_sleep ? "deferred" : "inline");
1252 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1253 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1254 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1255 queue_work(wusbd, &wa->xfer_enqueue_work);
1257 wa_urb_enqueue_b(xfer);
1266 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1269 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1270 * handler] is called.
1272 * Until a transfer goes successfully through wa_urb_enqueue(), it
1273 * needs to be dequeued with completion calling; when stuck in delayed
1274 * state or before wa_xfer_setup() is called, we need to do completion.
1276 * not setup: If there is no hcpriv yet, that means that the enqueue
1277 * still had no time to set the xfer up. Because
1278 * urb->status should be other than -EINPROGRESS,
1279 * enqueue() will catch that and bail out.
1281 * If the transfer has gone through setup, we just need to clean it
1282 * up. If it has gone through submit(), we have to abort it [with an
1283 * asynch request] and then make sure we cancel each segment.
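 *
 * Roughly (see the per-segment switch below): WA_SEG_DELAYED segments
 * are pulled off the rpipe's queue, WA_SEG_SUBMITTED ones get their
 * request and DTO URBs unlinked, a WA_SEG_DTI_PENDING one also unlinks
 * the DTI URB, and segments already done or aborted are left alone.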
1286 int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1288 unsigned long flags, flags2;
1289 struct wa_xfer *xfer;
1291 struct wa_rpipe *rpipe;
1293 unsigned rpipe_ready = 0;
1298 * Nothing is set up yet; enqueue() will see urb->status !=
1299 * -EINPROGRESS (set by the hcd layer) and bail out with
1300 * an error, no need to do completion
1302 BUG_ON(urb->status == -EINPROGRESS);
1305 spin_lock_irqsave(&xfer->lock, flags);
1306 rpipe = xfer->ep->hcpriv;
1307 if (rpipe == NULL) {
1308 pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
1309 __func__, wa_xfer_id(xfer),
1310 "Probably already aborted.\n" );
1313 /* Check the delayed list -> if there, release and complete */
1314 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1315 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1316 goto dequeue_delayed;
1317 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1318 if (xfer->seg == NULL) /* still hasn't reached */
1319 goto out_unlock; /* setup(), enqueue_b() completes */
1320 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1321 __wa_xfer_abort(xfer);
1322 for (cnt = 0; cnt < xfer->segs; cnt++) {
1323 seg = xfer->seg[cnt];
1324 switch (seg->status) {
1325 case WA_SEG_NOTREADY:
1327 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1328 xfer, cnt, seg->status);
1331 case WA_SEG_DELAYED:
1332 seg->status = WA_SEG_ABORTED;
1333 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1334 list_del(&seg->list_node);
1336 rpipe_ready = rpipe_avail_inc(rpipe);
1337 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1339 case WA_SEG_SUBMITTED:
1340 seg->status = WA_SEG_ABORTED;
1341 usb_unlink_urb(&seg->tr_urb);
1342 if (xfer->is_inbound == 0)
1343 usb_unlink_urb(seg->dto_urb);
1345 rpipe_ready = rpipe_avail_inc(rpipe);
1347 case WA_SEG_PENDING:
1348 seg->status = WA_SEG_ABORTED;
1350 rpipe_ready = rpipe_avail_inc(rpipe);
1352 case WA_SEG_DTI_PENDING:
1353 usb_unlink_urb(wa->dti_urb);
1354 seg->status = WA_SEG_ABORTED;
1356 rpipe_ready = rpipe_avail_inc(rpipe);
1360 case WA_SEG_ABORTED:
1364 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1365 __wa_xfer_is_done(xfer);
1366 spin_unlock_irqrestore(&xfer->lock, flags);
1367 wa_xfer_completion(xfer);
1369 wa_xfer_delayed_run(rpipe);
1373 spin_unlock_irqrestore(&xfer->lock, flags);
1378 list_del_init(&xfer->list_node);
1379 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1380 xfer->result = urb->status;
1381 spin_unlock_irqrestore(&xfer->lock, flags);
1382 wa_xfer_giveback(xfer);
1383 usb_put_urb(urb); /* we got a ref in enqueue() */
1386 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1389 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1392 * Positive errno values are internal inconsistencies and should be
1393 * flagged louder. Negative are to be passed up to the user in the
1396 * @status: USB WA status code -- high two bits are stripped.
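 *
 * For example, WA_XFER_STATUS_HALTED maps to -EPIPE and is passed to
 * the submitter as a stall, while the reserved/mismatch codes map to
 * positive EINVAL and are caught by the consistency check at the end
 * of this function, which logs them as an inconsistent WA status.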
1398 static int wa_xfer_status_to_errno(u8 status)
1401 u8 real_status = status;
1402 static int xlat[] = {
1403 [WA_XFER_STATUS_SUCCESS] = 0,
1404 [WA_XFER_STATUS_HALTED] = -EPIPE,
1405 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1406 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1407 [WA_XFER_RESERVED] = EINVAL,
1408 [WA_XFER_STATUS_NOT_FOUND] = 0,
1409 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1410 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1411 [WA_XFER_STATUS_ABORTED] = -EINTR,
1412 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1413 [WA_XFER_INVALID_FORMAT] = EINVAL,
1414 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1415 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1421 if (status >= ARRAY_SIZE(xlat)) {
1422 printk_ratelimited(KERN_ERR "%s(): BUG? "
1423 "Unknown WA transfer status 0x%02x\n",
1424 __func__, real_status);
1427 errno = xlat[status];
1428 if (unlikely(errno > 0)) {
1429 printk_ratelimited(KERN_ERR "%s(): BUG? "
1430 "Inconsistent WA status: 0x%02x\n",
1431 __func__, real_status);
1438 * Process an xfer result completion message
1440 * inbound transfers: need to schedule a DTI read
1442 * FIXME: this function needs to be broken up in parts
1444 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
1445 struct wa_xfer_result *xfer_result)
1448 struct device *dev = &wa->usb_iface->dev;
1449 unsigned long flags;
1452 struct wa_rpipe *rpipe;
1455 unsigned rpipe_ready = 0;
1457 spin_lock_irqsave(&xfer->lock, flags);
1458 seg_idx = xfer_result->bTransferSegment & 0x7f;
1459 if (unlikely(seg_idx >= xfer->segs))
1461 seg = xfer->seg[seg_idx];
1462 rpipe = xfer->ep->hcpriv;
1463 usb_status = xfer_result->bTransferStatus;
1464 dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
1465 xfer, seg_idx, usb_status, seg->status);
1466 if (seg->status == WA_SEG_ABORTED
1467 || seg->status == WA_SEG_ERROR) /* already handled */
1468 goto segment_aborted;
1469 if (seg->status == WA_SEG_SUBMITTED) /* oops, got here */
1470 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1471 if (seg->status != WA_SEG_PENDING) {
1472 if (printk_ratelimit())
1473 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1474 xfer, seg_idx, seg->status);
1475 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1477 if (usb_status & 0x80) {
1478 seg->result = wa_xfer_status_to_errno(usb_status);
1479 dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
1480 xfer, xfer->id, seg->index, usb_status);
1481 goto error_complete;
1483 /* FIXME: we ignore warnings, tally them for stats */
1484 if (usb_status & 0x40) /* Warning?... */
1485 usb_status = 0; /* ... pass */
1486 if (xfer->is_inbound) { /* IN data phase: read to buffer */
1487 seg->status = WA_SEG_DTI_PENDING;
1488 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1489 /* this should always be 0 before a resubmit. */
1490 wa->buf_in_urb->num_mapped_sgs = 0;
1493 wa->buf_in_urb->transfer_dma =
1494 xfer->urb->transfer_dma
1495 + (seg_idx * xfer->seg_size);
1496 wa->buf_in_urb->transfer_flags
1497 |= URB_NO_TRANSFER_DMA_MAP;
1498 wa->buf_in_urb->transfer_buffer = NULL;
1499 wa->buf_in_urb->sg = NULL;
1500 wa->buf_in_urb->num_sgs = 0;
1502 /* do buffer or SG processing. */
1503 wa->buf_in_urb->transfer_flags
1504 &= ~URB_NO_TRANSFER_DMA_MAP;
1506 if (xfer->urb->transfer_buffer) {
1507 wa->buf_in_urb->transfer_buffer =
1508 xfer->urb->transfer_buffer
1509 + (seg_idx * xfer->seg_size);
1510 wa->buf_in_urb->sg = NULL;
1511 wa->buf_in_urb->num_sgs = 0;
1513 /* allocate an SG list to store seg_size bytes
1514 and copy the subset of the xfer->urb->sg
1515 that matches the buffer subset we are
1517 wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
1519 seg_idx * xfer->seg_size,
1521 xfer_result->dwTransferLength),
1522 &(wa->buf_in_urb->num_sgs));
1524 if (!(wa->buf_in_urb->sg)) {
1525 wa->buf_in_urb->num_sgs = 0;
1526 goto error_sg_alloc;
1528 wa->buf_in_urb->transfer_buffer = NULL;
1531 wa->buf_in_urb->transfer_buffer_length =
1532 le32_to_cpu(xfer_result->dwTransferLength);
1533 wa->buf_in_urb->context = seg;
1534 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1536 goto error_submit_buf_in;
1538 /* OUT data phase, complete it -- */
1539 seg->status = WA_SEG_DONE;
1540 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1542 rpipe_ready = rpipe_avail_inc(rpipe);
1543 done = __wa_xfer_is_done(xfer);
1545 spin_unlock_irqrestore(&xfer->lock, flags);
1547 wa_xfer_completion(xfer);
1549 wa_xfer_delayed_run(rpipe);
1552 error_submit_buf_in:
1553 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1554 dev_err(dev, "DTI: URB max acceptable errors "
1555 "exceeded, resetting device\n");
1558 if (printk_ratelimit())
1559 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1560 xfer, seg_idx, result);
1561 seg->result = result;
1562 kfree(wa->buf_in_urb->sg);
1564 __wa_xfer_abort(xfer);
1566 seg->status = WA_SEG_ERROR;
1568 rpipe_ready = rpipe_avail_inc(rpipe);
1569 done = __wa_xfer_is_done(xfer);
1571 * queue work item to clear STALL for control endpoints.
1572 * Otherwise, let endpoint_reset take care of it.
1574 if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
1575 usb_endpoint_xfer_control(&xfer->ep->desc) &&
1578 dev_info(dev, "Control EP stall. Queue delayed work.\n");
1579 spin_lock_irq(&wa->xfer_list_lock);
1580 /* move xfer from xfer_list to xfer_errored_list. */
1581 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
1582 spin_unlock_irq(&wa->xfer_list_lock);
1583 spin_unlock_irqrestore(&xfer->lock, flags);
1584 queue_work(wusbd, &wa->xfer_error_work);
1586 spin_unlock_irqrestore(&xfer->lock, flags);
1588 wa_xfer_completion(xfer);
1590 wa_xfer_delayed_run(rpipe);
1596 spin_unlock_irqrestore(&xfer->lock, flags);
1597 wa_urb_dequeue(wa, xfer->urb);
1598 if (printk_ratelimit())
1599 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1600 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1601 dev_err(dev, "DTI: URB max acceptable errors "
1602 "exceeded, resetting device\n");
1608 /* nothing to do, as the aborter did the completion */
1609 spin_unlock_irqrestore(&xfer->lock, flags);
1613 * Callback for the IN data phase
1615 * If successful, transition state; otherwise, take note of the
1616 * error, mark this segment done and try completion.
1618 * Note we don't access until we are sure that the transfer hasn't
1619 * been cancelled (ECONNRESET, ENOENT), which could mean that
1620 * seg->xfer could be already gone.
1622 static void wa_buf_in_cb(struct urb *urb)
1624 struct wa_seg *seg = urb->context;
1625 struct wa_xfer *xfer = seg->xfer;
1628 struct wa_rpipe *rpipe;
1629 unsigned rpipe_ready;
1630 unsigned long flags;
1633 /* free the sg if it was used. */
1637 switch (urb->status) {
1639 spin_lock_irqsave(&xfer->lock, flags);
1641 dev = &wa->usb_iface->dev;
1642 rpipe = xfer->ep->hcpriv;
1643 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1644 xfer, seg->index, (size_t)urb->actual_length);
1645 seg->status = WA_SEG_DONE;
1646 seg->result = urb->actual_length;
1648 rpipe_ready = rpipe_avail_inc(rpipe);
1649 done = __wa_xfer_is_done(xfer);
1650 spin_unlock_irqrestore(&xfer->lock, flags);
1652 wa_xfer_completion(xfer);
1654 wa_xfer_delayed_run(rpipe);
1656 case -ECONNRESET: /* URB unlinked; no need to do anything */
1657 case -ENOENT: /* as it was done by whoever unlinked us */
1659 default: /* Other errors ... */
1660 spin_lock_irqsave(&xfer->lock, flags);
1662 dev = &wa->usb_iface->dev;
1663 rpipe = xfer->ep->hcpriv;
1664 if (printk_ratelimit())
1665 dev_err(dev, "xfer %p#%u: data in error %d\n",
1666 xfer, seg->index, urb->status);
1667 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1668 EDC_ERROR_TIMEFRAME)){
1669 dev_err(dev, "DTO: URB max acceptable errors "
1670 "exceeded, resetting device\n");
1673 seg->status = WA_SEG_ERROR;
1674 seg->result = urb->status;
1676 rpipe_ready = rpipe_avail_inc(rpipe);
1677 __wa_xfer_abort(xfer);
1678 done = __wa_xfer_is_done(xfer);
1679 spin_unlock_irqrestore(&xfer->lock, flags);
1681 wa_xfer_completion(xfer);
1683 wa_xfer_delayed_run(rpipe);
1688 * Handle an incoming transfer result buffer
1690 * Given a transfer result buffer, it completes the transfer (possibly
1691 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1692 * new transfer result read.
1695 * The xfer_result DTI URB state machine
1697 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1699 * We start in OFF mode, the first xfer_result notification [through
1700 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1703 * We receive a buffer -- if it is not an xfer_result, we complain and
1704 * repost the DTI-URB. If it is an xfer_result then do the xfer seg
1705 * request accounting. If it is an IN segment, we move to RBI and post
1706 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1707 * repost the DTI-URB and move to RXR state. If there was no IN
1708 * segment, it will repost the DTI-URB.
1710 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
1711 * errors) in the URBs.
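 *
 * A typical inbound segment therefore goes (illustrative sequence):
 * RXR: the DTI URB completes with an xfer_result and we call
 * wa_xfer_result_chew(); RBI: the BUF-IN URB completes with the data
 * in wa_buf_in_cb(); then the DTI URB is resubmitted and we are back
 * in RXR waiting for the next xfer_result.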
1713 static void wa_dti_cb(struct urb *urb)
1716 struct wahc *wa = urb->context;
1717 struct device *dev = &wa->usb_iface->dev;
1718 struct wa_xfer_result *xfer_result;
1720 struct wa_xfer *xfer;
1723 BUG_ON(wa->dti_urb != urb);
1724 switch (wa->dti_urb->status) {
1726 /* We have a xfer result buffer; check it */
1727 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1728 urb->actual_length, urb->transfer_buffer);
1729 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1730 dev_err(dev, "DTI Error: xfer result--bad size "
1731 "xfer result (%d bytes vs %zu needed)\n",
1732 urb->actual_length, sizeof(*xfer_result));
1735 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
1736 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1737 dev_err(dev, "DTI Error: xfer result--"
1738 "bad header length %u\n",
1739 xfer_result->hdr.bLength);
1742 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1743 dev_err(dev, "DTI Error: xfer result--"
1744 "bad header type 0x%02x\n",
1745 xfer_result->hdr.bNotifyType);
1748 usb_status = xfer_result->bTransferStatus & 0x3f;
1749 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1750 /* taken care of already */
1752 xfer_id = xfer_result->dwTransferID;
1753 xfer = wa_xfer_get_by_id(wa, xfer_id);
1755 /* FIXME: transaction might have been cancelled */
1756 dev_err(dev, "DTI Error: xfer result--"
1757 "unknown xfer 0x%08x (status 0x%02x)\n",
1758 xfer_id, usb_status);
1761 wa_xfer_result_chew(wa, xfer, xfer_result);
1764 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1765 case -ESHUTDOWN: /* going away! */
1766 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1770 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1771 EDC_ERROR_TIMEFRAME)) {
1772 dev_err(dev, "DTI: URB max acceptable errors "
1773 "exceeded, resetting device\n");
1777 if (printk_ratelimit())
1778 dev_err(dev, "DTI: URB error %d\n", urb->status);
1781 /* Resubmit the DTI URB */
1782 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1784 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1785 "resetting\n", result);
1793 * Transfer complete notification
1795 * Called from the notif.c code. We get a notification on EP2 saying
1796 * that some endpoint has some transfer result data available. We are
1799 * To speed up things, we always have a URB reading the DTI endpoint; we
1800 * don't really set it up and start it until the first xfer complete
1801 * notification arrives, which is what we do here.
1803 * Follow up in wa_dti_cb(), as that's where the whole state
1806 * So here we just initialize the DTI URB for reading transfer result
1807 * notifications and also the buffer-in URB, for reading buffers. Then
1808 * we just submit the DTI URB.
1810 * @wa shall be referenced
1812 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1815 struct device *dev = &wa->usb_iface->dev;
1816 struct wa_notif_xfer *notif_xfer;
1817 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1819 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1820 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1822 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1823 /* FIXME: hardcoded limitation, adapt */
1824 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1825 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1828 if (wa->dti_urb != NULL) /* DTI URB already started */
1831 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1832 if (wa->dti_urb == NULL) {
1833 dev_err(dev, "Can't allocate DTI URB\n");
1834 goto error_dti_urb_alloc;
1837 wa->dti_urb, wa->usb_dev,
1838 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1839 wa->dti_buf, wa->dti_buf_size,
1842 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1843 if (wa->buf_in_urb == NULL) {
1844 dev_err(dev, "Can't allocate BUF-IN URB\n");
1845 goto error_buf_in_urb_alloc;
1848 wa->buf_in_urb, wa->usb_dev,
1849 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1850 NULL, 0, wa_buf_in_cb, wa);
1851 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1853 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1854 "resetting\n", result);
1855 goto error_dti_urb_submit;
1860 error_dti_urb_submit:
1861 usb_put_urb(wa->buf_in_urb);
1862 error_buf_in_urb_alloc:
1863 usb_put_urb(wa->dti_urb);
1865 error_dti_urb_alloc: