/*
 * udc.c - ChipIdea UDC driver
 *
 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/init.h>
18 #include <linux/platform_device.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
22 #include <linux/irq.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/usb/ch9.h>
27 #include <linux/usb/gadget.h>
28 #include <linux/usb/otg.h>
29 #include <linux/usb/chipidea.h>
36 /* control endpoint description */
37 static const struct usb_endpoint_descriptor
38 ctrl_endpt_out_desc = {
39 .bLength = USB_DT_ENDPOINT_SIZE,
40 .bDescriptorType = USB_DT_ENDPOINT,
42 .bEndpointAddress = USB_DIR_OUT,
43 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
44 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
47 static const struct usb_endpoint_descriptor
48 ctrl_endpt_in_desc = {
49 .bLength = USB_DT_ENDPOINT_SIZE,
50 .bDescriptorType = USB_DT_ENDPOINT,
52 .bEndpointAddress = USB_DIR_IN,
53 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
54 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX/OUT, nonzero = TX/IN)
 *
 * This function returns the bit number: TX endpoints occupy the upper
 * 16 bits of the dual-purpose endpoint registers, RX the lower 16.
 */
static inline int hw_ep_bit(int num, int dir)
{
	return num + (dir ? 16 : 0);
}
69 static inline int ep_to_bit(struct ci13xxx *udc, int n)
71 int fill = 16 - udc->hw_ep_max / 2;
73 if (n >= udc->hw_ep_max / 2)
80 * hw_device_state: enables/disables interrupts & starts/stops device (execute
81 * without interruption)
82 * @dma: 0 => disable, !0 => enable and set dma engine
84 * This function returns an error code
86 static int hw_device_state(struct ci13xxx *udc, u32 dma)
89 hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
90 /* interrupt, error, port change, reset, sleep/suspend */
91 hw_write(udc, OP_USBINTR, ~0,
92 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
93 hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
95 hw_write(udc, OP_USBCMD, USBCMD_RS, 0);
96 hw_write(udc, OP_USBINTR, ~0, 0);
102 * hw_ep_flush: flush endpoint fifo (execute without interruption)
103 * @num: endpoint number
104 * @dir: endpoint direction
106 * This function returns an error code
108 static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
110 int n = hw_ep_bit(num, dir);
113 /* flush any pending transfer */
114 hw_write(udc, OP_ENDPTFLUSH, BIT(n), BIT(n));
115 while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
117 } while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));
123 * hw_ep_disable: disables endpoint (execute without interruption)
124 * @num: endpoint number
125 * @dir: endpoint direction
127 * This function returns an error code
129 static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
131 hw_ep_flush(udc, num, dir);
132 hw_write(udc, OP_ENDPTCTRL + num,
133 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
138 * hw_ep_enable: enables endpoint (execute without interruption)
139 * @num: endpoint number
140 * @dir: endpoint direction
141 * @type: endpoint type
143 * This function returns an error code
145 static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
150 mask = ENDPTCTRL_TXT; /* type */
151 data = type << ffs_nr(mask);
153 mask |= ENDPTCTRL_TXS; /* unstall */
154 mask |= ENDPTCTRL_TXR; /* reset data toggle */
155 data |= ENDPTCTRL_TXR;
156 mask |= ENDPTCTRL_TXE; /* enable */
157 data |= ENDPTCTRL_TXE;
159 mask = ENDPTCTRL_RXT; /* type */
160 data = type << ffs_nr(mask);
162 mask |= ENDPTCTRL_RXS; /* unstall */
163 mask |= ENDPTCTRL_RXR; /* reset data toggle */
164 data |= ENDPTCTRL_RXR;
165 mask |= ENDPTCTRL_RXE; /* enable */
166 data |= ENDPTCTRL_RXE;
168 hw_write(udc, OP_ENDPTCTRL + num, mask, data);
173 * hw_ep_get_halt: return endpoint halt status
174 * @num: endpoint number
175 * @dir: endpoint direction
177 * This function returns 1 if endpoint halted
179 static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
181 u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
183 return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
187 * hw_test_and_clear_setup_status: test & clear setup status (execute without
189 * @n: endpoint number
191 * This function returns setup status
193 static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
195 n = ep_to_bit(udc, n);
196 return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
200 * hw_ep_prime: primes endpoint (execute without interruption)
201 * @num: endpoint number
202 * @dir: endpoint direction
203 * @is_ctrl: true if control endpoint
205 * This function returns an error code
207 static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
209 int n = hw_ep_bit(num, dir);
211 if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
214 hw_write(udc, OP_ENDPTPRIME, BIT(n), BIT(n));
216 while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
218 if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
221 /* status shoult be tested according with manual but it doesn't work */
226 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
227 * without interruption)
228 * @num: endpoint number
229 * @dir: endpoint direction
230 * @value: true => stall, false => unstall
232 * This function returns an error code
234 static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
236 if (value != 0 && value != 1)
240 enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
241 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
242 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
244 /* data toggle - reserved for EP0 but it's in ESS */
245 hw_write(udc, reg, mask_xs|mask_xr,
246 value ? mask_xs : mask_xr);
247 } while (value != hw_ep_get_halt(udc, num, dir));
253 * hw_is_port_high_speed: test if port is high speed
255 * This function returns true if high speed port
257 static int hw_port_is_high_speed(struct ci13xxx *udc)
259 return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
260 hw_read(udc, OP_PORTSC, PORTSC_HSP);
264 * hw_read_intr_enable: returns interrupt enable register
266 * This function returns register data
268 static u32 hw_read_intr_enable(struct ci13xxx *udc)
270 return hw_read(udc, OP_USBINTR, ~0);
274 * hw_read_intr_status: returns interrupt status register
276 * This function returns register data
278 static u32 hw_read_intr_status(struct ci13xxx *udc)
280 return hw_read(udc, OP_USBSTS, ~0);
284 * hw_test_and_clear_complete: test & clear complete status (execute without
286 * @n: endpoint number
288 * This function returns complete status
290 static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
292 n = ep_to_bit(udc, n);
293 return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
297 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
298 * without interruption)
300 * This function returns active interrutps
302 static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
304 u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
306 hw_write(udc, OP_USBSTS, ~0, reg);
311 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
314 * This function returns guard value
316 static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
318 return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
322 * hw_test_and_set_setup_guard: test & set setup guard (execute without
325 * This function returns guard value
327 static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
329 return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
333 * hw_usb_set_address: configures USB address (execute without interruption)
334 * @value: new USB address
336 * This function explicitly sets the address, without the "USBADRA" (advance)
337 * feature, which is not supported by older versions of the controller.
339 static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
341 hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
342 value << ffs_nr(DEVICEADDR_USBADR));
346 * hw_usb_reset: restart device after a bus reset (execute without
349 * This function returns an error code
351 static int hw_usb_reset(struct ci13xxx *udc)
353 hw_usb_set_address(udc, 0);
355 /* ESS flushes only at end?!? */
356 hw_write(udc, OP_ENDPTFLUSH, ~0, ~0);
358 /* clear setup token semaphores */
359 hw_write(udc, OP_ENDPTSETUPSTAT, 0, 0);
361 /* clear complete status */
362 hw_write(udc, OP_ENDPTCOMPLETE, 0, 0);
364 /* wait until all bits cleared */
365 while (hw_read(udc, OP_ENDPTPRIME, ~0))
366 udelay(10); /* not RTOS friendly */
368 /* reset all endpoints ? */
370 /* reset internal status and wait for further instructions
371 no need to verify the port reset status (ESS does it) */
376 /******************************************************************************
378 *****************************************************************************/
380 * _usb_addr: calculates endpoint address from direction & number
383 static inline u8 _usb_addr(struct ci13xxx_ep *ep)
385 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp:  endpoint the request is queued on
 * @mReq: request to program into the TD/QH lists
 *
 * This function returns an error code
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, returns and some statements are missing); the comments below
 * describe only the fragments that are visible.
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
struct ci13xxx *udc = mEp->udc;
unsigned length = mReq->req.length;
/* don't queue twice */
if (mReq->req.status == -EALREADY)
/* mark the request as owned by the hardware from here on */
mReq->req.status = -EALREADY;
/* map the buffer for DMA if the gadget layer hasn't already */
if (length && mReq->req.dma == DMA_ADDR_INVALID) {
dma_map_single(mEp->device, mReq->req.buf,
length, mEp->dir ? DMA_TO_DEVICE :
if (mReq->req.dma == 0)
/* req.zero on an exact multiple of maxpacket: append a ZLP TD */
if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
if (mReq->zptr == NULL) {
/* allocation failed: undo the DMA mapping done above */
dma_unmap_single(mEp->device, mReq->req.dma,
length, mEp->dir ? DMA_TO_DEVICE :
mReq->req.dma = DMA_ADDR_INVALID;
memset(mReq->zptr, 0, sizeof(*mReq->zptr));
mReq->zptr->next = TD_TERMINATE;
mReq->zptr->token = TD_STATUS_ACTIVE;
if (!mReq->req.no_interrupt)
mReq->zptr->token |= TD_IOC;
/*
 * TODO - handle requests which spawn into several TDs
 */
memset(mReq->ptr, 0, sizeof(*mReq->ptr));
/* program total bytes and ACTIVE status into the TD token */
mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
mReq->ptr->token &= TD_TOTAL_BYTES;
mReq->ptr->token |= TD_STATUS_ACTIVE;
/* chain to the ZLP TD when present, otherwise terminate the list */
mReq->ptr->next = mReq->zdma;
mReq->ptr->next = TD_TERMINATE;
if (!mReq->req.no_interrupt)
mReq->ptr->token |= TD_IOC;
/* fill the five TD buffer-page pointers (4 KiB pages) */
mReq->ptr->page[0] = mReq->req.dma;
for (i = 1; i < 5; i++)
(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
/* queue not empty: link after the previous TD, using the ATDTW
 * (add-dTD tripwire) handshake if the endpoint is already primed */
if (!list_empty(&mEp->qh.queue)) {
struct ci13xxx_req *mReqPrev;
int n = hw_ep_bit(mEp->num, mEp->dir);
mReqPrev = list_entry(mEp->qh.queue.prev,
struct ci13xxx_req, queue);
mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
} while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
/* QH configuration */
mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
mEp->qh.ptr->cap |= QH_ZLT;
wmb(); /* synchronize before ep prime */
ret = hw_ep_prime(udc, mEp->num, mEp->dir,
mEp->type == USB_ENDPOINT_XFER_CONTROL);
/**
 * _hardware_dequeue: handles a request at hardware level
 * @mEp:  endpoint
 * @mReq: request being retired
 *
 * This function returns an error code
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces and early returns are missing); comments describe the visible
 * fragments only.
 */
static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
/* only requests previously handed to the hardware can be retired */
if (mReq->req.status != -EALREADY)
/* TD still ACTIVE: the hardware has not finished it yet */
if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
/* release the ZLP TD allocated at enqueue time */
dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
mReq->req.status = 0;
/* unmap the DMA buffer now that the transfer is done */
dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
mReq->req.dma = DMA_ADDR_INVALID;
/* translate the TD status bits into a request status */
mReq->req.status = mReq->ptr->token & TD_STATUS;
if ((TD_STATUS_HALTED & mReq->req.status) != 0)
mReq->req.status = -1;
else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
mReq->req.status = -1;
else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
mReq->req.status = -1;
/* actual = requested length minus the residue left in the token */
mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
mReq->req.actual = mReq->req.length - mReq->req.actual;
mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
return mReq->req.actual;
/**
 * _ep_nuke: dequeues all endpoint requests
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces and the final return are missing).
 */
static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
/* abort anything in flight before walking the queue */
hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
while (!list_empty(&mEp->qh.queue)) {
/* pop oldest request */
struct ci13xxx_req *mReq = \
list_entry(mEp->qh.queue.next,
struct ci13xxx_req, queue);
list_del_init(&mReq->queue);
mReq->req.status = -ESHUTDOWN;
/* completion callbacks run without the endpoint lock held */
if (mReq->req.complete != NULL) {
spin_unlock(mEp->lock);
mReq->req.complete(&mEp->ep, &mReq->req);
spin_lock(mEp->lock);
/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, some conditionals and the return are missing).
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
spin_lock_irqsave(&udc->lock, flags);
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->remote_wakeup = 0;
spin_unlock_irqrestore(&udc->lock, flags);
/* flush all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_fifo_flush(ep);
/* ep0 is not on the gadget's ep_list, flush it explicitly */
usb_ep_fifo_flush(&udc->ep0out->ep);
usb_ep_fifo_flush(&udc->ep0in->ep);
udc->driver->disconnect(gadget);
/* make sure to disable all endpoints */
gadget_for_each_ep(ep, gadget) {
/* release the ep0 status request allocated on bus reset */
if (udc->status != NULL) {
usb_ep_free_request(&udc->ep0in->ep, udc->status);
618 /******************************************************************************
620 *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @udc: UDC descriptor
 *
 * This function resets USB engine after a bus reset occurred
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, error-goto paths and the trailing labels are missing).
 */
static void isr_reset_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
dbg_event(0xFF, "BUS RST", 0);
/* stop activity with the lock dropped; callbacks may sleep-lock */
spin_unlock(&udc->lock);
retval = _gadget_stop_activity(&udc->gadget);
retval = hw_usb_reset(udc);
/* pre-allocate the request used for ep0 status stages */
udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
if (udc->status == NULL)
spin_lock(&udc->lock);
dev_err(udc->dev, "error: %i\n", retval);
656 * isr_get_status_complete: get_status request complete function
658 * @req: request handled
660 * Caller must release lock
662 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
664 if (ep == NULL || req == NULL)
668 usb_ep_free_request(ep, req);
/**
 * isr_get_status_response: get_status request response
 * @udc: UDC descriptor
 * @setup: setup request packet
 *
 * This function returns an error code
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, early returns and the error path labels are missing).
 */
static int isr_get_status_response(struct ci13xxx *udc,
struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
struct ci13xxx_ep *mEp = udc->ep0in;
struct usb_request *req = NULL;
gfp_t gfp_flags = GFP_ATOMIC;
int dir, num, retval;
if (mEp == NULL || setup == NULL)
/* allocation may not run under the endpoint lock */
spin_unlock(mEp->lock);
req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
spin_lock(mEp->lock);
req->complete = isr_get_status_complete;
/* 2-byte status payload; freed in isr_get_status_complete() */
req->buf = kzalloc(req->length, gfp_flags);
if (req->buf == NULL) {
if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
/* Assume that device is bus powered for now. */
*(u16 *)req->buf = udc->remote_wakeup << 1;
} else if ((setup->bRequestType & USB_RECIP_MASK) \
== USB_RECIP_ENDPOINT) {
dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
*(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
/* else do nothing; reserved for future use */
spin_unlock(mEp->lock);
retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
spin_lock(mEp->lock);
/* error path: free the request with the lock dropped */
spin_unlock(mEp->lock);
usb_ep_free_request(&mEp->ep, req);
spin_lock(mEp->lock);
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (the "static void" storage line, braces and the guarding conditionals
 * are missing).
 */
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
struct ci13xxx *udc = req->context;
/* deferred SET_ADDRESS takes effect after the status stage */
hw_usb_set_address(udc, udc->address);
udc->setaddr = false;
spin_lock_irqsave(&udc->lock, flags);
hw_port_test_set(udc, udc->test_mode);
spin_unlock_irqrestore(&udc->lock, flags);
/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @udc: UDC descriptor
 *
 * This function returns an error code
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, retval declaration and return are missing).
 */
static int isr_setup_status_phase(struct ci13xxx *udc)
__releases(mEp->lock)
__acquires(mEp->lock)
struct ci13xxx_ep *mEp;
/* the status stage runs in the opposite direction of the data stage */
mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
udc->status->context = udc;
udc->status->complete = isr_setup_status_complete;
spin_unlock(mEp->lock);
retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
spin_lock(mEp->lock);
/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, loop breaks and the return are missing).
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
struct ci13xxx_req *mReq, *mReqTemp;
struct ci13xxx_ep *mEpTemp = mEp;
int uninitialized_var(retval);
if (list_empty(&mEp->qh.queue))
/* retire completed requests in FIFO order */
list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
retval = _hardware_dequeue(mEp, mReq);
list_del_init(&mReq->queue);
dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
/* completion callbacks run without the endpoint lock held */
if (mReq->req.complete != NULL) {
spin_unlock(mEp->lock);
/* control transfers always complete on the ep0 IN side */
if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
mEpTemp = mEp->udc->ep0in;
mReq->req.complete(&mEpTemp->ep, &mReq->req);
spin_lock(mEp->lock);
if (retval == -EBUSY)
dbg_event(_usb_addr(mEp), "DONE", retval);
/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @udc: UDC descriptor
 *
 * This function handles traffic events
 *
 * NOTE(review): extraction appears to have dropped many lines from this
 * block (braces, gotos, break statements and several conditionals are
 * missing); comments describe the visible fragments only.
 */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
for (i = 0; i < udc->hw_ep_max; i++) {
struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
int type, num, dir, err = -EINVAL;
struct usb_ctrlrequest req;
if (mEp->ep.desc == NULL)
continue; /* not configured */
/* first: retire any completed TDs on this endpoint */
if (hw_test_and_clear_complete(udc, i)) {
err = isr_tr_complete_low(mEp);
if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
if (err > 0) /* needs status phase */
err = isr_setup_status_phase(udc);
/* error path: stall ep0 with the lock dropped */
dbg_event(_usb_addr(mEp),
spin_unlock(&udc->lock);
if (usb_ep_set_halt(&mEp->ep))
"error: ep_set_halt\n");
spin_lock(&udc->lock);
/* second: only control endpoints handle setup packets */
if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
!hw_test_and_clear_setup_status(udc, i))
dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
/*
 * Flush data and handshake transactions of previous
 * setup packet.
 */
_ep_nuke(udc->ep0out);
_ep_nuke(udc->ep0in);
/* read_setup_packet: guard against the packet being
 * overwritten while we copy it */
hw_test_and_set_setup_guard(udc);
memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
} while (!hw_test_and_clear_setup_guard(udc));
type = req.bRequestType;
udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
dbg_setup(_usb_addr(mEp), &req);
/* standard requests handled here; everything else is
 * delegated to the gadget driver's setup() below */
switch (req.bRequest) {
case USB_REQ_CLEAR_FEATURE:
if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
le16_to_cpu(req.wValue) ==
if (req.wLength != 0)
num = le16_to_cpu(req.wIndex);
dir = num & USB_ENDPOINT_DIR_MASK;
num &= USB_ENDPOINT_NUMBER_MASK;
/* IN endpoints live in the upper half of the array */
num += udc->hw_ep_max/2;
/* a wedged endpoint ignores CLEAR_HALT */
if (!udc->ci13xxx_ep[num].wedge) {
spin_unlock(&udc->lock);
err = usb_ep_clear_halt(
&udc->ci13xxx_ep[num].ep);
spin_lock(&udc->lock);
err = isr_setup_status_phase(udc);
} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
le16_to_cpu(req.wValue) ==
USB_DEVICE_REMOTE_WAKEUP) {
if (req.wLength != 0)
udc->remote_wakeup = 0;
err = isr_setup_status_phase(udc);
case USB_REQ_GET_STATUS:
if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
type != (USB_DIR_IN|USB_RECIP_INTERFACE))
if (le16_to_cpu(req.wLength) != 2 ||
le16_to_cpu(req.wValue) != 0)
err = isr_get_status_response(udc, &req);
case USB_REQ_SET_ADDRESS:
if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
if (le16_to_cpu(req.wLength) != 0 ||
le16_to_cpu(req.wIndex) != 0)
/* address change is deferred to the status stage */
udc->address = (u8)le16_to_cpu(req.wValue);
err = isr_setup_status_phase(udc);
case USB_REQ_SET_FEATURE:
if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
le16_to_cpu(req.wValue) ==
if (req.wLength != 0)
num = le16_to_cpu(req.wIndex);
dir = num & USB_ENDPOINT_DIR_MASK;
num &= USB_ENDPOINT_NUMBER_MASK;
num += udc->hw_ep_max/2;
spin_unlock(&udc->lock);
err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
spin_lock(&udc->lock);
isr_setup_status_phase(udc);
} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
if (req.wLength != 0)
switch (le16_to_cpu(req.wValue)) {
case USB_DEVICE_REMOTE_WAKEUP:
udc->remote_wakeup = 1;
err = isr_setup_status_phase(udc);
case USB_DEVICE_TEST_MODE:
/* test selector is in the high byte of wIndex */
tmode = le16_to_cpu(req.wIndex) >> 8;
udc->test_mode = tmode;
err = isr_setup_status_phase(
/* non-standard / delegated requests */
if (req.wLength == 0) /* no data phase */
spin_unlock(&udc->lock);
err = udc->driver->setup(&udc->gadget, &req);
spin_lock(&udc->lock);
/* on error: protocol stall on ep0 */
dbg_event(_usb_addr(mEp), "ERROR", err);
spin_unlock(&udc->lock);
if (usb_ep_set_halt(&mEp->ep))
dev_err(udc->dev, "error: ep_set_halt\n");
spin_lock(&udc->lock);
1014 /******************************************************************************
1016 *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, retval declaration, early return and the final return).
 */
static int ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
unsigned long flags;
if (ep == NULL || desc == NULL)
spin_lock_irqsave(mEp->lock, flags);
/* only internal SW should enable ctrl endpts */
mEp->ep.desc = desc;
if (!list_empty(&mEp->qh.queue))
dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");
/* cache direction/number/type/maxpacket from the descriptor */
mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
mEp->num = usb_endpoint_num(desc);
mEp->type = usb_endpoint_type(desc);
mEp->ep.maxpacket = usb_endpoint_maxp(desc);
dbg_event(_usb_addr(mEp), "ENABLE", 0);
/* build the queue-head capability word per transfer type */
mEp->qh.ptr->cap = 0;
if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
mEp->qh.ptr->cap |= QH_IOS;
else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
mEp->qh.ptr->cap &= ~QH_MULT;
mEp->qh.ptr->cap &= ~QH_ZLT;
(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
/*
 * Enable endpoints in the HW other than ep0 as ep0
 * is always enabled
 */
retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);
spin_unlock_irqrestore(mEp->lock, flags);
/**
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, the NULL check and the final return).
 */
static int ep_disable(struct usb_ep *ep)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
int direction, retval = 0;
unsigned long flags;
else if (mEp->ep.desc == NULL)
spin_lock_irqsave(mEp->lock, flags);
/* only internal SW should disable ctrl endpts */
direction = mEp->dir;
dbg_event(_usb_addr(mEp), "DISABLE", 0);
retval |= _ep_nuke(mEp);
retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);
/* control endpoints are bidirectional: disable both halves */
if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
mEp->dir = (mEp->dir == TX) ? RX : TX;
} while (mEp->dir != direction);
mEp->ep.desc = NULL;
spin_unlock_irqrestore(mEp->lock, flags);
/**
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, NULL check and the failure cleanup path).
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
struct ci13xxx_req *mReq = NULL;
mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
INIT_LIST_HEAD(&mReq->queue);
/* sentinel: tells enqueue the buffer is not yet DMA-mapped */
mReq->req.dma = DMA_ADDR_INVALID;
/* allocate the transfer descriptor backing this request */
mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
if (mReq->ptr == NULL) {
dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
return (mReq == NULL) ? NULL : &mReq->req;
/**
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, returns and the kfree of the request).
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
unsigned long flags;
if (ep == NULL || req == NULL) {
/* refusing to free a request that is still queued */
} else if (!list_empty(&mReq->queue)) {
dev_err(mEp->udc->dev, "freeing queued request\n");
spin_lock_irqsave(mEp->lock, flags);
/* return the TD to the pool before releasing the request */
dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
dbg_event(_usb_addr(mEp), "FREE", 0);
spin_unlock_irqrestore(mEp->lock, flags);
/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, retval declaration, gotos and the final return).
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t __maybe_unused gfp_flags)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
struct ci13xxx *udc = mEp->udc;
unsigned long flags;
if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
spin_lock_irqsave(mEp->lock, flags);
if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
/* ep0 traffic is redirected per the current setup direction */
mEp = (udc->ep0_dir == RX) ?
udc->ep0out : udc->ep0in;
if (!list_empty(&mEp->qh.queue)) {
retval = -EOVERFLOW;
dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
/* first nuke then test link, e.g. previous status has not sent */
if (!list_empty(&mReq->queue)) {
dev_err(mEp->udc->dev, "request already in queue\n");
/* a single request cannot exceed the 5-page TD capacity */
if (req->length > 4 * CI13XXX_PAGE_SIZE) {
req->length = 4 * CI13XXX_PAGE_SIZE;
dev_warn(mEp->udc->dev, "request length truncated\n");
dbg_queue(_usb_addr(mEp), req, retval);
/* push request */
mReq->req.status = -EINPROGRESS;
mReq->req.actual = 0;
retval = _hardware_enqueue(mEp, mReq);
if (retval == -EALREADY) {
dbg_event(_usb_addr(mEp), "QUEUE", retval);
list_add_tail(&mReq->queue, &mEp->qh.queue);
spin_unlock_irqrestore(mEp->lock, flags);
/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, the early return and the final return).
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
unsigned long flags;
/* only in-flight (-EALREADY), still-linked requests can be cancelled */
if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
list_empty(&mEp->qh.queue))
spin_lock_irqsave(mEp->lock, flags);
dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
/* pop request */
list_del_init(&mReq->queue);
/* release the DMA mapping created at enqueue time */
dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
mReq->req.dma = DMA_ADDR_INVALID;
req->status = -ECONNRESET;
/* completion callback runs without the endpoint lock held */
if (mReq->req.complete != NULL) {
spin_unlock(mEp->lock);
mReq->req.complete(&mEp->ep, &mReq->req);
spin_lock(mEp->lock);
spin_unlock_irqrestore(mEp->lock, flags);
/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, the early return and the final return).
 */
static int ep_set_halt(struct usb_ep *ep, int value)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
int direction, retval = 0;
unsigned long flags;
if (ep == NULL || mEp->ep.desc == NULL)
spin_lock_irqsave(mEp->lock, flags);
/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
/* refuse to halt a bulk IN ep that still has queued data */
if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
!list_empty(&mEp->qh.queue)) {
spin_unlock_irqrestore(mEp->lock, flags);
direction = mEp->dir;
dbg_event(_usb_addr(mEp), "HALT", value);
retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);
/* control endpoints are bidirectional: halt both halves */
if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
mEp->dir = (mEp->dir == TX) ? RX : TX;
} while (mEp->dir != direction);
spin_unlock_irqrestore(mEp->lock, flags);
1320 * ep_set_wedge: sets the halt feature and ignores clear requests
1322 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1324 static int ep_set_wedge(struct usb_ep *ep)
1326 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1327 unsigned long flags;
1329 if (ep == NULL || mEp->ep.desc == NULL)
1332 spin_lock_irqsave(mEp->lock, flags);
1334 dbg_event(_usb_addr(mEp), "WEDGE", 0);
1337 spin_unlock_irqrestore(mEp->lock, flags);
1339 return usb_ep_set_halt(ep);
/**
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces and the NULL-check conditional around the error print).
 */
static void ep_fifo_flush(struct usb_ep *ep)
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
unsigned long flags;
dev_err(mEp->udc->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
spin_lock_irqsave(mEp->lock, flags);
dbg_event(_usb_addr(mEp), "FFLUSH", 0);
hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
spin_unlock_irqrestore(mEp->lock, flags);
1366 * Endpoint-specific part of the API to the USB controller hardware
1367 * Check "usb_gadget.h" for details
1369 static const struct usb_ep_ops usb_ep_ops = {
1370 .enable = ep_enable,
1371 .disable = ep_disable,
1372 .alloc_request = ep_alloc_request,
1373 .free_request = ep_free_request,
1375 .dequeue = ep_dequeue,
1376 .set_halt = ep_set_halt,
1377 .set_wedge = ep_set_wedge,
1378 .fifo_flush = ep_fifo_flush,
1381 /******************************************************************************
1383 *****************************************************************************/
/*
 * ci13xxx_vbus_session: called when VBUS state changes; starts or stops
 * the controller accordingly (only when pull-up is VBUS-gated).
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, the gadget_ready tracking and the final return).
 */
static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
unsigned long flags;
int gadget_ready = 0;
/* nothing to do unless pull-up is controlled by VBUS */
if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
spin_lock_irqsave(&udc->lock, flags);
udc->vbus_active = is_active;
spin_unlock_irqrestore(&udc->lock, flags);
/* VBUS present: power up and start the device controller */
pm_runtime_get_sync(&_gadget->dev);
hw_device_reset(udc);
hw_device_state(udc, udc->ep0out->qh.dma);
/* VBUS gone: stop controller, notify, tear down activity */
hw_device_state(udc, 0);
if (udc->udc_driver->notify_event)
udc->udc_driver->notify_event(udc,
CI13XXX_CONTROLLER_STOPPED_EVENT);
_gadget_stop_activity(&udc->gadget);
pm_runtime_put_sync(&_gadget->dev);
/*
 * ci13xxx_wakeup: initiates remote-wakeup signalling on the port.
 *
 * NOTE(review): extraction appears to have dropped lines from this block
 * (braces, error returns and the final return).
 */
static int ci13xxx_wakeup(struct usb_gadget *_gadget)
struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
/* host must have enabled the remote-wakeup feature first */
if (!udc->remote_wakeup) {
/* wakeup signalling is only meaningful while suspended */
if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
/* force port resume */
hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
spin_unlock_irqrestore(&udc->lock, flags);
/*
 * ci13xxx_vbus_draw: gadget ->vbus_draw hook.
 * @mA: current budget negotiated with the host during enumeration.
 *
 * Delegates to the transceiver (PHY) when one is present; the
 * no-transceiver return value is on an elided line.
 */
1438 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1440 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1442 if (udc->transceiver)
1443 return usb_phy_set_power(udc->transceiver, mA);
1447 static int ci13xxx_start(struct usb_gadget *gadget,
1448 struct usb_gadget_driver *driver);
1449 static int ci13xxx_stop(struct usb_gadget *gadget,
1450 struct usb_gadget_driver *driver);
1452 * Device operations part of the API to the USB controller hardware,
1453 * which don't involve endpoints (or i/o)
1454 * Check "usb_gadget.h" for details
/*
 * Device-level (non-endpoint) operations handed to the gadget core;
 * see usb_gadget.h for the contract of each hook.  Endpoint-level
 * operations live in usb_ep_ops.
 */
1456 static const struct usb_gadget_ops usb_gadget_ops = {
1457 .vbus_session = ci13xxx_vbus_session,
1458 .wakeup = ci13xxx_wakeup,
1459 .vbus_draw = ci13xxx_vbus_draw,
1460 .udc_start = ci13xxx_start,
1461 .udc_stop = ci13xxx_stop,
/*
 * init_eps: initialise every hardware endpoint's software state.
 *
 * Iterates endpoint numbers 0..hw_ep_max/2-1 in both directions (RX
 * then TX), naming each "ep<N>in"/"ep<N>out", wiring in the shared
 * lock, the ops table and the DMA pools, and allocating + zeroing a
 * queue head from qh_pool per endpoint.  ep0's two halves are kept as
 * shorthands and NOT placed on the gadget's ep_list; all others are
 * appended to it.  Returns 0 or a negative errno (allocation-failure
 * path is on elided lines).
 */
1464 static int init_eps(struct ci13xxx *udc)
1466 int retval = 0, i, j;
1468 for (i = 0; i < udc->hw_ep_max/2; i++)
1469 for (j = RX; j <= TX; j++) {
/* Flat index: TX endpoints occupy the upper half of ci13xxx_ep[]. */
1470 int k = i + j * udc->hw_ep_max/2;
1471 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
1473 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
1474 (j == TX) ? "in" : "out");
1477 mEp->lock = &udc->lock;
1478 mEp->device = &udc->gadget.dev;
1479 mEp->td_pool = udc->td_pool;
1481 mEp->ep.name = mEp->name;
1482 mEp->ep.ops = &usb_ep_ops;
/* Provisional maxpacket; ep_enable refines it from the descriptor. */
1483 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
1485 INIT_LIST_HEAD(&mEp->qh.queue);
1486 mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
1488 if (mEp->qh.ptr == NULL)
1491 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
1494 * set up shorthands for ep0 out and in endpoints,
1495 * don't add to gadget's ep_list
1506 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
1513 * ci13xxx_start: register a gadget driver
1514 * @gadget: our gadget
1515 * @driver: the driver being registered
1517 * Interrupts are enabled here.
/*
 * ci13xxx_start: gadget ->udc_start hook — bind a gadget driver.
 *
 * Enables both halves of ep0 with the static control descriptors,
 * stores the driver pointer and, unless the controller is gated on a
 * not-yet-present VBUS, programs the run state with DMA pointed at the
 * ep0-out queue head.  Interrupts are enabled on return.
 * NOTE(review): validation returns, error gotos and closing braces are
 * on elided lines in this excerpt.
 */
1519 static int ci13xxx_start(struct usb_gadget *gadget,
1520 struct usb_gadget_driver *driver)
1522 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1523 unsigned long flags;
1524 int retval = -ENOMEM;
/* A usable gadget driver must at least handle disconnect. */
1526 if (driver->disconnect == NULL)
1530 udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
1531 retval = usb_ep_enable(&udc->ep0out->ep);
1535 udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
1536 retval = usb_ep_enable(&udc->ep0in->ep);
1539 spin_lock_irqsave(&udc->lock, flags);
1541 udc->driver = driver;
1542 pm_runtime_get_sync(&udc->gadget.dev);
1543 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
1544 if (udc->vbus_active) {
/* Shared registers were not reset at probe; do it now. */
1545 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
1546 hw_device_reset(udc);
/* No VBUS yet: vbus_session will start the core later. */
1548 pm_runtime_put_sync(&udc->gadget.dev);
1553 retval = hw_device_state(udc, udc->ep0out->qh.dma);
1555 pm_runtime_put_sync(&udc->gadget.dev);
1558 spin_unlock_irqrestore(&udc->lock, flags);
1563 * ci13xxx_stop: unregister a gadget driver
/*
 * ci13xxx_stop: gadget ->udc_stop hook — unbind the gadget driver.
 *
 * Stops the controller (unless it is VBUS-gated and VBUS is already
 * gone), notifies the platform glue, and tears down gadget activity
 * with the lock dropped, since _gadget_stop_activity may call back
 * into the driver.  Clearing of udc->driver is on elided lines.
 */
1565 static int ci13xxx_stop(struct usb_gadget *gadget,
1566 struct usb_gadget_driver *driver)
1568 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1569 unsigned long flags;
1571 spin_lock_irqsave(&udc->lock, flags);
/* Skip the hardware stop when vbus_session already performed it. */
1573 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
1575 hw_device_state(udc, 0);
1576 if (udc->udc_driver->notify_event)
1577 udc->udc_driver->notify_event(udc,
1578 CI13XXX_CONTROLLER_STOPPED_EVENT);
/* Drop the lock around the driver callback path. */
1580 spin_unlock_irqrestore(&udc->lock, flags);
1581 _gadget_stop_activity(&udc->gadget);
1582 spin_lock_irqsave(&udc->lock, flags);
/* Balances the pm_runtime_get_sync() taken in ci13xxx_start. */
1583 pm_runtime_put(&udc->gadget.dev);
1586 spin_unlock_irqrestore(&udc->lock, flags);
1591 /******************************************************************************
1593 *****************************************************************************/
1595 * udc_irq: global interrupt handler
1597 * This function returns IRQ_HANDLED if the IRQ has been handled
1598 * It locks access to registers
/*
 * udc_irq: top-level device-mode interrupt handler.
 * @data: the struct ci13xxx registered with request_irq.
 *
 * Reads-and-clears the active interrupt bits, then dispatches in fixed
 * priority order: bus reset (URI), port change (PCI, which also latches
 * the negotiated speed and resumes the driver), transfer completion
 * (UI), and suspend (SLI).  Driver resume/suspend callbacks run with
 * the lock dropped.  Returns IRQ_HANDLED for handled interrupts;
 * IRQ_NONE paths are on elided lines.
 */
1600 irqreturn_t udc_irq(int irq, void *data)
1602 struct ci13xxx *udc = data;
1609 spin_lock(&udc->lock);
/* With shared host/device registers, bail out unless in device mode. */
1611 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
1612 if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
1613 USBMODE_CM_DEVICE) {
1614 spin_unlock(&udc->lock);
/* Atomically fetch and acknowledge the pending interrupt bits. */
1618 intr = hw_test_and_clear_intr_active(udc);
1619 dbg_interrupt(intr);
1622 /* order defines priority - do NOT change it */
1623 if (USBi_URI & intr)
1624 isr_reset_handler(udc);
1626 if (USBi_PCI & intr) {
/* Port change: record the speed the hardware negotiated. */
1627 udc->gadget.speed = hw_port_is_high_speed(udc) ?
1628 USB_SPEED_HIGH : USB_SPEED_FULL;
1629 if (udc->suspended && udc->driver->resume) {
1630 spin_unlock(&udc->lock);
1631 udc->driver->resume(&udc->gadget);
1632 spin_lock(&udc->lock);
1638 isr_tr_complete_handler(udc);
1640 if (USBi_SLI & intr) {
1641 if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
1642 udc->driver->suspend) {
1644 spin_unlock(&udc->lock);
1645 udc->driver->suspend(&udc->gadget);
1646 spin_lock(&udc->lock);
1649 retval = IRQ_HANDLED;
1653 spin_unlock(&udc->lock);
1659 * udc_release: driver release function
1662 * Currently does nothing
1664 static void udc_release(struct device *dev)
1669 * udc_probe: parent probe must call this to initialize UDC
1670 * @dev: parent device
1671 * @regs: registers base address
1672 * @name: driver name
1674 * This function returns an error code
1675 * No interrupts active, the IRQ has not been requested yet
1676 * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
/*
 * udc_probe: parent probe must call this to initialise the UDC.
 * @driver: platform glue descriptor (name, flags, callbacks)
 * @dev:    parent device
 * @regs:   mapped register base
 * @_udc:   out-parameter receiving the allocated ci13xxx
 *
 * Allocates and wires up the gadget, creates the QH/TD DMA pools,
 * initialises the hardware and endpoints, optionally acquires a
 * transceiver, registers the gadget device and debugfs files, binds
 * to the OTG transceiver if present, and registers with the UDC core.
 * Cleanup runs in reverse via goto labels (label lines are elided in
 * this excerpt).  Returns 0 or a negative errno.
 */
1678 int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
1679 void __iomem *regs, struct ci13xxx **_udc)
1681 struct ci13xxx *udc;
1684 if (dev == NULL || regs == NULL || driver == NULL ||
1685 driver->name == NULL)
1688 udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
1692 spin_lock_init(&udc->lock);
1694 udc->udc_driver = driver;
1696 udc->gadget.ops = &usb_gadget_ops;
1697 udc->gadget.speed = USB_SPEED_UNKNOWN;
1698 udc->gadget.max_speed = USB_SPEED_HIGH;
1699 udc->gadget.is_otg = 0;
1700 udc->gadget.name = driver->name;
1702 INIT_LIST_HEAD(&udc->gadget.ep_list);
1704 dev_set_name(&udc->gadget.dev, "gadget");
/* Inherit DMA capabilities from the parent controller device. */
1705 udc->gadget.dev.dma_mask = dev->dma_mask;
1706 udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
1707 udc->gadget.dev.parent = dev;
1708 udc->gadget.dev.release = udc_release;
1712 /* alloc resources */
1713 udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
1714 sizeof(struct ci13xxx_qh),
1715 64, CI13XXX_PAGE_SIZE);
1716 if (udc->qh_pool == NULL) {
1721 udc->td_pool = dma_pool_create("ci13xxx_td", dev,
1722 sizeof(struct ci13xxx_td),
1723 64, CI13XXX_PAGE_SIZE);
1724 if (udc->td_pool == NULL) {
1729 retval = hw_device_init(udc, regs, driver->capoffset);
1733 retval = init_eps(udc);
/* ep0-in is the gadget core's notion of "ep0". */
1737 udc->gadget.ep0 = &udc->ep0in->ep;
1739 udc->transceiver = usb_get_transceiver();
1741 if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
1742 if (udc->transceiver == NULL) {
/* Non-shared registers can be reset once here, at probe time. */
1748 if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
1749 retval = hw_device_reset(udc);
1751 goto put_transceiver;
1754 retval = device_register(&udc->gadget.dev);
1756 put_device(&udc->gadget.dev);
1757 goto put_transceiver;
1760 retval = dbg_create_files(&udc->gadget.dev);
1764 if (udc->transceiver) {
1765 retval = otg_set_peripheral(udc->transceiver->otg,
1771 retval = usb_add_gadget_udc(dev, &udc->gadget);
/* Runtime PM is used purely for refcounting; no callbacks needed. */
1775 pm_runtime_no_callbacks(&udc->gadget.dev);
1776 pm_runtime_enable(&udc->gadget.dev);
/*
 * Error unwinding below (labels elided).
 * NOTE(review): this cleanup passes &udc->gadget to
 * otg_set_peripheral(); unbinding a peripheral normally passes NULL
 * — looks like a latent bug, confirm against the OTG API.
 */
1782 if (udc->transceiver) {
1783 otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
1784 usb_put_transceiver(udc->transceiver);
1787 dev_err(dev, "error = %i\n", retval);
1789 dbg_remove_files(&udc->gadget.dev);
1791 device_unregister(&udc->gadget.dev);
1793 if (udc->transceiver)
1794 usb_put_transceiver(udc->transceiver);
1796 dma_pool_destroy(udc->td_pool);
1798 dma_pool_destroy(udc->qh_pool);
1806 * udc_remove: parent remove must call this to remove UDC
1808 * No interrupts active, the IRQ has been released
/*
 * udc_remove: parent remove must call this to tear down the UDC.
 *
 * Reverses udc_probe: unregisters from the UDC core, frees each
 * endpoint's queue head back to qh_pool, destroys both DMA pools,
 * releases the transceiver, removes debugfs files and unregisters the
 * gadget device.  Called with no interrupts active and the IRQ freed.
 */
1810 void udc_remove(struct ci13xxx *udc)
1817 usb_del_gadget_udc(&udc->gadget);
1819 for (i = 0; i < udc->hw_ep_max; i++) {
1820 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
1822 dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
1825 dma_pool_destroy(udc->td_pool);
1826 dma_pool_destroy(udc->qh_pool);
1828 if (udc->transceiver) {
/*
 * NOTE(review): passes &udc->gadget while tearing down; unbinding
 * a peripheral normally passes NULL to otg_set_peripheral() —
 * confirm against the OTG API.
 */
1829 otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
1830 usb_put_transceiver(udc->transceiver);
1832 dbg_remove_files(&udc->gadget.dev);
1833 device_unregister(&udc->gadget.dev);
/* Frees the register map allocated by hw_device_init(). */
1835 kfree(udc->hw_bank.regmap);