staging: HCD files for the DWC2 driver
Author:     Paul Zimmerman <Paul.Zimmerman@synopsys.com>
AuthorDate: Tue, 12 Mar 2013 00:47:59 +0000 (17:47 -0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Tue, 12 Mar 2013 01:16:37 +0000 (18:16 -0700)
These files contain the HCD code, and implement the Linux
hc_driver API. Support for both slave mode and buffer DMA mode
of the controller is included.

Signed-off-by: Paul Zimmerman <paulz@synopsys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/dwc2/hcd.c [new file with mode: 0644]
drivers/staging/dwc2/hcd.h [new file with mode: 0644]
drivers/staging/dwc2/hcd_intr.c [new file with mode: 0644]
drivers/staging/dwc2/hcd_queue.c [new file with mode: 0644]
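As a sketch of what "implementing the Linux hc_driver API" looks like from the
USB core's side (illustrative only, not part of this patch; the helper name and
the exact hc_driver contents are placeholders for the callbacks these files
provide), such a host controller driver is registered roughly like this:

#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "core.h"	/* driver-local, for struct dwc2_hsotg */

static const struct hc_driver dwc2_hc_driver = {
	.description	= "dwc2_hsotg",
	.product_desc	= "DWC OTG Controller",
	.flags		= HCD_MEMORY | HCD_USB2,
	/*
	 * .irq, .urb_enqueue, .urb_dequeue, .hub_status_data, .hub_control,
	 * etc. would point at the wrappers implemented in hcd.c
	 */
};

/* Hypothetical helper showing the registration pattern */
static int dwc2_register_hcd(struct dwc2_hsotg *hsotg, int irq)
{
	struct usb_hcd *hcd;
	int ret;

	/* Allocate a usb_hcd bound to the hc_driver above */
	hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev,
			     dev_name(hsotg->dev));
	if (!hcd)
		return -ENOMEM;

	/* Hand it to the USB core; the root hub becomes visible here */
	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		usb_put_hcd(hcd);
	return ret;
}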

diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c
new file mode 100644
index 0000000..cdb142d
--- /dev/null
+++ b/drivers/staging/dwc2/hcd.c
@@ -0,0 +1,2951 @@
+/*
+ * hcd.c - DesignWare HS OTG Controller host-mode routines
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains the core HCD code, and implements the Linux hc_driver
+ * API
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/**
+ * dwc2_dump_channel_info() - Prints the state of a host channel
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan:  Pointer to the channel to dump
+ *
+ * Must be called with interrupt disabled and spinlock held
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
+                                  struct dwc2_host_chan *chan)
+{
+#ifdef VERBOSE_DEBUG
+       int num_channels = hsotg->core_params->host_channels;
+       struct dwc2_qh *qh;
+       u32 hcchar;
+       u32 hcsplt;
+       u32 hctsiz;
+       u32 hc_dma;
+       int i;
+
+       if (chan == NULL)
+               return;
+
+       hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
+       hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
+       hctsiz = readl(hsotg->regs + HCTSIZ(chan->hc_num));
+       hc_dma = readl(hsotg->regs + HCDMA(chan->hc_num));
+
+       dev_dbg(hsotg->dev, "  Assigned to channel %p:\n", chan);
+       dev_dbg(hsotg->dev, "    hcchar 0x%08x, hcsplt 0x%08x\n",
+               hcchar, hcsplt);
+       dev_dbg(hsotg->dev, "    hctsiz 0x%08x, hc_dma 0x%08x\n",
+               hctsiz, hc_dma);
+       dev_dbg(hsotg->dev, "    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+               chan->dev_addr, chan->ep_num, chan->ep_is_in);
+       dev_dbg(hsotg->dev, "    ep_type: %d\n", chan->ep_type);
+       dev_dbg(hsotg->dev, "    max_packet: %d\n", chan->max_packet);
+       dev_dbg(hsotg->dev, "    data_pid_start: %d\n", chan->data_pid_start);
+       dev_dbg(hsotg->dev, "    xfer_started: %d\n", chan->xfer_started);
+       dev_dbg(hsotg->dev, "    halt_status: %d\n", chan->halt_status);
+       dev_dbg(hsotg->dev, "    xfer_buf: %p\n", chan->xfer_buf);
+       dev_dbg(hsotg->dev, "    xfer_dma: %08lx\n",
+               (unsigned long)chan->xfer_dma);
+       dev_dbg(hsotg->dev, "    xfer_len: %d\n", chan->xfer_len);
+       dev_dbg(hsotg->dev, "    qh: %p\n", chan->qh);
+       dev_dbg(hsotg->dev, "  NP inactive sched:\n");
+       list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
+                           qh_list_entry)
+               dev_dbg(hsotg->dev, "    %p\n", qh);
+       dev_dbg(hsotg->dev, "  NP active sched:\n");
+       list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
+                           qh_list_entry)
+               dev_dbg(hsotg->dev, "    %p\n", qh);
+       dev_dbg(hsotg->dev, "  Channels:\n");
+       for (i = 0; i < num_channels; i++) {
+               struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
+
+               dev_dbg(hsotg->dev, "    %2d: %p\n", i, chan);
+       }
+#endif /* VERBOSE_DEBUG */
+}
+
+/*
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
+                                     struct list_head *qh_list)
+{
+       struct dwc2_qh *qh, *qh_tmp;
+       struct dwc2_qtd *qtd, *qtd_tmp;
+
+       list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
+               list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+                                        qtd_list_entry) {
+                       if (qtd->urb != NULL) {
+                               dwc2_host_complete(hsotg, qtd->urb->priv,
+                                                  qtd->urb, -ETIMEDOUT);
+                               dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+                       }
+               }
+       }
+}
+
+static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
+                             struct list_head *qh_list)
+{
+       struct dwc2_qtd *qtd, *qtd_tmp;
+       struct dwc2_qh *qh, *qh_tmp;
+       unsigned long flags;
+
+       if (!qh_list->next)
+               /* The list hasn't been initialized yet */
+               return;
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+
+       /* Ensure there are no QTDs or URBs left */
+       dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
+
+       list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
+               dwc2_hcd_qh_unlink(hsotg, qh);
+
+               /* Free each QTD in the QH's QTD list */
+               list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+                                        qtd_list_entry)
+                       dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+               dwc2_hcd_qh_free(hsotg, qh);
+               spin_lock_irqsave(&hsotg->lock, flags);
+       }
+
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/*
+ * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
+{
+       dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
+       dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
+       dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
+       dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
+       dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
+       dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
+}
+
+/**
+ * dwc2_hcd_start() - Starts the HCD when switching to Host mode
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ */
+void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
+{
+       u32 hprt0;
+
+       if (hsotg->op_state == OTG_STATE_B_HOST) {
+               /*
+                * Reset the port. During a HNP mode switch the reset
+                * needs to occur within 1ms and have a duration of at
+                * least 50ms.
+                */
+               hprt0 = dwc2_read_hprt0(hsotg);
+               hprt0 |= HPRT0_RST;
+               writel(hprt0, hsotg->regs + HPRT0);
+       }
+
+       queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
+                          msecs_to_jiffies(50));
+}
+
+/* Must be called with interrupt disabled and spinlock held */
+static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
+{
+       int num_channels = hsotg->core_params->host_channels;
+       struct dwc2_host_chan *channel;
+       u32 hcchar;
+       int i;
+
+       if (hsotg->core_params->dma_enable <= 0) {
+               /* Flush out any channel requests in slave mode */
+               for (i = 0; i < num_channels; i++) {
+                       channel = hsotg->hc_ptr_array[i];
+                       if (!list_empty(&channel->hc_list_entry))
+                               continue;
+                       hcchar = readl(hsotg->regs + HCCHAR(i));
+                       if (hcchar & HCCHAR_CHENA) {
+                               hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
+                               hcchar |= HCCHAR_CHDIS;
+                               writel(hcchar, hsotg->regs + HCCHAR(i));
+                       }
+               }
+       }
+
+       for (i = 0; i < num_channels; i++) {
+               channel = hsotg->hc_ptr_array[i];
+               if (!list_empty(&channel->hc_list_entry))
+                       continue;
+               hcchar = readl(hsotg->regs + HCCHAR(i));
+               if (hcchar & HCCHAR_CHENA) {
+                       /* Halt the channel */
+                       hcchar |= HCCHAR_CHDIS;
+                       writel(hcchar, hsotg->regs + HCCHAR(i));
+               }
+
+               dwc2_hc_cleanup(hsotg, channel);
+               list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
+               /*
+                * Added for Descriptor DMA to prevent channel double cleanup in
+                * release_channel_ddma(), which is called from ep_disable when
+                * device disconnects
+                */
+               channel->qh = NULL;
+       }
+}
+
+/**
+ * dwc2_hcd_disconnect() - Handles disconnect of the HCD
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
+{
+       u32 intr;
+
+       /* Set status flags for the hub driver */
+       hsotg->flags.b.port_connect_status_change = 1;
+       hsotg->flags.b.port_connect_status = 0;
+
+       /*
+        * Shutdown any transfers in process by clearing the Tx FIFO Empty
+        * interrupt mask and status bits and disabling subsequent host
+        * channel interrupts.
+        */
+       intr = readl(hsotg->regs + GINTMSK);
+       intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
+       writel(intr, hsotg->regs + GINTMSK);
+       intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
+       writel(intr, hsotg->regs + GINTSTS);
+
+       /*
+        * Turn off the vbus power only if the core has transitioned to device
+        * mode. If still in host mode, need to keep power on to detect a
+        * reconnection.
+        */
+       if (dwc2_is_device_mode(hsotg)) {
+               if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
+                       dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
+                       writel(0, hsotg->regs + HPRT0);
+               }
+
+               dwc2_disable_host_interrupts(hsotg);
+       }
+
+       /* Respond with an error status to all URBs in the schedule */
+       dwc2_kill_all_urbs(hsotg);
+
+       if (dwc2_is_host_mode(hsotg))
+               /* Clean up any host channels that were in use */
+               dwc2_hcd_cleanup_channels(hsotg);
+
+       dwc2_host_disconnect(hsotg);
+}
+
+/**
+ * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ */
+static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
+{
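+       /* DWC2_L2 is bus suspend; otherwise this is a resume from L1 sleep */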
+       if (hsotg->lx_state == DWC2_L2)
+               hsotg->flags.b.port_suspend_change = 1;
+       else
+               hsotg->flags.b.port_l1_change = 1;
+}
+
+/**
+ * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
+{
+       dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
+
+       /*
+        * The root hub should be disconnected before this function is called.
+        * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+        * and the QH lists (via ..._hcd_endpoint_disable).
+        */
+
+       /* Turn off all host-specific interrupts */
+       dwc2_disable_host_interrupts(hsotg);
+
+       /* Turn off the vbus power */
+       dev_dbg(hsotg->dev, "PortPower off\n");
+       writel(0, hsotg->regs + HPRT0);
+}
+
+static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
+                               struct dwc2_hcd_urb *urb, void **ep_handle,
+                               gfp_t mem_flags)
+{
+       struct dwc2_qtd *qtd;
+       unsigned long flags;
+       u32 intr_mask;
+       int retval;
+
+       if (!hsotg->flags.b.port_connect_status) {
+               /* No longer connected */
+               dev_err(hsotg->dev, "Not connected\n");
+               return -ENODEV;
+       }
+
+       qtd = kzalloc(sizeof(*qtd), mem_flags);
+       if (!qtd)
+               return -ENOMEM;
+
+       dwc2_hcd_qtd_init(qtd, urb);
+       retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
+                                 mem_flags);
+       if (retval < 0) {
+               dev_err(hsotg->dev,
+                       "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
+                       retval);
+               kfree(qtd);
+               return retval;
+       }
+
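+       /*
+        * If the SOF interrupt is masked, the SOF handler will not start this
+        * transfer, so select and queue transactions from here instead
+        */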
+       intr_mask = readl(hsotg->regs + GINTMSK);
+       if (!(intr_mask & GINTSTS_SOF) && retval == 0) {
+               enum dwc2_transaction_type tr_type;
+
+               if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
+                   !(qtd->urb->flags & URB_GIVEBACK_ASAP))
+                       /*
+                        * Do not schedule SG transactions until qtd has
+                        * URB_GIVEBACK_ASAP set
+                        */
+                       return 0;
+
+               spin_lock_irqsave(&hsotg->lock, flags);
+               tr_type = dwc2_hcd_select_transactions(hsotg);
+               if (tr_type != DWC2_TRANSACTION_NONE)
+                       dwc2_hcd_queue_transactions(hsotg, tr_type);
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+       }
+
+       return retval;
+}
+
+/* Must be called with interrupt disabled and spinlock held */
+static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
+                               struct dwc2_hcd_urb *urb)
+{
+       struct dwc2_qh *qh;
+       struct dwc2_qtd *urb_qtd;
+
+       urb_qtd = urb->qtd;
+       if (!urb_qtd) {
+               dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
+               return -EINVAL;
+       }
+
+       qh = urb_qtd->qh;
+       if (!qh) {
+               dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
+               return -EINVAL;
+       }
+
+       if (urb_qtd->in_process && qh->channel) {
+               dwc2_dump_channel_info(hsotg, qh->channel);
+
+               /* The QTD is in process (it has been assigned to a channel) */
+               if (hsotg->flags.b.port_connect_status)
+                       /*
+                        * If still connected (i.e. in host mode), halt the
+                        * channel so it can be used for other transfers. If
+                        * no longer connected, the host registers can't be
+                        * written to halt the channel since the core is in
+                        * device mode.
+                        */
+                       dwc2_hc_halt(hsotg, qh->channel,
+                                    DWC2_HC_XFER_URB_DEQUEUE);
+       }
+
+       /*
+        * Free the QTD and clean up the associated QH. Leave the QH in the
+        * schedule if it has any remaining QTDs.
+        */
+       if (hsotg->core_params->dma_desc_enable <= 0) {
+               u8 in_process = urb_qtd->in_process;
+
+               dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
+               if (in_process) {
+                       dwc2_hcd_qh_deactivate(hsotg, qh, 0);
+                       qh->channel = NULL;
+               } else if (list_empty(&qh->qtd_list)) {
+                       dwc2_hcd_qh_unlink(hsotg, qh);
+               }
+       } else {
+               dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
+       }
+
+       return 0;
+}
+
+/* Must NOT be called with interrupt disabled or spinlock held */
+static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
+                                    struct usb_host_endpoint *ep, int retry)
+{
+       struct dwc2_qtd *qtd, *qtd_tmp;
+       struct dwc2_qh *qh;
+       unsigned long flags;
+       int rc;
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+
+       qh = ep->hcpriv;
+       if (!qh) {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       while (!list_empty(&qh->qtd_list) && retry--) {
+               if (retry == 0) {
+                       dev_err(hsotg->dev,
+                               "## timeout in dwc2_hcd_endpoint_disable() ##\n");
+                       rc = -EBUSY;
+                       goto err;
+               }
+
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+               usleep_range(20000, 40000);
+               spin_lock_irqsave(&hsotg->lock, flags);
+               qh = ep->hcpriv;
+               if (!qh) {
+                       rc = -EINVAL;
+                       goto err;
+               }
+       }
+
+       dwc2_hcd_qh_unlink(hsotg, qh);
+
+       /* Free each QTD in the QH's QTD list */
+       list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
+               dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+
+       ep->hcpriv = NULL;
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+       dwc2_hcd_qh_free(hsotg, qh);
+
+       return 0;
+
+err:
+       ep->hcpriv = NULL;
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+
+       return rc;
+}
+
+/* Must be called with interrupt disabled and spinlock held */
+static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
+                                  struct usb_host_endpoint *ep)
+{
+       struct dwc2_qh *qh = ep->hcpriv;
+
+       if (!qh)
+               return -EINVAL;
+
+       qh->data_toggle = DWC2_HC_PID_DATA0;
+
+       return 0;
+}
+
+/*
+ * Initializes dynamic portions of the DWC_otg HCD state
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
+{
+       struct dwc2_host_chan *chan, *chan_tmp;
+       int num_channels;
+       int i;
+
+       hsotg->flags.d32 = 0;
+
+       hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
+       hsotg->non_periodic_channels = 0;
+       hsotg->periodic_channels = 0;
+
+       /*
+        * Put all channels in the free channel list and clean up channel
+        * states
+        */
+       list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
+                                hc_list_entry)
+               list_del_init(&chan->hc_list_entry);
+
+       num_channels = hsotg->core_params->host_channels;
+       for (i = 0; i < num_channels; i++) {
+               chan = hsotg->hc_ptr_array[i];
+               list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
+               dwc2_hc_cleanup(hsotg, chan);
+       }
+
+       /* Initialize the DWC core for host mode operation */
+       dwc2_core_host_init(hsotg);
+}
+
+static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
+                              struct dwc2_host_chan *chan,
+                              struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
+{
+       int hub_addr, hub_port;
+
+       chan->do_split = 1;
+       chan->xact_pos = qtd->isoc_split_pos;
+       chan->complete_split = qtd->complete_split;
+       dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
+       chan->hub_addr = (u8)hub_addr;
+       chan->hub_port = (u8)hub_port;
+}
+
+static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
+                              struct dwc2_host_chan *chan,
+                              struct dwc2_qtd *qtd, void *bufptr)
+{
+       struct dwc2_hcd_urb *urb = qtd->urb;
+       struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+       switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               chan->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+               switch (qtd->control_phase) {
+               case DWC2_CONTROL_SETUP:
+                       dev_vdbg(hsotg->dev, "  Control setup transaction\n");
+                       chan->do_ping = 0;
+                       chan->ep_is_in = 0;
+                       chan->data_pid_start = DWC2_HC_PID_SETUP;
+                       if (hsotg->core_params->dma_enable > 0)
+                               chan->xfer_dma = urb->setup_dma;
+                       else
+                               chan->xfer_buf = urb->setup_packet;
+                       chan->xfer_len = 8;
+                       bufptr = NULL;
+                       break;
+
+               case DWC2_CONTROL_DATA:
+                       dev_vdbg(hsotg->dev, "  Control data transaction\n");
+                       chan->data_pid_start = qtd->data_toggle;
+                       break;
+
+               case DWC2_CONTROL_STATUS:
+                       /*
+                        * Direction is opposite of data direction or IN if no
+                        * data
+                        */
+                       dev_vdbg(hsotg->dev, "  Control status transaction\n");
+                       if (urb->length == 0)
+                               chan->ep_is_in = 1;
+                       else
+                               chan->ep_is_in =
+                                       dwc2_hcd_is_pipe_out(&urb->pipe_info);
+                       if (chan->ep_is_in)
+                               chan->do_ping = 0;
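+                       /* The status stage always uses a DATA1 PID */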
+                       chan->data_pid_start = DWC2_HC_PID_DATA1;
+                       chan->xfer_len = 0;
+                       if (hsotg->core_params->dma_enable > 0)
+                               chan->xfer_dma = hsotg->status_buf_dma;
+                       else
+                               chan->xfer_buf = hsotg->status_buf;
+                       bufptr = NULL;
+                       break;
+               }
+               break;
+
+       case USB_ENDPOINT_XFER_BULK:
+               chan->ep_type = USB_ENDPOINT_XFER_BULK;
+               break;
+
+       case USB_ENDPOINT_XFER_INT:
+               chan->ep_type = USB_ENDPOINT_XFER_INT;
+               break;
+
+       case USB_ENDPOINT_XFER_ISOC:
+               chan->ep_type = USB_ENDPOINT_XFER_ISOC;
+               if (hsotg->core_params->dma_desc_enable > 0)
+                       break;
+
+               frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+               frame_desc->status = 0;
+
+               if (hsotg->core_params->dma_enable > 0) {
+                       chan->xfer_dma = urb->dma;
+                       chan->xfer_dma += frame_desc->offset +
+                                       qtd->isoc_split_offset;
+               } else {
+                       chan->xfer_buf = urb->buf;
+                       chan->xfer_buf += frame_desc->offset +
+                                       qtd->isoc_split_offset;
+               }
+
+               chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+
+               /* For non-dword aligned buffers */
+               if (hsotg->core_params->dma_enable > 0 &&
+                   (chan->xfer_dma & 0x3))
+                       bufptr = (u8 *)urb->buf + frame_desc->offset +
+                                       qtd->isoc_split_offset;
+               else
+                       bufptr = NULL;
+
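+               /*
+                * 188 bytes is the largest isochronous OUT payload that fits
+                * in a single start-split transaction; larger transfers are
+                * sent as a begin/middle/end sequence
+                */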
+               if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
+                       if (chan->xfer_len <= 188)
+                               chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
+                       else
+                               chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
+               }
+               break;
+       }
+
+       return bufptr;
+}
+
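+/*
+ * Set up a DWORD-aligned DMA bounce buffer for the QH, allocating it on
+ * first use, and copy OUT data into it when the original URB buffer is not
+ * DWORD aligned
+ */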
+static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+                                  struct dwc2_host_chan *chan, void *bufptr)
+{
+       u32 buf_size;
+
+       if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
+               buf_size = hsotg->core_params->max_transfer_size;
+       else
+               buf_size = 4096;
+
+       if (!qh->dw_align_buf) {
+               qh->dw_align_buf = dma_alloc_coherent(hsotg->dev, buf_size,
+                                                     &qh->dw_align_buf_dma,
+                                                     GFP_ATOMIC);
+               if (!qh->dw_align_buf)
+                       return -ENOMEM;
+       }
+
+       if (!chan->ep_is_in && chan->xfer_len) {
+               dma_sync_single_for_cpu(hsotg->dev, chan->xfer_dma, buf_size,
+                                       DMA_TO_DEVICE);
+               memcpy(qh->dw_align_buf, bufptr, chan->xfer_len);
+               dma_sync_single_for_device(hsotg->dev, chan->xfer_dma, buf_size,
+                                          DMA_TO_DEVICE);
+       }
+
+       chan->align_buf = qh->dw_align_buf_dma;
+       return 0;
+}
+
+/**
+ * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
+ * channel and initializes the host channel to perform the transactions. The
+ * host channel is removed from the free list.
+ *
+ * @hsotg: The HCD state structure
+ * @qh:    Transactions from the first QTD for this QH are selected and assigned
+ *         to a free host channel
+ */
+static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
+                                   struct dwc2_qh *qh)
+{
+       struct dwc2_host_chan *chan;
+       struct dwc2_hcd_urb *urb;
+       struct dwc2_qtd *qtd;
+       void *bufptr = NULL;
+
+       dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
+
+       if (list_empty(&qh->qtd_list)) {
+               dev_dbg(hsotg->dev, "No QTDs in QH list\n");
+               return;
+       }
+
+       if (list_empty(&hsotg->free_hc_list)) {
+               dev_dbg(hsotg->dev, "No free channel to assign\n");
+               return;
+       }
+
+       chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
+                               hc_list_entry);
+
+       /* Remove the host channel from the free list */
+       list_del_init(&chan->hc_list_entry);
+
+       qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
+       urb = qtd->urb;
+       qh->channel = chan;
+       qtd->in_process = 1;
+
+       /*
+        * Use the device address from the URB's pipe info. This address is
+        * 0 before the SET_ADDRESS command and the correct address afterward.
+        */
+       chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
+       chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
+       chan->speed = qh->dev_speed;
+       chan->max_packet = dwc2_max_packet(qh->maxp);
+
+       chan->xfer_started = 0;
+       chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
+       chan->error_state = (qtd->error_count > 0);
+       chan->halt_on_queue = 0;
+       chan->halt_pending = 0;
+       chan->requests = 0;
+
+       /*
+        * The following values may be modified in the transfer type section
+        * below. The xfer_len value may be reduced when the transfer is
+        * started to accommodate the max widths of the XferSize and PktCnt
+        * fields in the HCTSIZn register.
+        */
+
+       chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
+       if (chan->ep_is_in)
+               chan->do_ping = 0;
+       else
+               chan->do_ping = qh->ping_state;
+
+       chan->data_pid_start = qh->data_toggle;
+       chan->multi_count = 1;
+
+       if (hsotg->core_params->dma_enable > 0) {
+               chan->xfer_dma = urb->dma + urb->actual_length;
+
+               /* For non-dword aligned case */
+               if (hsotg->core_params->dma_desc_enable <= 0 &&
+                   (chan->xfer_dma & 0x3))
+                       bufptr = (u8 *)urb->buf + urb->actual_length;
+       } else {
+               chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
+       }
+
+       chan->xfer_len = urb->length - urb->actual_length;
+       chan->xfer_count = 0;
+
+       /* Set the split attributes if required */
+       if (qh->do_split)
+               dwc2_hc_init_split(hsotg, chan, qtd, urb);
+       else
+               chan->do_split = 0;
+
+       /* Set the transfer attributes */
+       bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr);
+
+       /* Non DWORD-aligned buffer case */
+       if (bufptr) {
+               dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+               if (dwc2_hc_setup_align_buf(hsotg, qh, chan, bufptr)) {
+                       dev_err(hsotg->dev,
+                               "%s: Failed to allocate memory to handle non-dword aligned buffer\n",
+                               __func__);
+                       /* Add channel back to free list */
+                       chan->align_buf = 0;
+                       chan->multi_count = 0;
+                       list_add_tail(&chan->hc_list_entry,
+                                     &hsotg->free_hc_list);
+                       qtd->in_process = 0;
+                       qh->channel = NULL;
+                       return;
+               }
+       } else {
+               chan->align_buf = 0;
+       }
+
+       if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+           chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+               /*
+                * This value may be modified when the transfer is started
+                * to reflect the actual transfer length
+                */
+               chan->multi_count = dwc2_hb_mult(qh->maxp);
+
+       if (hsotg->core_params->dma_desc_enable > 0)
+               chan->desc_list_addr = qh->desc_list_dma;
+
+       dwc2_hc_init(hsotg, chan);
+       chan->qh = qh;
+}
+
+/**
+ * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
+ * schedule and assigns them to available host channels. Called from the HCD
+ * interrupt handler functions.
+ *
+ * @hsotg: The HCD state structure
+ *
+ * Return: The types of new transactions that were assigned to host channels
+ */
+enum dwc2_transaction_type dwc2_hcd_select_transactions(
+               struct dwc2_hsotg *hsotg)
+{
+       enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
+       struct list_head *qh_ptr;
+       struct dwc2_qh *qh;
+       int num_channels;
+
+#ifdef DWC2_DEBUG_SOF
+       dev_vdbg(hsotg->dev, "  Select Transactions\n");
+#endif
+
+       /* Process entries in the periodic ready list */
+       qh_ptr = hsotg->periodic_sched_ready.next;
+       while (qh_ptr != &hsotg->periodic_sched_ready) {
+               if (list_empty(&hsotg->free_hc_list))
+                       break;
+               qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+               dwc2_assign_and_init_hc(hsotg, qh);
+
+               /*
+                * Move the QH from the periodic ready schedule to the
+                * periodic assigned schedule
+                */
+               qh_ptr = qh_ptr->next;
+               list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned);
+               ret_val = DWC2_TRANSACTION_PERIODIC;
+       }
+
+       /*
+        * Process entries in the inactive portion of the non-periodic
+        * schedule. Some free host channels may not be used if they are
+        * reserved for periodic transfers.
+        */
+       num_channels = hsotg->core_params->host_channels;
+       qh_ptr = hsotg->non_periodic_sched_inactive.next;
+       while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
+               if (hsotg->non_periodic_channels >= num_channels -
+                                               hsotg->periodic_channels)
+                       break;
+               if (list_empty(&hsotg->free_hc_list))
+                       break;
+               qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+               dwc2_assign_and_init_hc(hsotg, qh);
+
+               /*
+                * Move the QH from the non-periodic inactive schedule to the
+                * non-periodic active schedule
+                */
+               qh_ptr = qh_ptr->next;
+               list_move(&qh->qh_list_entry,
+                         &hsotg->non_periodic_sched_active);
+
+               if (ret_val == DWC2_TRANSACTION_NONE)
+                       ret_val = DWC2_TRANSACTION_NON_PERIODIC;
+               else
+                       ret_val = DWC2_TRANSACTION_ALL;
+
+               hsotg->non_periodic_channels++;
+       }
+
+       return ret_val;
+}
+
+/**
+ * dwc2_queue_transaction() - Attempts to queue a single transaction request for
+ * a host channel associated with either a periodic or non-periodic transfer
+ *
+ * @hsotg: The HCD state structure
+ * @chan:  Host channel descriptor associated with either a periodic or
+ *         non-periodic transfer
+ * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
+ *                     for periodic transfers or the non-periodic Tx FIFO
+ *                     for non-periodic transfers
+ *
+ * Return: 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO
+ *
+ * This function assumes that there is space available in the appropriate
+ * request queue. For an OUT transfer or SETUP transaction in Slave mode,
+ * it checks whether space is available in the appropriate Tx FIFO.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
+                                 struct dwc2_host_chan *chan,
+                                 u16 fifo_dwords_avail)
+{
+       int retval = 0;
+
+       if (hsotg->core_params->dma_enable > 0) {
+               if (hsotg->core_params->dma_desc_enable > 0) {
+                       if (!chan->xfer_started ||
+                           chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+                               dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
+                               chan->qh->ping_state = 0;
+                       }
+               } else if (!chan->xfer_started) {
+                       dwc2_hc_start_transfer(hsotg, chan);
+                       chan->qh->ping_state = 0;
+               }
+       } else if (chan->halt_pending) {
+               /* Don't queue a request if the channel has been halted */
+       } else if (chan->halt_on_queue) {
+               dwc2_hc_halt(hsotg, chan, chan->halt_status);
+       } else if (chan->do_ping) {
+               if (!chan->xfer_started)
+                       dwc2_hc_start_transfer(hsotg, chan);
+       } else if (!chan->ep_is_in ||
+                  chan->data_pid_start == DWC2_HC_PID_SETUP) {
+               if ((fifo_dwords_avail * 4) >= chan->max_packet) {
+                       if (!chan->xfer_started) {
+                               dwc2_hc_start_transfer(hsotg, chan);
+                               retval = 1;
+                       } else {
+                               retval = dwc2_hc_continue_transfer(hsotg, chan);
+                       }
+               } else {
+                       retval = -1;
+               }
+       } else {
+               if (!chan->xfer_started) {
+                       dwc2_hc_start_transfer(hsotg, chan);
+                       retval = 1;
+               } else {
+                       retval = dwc2_hc_continue_transfer(hsotg, chan);
+               }
+       }
+
+       return retval;
+}
+
+/*
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
+{
+       struct list_head *qh_ptr;
+       struct dwc2_qh *qh;
+       u32 tx_status;
+       u32 fspcavail;
+       u32 gintmsk;
+       int status;
+       int no_queue_space = 0;
+       int no_fifo_space = 0;
+       u32 qspcavail;
+
+       dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
+
+       tx_status = readl(hsotg->regs + HPTXSTS);
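+       /*
+        * ">>" binds tighter than "&", so these expressions read as
+        * (tx_status >> SHIFT) & (MASK >> SHIFT)
+        */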
+       qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+                   TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+       fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+                   TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+       dev_vdbg(hsotg->dev, "  P Tx Req Queue Space Avail (before queue): %d\n",
+                qspcavail);
+       dev_vdbg(hsotg->dev, "  P Tx FIFO Space Avail (before queue): %d\n",
+                fspcavail);
+
+       qh_ptr = hsotg->periodic_sched_assigned.next;
+       while (qh_ptr != &hsotg->periodic_sched_assigned) {
+               tx_status = readl(hsotg->regs + HPTXSTS);
+               if ((tx_status & TXSTS_QSPCAVAIL_MASK) == 0) {
+                       no_queue_space = 1;
+                       break;
+               }
+
+               qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+               if (!qh->channel) {
+                       qh_ptr = qh_ptr->next;
+                       continue;
+               }
+
+               /* Make sure EP's TT buffer is clean before queueing qtds */
+               if (qh->tt_buffer_dirty) {
+                       qh_ptr = qh_ptr->next;
+                       continue;
+               }
+
+               /*
+                * Set a flag if we're queuing high-bandwidth in slave mode.
+                * The flag prevents any halts from getting into the request
+                * queue in the middle of queuing multiple high-bandwidth
+                * packets.
+                */
+               if (hsotg->core_params->dma_enable <= 0 &&
+                               qh->channel->multi_count > 1)
+                       hsotg->queuing_high_bandwidth = 1;
+
+               fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+                           TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+               status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
+               if (status < 0) {
+                       no_fifo_space = 1;
+                       break;
+               }
+
+               /*
+                * In Slave mode, stay on the current transfer until there is
+                * nothing more to do or the high-bandwidth request count is
+                * reached. In DMA mode, only need to queue one request. The
+                * controller automatically handles multiple packets for
+                * high-bandwidth transfers.
+                */
+               if (hsotg->core_params->dma_enable > 0 || status == 0 ||
+                   qh->channel->requests == qh->channel->multi_count) {
+                       qh_ptr = qh_ptr->next;
+                       /*
+                        * Move the QH from the periodic assigned schedule to
+                        * the periodic queued schedule
+                        */
+                       list_move(&qh->qh_list_entry,
+                                 &hsotg->periodic_sched_queued);
+
+                       /* done queuing high bandwidth */
+                       hsotg->queuing_high_bandwidth = 0;
+               }
+       }
+
+       if (hsotg->core_params->dma_enable <= 0) {
+               tx_status = readl(hsotg->regs + HPTXSTS);
+               qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+                           TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+               fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+                           TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+               dev_vdbg(hsotg->dev,
+                        "  P Tx Req Queue Space Avail (after queue): %d\n",
+                        qspcavail);
+               dev_vdbg(hsotg->dev,
+                        "  P Tx FIFO Space Avail (after queue): %d\n",
+                        fspcavail);
+
+               if (!list_empty(&hsotg->periodic_sched_assigned) ||
+                   no_queue_space || no_fifo_space) {
+                       /*
+                        * May need to queue more transactions as the request
+                        * queue or Tx FIFO empties. Enable the periodic Tx
+                        * FIFO empty interrupt. (Always use the half-empty
+                        * level to ensure that new requests are loaded as
+                        * soon as possible.)
+                        */
+                       gintmsk = readl(hsotg->regs + GINTMSK);
+                       gintmsk |= GINTSTS_PTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               } else {
+                       /*
+                        * Disable the Tx FIFO empty interrupt since there are
+                        * no more transactions that need to be queued right
+                        * now. This function is called from interrupt
+                        * handlers to queue more transactions as transfer
+                        * states change.
+                        */
+                       gintmsk = readl(hsotg->regs + GINTMSK);
+                       gintmsk &= ~GINTSTS_PTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               }
+       }
+}
+
+/*
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
+{
+       struct list_head *orig_qh_ptr;
+       struct dwc2_qh *qh;
+       u32 tx_status;
+       u32 qspcavail;
+       u32 fspcavail;
+       u32 gintmsk;
+       int status;
+       int no_queue_space = 0;
+       int no_fifo_space = 0;
+       int more_to_do = 0;
+
+       dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
+
+       tx_status = readl(hsotg->regs + GNPTXSTS);
+       qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+                   TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+       fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+                   TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+       dev_vdbg(hsotg->dev, "  NP Tx Req Queue Space Avail (before queue): %d\n",
+                qspcavail);
+       dev_vdbg(hsotg->dev, "  NP Tx FIFO Space Avail (before queue): %d\n",
+                fspcavail);
+
+       /*
+        * Keep track of the starting point. Skip over the start-of-list
+        * entry.
+        */
+       if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
+               hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
+       orig_qh_ptr = hsotg->non_periodic_qh_ptr;
+
+       /*
+        * Process once through the active list or until no more space is
+        * available in the request queue or the Tx FIFO
+        */
+       do {
+               tx_status = readl(hsotg->regs + GNPTXSTS);
+               qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+                           TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+               if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
+                       no_queue_space = 1;
+                       break;
+               }
+
+               qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
+                               qh_list_entry);
+               if (!qh->channel)
+                       goto next;
+
+               /* Make sure EP's TT buffer is clean before queueing qtds */
+               if (qh->tt_buffer_dirty)
+                       goto next;
+
+               fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+                           TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+               status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
+
+               if (status > 0) {
+                       more_to_do = 1;
+               } else if (status < 0) {
+                       no_fifo_space = 1;
+                       break;
+               }
+next:
+               /* Advance to next QH, skipping start-of-list entry */
+               hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
+               if (hsotg->non_periodic_qh_ptr ==
+                               &hsotg->non_periodic_sched_active)
+                       hsotg->non_periodic_qh_ptr =
+                                       hsotg->non_periodic_qh_ptr->next;
+       } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
+
+       if (hsotg->core_params->dma_enable <= 0) {
+               tx_status = readl(hsotg->regs + GNPTXSTS);
+               qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+                           TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+               fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+                           TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+               dev_vdbg(hsotg->dev,
+                        "  NP Tx Req Queue Space Avail (after queue): %d\n",
+                        qspcavail);
+               dev_vdbg(hsotg->dev,
+                        "  NP Tx FIFO Space Avail (after queue): %d\n",
+                        fspcavail);
+
+               if (more_to_do || no_queue_space || no_fifo_space) {
+                       /*
+                        * May need to queue more transactions as the request
+                        * queue or Tx FIFO empties. Enable the non-periodic
+                        * Tx FIFO empty interrupt. (Always use the half-empty
+                        * level to ensure that new requests are loaded as
+                        * soon as possible.)
+                        */
+                       gintmsk = readl(hsotg->regs + GINTMSK);
+                       gintmsk |= GINTSTS_NPTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               } else {
+                       /*
+                        * Disable the Tx FIFO empty interrupt since there are
+                        * no more transactions that need to be queued right
+                        * now. This function is called from interrupt
+                        * handlers to queue more transactions as transfer
+                        * states change.
+                        */
+                       gintmsk = readl(hsotg->regs + GINTMSK);
+                       gintmsk &= ~GINTSTS_NPTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               }
+       }
+}
+
+/**
+ * dwc2_hcd_queue_transactions() - Processes the currently active host channels
+ * and queues transactions for these channels to the DWC_otg controller. Called
+ * from the HCD interrupt handler functions.
+ *
+ * @hsotg:   The HCD state structure
+ * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
+ *           or both)
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
+                                enum dwc2_transaction_type tr_type)
+{
+#ifdef DWC2_DEBUG_SOF
+       dev_vdbg(hsotg->dev, "Queue Transactions\n");
+#endif
+       /* Process host channels associated with periodic transfers */
+       if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
+            tr_type == DWC2_TRANSACTION_ALL) &&
+           !list_empty(&hsotg->periodic_sched_assigned))
+               dwc2_process_periodic_channels(hsotg);
+
+       /* Process host channels associated with non-periodic transfers */
+       if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
+           tr_type == DWC2_TRANSACTION_ALL) {
+               if (!list_empty(&hsotg->non_periodic_sched_active)) {
+                       dwc2_process_non_periodic_channels(hsotg);
+               } else {
+                       /*
+                        * Ensure NP Tx FIFO empty interrupt is disabled when
+                        * there are no non-periodic transfers to process
+                        */
+                       u32 gintmsk = readl(hsotg->regs + GINTMSK);
+
+                       gintmsk &= ~GINTSTS_NPTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               }
+       }
+}
+
+static void dwc2_conn_id_status_change(struct work_struct *work)
+{
+       struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+                                               wf_otg);
+       u32 count = 0;
+       u32 gotgctl;
+
+       dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+       gotgctl = readl(hsotg->regs + GOTGCTL);
+       dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
+       dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
+               !!(gotgctl & GOTGCTL_CONID_B));
+
+       /* B-Device connector (Device Mode) */
+       if (gotgctl & GOTGCTL_CONID_B) {
+               /* Wait for switch to device mode */
+               dev_dbg(hsotg->dev, "connId B\n");
+               while (!dwc2_is_device_mode(hsotg)) {
+                       dev_info(hsotg->dev,
+                                "Waiting for Peripheral Mode, Mode=%s\n",
+                                dwc2_is_host_mode(hsotg) ? "Host" :
+                                "Peripheral");
+                       usleep_range(20000, 40000);
+                       if (++count > 250)
+                               break;
+               }
+               if (count > 250)
+                       dev_err(hsotg->dev,
+                               "Connection id status change timed out");
+               hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+               dwc2_core_init(hsotg, false);
+               dwc2_enable_global_interrupts(hsotg);
+       } else {
+               /* A-Device connector (Host Mode) */
+               dev_dbg(hsotg->dev, "connId A\n");
+               while (!dwc2_is_host_mode(hsotg)) {
+                       dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
+                                dwc2_is_host_mode(hsotg) ?
+                                "Host" : "Peripheral");
+                       usleep_range(20000, 40000);
+                       if (++count > 250)
+                               break;
+               }
+               if (count > 250)
+                       dev_err(hsotg->dev,
+                               "Connection id status change timed out");
+               hsotg->op_state = OTG_STATE_A_HOST;
+
+               /* Initialize the Core for Host mode */
+               dwc2_core_init(hsotg, false);
+               dwc2_enable_global_interrupts(hsotg);
+               dwc2_hcd_start(hsotg);
+       }
+}
+
+static void dwc2_wakeup_detected(unsigned long data)
+{
+       struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+       u32 hprt0;
+
+       dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+       /*
+        * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
+        * so that OPT tests pass with all PHYs.)
+        */
+       hprt0 = dwc2_read_hprt0(hsotg);
+       dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
+       hprt0 &= ~HPRT0_RES;
+       writel(hprt0, hsotg->regs + HPRT0);
+       dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
+               readl(hsotg->regs + HPRT0));
+
+       dwc2_hcd_rem_wakeup(hsotg);
+
+       /* Change to L0 state */
+       hsotg->lx_state = DWC2_L0;
+}
+
+static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
+{
+       struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+       return hcd->self.b_hnp_enable;
+}
+
+/* Must NOT be called with interrupt disabled or spinlock held */
+static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
+{
+       unsigned long flags;
+       u32 hprt0;
+       u32 pcgctl;
+       u32 gotgctl;
+
+       dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+
+       if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
+               gotgctl = readl(hsotg->regs + GOTGCTL);
+               gotgctl |= GOTGCTL_HSTSETHNPEN;
+               writel(gotgctl, hsotg->regs + GOTGCTL);
+               hsotg->op_state = OTG_STATE_A_SUSPEND;
+       }
+
+       hprt0 = dwc2_read_hprt0(hsotg);
+       hprt0 |= HPRT0_SUSP;
+       writel(hprt0, hsotg->regs + HPRT0);
+
+       /* Update lx_state */
+       hsotg->lx_state = DWC2_L2;
+
+       /* Suspend the Phy Clock */
+       pcgctl = readl(hsotg->regs + PCGCTL);
+       pcgctl |= PCGCTL_STOPPCLK;
+       writel(pcgctl, hsotg->regs + PCGCTL);
+       udelay(10);
+
+       /* For HNP the bus must be suspended for at least 200ms */
+       if (dwc2_host_is_b_hnp_enabled(hsotg)) {
+               pcgctl = readl(hsotg->regs + PCGCTL);
+               pcgctl &= ~PCGCTL_STOPPCLK;
+               writel(pcgctl, hsotg->regs + PCGCTL);
+
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+
+               usleep_range(200000, 250000);
+       } else {
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+       }
+}
+
+/* Handles hub class-specific requests */
+static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+                               u16 wvalue, u16 windex, char *buf, u16 wlength)
+{
+       struct usb_hub_descriptor *hub_desc;
+       int retval = 0;
+       u32 hprt0;
+       u32 port_status;
+       u32 speed;
+       u32 pcgctl;
+
+       switch (typereq) {
+       case ClearHubFeature:
+               dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);
+
+               switch (wvalue) {
+               case C_HUB_LOCAL_POWER:
+               case C_HUB_OVER_CURRENT:
+                       /* Nothing required here */
+                       break;
+
+               default:
+                       retval = -EINVAL;
+                       dev_err(hsotg->dev,
+                               "ClearHubFeature request %1xh unknown\n",
+                               wvalue);
+               }
+               break;
+
+       case ClearPortFeature:
+               if (wvalue != USB_PORT_FEAT_L1)
+                       if (!windex || windex > 1)
+                               goto error;
+               switch (wvalue) {
+               case USB_PORT_FEAT_ENABLE:
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
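+                       /*
+                        * The port enable bit in HPRT0 is write-1-to-clear,
+                        * so setting HPRT0_ENA here disables the port
+                        */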
+                       hprt0 = dwc2_read_hprt0(hsotg);
+                       hprt0 |= HPRT0_ENA;
+                       writel(hprt0, hsotg->regs + HPRT0);
+                       break;
+
+               case USB_PORT_FEAT_SUSPEND:
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
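+                       /*
+                        * Restart the PHY clock, drive resume signaling on
+                        * the port for ~100 ms, then stop driving resume
+                        */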
+                       writel(0, hsotg->regs + PCGCTL);
+                       usleep_range(20000, 40000);
+
+                       hprt0 = dwc2_read_hprt0(hsotg);
+                       hprt0 |= HPRT0_RES;
+                       writel(hprt0, hsotg->regs + HPRT0);
+                       hprt0 &= ~HPRT0_SUSP;
+                       usleep_range(100000, 150000);
+
+                       hprt0 &= ~HPRT0_RES;
+                       writel(hprt0, hsotg->regs + HPRT0);
+                       break;
+
+               case USB_PORT_FEAT_POWER:
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_POWER\n");
+                       hprt0 = dwc2_read_hprt0(hsotg);
+                       hprt0 &= ~HPRT0_PWR;
+                       writel(hprt0, hsotg->regs + HPRT0);
+                       break;
+
+               case USB_PORT_FEAT_INDICATOR:
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+                       /* Port indicator not supported */
+                       break;
+
+               case USB_PORT_FEAT_C_CONNECTION:
+                       /*
+                        * Clears driver's internal Connect Status Change flag
+                        */
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+                       hsotg->flags.b.port_connect_status_change = 0;
+                       break;
+
+               case USB_PORT_FEAT_C_RESET:
+                       /* Clears driver's internal Port Reset Change flag */
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+                       hsotg->flags.b.port_reset_change = 0;
+                       break;
+
+               case USB_PORT_FEAT_C_ENABLE:
+                       /*
+                        * Clears the driver's internal Port Enable/Disable
+                        * Change flag
+                        */
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+                       hsotg->flags.b.port_enable_change = 0;
+                       break;
+
+               case USB_PORT_FEAT_C_SUSPEND:
+                       /*
+                        * Clears the driver's internal Port Suspend Change
+                        * flag, which is set when resume signaling on the host
+                        * port is complete
+                        */
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+                       hsotg->flags.b.port_suspend_change = 0;
+                       break;
+
+               case USB_PORT_FEAT_C_PORT_L1:
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
+                       hsotg->flags.b.port_l1_change = 0;
+                       break;
+
+               case USB_PORT_FEAT_C_OVER_CURRENT:
+                       dev_dbg(hsotg->dev,
+                               "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+                       hsotg->flags.b.port_over_current_change = 0;
+                       break;
+
+               default:
+                       retval = -EINVAL;
+                       dev_err(hsotg->dev,
+                               "ClearPortFeature request %1xh unknown or unsupported\n",
+                               wvalue);
+               }
+               break;
+
+       case GetHubDescriptor:
+               dev_dbg(hsotg->dev, "GetHubDescriptor\n");
+               hub_desc = (struct usb_hub_descriptor *)buf;
+               hub_desc->bDescLength = 9;
+               hub_desc->bDescriptorType = 0x29;
+               hub_desc->bNbrPorts = 1;
+               hub_desc->wHubCharacteristics = cpu_to_le16(0x08);
+               hub_desc->bPwrOn2PwrGood = 1;
+               hub_desc->bHubContrCurrent = 0;
+               hub_desc->u.hs.DeviceRemovable[0] = 0;
+               hub_desc->u.hs.DeviceRemovable[1] = 0xff;
+               break;
+
+       case GetHubStatus:
+               dev_dbg(hsotg->dev, "GetHubStatus\n");
+               memset(buf, 0, 4);
+               break;
+
+       case GetPortStatus:
+               dev_dbg(hsotg->dev,
+                       "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
+                       hsotg->flags.d32);
+               if (!windex || windex > 1)
+                       goto error;
+
+               port_status = 0;
+               if (hsotg->flags.b.port_connect_status_change)
+                       port_status |= USB_PORT_STAT_C_CONNECTION << 16;
+               if (hsotg->flags.b.port_enable_change)
+                       port_status |= USB_PORT_STAT_C_ENABLE << 16;
+               if (hsotg->flags.b.port_suspend_change)
+                       port_status |= USB_PORT_STAT_C_SUSPEND << 16;
+               if (hsotg->flags.b.port_l1_change)
+                       port_status |= USB_PORT_STAT_C_L1 << 16;
+               if (hsotg->flags.b.port_reset_change)
+                       port_status |= USB_PORT_STAT_C_RESET << 16;
+               if (hsotg->flags.b.port_over_current_change) {
+                       dev_warn(hsotg->dev, "Overcurrent change detected\n");
+                       port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+               }
+
+               if (!hsotg->flags.b.port_connect_status) {
+                       /*
+                        * The port is disconnected, which means the core is
+                        * either in device mode or it soon will be. Just
+                        * return 0's for the remainder of the port status
+                        * since the port register can't be read if the core
+                        * is in device mode.
+                        */
+                       *(__le32 *)buf = cpu_to_le32(port_status);
+                       break;
+               }
+
+               hprt0 = readl(hsotg->regs + HPRT0);
+               dev_dbg(hsotg->dev, "  HPRT0: 0x%08x\n", hprt0);
+
+               if (hprt0 & HPRT0_CONNSTS)
+                       port_status |= USB_PORT_STAT_CONNECTION;
+               if (hprt0 & HPRT0_ENA)
+                       port_status |= USB_PORT_STAT_ENABLE;
+               if (hprt0 & HPRT0_SUSP)
+                       port_status |= USB_PORT_STAT_SUSPEND;
+               if (hprt0 & HPRT0_OVRCURRACT)
+                       port_status |= USB_PORT_STAT_OVERCURRENT;
+               if (hprt0 & HPRT0_RST)
+                       port_status |= USB_PORT_STAT_RESET;
+               if (hprt0 & HPRT0_PWR)
+                       port_status |= USB_PORT_STAT_POWER;
+
+               speed = hprt0 & HPRT0_SPD_MASK;
+               if (speed == HPRT0_SPD_HIGH_SPEED)
+                       port_status |= USB_PORT_STAT_HIGH_SPEED;
+               else if (speed == HPRT0_SPD_LOW_SPEED)
+                       port_status |= USB_PORT_STAT_LOW_SPEED;
+
+               if (hprt0 & HPRT0_TSTCTL_MASK)
+                       port_status |= USB_PORT_STAT_TEST;
+               /* USB_PORT_FEAT_INDICATOR unsupported, always 0 */
+
+               dev_dbg(hsotg->dev, "port_status=%08x\n", port_status);
+               *(__le32 *)buf = cpu_to_le32(port_status);
+               break;
+
+       case SetHubFeature:
+               dev_dbg(hsotg->dev, "SetHubFeature\n");
+               /* No HUB features supported */
+               break;
+
+       case SetPortFeature:
+               dev_dbg(hsotg->dev, "SetPortFeature\n");
+               if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
+                       goto error;
+
+               if (!hsotg->flags.b.port_connect_status) {
+                       /*
+                        * The port is disconnected, which means the core is
+                        * either in device mode or it soon will be. Just
+                        * return without doing anything since the port
+                        * register can't be written if the core is in device
+                        * mode.
+                        */
+                       break;
+               }
+
+               switch (wvalue) {
+               case USB_PORT_FEAT_SUSPEND:
+                       dev_dbg(hsotg->dev,
+                               "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+                       if (windex != hsotg->otg_port)
+                               goto error;
+                       dwc2_port_suspend(hsotg, windex);
+                       break;
+
+               case USB_PORT_FEAT_POWER:
+                       dev_dbg(hsotg->dev,
+                               "SetPortFeature - USB_PORT_FEAT_POWER\n");
+                       hprt0 = dwc2_read_hprt0(hsotg);
+                       hprt0 |= HPRT0_PWR;
+                       writel(hprt0, hsotg->regs + HPRT0);
+                       break;
+
+               case USB_PORT_FEAT_RESET:
+                       hprt0 = dwc2_read_hprt0(hsotg);
+                       dev_dbg(hsotg->dev,
+                               "SetPortFeature - USB_PORT_FEAT_RESET\n");
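+                       /*
+                        * Restart the PHY clock and disable sleep clock
+                        * gating before driving the port reset
+                        */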
+                       pcgctl = readl(hsotg->regs + PCGCTL);
+                       pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
+                       writel(pcgctl, hsotg->regs + PCGCTL);
+                       /* ??? Original driver does this */
+                       writel(0, hsotg->regs + PCGCTL);
+
+                       hprt0 = dwc2_read_hprt0(hsotg);
+                       /* Clear suspend bit if resetting from suspend state */
+                       hprt0 &= ~HPRT0_SUSP;
+
+                       /*
+                        * When acting as B-Host, the port reset bit is set in
+                        * the Start HCD callback function, so that the reset
+                        * is started within 1 ms of the HNP success interrupt
+                        */
+                       if (!dwc2_hcd_is_b_host(hsotg)) {
+                               hprt0 |= HPRT0_PWR | HPRT0_RST;
+                               dev_dbg(hsotg->dev,
+                                       "In host mode, hprt0=%08x\n", hprt0);
+                               writel(hprt0, hsotg->regs + HPRT0);
+                       }
+
+                       /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+                       usleep_range(50000, 70000);
+                       hprt0 &= ~HPRT0_RST;
+                       writel(hprt0, hsotg->regs + HPRT0);
+                       hsotg->lx_state = DWC2_L0; /* Now back to On state */
+                       break;
+
+               case USB_PORT_FEAT_INDICATOR:
+                       dev_dbg(hsotg->dev,
+                               "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+                       /* Not supported */
+                       break;
+
+               default:
+                       retval = -EINVAL;
+                       dev_err(hsotg->dev,
+                               "SetPortFeature %1xh unknown or unsupported\n",
+                               wvalue);
+                       break;
+               }
+               break;
+
+       default:
+error:
+               retval = -EINVAL;
+               dev_dbg(hsotg->dev,
+                       "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
+                       typereq, windex, wvalue);
+               break;
+       }
+
+       return retval;
+}
+
+static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
+{
+       int retval;
+
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       if (port != 1)
+               return -EINVAL;
+
+       retval = (hsotg->flags.b.port_connect_status_change ||
+                 hsotg->flags.b.port_reset_change ||
+                 hsotg->flags.b.port_enable_change ||
+                 hsotg->flags.b.port_suspend_change ||
+                 hsotg->flags.b.port_over_current_change);
+
+       if (retval) {
+               dev_dbg(hsotg->dev,
+                       "DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
+               dev_dbg(hsotg->dev, "  port_connect_status_change: %d\n",
+                       hsotg->flags.b.port_connect_status_change);
+               dev_dbg(hsotg->dev, "  port_reset_change: %d\n",
+                       hsotg->flags.b.port_reset_change);
+               dev_dbg(hsotg->dev, "  port_enable_change: %d\n",
+                       hsotg->flags.b.port_enable_change);
+               dev_dbg(hsotg->dev, "  port_suspend_change: %d\n",
+                       hsotg->flags.b.port_suspend_change);
+               dev_dbg(hsotg->dev, "  port_over_current_change: %d\n",
+                       hsotg->flags.b.port_over_current_change);
+       }
+
+       return retval;
+}
+
+int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
+{
+       u32 hfnum = readl(hsotg->regs + HFNUM);
+
+#ifdef DWC2_DEBUG_SOF
+       dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
+                hfnum >> HFNUM_FRNUM_SHIFT &
+                HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT);
+#endif
+       return hfnum >> HFNUM_FRNUM_SHIFT &
+              HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+}
+
+int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
+{
+       return (hsotg->op_state == OTG_STATE_B_HOST);
+}
+
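+/*
+ * Allocates a dwc2_hcd_urb with space for iso_desc_count isochronous packet
+ * descriptors appended to the end of the structure
+ */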
+static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
+                                              int iso_desc_count,
+                                              gfp_t mem_flags)
+{
+       struct dwc2_hcd_urb *urb;
+       u32 size = sizeof(*urb) + iso_desc_count *
+                  sizeof(struct dwc2_hcd_iso_packet_desc);
+
+       urb = kzalloc(size, mem_flags);
+       if (urb)
+               urb->packet_count = iso_desc_count;
+       return urb;
+}
+
+static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
+                                     struct dwc2_hcd_urb *urb, u8 dev_addr,
+                                     u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
+{
+       dev_vdbg(hsotg->dev,
+                "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
+                dev_addr, ep_num, ep_dir, ep_type, mps);
+       urb->pipe_info.dev_addr = dev_addr;
+       urb->pipe_info.ep_num = ep_num;
+       urb->pipe_info.pipe_type = ep_type;
+       urb->pipe_info.pipe_dir = ep_dir;
+       urb->pipe_info.mps = mps;
+}
+
+/*
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
+{
+#ifdef DEBUG
+       struct dwc2_host_chan *chan;
+       struct dwc2_hcd_urb *urb;
+       struct dwc2_qtd *qtd;
+       int num_channels;
+       u32 np_tx_status;
+       u32 p_tx_status;
+       int i;
+
+       num_channels = hsotg->core_params->host_channels;
+       dev_dbg(hsotg->dev, "\n");
+       dev_dbg(hsotg->dev,
+               "************************************************************\n");
+       dev_dbg(hsotg->dev, "HCD State:\n");
+       dev_dbg(hsotg->dev, "  Num channels: %d\n", num_channels);
+
+       for (i = 0; i < num_channels; i++) {
+               chan = hsotg->hc_ptr_array[i];
+               dev_dbg(hsotg->dev, "  Channel %d:\n", i);
+               dev_dbg(hsotg->dev,
+                       "    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+                       chan->dev_addr, chan->ep_num, chan->ep_is_in);
+               dev_dbg(hsotg->dev, "    speed: %d\n", chan->speed);
+               dev_dbg(hsotg->dev, "    ep_type: %d\n", chan->ep_type);
+               dev_dbg(hsotg->dev, "    max_packet: %d\n", chan->max_packet);
+               dev_dbg(hsotg->dev, "    data_pid_start: %d\n",
+                       chan->data_pid_start);
+               dev_dbg(hsotg->dev, "    multi_count: %d\n", chan->multi_count);
+               dev_dbg(hsotg->dev, "    xfer_started: %d\n",
+                       chan->xfer_started);
+               dev_dbg(hsotg->dev, "    xfer_buf: %p\n", chan->xfer_buf);
+               dev_dbg(hsotg->dev, "    xfer_dma: %08lx\n",
+                       (unsigned long)chan->xfer_dma);
+               dev_dbg(hsotg->dev, "    xfer_len: %d\n", chan->xfer_len);
+               dev_dbg(hsotg->dev, "    xfer_count: %d\n", chan->xfer_count);
+               dev_dbg(hsotg->dev, "    halt_on_queue: %d\n",
+                       chan->halt_on_queue);
+               dev_dbg(hsotg->dev, "    halt_pending: %d\n",
+                       chan->halt_pending);
+               dev_dbg(hsotg->dev, "    halt_status: %d\n", chan->halt_status);
+               dev_dbg(hsotg->dev, "    do_split: %d\n", chan->do_split);
+               dev_dbg(hsotg->dev, "    complete_split: %d\n",
+                       chan->complete_split);
+               dev_dbg(hsotg->dev, "    hub_addr: %d\n", chan->hub_addr);
+               dev_dbg(hsotg->dev, "    hub_port: %d\n", chan->hub_port);
+               dev_dbg(hsotg->dev, "    xact_pos: %d\n", chan->xact_pos);
+               dev_dbg(hsotg->dev, "    requests: %d\n", chan->requests);
+               dev_dbg(hsotg->dev, "    qh: %p\n", chan->qh);
+
+               if (chan->xfer_started) {
+                       u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
+
+                       hfnum = readl(hsotg->regs + HFNUM);
+                       hcchar = readl(hsotg->regs + HCCHAR(i));
+                       hctsiz = readl(hsotg->regs + HCTSIZ(i));
+                       hcint = readl(hsotg->regs + HCINT(i));
+                       hcintmsk = readl(hsotg->regs + HCINTMSK(i));
+                       dev_dbg(hsotg->dev, "    hfnum: 0x%08x\n", hfnum);
+                       dev_dbg(hsotg->dev, "    hcchar: 0x%08x\n", hcchar);
+                       dev_dbg(hsotg->dev, "    hctsiz: 0x%08x\n", hctsiz);
+                       dev_dbg(hsotg->dev, "    hcint: 0x%08x\n", hcint);
+                       dev_dbg(hsotg->dev, "    hcintmsk: 0x%08x\n", hcintmsk);
+               }
+
+               if (!(chan->xfer_started && chan->qh))
+                       continue;
+
+               list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
+                       if (!qtd->in_process)
+                               break;
+                       urb = qtd->urb;
+                       dev_dbg(hsotg->dev, "    URB Info:\n");
+                       dev_dbg(hsotg->dev, "      qtd: %p, urb: %p\n",
+                               qtd, urb);
+                       if (urb) {
+                               dev_dbg(hsotg->dev,
+                                       "      Dev: %d, EP: %d %s\n",
+                                       dwc2_hcd_get_dev_addr(&urb->pipe_info),
+                                       dwc2_hcd_get_ep_num(&urb->pipe_info),
+                                       dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
+                                       "IN" : "OUT");
+                               dev_dbg(hsotg->dev,
+                                       "      Max packet size: %d\n",
+                                       dwc2_hcd_get_mps(&urb->pipe_info));
+                               dev_dbg(hsotg->dev,
+                                       "      transfer_buffer: %p\n",
+                                       urb->buf);
+                               dev_dbg(hsotg->dev, "      transfer_dma: %p\n",
+                                       (void *)urb->dma);
+                               dev_dbg(hsotg->dev,
+                                       "      transfer_buffer_length: %d\n",
+                                       urb->length);
+                               dev_dbg(hsotg->dev, "      actual_length: %d\n",
+                                       urb->actual_length);
+                       }
+               }
+       }
+
+       dev_dbg(hsotg->dev, "  non_periodic_channels: %d\n",
+               hsotg->non_periodic_channels);
+       dev_dbg(hsotg->dev, "  periodic_channels: %d\n",
+               hsotg->periodic_channels);
+       dev_dbg(hsotg->dev, "  periodic_usecs: %d\n", hsotg->periodic_usecs);
+       np_tx_status = readl(hsotg->regs + GNPTXSTS);
+       dev_dbg(hsotg->dev, "  NP Tx Req Queue Space Avail: %d\n",
+               np_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+               TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+       dev_dbg(hsotg->dev, "  NP Tx FIFO Space Avail: %d\n",
+               np_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+               TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+       p_tx_status = readl(hsotg->regs + HPTXSTS);
+       dev_dbg(hsotg->dev, "  P Tx Req Queue Space Avail: %d\n",
+               p_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+               TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+       dev_dbg(hsotg->dev, "  P Tx FIFO Space Avail: %d\n",
+               p_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+               TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+       dwc2_hcd_dump_frrem(hsotg);
+       dwc2_dump_global_registers(hsotg);
+       dwc2_dump_host_registers(hsotg);
+       dev_dbg(hsotg->dev,
+               "************************************************************\n");
+       dev_dbg(hsotg->dev, "\n");
+#endif
+}
+
+/*
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg)
+{
+#ifdef DWC2_DUMP_FRREM
+       dev_dbg(hsotg->dev, "Frame remaining at SOF:\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->frrem_samples, hsotg->frrem_accum,
+               hsotg->frrem_samples > 0 ?
+               hsotg->frrem_accum / hsotg->frrem_samples : 0);
+       dev_dbg(hsotg->dev, "\n");
+       dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 7):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_7_samples,
+               hsotg->hfnum_7_frrem_accum,
+               hsotg->hfnum_7_samples > 0 ?
+               hsotg->hfnum_7_frrem_accum / hsotg->hfnum_7_samples : 0);
+       dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 0):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_0_samples,
+               hsotg->hfnum_0_frrem_accum,
+               hsotg->hfnum_0_samples > 0 ?
+               hsotg->hfnum_0_frrem_accum / hsotg->hfnum_0_samples : 0);
+       dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 1-6):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_other_samples,
+               hsotg->hfnum_other_frrem_accum,
+               hsotg->hfnum_other_samples > 0 ?
+               hsotg->hfnum_other_frrem_accum / hsotg->hfnum_other_samples :
+               0);
+       dev_dbg(hsotg->dev, "\n");
+       dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 7):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_7_samples_a, hsotg->hfnum_7_frrem_accum_a,
+               hsotg->hfnum_7_samples_a > 0 ?
+               hsotg->hfnum_7_frrem_accum_a / hsotg->hfnum_7_samples_a : 0);
+       dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 0):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_0_samples_a, hsotg->hfnum_0_frrem_accum_a,
+               hsotg->hfnum_0_samples_a > 0 ?
+               hsotg->hfnum_0_frrem_accum_a / hsotg->hfnum_0_samples_a : 0);
+       dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 1-6):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_other_samples_a, hsotg->hfnum_other_frrem_accum_a,
+               hsotg->hfnum_other_samples_a > 0 ?
+               hsotg->hfnum_other_frrem_accum_a / hsotg->hfnum_other_samples_a
+               : 0);
+       dev_dbg(hsotg->dev, "\n");
+       dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 7):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_7_samples_b, hsotg->hfnum_7_frrem_accum_b,
+               hsotg->hfnum_7_samples_b > 0 ?
+               hsotg->hfnum_7_frrem_accum_b / hsotg->hfnum_7_samples_b : 0);
+       dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 0):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_0_samples_b, hsotg->hfnum_0_frrem_accum_b,
+               (hsotg->hfnum_0_samples_b > 0) ?
+               hsotg->hfnum_0_frrem_accum_b / hsotg->hfnum_0_samples_b : 0);
+       dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 1-6):\n");
+       dev_dbg(hsotg->dev, "  samples %u, accum %llu, avg %llu\n",
+               hsotg->hfnum_other_samples_b, hsotg->hfnum_other_frrem_accum_b,
+               (hsotg->hfnum_other_samples_b > 0) ?
+               hsotg->hfnum_other_frrem_accum_b / hsotg->hfnum_other_samples_b
+               : 0);
+#endif
+}
+
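+/*
+ * Private data for the usb_hcd wrapper, stored in the hcd_priv area whose
+ * size is set by hcd_priv_size in the hc_driver structure. Holds a pointer
+ * back to the dwc2_hsotg instance.
+ */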
+struct wrapper_priv_data {
+       struct dwc2_hsotg *hsotg;
+};
+
+/* Gets the dwc2_hsotg from a usb_hcd */
+static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
+{
+       struct wrapper_priv_data *p;
+
+       p = (struct wrapper_priv_data *) &hcd->hcd_priv;
+       return p->hsotg;
+}
+
+static int _dwc2_hcd_start(struct usb_hcd *hcd);
+
+void dwc2_host_start(struct dwc2_hsotg *hsotg)
+{
+       struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+       hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
+       _dwc2_hcd_start(hcd);
+}
+
+void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
+{
+       struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+       hcd->self.is_b_host = 0;
+}
+
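+/*
+ * Returns the hub address and hub port of the transaction translator serving
+ * the given device, or a hub address of 0 if the device is not below a TT
+ */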
+void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
+                       int *hub_port)
+{
+       struct urb *urb = context;
+
+       if (urb->dev->tt)
+               *hub_addr = urb->dev->tt->hub->devnum;
+       else
+               *hub_addr = 0;
+       *hub_port = urb->dev->ttport;
+}
+
+int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
+{
+       struct urb *urb = context;
+
+       return urb->dev->speed;
+}
+
+static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
+                                       struct urb *urb)
+{
+       struct usb_bus *bus = hcd_to_bus(hcd);
+
+       if (urb->interval)
+               bus->bandwidth_allocated += bw / urb->interval;
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+               bus->bandwidth_isoc_reqs++;
+       else
+               bus->bandwidth_int_reqs++;
+}
+
+static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
+                                   struct urb *urb)
+{
+       struct usb_bus *bus = hcd_to_bus(hcd);
+
+       if (urb->interval)
+               bus->bandwidth_allocated -= bw / urb->interval;
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+               bus->bandwidth_isoc_reqs--;
+       else
+               bus->bandwidth_int_reqs--;
+}
+
+/*
+ * Sets the final status of an URB and returns it to the upper layer. Any
+ * required cleanup of the URB is performed.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
+                       struct dwc2_hcd_urb *dwc2_urb, int status)
+{
+       struct urb *urb = context;
+       int i;
+
+       if (!urb) {
+               dev_dbg(hsotg->dev, "## %s: context is NULL ##\n", __func__);
+               return;
+       }
+
+       if (!dwc2_urb) {
+               dev_dbg(hsotg->dev, "## %s: dwc2_urb is NULL ##\n", __func__);
+               return;
+       }
+
+       urb->actual_length = dwc2_hcd_urb_get_actual_length(dwc2_urb);
+
+       dev_vdbg(hsotg->dev,
+                "%s: urb %p device %d ep %d-%s status %d actual %d\n",
+                __func__, urb, usb_pipedevice(urb->pipe),
+                usb_pipeendpoint(urb->pipe),
+                usb_pipein(urb->pipe) ? "IN" : "OUT", status,
+                urb->actual_length);
+
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+               for (i = 0; i < urb->number_of_packets; i++)
+                       dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
+                                i, urb->iso_frame_desc[i].status);
+       }
+
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+               urb->error_count = dwc2_hcd_urb_get_error_count(dwc2_urb);
+               for (i = 0; i < urb->number_of_packets; ++i) {
+                       urb->iso_frame_desc[i].actual_length =
+                               dwc2_hcd_urb_get_iso_desc_actual_length(
+                                               dwc2_urb, i);
+                       urb->iso_frame_desc[i].status =
+                               dwc2_hcd_urb_get_iso_desc_status(dwc2_urb, i);
+               }
+       }
+
+       urb->status = status;
+       urb->hcpriv = NULL;
+       if (!status) {
+               if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+                   urb->actual_length < urb->transfer_buffer_length)
+                       urb->status = -EREMOTEIO;
+       }
+
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
+           usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+               struct usb_host_endpoint *ep = urb->ep;
+
+               if (ep)
+                       dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg),
+                                       dwc2_hcd_get_ep_bandwidth(hsotg, ep),
+                                       urb);
+       }
+
+       kfree(dwc2_urb);
+
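+       /*
+        * Release the driver lock across the giveback, since the completion
+        * handler may call back into the HCD and try to take the lock again
+        */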
+       spin_unlock(&hsotg->lock);
+       usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
+       spin_lock(&hsotg->lock);
+}
+
+/*
+ * Work queue function for starting the HCD when A-Cable is connected
+ */
+static void dwc2_hcd_start_func(struct work_struct *work)
+{
+       struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+                                               start_work.work);
+
+       dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
+       dwc2_host_start(hsotg);
+}
+
+/*
+ * Reset work queue function
+ */
+static void dwc2_hcd_reset_func(struct work_struct *work)
+{
+       struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+                                               reset_work.work);
+       u32 hprt0;
+
+       dev_dbg(hsotg->dev, "USB RESET function called\n");
+       hprt0 = dwc2_read_hprt0(hsotg);
+       hprt0 &= ~HPRT0_RST;
+       writel(hprt0, hsotg->regs + HPRT0);
+       hsotg->flags.b.port_reset_change = 1;
+}
+
+/*
+ * =========================================================================
+ *  Linux HC Driver Functions
+ * =========================================================================
+ */
+
+/*
+ * Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure.
+ */
+static int _dwc2_hcd_start(struct usb_hcd *hcd)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       struct usb_bus *bus = hcd_to_bus(hcd);
+       unsigned long flags;
+
+       dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+
+       hcd->state = HC_STATE_RUNNING;
+
+       if (dwc2_is_device_mode(hsotg)) {
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+               return 0;       /* why 0 ?? */
+       }
+
+       dwc2_hcd_reinit(hsotg);
+
+       /* Initialize and connect root hub if one is not already attached */
+       if (bus->root_hub) {
+               dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
+               /* Inform the HUB driver to resume */
+               usb_hcd_resume_root_hub(hcd);
+       }
+
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+       return 0;
+}
+
+/*
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ */
+static void _dwc2_hcd_stop(struct usb_hcd *hcd)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       unsigned long flags;
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+       dwc2_hcd_stop(hsotg);
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+
+       usleep_range(1000, 3000);
+}
+
+/* Returns the current frame number */
+static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+       return dwc2_hcd_get_frame_number(hsotg);
+}
+
+static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
+                              char *fn_name)
+{
+#ifdef VERBOSE_DEBUG
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       char *pipetype;
+       char *speed;
+
+       dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
+       dev_vdbg(hsotg->dev, "  Device address: %d\n",
+                usb_pipedevice(urb->pipe));
+       dev_vdbg(hsotg->dev, "  Endpoint: %d, %s\n",
+                usb_pipeendpoint(urb->pipe),
+                usb_pipein(urb->pipe) ? "IN" : "OUT");
+
+       switch (usb_pipetype(urb->pipe)) {
+       case PIPE_CONTROL:
+               pipetype = "CONTROL";
+               break;
+       case PIPE_BULK:
+               pipetype = "BULK";
+               break;
+       case PIPE_INTERRUPT:
+               pipetype = "INTERRUPT";
+               break;
+       case PIPE_ISOCHRONOUS:
+               pipetype = "ISOCHRONOUS";
+               break;
+       default:
+               pipetype = "UNKNOWN";
+               break;
+       }
+
+       dev_vdbg(hsotg->dev, "  Endpoint type: %s %s (%s)\n", pipetype,
+                usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
+                "IN" : "OUT");
+
+       switch (urb->dev->speed) {
+       case USB_SPEED_HIGH:
+               speed = "HIGH";
+               break;
+       case USB_SPEED_FULL:
+               speed = "FULL";
+               break;
+       case USB_SPEED_LOW:
+               speed = "LOW";
+               break;
+       default:
+               speed = "UNKNOWN";
+               break;
+       }
+
+       dev_vdbg(hsotg->dev, "  Speed: %s\n", speed);
+       dev_vdbg(hsotg->dev, "  Max packet size: %d\n",
+                usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+       dev_vdbg(hsotg->dev, "  Data buffer length: %d\n",
+                urb->transfer_buffer_length);
+       dev_vdbg(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %p\n",
+                urb->transfer_buffer, (void *)urb->transfer_dma);
+       dev_vdbg(hsotg->dev, "  Setup buffer: %p, Setup DMA: %p\n",
+                urb->setup_packet, (void *)urb->setup_dma);
+       dev_vdbg(hsotg->dev, "  Interval: %d\n", urb->interval);
+
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+               int i;
+
+               for (i = 0; i < urb->number_of_packets; i++) {
+                       dev_vdbg(hsotg->dev, "  ISO Desc %d:\n", i);
+                       dev_vdbg(hsotg->dev, "    offset: %d, length %d\n",
+                                urb->iso_frame_desc[i].offset,
+                                urb->iso_frame_desc[i].length);
+               }
+       }
+#endif
+}
+
+/*
+ * Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB.
+ */
+static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+                                gfp_t mem_flags)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       struct usb_host_endpoint *ep = urb->ep;
+       struct dwc2_hcd_urb *dwc2_urb;
+       int i;
+       int alloc_bandwidth = 0;
+       int retval = 0;
+       u8 ep_type = 0;
+       u32 tflags = 0;
+       void *buf;
+       unsigned long flags;
+
+       dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
+       dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
+
+       if (ep == NULL)
+               return -EINVAL;
+
+       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
+           usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+               spin_lock_irqsave(&hsotg->lock, flags);
+               if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
+                       alloc_bandwidth = 1;
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+       }
+
+       switch (usb_pipetype(urb->pipe)) {
+       case PIPE_CONTROL:
+               ep_type = USB_ENDPOINT_XFER_CONTROL;
+               break;
+       case PIPE_ISOCHRONOUS:
+               ep_type = USB_ENDPOINT_XFER_ISOC;
+               break;
+       case PIPE_BULK:
+               ep_type = USB_ENDPOINT_XFER_BULK;
+               break;
+       case PIPE_INTERRUPT:
+               ep_type = USB_ENDPOINT_XFER_INT;
+               break;
+       default:
+               dev_warn(hsotg->dev, "Wrong ep type\n");
+       }
+
+       dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
+                                     mem_flags);
+       if (!dwc2_urb)
+               return -ENOMEM;
+
+       dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
+                                 usb_pipeendpoint(urb->pipe), ep_type,
+                                 usb_pipein(urb->pipe),
+                                 usb_maxpacket(urb->dev, urb->pipe,
+                                               !(usb_pipein(urb->pipe))));
+
+       buf = urb->transfer_buffer;
+       if (hcd->self.uses_dma) {
+               /*
+                * Calculate the virtual address from the physical address,
+                * because some class drivers may not fill in transfer_buffer.
+                * In Buffer DMA mode the virtual address is used when
+                * handling non-DWORD-aligned buffers.
+                */
+               buf = bus_to_virt(urb->transfer_dma);
+       }
+
+       if (!(urb->transfer_flags & URB_NO_INTERRUPT))
+               tflags |= URB_GIVEBACK_ASAP;
+       if (urb->transfer_flags & URB_ZERO_PACKET)
+               tflags |= URB_SEND_ZERO_PACKET;
+
+       dwc2_urb->priv = urb;
+       dwc2_urb->buf = buf;
+       dwc2_urb->dma = urb->transfer_dma;
+       dwc2_urb->length = urb->transfer_buffer_length;
+       dwc2_urb->setup_packet = urb->setup_packet;
+       dwc2_urb->setup_dma = urb->setup_dma;
+       dwc2_urb->flags = tflags;
+       dwc2_urb->interval = urb->interval;
+       dwc2_urb->status = -EINPROGRESS;
+
+       for (i = 0; i < urb->number_of_packets; ++i)
+               dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
+                                                urb->iso_frame_desc[i].offset,
+                                                urb->iso_frame_desc[i].length);
+
+       urb->hcpriv = dwc2_urb;
+       retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv,
+                                     mem_flags);
+       if (retval) {
+               urb->hcpriv = NULL;
+               kfree(dwc2_urb);
+       } else {
+               if (alloc_bandwidth) {
+                       spin_lock_irqsave(&hsotg->lock, flags);
+                       dwc2_allocate_bus_bandwidth(hcd,
+                                       dwc2_hcd_get_ep_bandwidth(hsotg, ep),
+                                       urb);
+                       spin_unlock_irqrestore(&hsotg->lock, flags);
+               }
+       }
+
+       return retval;
+}
+
+/*
+ * Aborts/cancels a USB transfer request. Always returns 0 to indicate success.
+ */
+static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+                                int status)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       int rc = 0;
+       unsigned long flags;
+
+       dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
+       dwc2_dump_urb_info(hcd, urb, "urb_dequeue");
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+
+       if (!urb->hcpriv) {
+               dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
+               goto out;
+       }
+
+       rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
+
+       kfree(urb->hcpriv);
+       urb->hcpriv = NULL;
+
+       /* Higher layer software sets URB status */
+       spin_unlock(&hsotg->lock);
+       usb_hcd_giveback_urb(hcd, urb, status);
+       spin_lock(&hsotg->lock);
+
+       dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
+       dev_dbg(hsotg->dev, "  urb->status = %d\n", urb->status);
+out:
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+
+       return rc;
+}
+
+/*
+ * Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued.
+ */
+static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
+                                      struct usb_host_endpoint *ep)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+       dev_dbg(hsotg->dev,
+               "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
+               ep->desc.bEndpointAddress, ep->hcpriv);
+       dwc2_hcd_endpoint_disable(hsotg, ep, 250);
+}
+
+/*
+ * Resets endpoint-specific parameter values. In the current version this is
+ * used only to reset the data toggle, as a workaround. This function can be
+ * called from the usb_clear_halt() routine.
+ */
+static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
+                                    struct usb_host_endpoint *ep)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       int is_control = usb_endpoint_xfer_control(&ep->desc);
+       int is_out = usb_endpoint_dir_out(&ep->desc);
+       int epnum = usb_endpoint_num(&ep->desc);
+       struct usb_device *udev;
+       unsigned long flags;
+
+       dev_dbg(hsotg->dev,
+               "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
+               ep->desc.bEndpointAddress);
+
+       udev = to_usb_device(hsotg->dev);
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+
+       usb_settoggle(udev, epnum, is_out, 0);
+       if (is_control)
+               usb_settoggle(udev, epnum, !is_out, 0);
+       dwc2_hcd_endpoint_reset(hsotg, ep);
+
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/*
+ * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs
+ */
+static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       int retval = dwc2_hcd_intr(hsotg);
+
+       return IRQ_RETVAL(retval);
+}
+
+/*
+ * Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0.
+ */
+static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+       buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
+       return buf[0] != 0;
+}
+
+/* Handles hub class-specific requests */
+static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
+                                u16 windex, char *buf, u16 wlength)
+{
+       int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
+                                         wvalue, windex, buf, wlength);
+       return retval;
+}
+
+/* Handles hub TT buffer clear completions */
+static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
+                                              struct usb_host_endpoint *ep)
+{
+       struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+       struct dwc2_qh *qh;
+       unsigned long flags;
+
+       qh = ep->hcpriv;
+       if (!qh)
+               return;
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+       qh->tt_buffer_dirty = 0;
+
+       if (hsotg->flags.b.port_connect_status)
+               dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
+
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+static struct hc_driver dwc2_hc_driver = {
+       .description = "dwc2_hsotg",
+       .product_desc = "DWC OTG Controller",
+       .hcd_priv_size = sizeof(struct wrapper_priv_data),
+
+       .irq = _dwc2_hcd_irq,
+       .flags = HCD_MEMORY | HCD_USB2,
+
+       .start = _dwc2_hcd_start,
+       .stop = _dwc2_hcd_stop,
+       .urb_enqueue = _dwc2_hcd_urb_enqueue,
+       .urb_dequeue = _dwc2_hcd_urb_dequeue,
+       .endpoint_disable = _dwc2_hcd_endpoint_disable,
+       .endpoint_reset = _dwc2_hcd_endpoint_reset,
+       .get_frame_number = _dwc2_hcd_get_frame_number,
+
+       .hub_status_data = _dwc2_hcd_hub_status_data,
+       .hub_control = _dwc2_hcd_hub_control,
+       .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
+};
+
+/*
+ * Frees secondary storage associated with the dwc2_hsotg structure contained
+ * in the struct usb_hcd field
+ */
+static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
+{
+       u32 ahbcfg;
+       u32 dctl;
+       int i;
+
+       dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");
+
+       /* Free memory for QH/QTD lists */
+       dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
+       dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
+       dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
+       dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
+       dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
+       dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);
+
+       /* Free memory for the host channels */
+       for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+               struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
+
+               if (chan != NULL) {
+                       dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
+                               i, chan);
+                       hsotg->hc_ptr_array[i] = NULL;
+                       kfree(chan);
+               }
+       }
+
+       if (hsotg->core_params->dma_enable > 0) {
+               if (hsotg->status_buf) {
+                       dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
+                                         hsotg->status_buf,
+                                         hsotg->status_buf_dma);
+                       hsotg->status_buf = NULL;
+               }
+       } else {
+               kfree(hsotg->status_buf);
+               hsotg->status_buf = NULL;
+       }
+
+       ahbcfg = readl(hsotg->regs + GAHBCFG);
+
+       /* Disable all interrupts */
+       ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
+       writel(ahbcfg, hsotg->regs + GAHBCFG);
+       writel(0, hsotg->regs + GINTMSK);
+
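+       /* Force a soft disconnect on 3.00a and later cores */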
+       if (hsotg->snpsid >= DWC2_CORE_REV_3_00a) {
+               dctl = readl(hsotg->regs + DCTL);
+               dctl |= DCTL_SFTDISCON;
+               writel(dctl, hsotg->regs + DCTL);
+       }
+
+       if (hsotg->wq_otg) {
+               if (!cancel_work_sync(&hsotg->wf_otg))
+                       flush_workqueue(hsotg->wq_otg);
+               destroy_workqueue(hsotg->wq_otg);
+       }
+
+       kfree(hsotg->core_params);
+       hsotg->core_params = NULL;
+       del_timer(&hsotg->wkp_timer);
+}
+
+static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
+{
+       /* Turn off all host-specific interrupts */
+       dwc2_disable_host_interrupts(hsotg);
+
+       dwc2_hcd_free(hsotg);
+}
+
+static void dwc2_set_uninitialized(int *p, int size)
+{
+       int i;
+
+       for (i = 0; i < size; i++)
+               p[i] = -1;
+}
+
+/*
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ */
+int dwc2_hcd_init(struct device *dev, struct dwc2_hsotg *hsotg, int irq,
+                 struct dwc2_core_params *params)
+{
+       struct usb_hcd *hcd;
+       struct dwc2_host_chan *channel;
+       u32 snpsid, gusbcfg, hcfg;
+       int i, num_channels;
+       int retval = -ENOMEM;
+
+       dev_dbg(dev, "DWC OTG HCD INIT\n");
+
+       /*
+        * Attempt to ensure this device is really a DWC_otg Controller.
+        * Read and verify the GSNPSID register contents. The value should be
+        * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
+        * as in "OTG version 2.xx" or "OTG version 3.xx".
+        */
+       snpsid = readl(hsotg->regs + GSNPSID);
+       if ((snpsid & 0xfffff000) != 0x4f542000 &&
+           (snpsid & 0xfffff000) != 0x4f543000) {
+               dev_err(dev, "Bad value for GSNPSID: 0x%08x\n", snpsid);
+               retval = -ENODEV;
+               goto error1;
+       }
+
+       hcd = usb_create_hcd(&dwc2_hc_driver, dev, dev_name(dev));
+       if (!hcd)
+               goto error1;
+
+       hcd->has_tt = 1;
+
+       spin_lock_init(&hsotg->lock);
+       ((struct wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg;
+       hsotg->priv = hcd;
+       hsotg->dev = dev;
+
+       /*
+        * Store the contents of the hardware configuration registers here for
+        * easy access later
+        */
+       hsotg->hwcfg1 = readl(hsotg->regs + GHWCFG1);
+       hsotg->hwcfg2 = readl(hsotg->regs + GHWCFG2);
+       hsotg->hwcfg3 = readl(hsotg->regs + GHWCFG3);
+       hsotg->hwcfg4 = readl(hsotg->regs + GHWCFG4);
+
+       dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hsotg->hwcfg1);
+       dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hsotg->hwcfg2);
+       dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hsotg->hwcfg3);
+       dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hsotg->hwcfg4);
+
+       /* Force host mode to get HPTXFSIZ exact power on value */
+       gusbcfg = readl(hsotg->regs + GUSBCFG);
+       gusbcfg |= GUSBCFG_FORCEHOSTMODE;
+       writel(gusbcfg, hsotg->regs + GUSBCFG);
+       usleep_range(100000, 150000);
+
+       hsotg->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
+       dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hsotg->hptxfsiz);
+       gusbcfg = readl(hsotg->regs + GUSBCFG);
+       gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
+       writel(gusbcfg, hsotg->regs + GUSBCFG);
+       usleep_range(100000, 150000);
+
+       hcfg = readl(hsotg->regs + HCFG);
+       dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
+       dev_dbg(hsotg->dev, "op_mode=%0x\n",
+               hsotg->hwcfg2 >> GHWCFG2_OP_MODE_SHIFT &
+               GHWCFG2_OP_MODE_MASK >> GHWCFG2_OP_MODE_SHIFT);
+       dev_dbg(hsotg->dev, "arch=%0x\n",
+               hsotg->hwcfg2 >> GHWCFG2_ARCHITECTURE_SHIFT &
+               GHWCFG2_ARCHITECTURE_MASK >> GHWCFG2_ARCHITECTURE_SHIFT);
+       dev_dbg(hsotg->dev, "num_dev_ep=%d\n",
+               hsotg->hwcfg2 >> GHWCFG2_NUM_DEV_EP_SHIFT &
+               GHWCFG2_NUM_DEV_EP_MASK >> GHWCFG2_NUM_DEV_EP_SHIFT);
+       dev_dbg(hsotg->dev, "max_host_chan=%d\n",
+               hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
+               GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT);
+       dev_dbg(hsotg->dev, "nonperio_tx_q_depth=0x%0x\n",
+               hsotg->hwcfg2 >> GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT &
+               GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK >>
+                               GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT);
+       dev_dbg(hsotg->dev, "host_perio_tx_q_depth=0x%0x\n",
+               hsotg->hwcfg2 >> GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT &
+               GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK >>
+                               GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT);
+       dev_dbg(hsotg->dev, "dev_token_q_depth=0x%0x\n",
+               hsotg->hwcfg2 >> GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT &
+               GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK >>
+                               GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+       hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) *
+                                        FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
+       if (!hsotg->frame_num_array)
+               goto error2;
+       hsotg->last_frame_num_array = kzalloc(
+                       sizeof(*hsotg->last_frame_num_array) *
+                       FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
+       if (!hsotg->last_frame_num_array)
+               goto error2;
+       hsotg->last_frame_num = HFNUM_MAX_FRNUM;
+#endif
+
+       hsotg->core_params = kzalloc(sizeof(*hsotg->core_params), GFP_KERNEL);
+       if (!hsotg->core_params)
+               goto error2;
+
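+       /*
+        * Initialize all core parameters to -1 ("uninitialized") before the
+        * validated values are filled in by dwc2_set_parameters()
+        */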
+       dwc2_set_uninitialized((int *)hsotg->core_params,
+                              sizeof(*hsotg->core_params) / sizeof(int));
+
+       /* Validate parameter values */
+       dwc2_set_parameters(hsotg, params);
+
+       /* Initialize the DWC_otg core, and select the Phy type */
+       retval = dwc2_core_init(hsotg, true);
+       if (retval)
+               goto error2;
+
+       /*
+        * Disable the global interrupt until all the interrupt handlers are
+        * installed
+        */
+       dwc2_disable_global_interrupts(hsotg);
+
+       /* Create new workqueue and init work */
+       hsotg->wq_otg = create_singlethread_workqueue("dwc_otg");
+       if (!hsotg->wq_otg) {
+               dev_err(hsotg->dev, "Failed to create workqueue\n");
+               goto error2;
+       }
+       INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
+
+       hsotg->snpsid = readl(hsotg->regs + GSNPSID);
+       dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x\n",
+               hsotg->snpsid >> 12 & 0xf, hsotg->snpsid >> 8 & 0xf,
+               hsotg->snpsid >> 4 & 0xf, hsotg->snpsid & 0xf);
+
+       setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
+                   (unsigned long)hsotg);
+
+       /* Initialize the non-periodic schedule */
+       INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
+       INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
+
+       /* Initialize the periodic schedule */
+       INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
+       INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
+       INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
+       INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
+
+       /*
+        * Create a host channel descriptor for each host channel implemented
+        * in the controller. Initialize the channel descriptor array.
+        */
+       INIT_LIST_HEAD(&hsotg->free_hc_list);
+       num_channels = hsotg->core_params->host_channels;
+       memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
+
+       for (i = 0; i < num_channels; i++) {
+               channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+               if (channel == NULL)
+                       goto error3;
+               channel->hc_num = i;
+               hsotg->hc_ptr_array[i] = channel;
+       }
+
+       /* Initialize hsotg start work */
+       INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
+
+       /* Initialize port reset work */
+       INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
+
+       /*
+        * Allocate space for storing data on status transactions. Normally
+        * no data is sent, but this space acts as a bit bucket.
+        */
+       if (hsotg->core_params->dma_enable > 0)
+               hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
+                                       DWC2_HCD_STATUS_BUF_SIZE,
+                                       &hsotg->status_buf_dma, GFP_KERNEL);
+       else
+               hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
+                                         GFP_KERNEL);
+
+       if (!hsotg->status_buf)
+               goto error3;
+
+       hsotg->otg_port = 1;
+       hsotg->frame_list = NULL;
+       hsotg->frame_list_dma = 0;
+       hsotg->periodic_qh_count = 0;
+
+       /* Initialize lx_state to the L3 disconnected state */
+       hsotg->lx_state = DWC2_L3;
+
+       hcd->self.otg_port = hsotg->otg_port;
+
+       /* Don't support SG list at this point */
+       hcd->self.sg_tablesize = 0;
+
+       /*
+        * Finish generic HCD initialization and start the HCD. This function
+        * allocates the DMA buffer pool, registers the USB bus, requests the
+        * IRQ line, and calls hcd_start method.
+        */
+       retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+       if (retval < 0)
+               goto error3;
+
+       dwc2_dump_global_registers(hsotg);
+       dwc2_dump_host_registers(hsotg);
+       dwc2_hcd_dump_state(hsotg);
+
+       dwc2_enable_global_interrupts(hsotg);
+
+       return 0;
+
+error3:
+       dwc2_hcd_release(hsotg);
+error2:
+       kfree(hsotg->core_params);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+       kfree(hsotg->last_frame_num_array);
+       kfree(hsotg->frame_num_array);
+#endif
+
+       usb_put_hcd(hcd);
+error1:
+       dev_err(dev, "%s() FAILED, returning %d\n", __func__, retval);
+       return retval;
+}
+EXPORT_SYMBOL_GPL(dwc2_hcd_init);
+
+/*
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void dwc2_hcd_remove(struct device *dev, struct dwc2_hsotg *hsotg)
+{
+       struct usb_hcd *hcd;
+
+       dev_dbg(dev, "DWC OTG HCD REMOVE\n");
+
+       hcd = dwc2_hsotg_to_hcd(hsotg);
+       dev_dbg(dev, "hsotg->hcd = %p\n", hcd);
+
+       if (!hcd) {
+               dev_dbg(dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
+                       __func__);
+               return;
+       }
+
+       usb_remove_hcd(hcd);
+       hsotg->priv = NULL;
+       dwc2_hcd_release(hsotg);
+       kfree(hsotg->core_params);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+       kfree(hsotg->last_frame_num_array);
+       kfree(hsotg->frame_num_array);
+#endif
+
+       usb_put_hcd(hcd);
+}
+EXPORT_SYMBOL_GPL(dwc2_hcd_remove);
diff --git a/drivers/staging/dwc2/hcd.h b/drivers/staging/dwc2/hcd.h
new file mode 100644 (file)
index 0000000..775337e
--- /dev/null
@@ -0,0 +1,737 @@
+/*
+ * hcd.h - DesignWare HS OTG Controller host-mode declarations
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __DWC2_HCD_H__
+#define __DWC2_HCD_H__
+
+/*
+ * This file contains the structures, constants, and interfaces for the
+ * Host Controller Driver (HCD)
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+struct dwc2_qh;
+
+/**
+ * struct dwc2_host_chan - Software host channel descriptor
+ *
+ * @hc_num:             Host channel number, used for register address lookup
+ * @dev_addr:           Address of the device
+ * @ep_num:             Endpoint of the device
+ * @ep_is_in:           Endpoint direction
+ * @speed:              Device speed. One of the following values:
+ *                       - USB_SPEED_LOW
+ *                       - USB_SPEED_FULL
+ *                       - USB_SPEED_HIGH
+ * @ep_type:            Endpoint type. One of the following values:
+ *                       - USB_ENDPOINT_XFER_CONTROL: 0
+ *                       - USB_ENDPOINT_XFER_ISOC:    1
+ *                       - USB_ENDPOINT_XFER_BULK:    2
+ *                       - USB_ENDPOINT_XFER_INTR:    3
+ * @max_packet:         Max packet size in bytes
+ * @data_pid_start:     PID for initial transaction.
+ *                       0: DATA0
+ *                       1: DATA2
+ *                       2: DATA1
+ *                       3: MDATA (non-Control EP),
+ *                          SETUP (Control EP)
+ * @multi_count:        Number of additional periodic transactions per
+ *                      (micro)frame
+ * @xfer_buf:           Pointer to current transfer buffer position
+ * @xfer_dma:           DMA address of xfer_buf
+ * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
+ *                      DWORD aligned
+ * @xfer_len:           Total number of bytes to transfer
+ * @xfer_count:         Number of bytes transferred so far
+ * @start_pkt_count:    Packet count at start of transfer
+ * @xfer_started:       True if the transfer has been started
+ * @do_ping:            True if a PING request should be issued on this channel
+ * @error_state:        True if the error count for this transaction is non-zero
+ * @halt_on_queue:      True if this channel should be halted the next time a
+ *                      request is queued for the channel. This is necessary in
+ *                      slave mode if no request queue space is available when
+ *                      an attempt is made to halt the channel.
+ * @halt_pending:       True if the host channel has been halted, but the core
+ *                      is not finished flushing queued requests
+ * @do_split:           Enable split for the channel
+ * @complete_split:     Enable complete split
+ * @hub_addr:           Address of high speed hub for the split
+ * @hub_port:           Port of the low/full speed device for the split
+ * @xact_pos:           Split transaction position. One of the following values:
+ *                       - DWC2_HCSPLT_XACTPOS_MID
+ *                       - DWC2_HCSPLT_XACTPOS_BEGIN
+ *                       - DWC2_HCSPLT_XACTPOS_END
+ *                       - DWC2_HCSPLT_XACTPOS_ALL
+ * @requests:           Number of requests issued for this channel since it was
+ *                      assigned to the current transfer (not counting PINGs)
+ * @schinfo:            Scheduling micro-frame bitmap
+ * @ntd:                Number of transfer descriptors for the transfer
+ * @halt_status:        Reason for halting the host channel
+ * @hcint:              Contents of the HCINT register when the interrupt occurred
+ * @qh:                 QH for the transfer being processed by this channel
+ * @hc_list_entry:      For linking to list of host channels
+ * @desc_list_addr:     Current QH's descriptor list DMA address
+ *
+ * This structure represents the state of a single host channel when acting in
+ * host mode. It contains the data items needed to transfer packets to an
+ * endpoint via a host channel.
+ */
+struct dwc2_host_chan {
+       u8 hc_num;
+
+       unsigned dev_addr:7;
+       unsigned ep_num:4;
+       unsigned ep_is_in:1;
+       unsigned speed:4;
+       unsigned ep_type:2;
+       unsigned max_packet:11;
+       unsigned data_pid_start:2;
+#define DWC2_HC_PID_DATA0      (TSIZ_SC_MC_PID_DATA0 >> TSIZ_SC_MC_PID_SHIFT)
+#define DWC2_HC_PID_DATA2      (TSIZ_SC_MC_PID_DATA2 >> TSIZ_SC_MC_PID_SHIFT)
+#define DWC2_HC_PID_DATA1      (TSIZ_SC_MC_PID_DATA1 >> TSIZ_SC_MC_PID_SHIFT)
+#define DWC2_HC_PID_MDATA      (TSIZ_SC_MC_PID_MDATA >> TSIZ_SC_MC_PID_SHIFT)
+#define DWC2_HC_PID_SETUP      (TSIZ_SC_MC_PID_SETUP >> TSIZ_SC_MC_PID_SHIFT)
+
+       unsigned multi_count:2;
+
+       u8 *xfer_buf;
+       dma_addr_t xfer_dma;
+       dma_addr_t align_buf;
+       u32 xfer_len;
+       u32 xfer_count;
+       u16 start_pkt_count;
+       u8 xfer_started;
+       u8 do_ping;
+       u8 error_state;
+       u8 halt_on_queue;
+       u8 halt_pending;
+       u8 do_split;
+       u8 complete_split;
+       u8 hub_addr;
+       u8 hub_port;
+       u8 xact_pos;
+#define DWC2_HCSPLT_XACTPOS_MID        (HCSPLT_XACTPOS_MID >> HCSPLT_XACTPOS_SHIFT)
+#define DWC2_HCSPLT_XACTPOS_END        (HCSPLT_XACTPOS_END >> HCSPLT_XACTPOS_SHIFT)
+#define DWC2_HCSPLT_XACTPOS_BEGIN (HCSPLT_XACTPOS_BEGIN >> HCSPLT_XACTPOS_SHIFT)
+#define DWC2_HCSPLT_XACTPOS_ALL        (HCSPLT_XACTPOS_ALL >> HCSPLT_XACTPOS_SHIFT)
+
+       u8 requests;
+       u8 schinfo;
+       u16 ntd;
+       enum dwc2_halt_status halt_status;
+       u32 hcint;
+       struct dwc2_qh *qh;
+       struct list_head hc_list_entry;
+       dma_addr_t desc_list_addr;
+};
+
+struct dwc2_hcd_pipe_info {
+       u8 dev_addr;
+       u8 ep_num;
+       u8 pipe_type;
+       u8 pipe_dir;
+       u16 mps;
+};
+
+struct dwc2_hcd_iso_packet_desc {
+       u32 offset;
+       u32 length;
+       u32 actual_length;
+       u32 status;
+};
+
+struct dwc2_qtd;
+
+struct dwc2_hcd_urb {
+       void *priv;
+       struct dwc2_qtd *qtd;
+       void *buf;
+       dma_addr_t dma;
+       void *setup_packet;
+       dma_addr_t setup_dma;
+       u32 length;
+       u32 actual_length;
+       u32 status;
+       u32 error_count;
+       u32 packet_count;
+       u32 flags;
+       u16 interval;
+       struct dwc2_hcd_pipe_info pipe_info;
+       struct dwc2_hcd_iso_packet_desc iso_descs[0];
+};
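+
+/*
+ * Illustrative sketch (not part of the driver code): since iso_descs[] is a
+ * flexible array member, an isochronous URB is allocated with room for one
+ * descriptor per packet, along the lines of (npackets and mem_flags supplied
+ * by the caller):
+ *
+ *     struct dwc2_hcd_urb *dwc2_urb;
+ *
+ *     dwc2_urb = kzalloc(sizeof(*dwc2_urb) +
+ *                        npackets * sizeof(dwc2_urb->iso_descs[0]),
+ *                        mem_flags);
+ *     if (dwc2_urb)
+ *             dwc2_urb->packet_count = npackets;
+ */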
+
+/* Phases for control transfers */
+enum dwc2_control_phase {
+       DWC2_CONTROL_SETUP,
+       DWC2_CONTROL_DATA,
+       DWC2_CONTROL_STATUS,
+};
+
+/* Transaction types */
+enum dwc2_transaction_type {
+       DWC2_TRANSACTION_NONE,
+       DWC2_TRANSACTION_PERIODIC,
+       DWC2_TRANSACTION_NON_PERIODIC,
+       DWC2_TRANSACTION_ALL,
+};
+
+/**
+ * struct dwc2_qh - Software queue head structure
+ *
+ * @ep_type:            Endpoint type. One of the following values:
+ *                       - USB_ENDPOINT_XFER_CONTROL
+ *                       - USB_ENDPOINT_XFER_BULK
+ *                       - USB_ENDPOINT_XFER_INT
+ *                       - USB_ENDPOINT_XFER_ISOC
+ * @ep_is_in:           Endpoint direction
+ * @maxp:               Value from wMaxPacketSize field of Endpoint Descriptor
+ * @dev_speed:          Device speed. One of the following values:
+ *                       - USB_SPEED_LOW
+ *                       - USB_SPEED_FULL
+ *                       - USB_SPEED_HIGH
+ * @data_toggle:        Determines the PID of the next data packet for
+ *                      non-control transfers. Ignored for control transfers.
+ *                      One of the following values:
+ *                       - DWC2_HC_PID_DATA0
+ *                       - DWC2_HC_PID_DATA1
+ * @ping_state:         Ping state
+ * @do_split:           Full/low speed endpoint on high-speed hub requires split
+ * @qtd_list:           List of QTDs for this QH
+ * @channel:            Host channel currently processing transfers for this QH
+ * @usecs:              Bandwidth in microseconds per (micro)frame
+ * @interval:           Interval between transfers in (micro)frames
+ * @sched_frame:        (micro)frame to initialize a periodic transfer.
+ *                      The transfer executes in the following (micro)frame.
+ * @start_split_frame:  (Micro)frame at which last start split was initialized
+ * @dw_align_buf:       Used instead of original buffer if its physical address
+ *                      is not dword-aligned
+ * @dw_align_buf_dma:   DMA address for dw_align_buf
+ * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
+ *                      schedule
+ * @desc_list:          List of transfer descriptors
+ * @desc_list_dma:      Physical address of desc_list
+ * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
+ *                      descriptor and indicates original XferSize value for the
+ *                      descriptor
+ * @ntd:                Actual number of transfer descriptors in a list
+ * @td_first:           Index of first activated isochronous transfer descriptor
+ * @td_last:            Index of last activated isochronous transfer descriptor
+ * @tt_buffer_dirty:    True if clear_tt_buffer_complete is pending
+ *
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+struct dwc2_qh {
+       u8 ep_type;
+       u8 ep_is_in;
+       u16 maxp;
+       u8 dev_speed;
+       u8 data_toggle;
+       u8 ping_state;
+       u8 do_split;
+       struct list_head qtd_list;
+       struct dwc2_host_chan *channel;
+       u16 usecs;
+       u16 interval;
+       u16 sched_frame;
+       u16 start_split_frame;
+       u8 *dw_align_buf;
+       dma_addr_t dw_align_buf_dma;
+       struct list_head qh_list_entry;
+       struct dwc2_hcd_dma_desc *desc_list;
+       dma_addr_t desc_list_dma;
+       u32 *n_bytes;
+       u16 ntd;
+       u8 td_first;
+       u8 td_last;
+       unsigned tt_buffer_dirty:1;
+};
+
+/**
+ * struct dwc2_qtd - Software queue transfer descriptor (QTD)
+ *
+ * @control_phase:      Current phase for control transfers (Setup, Data, or
+ *                      Status)
+ * @in_process:         Indicates if this QTD is currently being processed by HW
+ * @data_toggle:        Determines the PID of the next data packet for the
+ *                      data phase of control transfers. Ignored for other
+ *                      transfer types. One of the following values:
+ *                       - DWC2_HC_PID_DATA0
+ *                       - DWC2_HC_PID_DATA1
+ * @complete_split:     Keeps track of the current split type for FS/LS
+ *                      endpoints on a HS Hub
+ * @isoc_split_pos:     Position of the ISOC split in full/low speed
+ * @isoc_frame_index:   Index of the next frame descriptor for an isochronous
+ *                      transfer. A frame descriptor describes the buffer
+ *                      position and length of the data to be transferred in the
+ *                      next scheduled (micro)frame of an isochronous transfer.
+ *                      It also holds status for that transaction. The frame
+ *                      index starts at 0.
+ * @isoc_split_offset:  Position of the ISOC split in the buffer for the
+ *                      current frame
+ * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
+ * @error_count:        Holds the number of bus errors that have occurred for
+ *                      a transaction within this transfer
+ * @n_desc:             Number of DMA descriptors for this QTD
+ * @isoc_frame_index_last: Last activated frame (packet) index, used in
+ *                      descriptor DMA mode only
+ * @urb:                URB for this transfer
+ * @qh:                 Queue head for this QTD
+ * @qtd_list_entry:     For linking to the QH's list of QTDs
+ *
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+struct dwc2_qtd {
+       enum dwc2_control_phase control_phase;
+       u8 in_process;
+       u8 data_toggle;
+       u8 complete_split;
+       u8 isoc_split_pos;
+       u16 isoc_frame_index;
+       u16 isoc_split_offset;
+       u32 ssplit_out_xfer_count;
+       u8 error_count;
+       u8 n_desc;
+       u16 isoc_frame_index_last;
+       struct dwc2_hcd_urb *urb;
+       struct dwc2_qh *qh;
+       struct list_head qtd_list_entry;
+};
+
+#ifdef DEBUG
+struct hc_xfer_info {
+       struct dwc2_hsotg *hsotg;
+       struct dwc2_host_chan *chan;
+};
+#endif
+
+/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
+static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
+{
+       return (struct usb_hcd *)hsotg->priv;
+}
+
+/*
+ * Inline used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
+{
+       u32 mask = readl(hsotg->regs + HCINTMSK(chnum));
+
+       mask &= ~intr;
+       writel(mask, hsotg->regs + HCINTMSK(chnum));
+}
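+
+/*
+ * Typical use (illustrative only): once a NAK has been handled for a
+ * channel, further NAK interrupts can be masked with, e.g.,
+ *
+ *     disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
+ *
+ * assuming the HCINTMSK_* bit definitions from the register header.
+ */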
+
+/*
+ * Returns the mode of operation, host or device
+ */
+static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg)
+{
+       return (readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
+}
+static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
+{
+       return (readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
+}
+
+/*
+ * Reads HPRT0 in preparation for modifying it. It masks the write-clear (WC)
+ * bits to 0 so that, if they read back as 1, writing the value back does not
+ * clear the corresponding status conditions.
+ */
+static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
+{
+       u32 hprt0 = readl(hsotg->regs + HPRT0);
+
+       hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
+       return hprt0;
+}
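+
+/*
+ * Typical read-modify-write use of dwc2_read_hprt0() (illustrative only),
+ * e.g. powering the port without accidentally clearing write-clear bits,
+ * assuming the HPRT0_PWR bit definition from the register header:
+ *
+ *     u32 hprt0 = dwc2_read_hprt0(hsotg);
+ *
+ *     hprt0 |= HPRT0_PWR;
+ *     writel(hprt0, hsotg->regs + HPRT0);
+ */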
+
+static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->ep_num;
+}
+
+static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->pipe_type;
+}
+
+static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->mps;
+}
+
+static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->dev_addr;
+}
+
+static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
+}
+
+static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
+}
+
+static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
+}
+
+static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
+}
+
+static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->pipe_dir == USB_DIR_IN;
+}
+
+static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
+{
+       return !dwc2_hcd_is_pipe_in(pipe);
+}
+
+extern int dwc2_hcd_init(struct device *dev, struct dwc2_hsotg *hsotg,
+                        int irq, struct dwc2_core_params *params);
+extern void dwc2_hcd_remove(struct device *dev, struct dwc2_hsotg *hsotg);
+extern int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+                              struct dwc2_core_params *params);
+
+/* Transaction Execution Functions */
+extern enum dwc2_transaction_type dwc2_hcd_select_transactions(
+                                               struct dwc2_hsotg *hsotg);
+extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
+                                       enum dwc2_transaction_type tr_type);
+
+/* Schedule Queue Functions */
+/* Implemented in hcd_queue.c */
+extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+                                  int sched_csplit);
+
+extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
+extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+                           struct dwc2_qh **qh, gfp_t mem_flags);
+
+/* Unlinks and frees a QTD */
+static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
+                                               struct dwc2_qtd *qtd,
+                                               struct dwc2_qh *qh)
+{
+       list_del(&qtd->qtd_list_entry);
+       kfree(qtd);
+}
+
+/* Descriptor DMA support functions */
+extern void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
+                                    struct dwc2_qh *qh);
+extern void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
+                                       struct dwc2_host_chan *chan, int chnum,
+                                       enum dwc2_halt_status halt_status);
+
+extern int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+                                gfp_t mem_flags);
+extern void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+
+/* Check if QH is non-periodic */
+#define dwc2_qh_is_non_per(_qh_ptr_) \
+       ((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
+        (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)
+
+/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
+
+/* Packet size for any kind of endpoint descriptor */
+#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
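+
+/*
+ * Worked example (for illustration): a high-bandwidth isochronous endpoint
+ * reporting wMaxPacketSize = 0x1400 gives dwc2_hb_mult() = 3 (bits 12:11 are
+ * 0b10, i.e. two additional transactions) and dwc2_max_packet() = 1024
+ * (bits 10:0), so up to 3 * 1024 bytes can be moved per microframe.
+ */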
+
+/*
+ * Returns true if frame1 is less than or equal to frame2. The comparison is
+ * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
+ * frame number when the max frame number is reached.
+ */
+static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
+{
+       return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
+}
+
+/*
+ * Returns true if frame1 is greater than frame2. The comparison is done
+ * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
+ * number when the max frame number is reached.
+ */
+static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
+{
+       return (frame1 != frame2) &&
+              ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
+}
+
+/*
+ * Increments frame by the amount specified by inc. The addition is done
+ * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
+ */
+static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
+{
+       return (frame + inc) & HFNUM_MAX_FRNUM;
+}
+
+static inline u16 dwc2_full_frame_num(u16 frame)
+{
+       return (frame & HFNUM_MAX_FRNUM) >> 3;
+}
+
+static inline u16 dwc2_micro_frame_num(u16 frame)
+{
+       return frame & 0x7;
+}
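+
+/*
+ * Worked example (for illustration), assuming HFNUM_MAX_FRNUM is the 14-bit
+ * mask 0x3fff: dwc2_frame_num_inc(0x3ffe, 3) = 1, and both
+ * dwc2_frame_num_le(0x3ffe, 1) and dwc2_frame_num_gt(1, 0x3ffe) are true
+ * because the comparison is done modulo the rollover. The low three bits
+ * select the microframe, so for frame 0x0065 dwc2_full_frame_num() returns
+ * 0x000c and dwc2_micro_frame_num() returns 5.
+ */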
+
+/*
+ * Returns the Core Interrupt Status register contents, ANDed with the Core
+ * Interrupt Mask register contents
+ */
+static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
+{
+       return readl(hsotg->regs + GINTSTS) & readl(hsotg->regs + GINTMSK);
+}
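+
+/*
+ * Typical use in the top-level interrupt handler (illustrative only): read
+ * the pending, unmasked interrupts once and dispatch on the bits, e.g.
+ *
+ *     u32 gintsts = dwc2_read_core_intr(hsotg);
+ *
+ *     if (gintsts & GINTSTS_SOF)
+ *             ... handle the start-of-frame interrupt ...
+ */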
+
+static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
+{
+       return dwc2_urb->status;
+}
+
+static inline u32 dwc2_hcd_urb_get_actual_length(
+               struct dwc2_hcd_urb *dwc2_urb)
+{
+       return dwc2_urb->actual_length;
+}
+
+static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
+{
+       return dwc2_urb->error_count;
+}
+
+static inline void dwc2_hcd_urb_set_iso_desc_params(
+               struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
+               u32 length)
+{
+       dwc2_urb->iso_descs[desc_num].offset = offset;
+       dwc2_urb->iso_descs[desc_num].length = length;
+}
+
+static inline u32 dwc2_hcd_urb_get_iso_desc_status(
+               struct dwc2_hcd_urb *dwc2_urb, int desc_num)
+{
+       return dwc2_urb->iso_descs[desc_num].status;
+}
+
+static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
+               struct dwc2_hcd_urb *dwc2_urb, int desc_num)
+{
+       return dwc2_urb->iso_descs[desc_num].actual_length;
+}
+
+static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
+                                                 struct usb_host_endpoint *ep)
+{
+       struct dwc2_qh *qh = ep->hcpriv;
+
+       if (qh && !list_empty(&qh->qh_list_entry))
+               return 1;
+
+       return 0;
+}
+
+static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
+                                           struct usb_host_endpoint *ep)
+{
+       struct dwc2_qh *qh = ep->hcpriv;
+
+       if (!qh) {
+               WARN_ON(1);
+               return 0;
+       }
+
+       return qh->usecs;
+}
+
+extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
+                                     struct dwc2_host_chan *chan, int chnum,
+                                     struct dwc2_qtd *qtd);
+
+/* HCD Core API */
+
+/**
+ * dwc2_hcd_intr() - Called on every hardware interrupt
+ *
+ * @hsotg: The DWC2 HCD
+ *
+ * Returns non-zero if the interrupt was handled, 0 otherwise
+ */
+extern int dwc2_hcd_intr(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
+ *
+ * @hsotg: The DWC2 HCD
+ */
+extern void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);
+
+extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
+extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_is_b_host() - Returns 1 if the core is currently acting as the
+ * B host, and 0 otherwise
+ *
+ * @hsotg: The DWC2 HCD
+ */
+extern int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_get_frame_number() - Returns current frame number
+ *
+ * @hsotg: The DWC2 HCD
+ */
+extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_dump_state() - Dumps hsotg state
+ *
+ * @hsotg: The DWC2 HCD
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+extern void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_dump_frrem() - Dumps the average frame remaining at SOF
+ *
+ * @hsotg: The DWC2 HCD
+ *
+ * This can be used to determine average interrupt latency. Frame remaining is
+ * also shown at the start of a transfer and at two additional sample points.
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg);
+
+/* URB interface */
+
+/* Transfer flags */
+#define URB_GIVEBACK_ASAP      0x1
+#define URB_SEND_ZERO_PACKET   0x2
+
+/* Host driver callbacks */
+
+extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
+extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
+extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
+                              int *hub_addr, int *hub_port);
+extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
+extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
+                              struct dwc2_hcd_urb *dwc2_urb, int status);
+
+#ifdef DEBUG
+/*
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter_ argument must be one of these values.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ */
+#define dwc2_sample_frrem(_hcd_, _qh_, _letter_)                       \
+do {                                                                   \
+       struct hfnum_data _hfnum_;                                      \
+       struct dwc2_qtd *_qtd_;                                         \
+                                                                       \
+       _qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd,      \
+                          qtd_list_entry);                             \
+       if (usb_pipeint(_qtd_->urb->pipe) &&                            \
+           (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) { \
+               _hfnum_.d32 = readl((_hcd_)->regs + HFNUM);             \
+               switch (_hfnum_.b.frnum & 0x7) {                        \
+               case 7:                                                 \
+                       (_hcd_)->hfnum_7_samples_##_letter_++;          \
+                       (_hcd_)->hfnum_7_frrem_accum_##_letter_ +=      \
+                               _hfnum_.b.frrem;                        \
+                       break;                                          \
+               case 0:                                                 \
+                       (_hcd_)->hfnum_0_samples_##_letter_++;          \
+                       (_hcd_)->hfnum_0_frrem_accum_##_letter_ +=      \
+                               _hfnum_.b.frrem;                        \
+                       break;                                          \
+               default:                                                \
+                       (_hcd_)->hfnum_other_samples_##_letter_++;      \
+                       (_hcd_)->hfnum_other_frrem_accum_##_letter_ +=  \
+                               _hfnum_.b.frrem;                        \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+} while (0)
+#else
+#define dwc2_sample_frrem(_hcd_, _qh_, _letter_)       do {} while (0)
+#endif
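+
+/*
+ * Typical use of dwc2_sample_frrem() (illustrative only): sample point "a"
+ * when a transfer is started and point "b" when its completion interrupt is
+ * handled, e.g.
+ *
+ *     dwc2_sample_frrem(hsotg, chan->qh, a);
+ */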
+
+#endif /* __DWC2_HCD_H__ */
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/staging/dwc2/hcd_intr.c
new file mode 100644 (file)
index 0000000..01addd0
--- /dev/null
@@ -0,0 +1,2079 @@
+/*
+ * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains the interrupt handlers for Host mode
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/* This function is for debug only */
+static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
+{
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+
+       u16 curr_frame_number = hsotg->frame_number;
+
+       if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+               if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
+                   curr_frame_number) {
+                       hsotg->frame_num_array[hsotg->frame_num_idx] =
+                                       curr_frame_number;
+                       hsotg->last_frame_num_array[hsotg->frame_num_idx] =
+                                       hsotg->last_frame_num;
+                       hsotg->frame_num_idx++;
+               }
+       } else if (!hsotg->dumped_frame_num_array) {
+               int i;
+
+               dev_info(hsotg->dev, "Frame     Last Frame\n");
+               dev_info(hsotg->dev, "-----     ----------\n");
+               for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+                       dev_info(hsotg->dev, "0x%04x    0x%04x\n",
+                                hsotg->frame_num_array[i],
+                                hsotg->last_frame_num_array[i]);
+               }
+               hsotg->dumped_frame_num_array = 1;
+       }
+       hsotg->last_frame_num = curr_frame_number;
+#endif
+}
+
+static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
+                                   struct dwc2_host_chan *chan,
+                                   struct dwc2_qtd *qtd)
+{
+       struct urb *usb_urb;
+
+       if (!chan->qh || !qtd->urb)
+               return;
+
+       usb_urb = qtd->urb->priv;
+       if (!usb_urb || !usb_urb->dev)
+               return;
+
+       if (chan->qh->dev_speed != USB_SPEED_HIGH &&
+           qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
+               chan->qh->tt_buffer_dirty = 1;
+               if (usb_hub_clear_tt_buffer(usb_urb))
+                       /* Clear failed; let's hope things work anyway */
+                       chan->qh->tt_buffer_dirty = 0;
+       }
+}
+
+/*
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller
+ * for the next (micro)frame.
+ */
+static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
+{
+       struct list_head *qh_entry;
+       struct dwc2_qh *qh;
+       u32 hfnum;
+       enum dwc2_transaction_type tr_type;
+
+#ifdef DEBUG_SOF
+       dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
+#endif
+
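+       /* Extract the frame number field from HFNUM */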
+       hfnum = readl(hsotg->regs + HFNUM);
+       hsotg->frame_number = hfnum >> HFNUM_FRNUM_SHIFT &
+                           HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+
+       dwc2_track_missed_sofs(hsotg);
+
+       /* Determine whether any periodic QHs should be executed */
+       qh_entry = hsotg->periodic_sched_inactive.next;
+       while (qh_entry != &hsotg->periodic_sched_inactive) {
+               qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
+               qh_entry = qh_entry->next;
+               if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
+                       /*
+                        * Move QH to the ready list to be executed next
+                        * (micro)frame
+                        */
+                       list_move(&qh->qh_list_entry,
+                                 &hsotg->periodic_sched_ready);
+       }
+       tr_type = dwc2_hcd_select_transactions(hsotg);
+       if (tr_type != DWC2_TRANSACTION_NONE)
+               dwc2_hcd_queue_transactions(hsotg, tr_type);
+
+       /* Clear interrupt */
+       writel(GINTSTS_SOF, hsotg->regs + GINTSTS);
+}
+
+/*
+ * Handles the Rx FIFO Level Interrupt, which indicates that there is
+ * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode.
+ */
+static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
+{
+       u32 grxsts, chnum, bcnt, dpid, pktsts;
+       struct dwc2_host_chan *chan;
+
+       dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
+
+       grxsts = readl(hsotg->regs + GRXSTSP);
+       chnum = grxsts >> GRXSTS_HCHNUM_SHIFT &
+               GRXSTS_HCHNUM_MASK >> GRXSTS_HCHNUM_SHIFT;
+       chan = hsotg->hc_ptr_array[chnum];
+       if (!chan) {
+               dev_err(hsotg->dev, "Unable to get corresponding channel\n");
+               return;
+       }
+
+       bcnt = grxsts >> GRXSTS_BYTECNT_SHIFT &
+              GRXSTS_BYTECNT_MASK >> GRXSTS_BYTECNT_SHIFT;
+       dpid = grxsts >> GRXSTS_DPID_SHIFT &
+              GRXSTS_DPID_MASK >> GRXSTS_DPID_SHIFT;
+       pktsts = grxsts & GRXSTS_PKTSTS_MASK;
+
+       /* Packet Status */
+       dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
+       dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
+       dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
+                chan->data_pid_start);
+       dev_vdbg(hsotg->dev, "    PStatus = %d\n",
+                pktsts >> GRXSTS_PKTSTS_SHIFT &
+                GRXSTS_PKTSTS_MASK >> GRXSTS_PKTSTS_SHIFT);
+
+       switch (pktsts) {
+       case GRXSTS_PKTSTS_HCHIN:
+               /* Read the data into the host buffer */
+               if (bcnt > 0) {
+                       dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
+
+                       /* Update the HC fields for the next packet received */
+                       chan->xfer_count += bcnt;
+                       chan->xfer_buf += bcnt;
+               }
+               break;
+       case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
+       case GRXSTS_PKTSTS_DATATOGGLEERR:
+       case GRXSTS_PKTSTS_HCHHALTED:
+               /* Handled in interrupt, just ignore data */
+               break;
+       default:
+               dev_err(hsotg->dev,
+                       "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
+               break;
+       }
+}
+
+/*
+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode.
+ */
+static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
+{
+       dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+       dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
+}
+
+/*
+ * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode.
+ */
+static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
+{
+       dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
+       dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
+}
+
+static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
+                             u32 *hprt0_modify)
+{
+       struct dwc2_core_params *params = hsotg->core_params;
+       int do_reset = 0;
+       u32 usbcfg;
+       u32 prtspd;
+       u32 hcfg;
+       u32 hfir;
+
+       dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
+       /* Recalculate HFIR.FrInterval every time the port is enabled */
+       hfir = readl(hsotg->regs + HFIR);
+       hfir &= ~HFIR_FRINT_MASK;
+       hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
+               HFIR_FRINT_MASK;
+       writel(hfir, hsotg->regs + HFIR);
+
+       /* Check if we need to adjust the PHY clock speed for low power */
+       if (!params->host_support_fs_ls_low_power) {
+               /* Port has been enabled, set the reset change flag */
+               hsotg->flags.b.port_reset_change = 1;
+               return;
+       }
+
+       usbcfg = readl(hsotg->regs + GUSBCFG);
+       prtspd = hprt0 & HPRT0_SPD_MASK;
+
+       if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
+               /* Low power */
+               if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
+                       /* Set PHY low power clock select for FS/LS devices */
+                       usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
+                       writel(usbcfg, hsotg->regs + GUSBCFG);
+                       do_reset = 1;
+               }
+
+               hcfg = readl(hsotg->regs + HCFG);
+
+               if (prtspd == HPRT0_SPD_LOW_SPEED &&
+                   params->host_ls_low_power_phy_clk ==
+                   DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
+                       /* 6 MHZ */
+                       dev_vdbg(hsotg->dev,
+                                "FS_PHY programming HCFG to 6 MHz\n");
+                       if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
+                           HCFG_FSLSPCLKSEL_6_MHZ) {
+                               hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+                               hcfg |= HCFG_FSLSPCLKSEL_6_MHZ;
+                               writel(hcfg, hsotg->regs + HCFG);
+                               do_reset = 1;
+                       }
+               } else {
+                       /* 48 MHZ */
+                       dev_vdbg(hsotg->dev,
+                                "FS_PHY programming HCFG to 48 MHz\n");
+                       if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
+                           HCFG_FSLSPCLKSEL_48_MHZ) {
+                               hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+                               hcfg |= HCFG_FSLSPCLKSEL_48_MHZ;
+                               writel(hcfg, hsotg->regs + HCFG);
+                               do_reset = 1;
+                       }
+               }
+       } else {
+               /* Not low power */
+               if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
+                       usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
+                       writel(usbcfg, hsotg->regs + GUSBCFG);
+                       do_reset = 1;
+               }
+       }
+
+       if (do_reset) {
+               *hprt0_modify |= HPRT0_RST;
+               queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
+                                  msecs_to_jiffies(60));
+       } else {
+               /* Port has been enabled, set the reset change flag */
+               hsotg->flags.b.port_reset_change = 1;
+       }
+}
+
+/*
+ * There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately.
+ */
+static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
+{
+       u32 hprt0;
+       u32 hprt0_modify;
+
+       dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
+
+       hprt0 = readl(hsotg->regs + HPRT0);
+       hprt0_modify = hprt0;
+
+       /*
+        * Clear appropriate bits in HPRT0 to clear the interrupt bit in
+        * GINTSTS
+        */
+       hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
+                         HPRT0_OVRCURRCHG);
+
+       /*
+        * Port Connect Detected
+        * Set the connect status flags and clear the interrupt if detected
+        */
+       if (hprt0 & HPRT0_CONNDET) {
+               dev_vdbg(hsotg->dev,
+                        "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
+                        hprt0);
+               hsotg->flags.b.port_connect_status_change = 1;
+               hsotg->flags.b.port_connect_status = 1;
+               hprt0_modify |= HPRT0_CONNDET;
+
+               /*
+                * The Hub driver asserts a reset when it sees port connect
+                * status change flag
+                */
+       }
+
+       /*
+        * Port Enable Changed
+        * Clear the interrupt if detected; set the internal flag if the port
+        * was disabled
+        */
+       if (hprt0 & HPRT0_ENACHG) {
+               dev_vdbg(hsotg->dev,
+                        "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
+                        hprt0, !!(hprt0 & HPRT0_ENA));
+               hprt0_modify |= HPRT0_ENACHG;
+               if (hprt0 & HPRT0_ENA)
+                       dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
+               else
+                       hsotg->flags.b.port_enable_change = 1;
+       }
+
+       /* Overcurrent Change Interrupt */
+       if (hprt0 & HPRT0_OVRCURRCHG) {
+               dev_vdbg(hsotg->dev,
+                        "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
+                        hprt0);
+               hsotg->flags.b.port_over_current_change = 1;
+               hprt0_modify |= HPRT0_OVRCURRCHG;
+       }
+
+       /* Clear Port Interrupts */
+       writel(hprt0_modify, hsotg->regs + HPRT0);
+}
+
+/*
+ * Gets the actual length of a transfer after the transfer halts. halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
+ * is set to 1 upon return if less than the requested number of bytes were
+ * transferred. short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
+                                      struct dwc2_host_chan *chan, int chnum,
+                                      struct dwc2_qtd *qtd,
+                                      enum dwc2_halt_status halt_status,
+                                      int *short_read)
+{
+       u32 hctsiz, count, length;
+
+       hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+
+       if (halt_status == DWC2_HC_XFER_COMPLETE) {
+               if (chan->ep_is_in) {
+                       count = hctsiz >> TSIZ_XFERSIZE_SHIFT &
+                               TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT;
+                       length = chan->xfer_len - count;
+                       if (short_read != NULL)
+                               *short_read = (count != 0);
+               } else if (chan->qh->do_split) {
+                       length = qtd->ssplit_out_xfer_count;
+               } else {
+                       length = chan->xfer_len;
+               }
+       } else {
+               /*
+                * Must use the hctsiz.pktcnt field to determine how much data
+                * has been transferred. This field reflects the number of
+                * packets that have been transferred via the USB. This is
+                * always an integral number of packets if the transfer was
+                * halted before its normal completion. (Can't use the
+                * hctsiz.xfersize field because that reflects the number of
+                * bytes transferred via the AHB, not the USB).
+                */
+               count = hctsiz >> TSIZ_PKTCNT_SHIFT &
+                       TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT;
+               length = (chan->start_pkt_count - count) * chan->max_packet;
+       }
+
+       return length;
+}
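+
+/*
+ * Worked example (for illustration): if an OUT transfer on a 512-byte
+ * max-packet endpoint starts with start_pkt_count = 6 and the channel halts
+ * early with hctsiz.pktcnt = 2, then (6 - 2) * 512 = 2048 bytes went out on
+ * the USB, regardless of how many bytes were moved across the AHB.
+ */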
+
+/**
+ * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
+ * Complete interrupt on the host channel. Updates the actual_length field
+ * of the URB based on the number of bytes transferred via the host channel.
+ * Sets the URB status if the data transfer is finished.
+ *
+ * Return: 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise
+ */
+static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
+                                struct dwc2_host_chan *chan, int chnum,
+                                struct dwc2_hcd_urb *urb,
+                                struct dwc2_qtd *qtd)
+{
+       u32 hctsiz;
+       int xfer_done = 0;
+       int short_read = 0;
+       int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
+                                                     DWC2_HC_XFER_COMPLETE,
+                                                     &short_read);
+
+       if (urb->actual_length + xfer_length > urb->length) {
+               dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+               xfer_length = urb->length - urb->actual_length;
+       }
+
+       /* Non DWORD-aligned buffer case handling */
+       if (chan->align_buf && xfer_length && chan->ep_is_in) {
+               dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+               dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
+                                       DMA_FROM_DEVICE);
+               memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
+                      xfer_length);
+               dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
+                                          DMA_FROM_DEVICE);
+       }
+
+       dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
+                urb->actual_length, xfer_length);
+       urb->actual_length += xfer_length;
+
+       if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
+           (urb->flags & URB_SEND_ZERO_PACKET) &&
+           urb->actual_length >= urb->length &&
+           !(urb->length % chan->max_packet)) {
+               xfer_done = 0;
+       } else if (short_read || urb->actual_length >= urb->length) {
+               xfer_done = 1;
+               urb->status = 0;
+       }
+
+       hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+       dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
+                __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
+       dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
+       dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
+                hctsiz >> TSIZ_XFERSIZE_SHIFT &
+                TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
+       dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
+       dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
+       dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
+                xfer_done);
+
+       return xfer_done;
+}
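+
+/*
+ * Worked example (for illustration): for a 1024-byte bulk OUT URB with
+ * URB_SEND_ZERO_PACKET set on a 512-byte max-packet endpoint, the check
+ * above leaves xfer_done at 0 after the final full-length packet, since
+ * urb->length is a multiple of max_packet and a zero-length packet is still
+ * owed to terminate the transfer.
+ */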
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ */
+void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
+                              struct dwc2_host_chan *chan, int chnum,
+                              struct dwc2_qtd *qtd)
+{
+       u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+       u32 pid = hctsiz & TSIZ_SC_MC_PID_MASK;
+
+       if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+               if (pid == TSIZ_SC_MC_PID_DATA0)
+                       chan->qh->data_toggle = DWC2_HC_PID_DATA0;
+               else
+                       chan->qh->data_toggle = DWC2_HC_PID_DATA1;
+       } else {
+               if (pid == TSIZ_SC_MC_PID_DATA0)
+                       qtd->data_toggle = DWC2_HC_PID_DATA0;
+               else
+                       qtd->data_toggle = DWC2_HC_PID_DATA1;
+       }
+}
+
+/**
+ * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
+ * the transfer is stopped for any reason. The fields of the current entry in
+ * the frame descriptor array are set based on the transfer state and the input
+ * halt_status. Completes the Isochronous URB if all the URB frames have been
+ * completed.
+ *
+ * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
+ */
+static enum dwc2_halt_status dwc2_update_isoc_urb_state(
+               struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+               int chnum, struct dwc2_qtd *qtd,
+               enum dwc2_halt_status halt_status)
+{
+       struct dwc2_hcd_iso_packet_desc *frame_desc;
+       struct dwc2_hcd_urb *urb = qtd->urb;
+
+       if (!urb)
+               return DWC2_HC_XFER_NO_HALT_STATUS;
+
+       frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+
+       switch (halt_status) {
+       case DWC2_HC_XFER_COMPLETE:
+               frame_desc->status = 0;
+               frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
+                                       chan, chnum, qtd, halt_status, NULL);
+
+               /* Non DWORD-aligned buffer case handling */
+               if (chan->align_buf && frame_desc->actual_length &&
+                   chan->ep_is_in) {
+                       dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
+                               __func__);
+                       dma_sync_single_for_cpu(hsotg->dev, urb->dma,
+                                               urb->length, DMA_FROM_DEVICE);
+                       memcpy(urb->buf + frame_desc->offset +
+                              qtd->isoc_split_offset, chan->qh->dw_align_buf,
+                              frame_desc->actual_length);
+                       dma_sync_single_for_device(hsotg->dev, urb->dma,
+                                                  urb->length,
+                                                  DMA_FROM_DEVICE);
+               }
+               break;
+       case DWC2_HC_XFER_FRAME_OVERRUN:
+               urb->error_count++;
+               if (chan->ep_is_in)
+                       frame_desc->status = -ENOSR;
+               else
+                       frame_desc->status = -ECOMM;
+               frame_desc->actual_length = 0;
+               break;
+       case DWC2_HC_XFER_BABBLE_ERR:
+               urb->error_count++;
+               frame_desc->status = -EOVERFLOW;
+               /* Don't need to update actual_length in this case */
+               break;
+       case DWC2_HC_XFER_XACT_ERR:
+               urb->error_count++;
+               frame_desc->status = -EPROTO;
+               frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
+                                       chan, chnum, qtd, halt_status, NULL);
+
+               /* Non DWORD-aligned buffer case handling */
+               if (chan->align_buf && frame_desc->actual_length &&
+                   chan->ep_is_in) {
+                       dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
+                               __func__);
+                       dma_sync_single_for_cpu(hsotg->dev, urb->dma,
+                                               urb->length, DMA_FROM_DEVICE);
+                       memcpy(urb->buf + frame_desc->offset +
+                              qtd->isoc_split_offset, chan->qh->dw_align_buf,
+                              frame_desc->actual_length);
+                       dma_sync_single_for_device(hsotg->dev, urb->dma,
+                                                  urb->length,
+                                                  DMA_FROM_DEVICE);
+               }
+
+               /* Skip whole frame */
+               if (chan->qh->do_split &&
+                   chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
+                   hsotg->core_params->dma_enable > 0) {
+                       qtd->complete_split = 0;
+                       qtd->isoc_split_offset = 0;
+               }
+
+               break;
+       default:
+               dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
+                       halt_status);
+               break;
+       }
+
+       if (++qtd->isoc_frame_index == urb->packet_count) {
+               /*
+                * urb->status is not used for isoc transfers. The individual
+                * frame_desc statuses are used instead.
+                */
+               dwc2_host_complete(hsotg, urb->priv, urb, 0);
+               halt_status = DWC2_HC_XFER_URB_COMPLETE;
+       } else {
+               halt_status = DWC2_HC_XFER_COMPLETE;
+       }
+
+       return halt_status;
+}
+
+/*
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+                              int free_qtd)
+{
+       int continue_split = 0;
+       struct dwc2_qtd *qtd;
+
+       dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__, hsotg, qh, free_qtd);
+
+       if (list_empty(&qh->qtd_list)) {
+               dev_dbg(hsotg->dev, "## QTD list empty ##\n");
+               goto no_qtd;
+       }
+
+       qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
+
+       if (qtd->complete_split)
+               continue_split = 1;
+       else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
+                qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
+               continue_split = 1;
+
+       if (free_qtd) {
+               dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+               continue_split = 0;
+       }
+
+no_qtd:
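+       /*
+        * Clear the channel's alignment bounce buffer state and detach it
+        * from the QH before dwc2_hcd_qh_deactivate() reschedules the QH
+        */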
+       if (qh->channel)
+               qh->channel->align_buf = 0;
+       qh->channel = NULL;
+       dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
+}
+
+/**
+ * dwc2_release_channel() - Releases a host channel for use by other transfers
+ *
+ * @hsotg:       The HCD state structure
+ * @chan:        The host channel to release
+ * @qtd:         The QTD associated with the host channel. This QTD may be
+ *               freed if the transfer is complete or an error has occurred.
+ * @halt_status: Reason the channel is being released. This status
+ *               determines the actions taken by this function.
+ *
+ * Also attempts to select and queue more transactions since at least one host
+ * channel is available.
+ */
+static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
+                                struct dwc2_host_chan *chan,
+                                struct dwc2_qtd *qtd,
+                                enum dwc2_halt_status halt_status)
+{
+       enum dwc2_transaction_type tr_type;
+       u32 haintmsk;
+       int free_qtd = 0;
+
+       dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
+                __func__, chan->hc_num, halt_status);
+
+       switch (halt_status) {
+       case DWC2_HC_XFER_URB_COMPLETE:
+               free_qtd = 1;
+               break;
+       case DWC2_HC_XFER_AHB_ERR:
+       case DWC2_HC_XFER_STALL:
+       case DWC2_HC_XFER_BABBLE_ERR:
+               free_qtd = 1;
+               break;
+       case DWC2_HC_XFER_XACT_ERR:
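+               /* Give up after three consecutive transaction errors */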
+               if (qtd->error_count >= 3) {
+                       dev_vdbg(hsotg->dev,
+                                "  Complete URB with transaction error\n");
+                       free_qtd = 1;
+                       if (qtd->urb) {
+                               qtd->urb->status = -EPROTO;
+                               dwc2_host_complete(hsotg, qtd->urb->priv,
+                                                  qtd->urb, -EPROTO);
+                       }
+               }
+               break;
+       case DWC2_HC_XFER_URB_DEQUEUE:
+               /*
+                * The QTD has already been removed and the QH has been
+                * deactivated. Don't want to do anything except release the
+                * host channel and try to queue more transfers.
+                */
+               goto cleanup;
+       case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
+               dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
+               free_qtd = 1;
+               if (qtd->urb) {
+                       qtd->urb->status = -EIO;
+                       dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
+                                          -EIO);
+               }
+               break;
+       case DWC2_HC_XFER_NO_HALT_STATUS:
+       default:
+               break;
+       }
+
+       dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
+
+cleanup:
+       /*
+        * Release the host channel for use by other transfers. The cleanup
+        * function clears the channel interrupt enables and conditions, so
+        * there's no need to clear the Channel Halted interrupt separately.
+        */
+       if (!list_empty(&chan->hc_list_entry))
+               list_del(&chan->hc_list_entry);
+       dwc2_hc_cleanup(hsotg, chan);
+       list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
+
+       switch (chan->ep_type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+       case USB_ENDPOINT_XFER_BULK:
+               hsotg->non_periodic_channels--;
+               break;
+       default:
+               /*
+                * Don't release reservations for periodic channels here.
+                * That's done when a periodic transfer is descheduled (i.e.
+                * when the QH is removed from the periodic schedule).
+                */
+               break;
+       }
+
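+       /*
+        * The channel is back on the free list, so mask its interrupt in the
+        * Host All Channels Interrupt Mask register
+        */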
+       haintmsk = readl(hsotg->regs + HAINTMSK);
+       haintmsk &= ~(1 << chan->hc_num);
+       writel(haintmsk, hsotg->regs + HAINTMSK);
+
+       /* Try to queue more transfers now that there's a free channel */
+       tr_type = dwc2_hcd_select_transactions(hsotg);
+       if (tr_type != DWC2_TRANSACTION_NONE)
+               dwc2_hcd_queue_transactions(hsotg, tr_type);
+}
+
+/*
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
+                             struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
+                             enum dwc2_halt_status halt_status)
+{
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       if (hsotg->core_params->dma_enable > 0) {
+               dev_vdbg(hsotg->dev, "DMA enabled\n");
+               dwc2_release_channel(hsotg, chan, qtd, halt_status);
+               return;
+       }
+
+       /* Slave mode processing */
+       dwc2_hc_halt(hsotg, chan, halt_status);
+
+       if (chan->halt_on_queue) {
+               u32 gintmsk;
+
+               dev_vdbg(hsotg->dev, "Halt on queue\n");
+               if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+                   chan->ep_type == USB_ENDPOINT_XFER_BULK) {
+                       dev_vdbg(hsotg->dev, "control/bulk\n");
+                       /*
+                        * Make sure the Non-periodic Tx FIFO empty interrupt
+                        * is enabled so that the non-periodic schedule will
+                        * be processed
+                        */
+                       gintmsk = readl(hsotg->regs + GINTMSK);
+                       gintmsk |= GINTSTS_NPTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               } else {
+                       dev_vdbg(hsotg->dev, "isoc/intr\n");
+                       /*
+                        * Move the QH from the periodic queued schedule to
+                        * the periodic assigned schedule. This allows the
+                        * halt to be queued when the periodic schedule is
+                        * processed.
+                        */
+                       list_move(&chan->qh->qh_list_entry,
+                                 &hsotg->periodic_sched_assigned);
+
+                       /*
+                        * Make sure the Periodic Tx FIFO Empty interrupt is
+                        * enabled so that the periodic schedule will be
+                        * processed
+                        */
+                       gintmsk = readl(hsotg->regs + GINTMSK);
+                       gintmsk |= GINTSTS_PTXFEMP;
+                       writel(gintmsk, hsotg->regs + GINTMSK);
+               }
+       }
+}
+
+/*
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ */
+static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
+                                           struct dwc2_host_chan *chan,
+                                           int chnum, struct dwc2_qtd *qtd,
+                                           enum dwc2_halt_status halt_status)
+{
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       qtd->error_count = 0;
+
+       if (chan->hcint & HCINTMSK_NYET) {
+               /*
+                * Got a NYET on the last transaction of the transfer. This
+                * means that the endpoint should be in the PING state at the
+                * beginning of the next transfer.
+                */
+               dev_vdbg(hsotg->dev, "got NYET\n");
+               chan->qh->ping_state = 1;
+       }
+
+       /*
+        * Always halt and release the host channel to make it available for
+        * more transfers. There may still be more phases for a control
+        * transfer or more data packets for a bulk transfer at this point,
+        * but the host channel is still halted. A channel will be reassigned
+        * to the transfer when the non-periodic schedule is processed after
+        * the channel is released. This allows transactions to be queued
+        * properly via dwc2_hcd_queue_transactions, which also enables the
+        * Tx FIFO Empty interrupt if necessary.
+        */
+       if (chan->ep_is_in) {
+               /*
+                * IN transfers in Slave mode require an explicit disable to
+                * halt the channel. (In DMA mode, this call simply releases
+                * the channel.)
+                */
+               dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+       } else {
+               /*
+                * The channel is automatically disabled by the core for OUT
+                * transfers in Slave mode
+                */
+               dwc2_release_channel(hsotg, chan, qtd, halt_status);
+       }
+}
+
+/*
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ */
+static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
+                                       struct dwc2_host_chan *chan, int chnum,
+                                       struct dwc2_qtd *qtd,
+                                       enum dwc2_halt_status halt_status)
+{
+       u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+
+       qtd->error_count = 0;
+
+       if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
+               /* Core halts channel in these cases */
+               dwc2_release_channel(hsotg, chan, qtd, halt_status);
+       else
+               /* Flush any outstanding requests from the Tx queue */
+               dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+}
+
+static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
+                                      struct dwc2_host_chan *chan, int chnum,
+                                      struct dwc2_qtd *qtd)
+{
+       struct dwc2_hcd_iso_packet_desc *frame_desc;
+       u32 len;
+
+       if (!qtd->urb)
+               return 0;
+
+       frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
+       len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
+                                         DWC2_HC_XFER_COMPLETE, NULL);
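+       /*
+        * No data was transferred on this complete split; reset the split
+        * state and fall back to the normal transfer complete handling in
+        * the caller
+        */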
+       if (!len) {
+               qtd->complete_split = 0;
+               qtd->isoc_split_offset = 0;
+               return 0;
+       }
+
+       frame_desc->actual_length += len;
+
+       if (chan->align_buf && len) {
+               dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+               dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
+                                       qtd->urb->length, DMA_FROM_DEVICE);
+               memcpy(qtd->urb->buf + frame_desc->offset +
+                      qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
+               dma_sync_single_for_device(hsotg->dev, qtd->urb->dma,
+                                          qtd->urb->length, DMA_FROM_DEVICE);
+       }
+
+       qtd->isoc_split_offset += len;
+
+       if (frame_desc->actual_length >= frame_desc->length) {
+               frame_desc->status = 0;
+               qtd->isoc_frame_index++;
+               qtd->complete_split = 0;
+               qtd->isoc_split_offset = 0;
+       }
+
+       if (qtd->isoc_frame_index == qtd->urb->packet_count) {
+               dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
+               dwc2_release_channel(hsotg, chan, qtd,
+                                    DWC2_HC_XFER_URB_COMPLETE);
+       } else {
+               dwc2_release_channel(hsotg, chan, qtd,
+                                    DWC2_HC_XFER_NO_HALT_STATUS);
+       }
+
+       return 1;       /* Indicates that channel released */
+}
+
+/*
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
+                                 struct dwc2_host_chan *chan, int chnum,
+                                 struct dwc2_qtd *qtd)
+{
+       struct dwc2_hcd_urb *urb = qtd->urb;
+       int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+       enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
+       int urb_xfer_done;
+
+       dev_vdbg(hsotg->dev,
+                "--Host Channel %d Interrupt: Transfer Complete--\n", chnum);
+
+       if (hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
+               if (pipe_type == USB_ENDPOINT_XFER_ISOC)
+                       /* Do not disable the interrupt, just clear it */
+                       return;
+               goto handle_xfercomp_done;
+       }
+
+       /* Handle xfer complete on CSPLIT */
+       if (chan->qh->do_split) {
+               if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
+                   hsotg->core_params->dma_enable > 0) {
+                       if (qtd->complete_split &&
+                           dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
+                                                       qtd))
+                               goto handle_xfercomp_done;
+               } else {
+                       qtd->complete_split = 0;
+               }
+       }
+
+       if (!urb)
+               goto handle_xfercomp_done;
+
+       /* Update the QTD and URB states */
+       switch (pipe_type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               switch (qtd->control_phase) {
+               case DWC2_CONTROL_SETUP:
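+                       /* Zero-length requests have no data stage */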
+                       if (urb->length > 0)
+                               qtd->control_phase = DWC2_CONTROL_DATA;
+                       else
+                               qtd->control_phase = DWC2_CONTROL_STATUS;
+                       dev_vdbg(hsotg->dev,
+                                "  Control setup transaction done\n");
+                       halt_status = DWC2_HC_XFER_COMPLETE;
+                       break;
+               case DWC2_CONTROL_DATA:
+                       urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
+                                                             chnum, urb, qtd);
+                       if (urb_xfer_done) {
+                               qtd->control_phase = DWC2_CONTROL_STATUS;
+                               dev_vdbg(hsotg->dev,
+                                        "  Control data transfer done\n");
+                       } else {
+                               dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
+                                                         qtd);
+                       }
+                       halt_status = DWC2_HC_XFER_COMPLETE;
+                       break;
+               case DWC2_CONTROL_STATUS:
+                       dev_vdbg(hsotg->dev, "  Control transfer complete\n");
+                       if (urb->status == -EINPROGRESS)
+                               urb->status = 0;
+                       dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+                       halt_status = DWC2_HC_XFER_URB_COMPLETE;
+                       break;
+               }
+
+               dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
+                                               halt_status);
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
+               urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
+                                                     qtd);
+               if (urb_xfer_done) {
+                       dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+                       halt_status = DWC2_HC_XFER_URB_COMPLETE;
+               } else {
+                       halt_status = DWC2_HC_XFER_COMPLETE;
+               }
+
+               dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+               dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
+                                               halt_status);
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
+               urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
+                                                     qtd);
+
+               /*
+                * Interrupt URB is done on the first transfer complete
+                * interrupt
+                */
+               if (urb_xfer_done) {
+                       dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+                       halt_status = DWC2_HC_XFER_URB_COMPLETE;
+               } else {
+                       halt_status = DWC2_HC_XFER_COMPLETE;
+               }
+
+               dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+               dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
+                                           halt_status);
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
+               if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
+                       halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
+                                       chnum, qtd, DWC2_HC_XFER_COMPLETE);
+               dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
+                                           halt_status);
+               break;
+       }
+
+handle_xfercomp_done:
+       disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
+}
+
+/*
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
+                              struct dwc2_host_chan *chan, int chnum,
+                              struct dwc2_qtd *qtd)
+{
+       struct dwc2_hcd_urb *urb = qtd->urb;
+       int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
+       dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
+               chnum);
+
+       if (hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+                                           DWC2_HC_XFER_STALL);
+               goto handle_stall_done;
+       }
+
+       if (!urb)
+               goto handle_stall_halt;
+
+       if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
+               dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
+
+       if (pipe_type == USB_ENDPOINT_XFER_BULK ||
+           pipe_type == USB_ENDPOINT_XFER_INT) {
+               dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
+               /*
+                * USB protocol requires resetting the data toggle for bulk
+                * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+                * setup command is issued to the endpoint. Anticipate the
+                * CLEAR_FEATURE command since a STALL has occurred and reset
+                * the data toggle now.
+                */
+               chan->qh->data_toggle = 0;
+       }
+
+handle_stall_halt:
+       dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
+
+handle_stall_done:
+       disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
+                                     struct dwc2_host_chan *chan, int chnum,
+                                     struct dwc2_hcd_urb *urb,
+                                     struct dwc2_qtd *qtd,
+                                     enum dwc2_halt_status halt_status)
+{
+       u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
+                                                     qtd, halt_status, NULL);
+       u32 hctsiz;
+
+       if (urb->actual_length + xfer_length > urb->length) {
+               dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+               xfer_length = urb->length - urb->actual_length;
+       }
+
+       /* Non DWORD-aligned buffer case handling */
+       if (chan->align_buf && xfer_length && chan->ep_is_in) {
+               dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+               dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
+                                       DMA_FROM_DEVICE);
+               memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
+                      xfer_length);
+               dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
+                                          DMA_FROM_DEVICE);
+       }
+
+       urb->actual_length += xfer_length;
+
+       hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+       dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
+                __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
+       dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
+                chan->start_pkt_count);
+       dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
+                hctsiz >> TSIZ_PKTCNT_SHIFT &
+                TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
+       dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
+       dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
+                xfer_length);
+       dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
+                urb->actual_length);
+       dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
+                urb->length);
+}
+
+/*
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
+                            struct dwc2_host_chan *chan, int chnum,
+                            struct dwc2_qtd *qtd)
+{
+       dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
+                chnum);
+
+       /*
+        * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+        * interrupt. Re-start the SSPLIT transfer.
+        */
+       if (chan->do_split) {
+               if (chan->complete_split)
+                       qtd->error_count = 0;
+               qtd->complete_split = 0;
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
+               goto handle_nak_done;
+       }
+
+       switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+       case USB_ENDPOINT_XFER_BULK:
+               if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
+                       /*
+                        * NAK interrupts are enabled on bulk/control IN
+                        * transfers in DMA mode for the sole purpose of
+                        * resetting the error count after a transaction error
+                        * occurs. The core will continue transferring data.
+                        */
+                       qtd->error_count = 0;
+                       break;
+               }
+
+               /*
+                * NAK interrupts normally occur during OUT transfers in DMA
+                * or Slave mode. For IN transfers, more requests will be
+                * queued as request queue space is available.
+                */
+               qtd->error_count = 0;
+
+               if (!chan->qh->ping_state) {
+                       dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+                                                 qtd, DWC2_HC_XFER_NAK);
+                       dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+
+                       if (chan->speed == USB_SPEED_HIGH)
+                               chan->qh->ping_state = 1;
+               }
+
+               /*
+                * Halt the channel so the transfer can be re-started from
+                * the appropriate point or the PING protocol will
+                * start/continue
+                */
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               qtd->error_count = 0;
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               /* Should never get called for isochronous transfers */
+               dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
+               break;
+       }
+
+handle_nak_done:
+       disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
+}
+
+/*
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
+                            struct dwc2_host_chan *chan, int chnum,
+                            struct dwc2_qtd *qtd)
+{
+       struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+       dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
+                chnum);
+
+       if (chan->do_split) {
+               /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
+               if (!chan->ep_is_in &&
+                   chan->data_pid_start != DWC2_HC_PID_SETUP)
+                       qtd->ssplit_out_xfer_count = chan->xfer_len;
+
+               if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
+                       qtd->complete_split = 1;
+                       dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
+               } else {
+                       /* ISOC OUT */
+                       switch (chan->xact_pos) {
+                       case DWC2_HCSPLT_XACTPOS_ALL:
+                               break;
+                       case DWC2_HCSPLT_XACTPOS_END:
+                               qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
+                               qtd->isoc_split_offset = 0;
+                               break;
+                       case DWC2_HCSPLT_XACTPOS_BEGIN:
+                       case DWC2_HCSPLT_XACTPOS_MID:
+                               /*
+                                * For BEGIN or MID, calculate the length for
+                                * the next microframe to determine the correct
+                                * SSPLIT token, either MID or END
+                                */
+                               frame_desc = &qtd->urb->iso_descs[
+                                               qtd->isoc_frame_index];
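+                               /*
+                                * A microframe carries at most 188 bytes of
+                                * full-speed data through the TT, so each
+                                * ACKed SSPLIT advances the offset by that
+                                * amount
+                                */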
+                               qtd->isoc_split_offset += 188;
+
+                               if (frame_desc->length - qtd->isoc_split_offset
+                                                       <= 188)
+                                       qtd->isoc_split_pos =
+                                                       DWC2_HCSPLT_XACTPOS_END;
+                               else
+                                       qtd->isoc_split_pos =
+                                                       DWC2_HCSPLT_XACTPOS_MID;
+                               break;
+                       }
+               }
+       } else {
+               qtd->error_count = 0;
+
+               if (chan->qh->ping_state) {
+                       chan->qh->ping_state = 0;
+                       /*
+                        * Halt the channel so the transfer can be re-started
+                        * from the appropriate point. This only happens in
+                        * Slave mode. In DMA mode, the ping_state is cleared
+                        * when the transfer is started because the core
+                        * automatically executes the PING, then the transfer.
+                        */
+                       dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
+               }
+       }
+
+       /*
+        * If the ACK occurred when _not_ in the PING state, let the channel
+        * continue transferring data after clearing the error count
+        */
+       disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
+}
+
+/*
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
+                             struct dwc2_host_chan *chan, int chnum,
+                             struct dwc2_qtd *qtd)
+{
+       dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
+                chnum);
+
+       /*
+        * NYET on CSPLIT
+        * re-do the CSPLIT immediately on non-periodic
+        */
+       if (chan->do_split && chan->complete_split) {
+               if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
+                   hsotg->core_params->dma_enable > 0) {
+                       qtd->complete_split = 0;
+                       qtd->isoc_split_offset = 0;
+                       if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+                               if (qtd->urb)
+                                       dwc2_host_complete(hsotg,
+                                                          qtd->urb->priv,
+                                                          qtd->urb, 0);
+                               dwc2_release_channel(hsotg, chan, qtd,
+                                               DWC2_HC_XFER_URB_COMPLETE);
+                       } else {
+                               dwc2_release_channel(hsotg, chan, qtd,
+                                               DWC2_HC_XFER_NO_HALT_STATUS);
+                       }
+                       goto handle_nyet_done;
+               }
+
+               if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+                   chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+                       int frnum = dwc2_hcd_get_frame_number(hsotg);
+
+                       if (dwc2_full_frame_num(frnum) !=
+                           dwc2_full_frame_num(chan->qh->sched_frame)) {
+                               /*
+                                * No longer in the same full speed frame.
+                                * Treat this as a transaction error.
+                                */
+#if 0
+                               /*
+                                * Todo: Fix system performance so this can
+                                * be treated as an error. Right now complete
+                                * splits cannot be scheduled precisely enough
+                                * due to other system activity, so this error
+                                * occurs regularly in Slave mode.
+                                */
+                               qtd->error_count++;
+#endif
+                               qtd->complete_split = 0;
+                               dwc2_halt_channel(hsotg, chan, qtd,
+                                                 DWC2_HC_XFER_XACT_ERR);
+                               /* Todo: add support for isoc release */
+                               goto handle_nyet_done;
+                       }
+               }
+
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
+               goto handle_nyet_done;
+       }
+
+       chan->qh->ping_state = 1;
+       qtd->error_count = 0;
+
+       dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
+                                 DWC2_HC_XFER_NYET);
+       dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+
+       /*
+        * Halt the channel and re-start the transfer so the PING protocol
+        * will start
+        */
+       dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
+
+handle_nyet_done:
+       disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
+}
+
+/*
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
+                               struct dwc2_host_chan *chan, int chnum,
+                               struct dwc2_qtd *qtd)
+{
+       dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
+               chnum);
+
+       if (hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+                                           DWC2_HC_XFER_BABBLE_ERR);
+               goto handle_babble_done;
+       }
+
+       if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
+               if (qtd->urb)
+                       dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
+                                          -EOVERFLOW);
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
+       } else {
+               enum dwc2_halt_status halt_status;
+
+               halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
+                                               qtd, DWC2_HC_XFER_BABBLE_ERR);
+               dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+       }
+
+handle_babble_done:
+       dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+       disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
+}
+
+/*
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
+                               struct dwc2_host_chan *chan, int chnum,
+                               struct dwc2_qtd *qtd)
+{
+       struct dwc2_hcd_urb *urb = qtd->urb;
+       char *pipetype, *speed;
+       u32 hcchar;
+       u32 hcsplt;
+       u32 hctsiz;
+       u32 hc_dma;
+
+       dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
+               chnum);
+
+       if (!urb)
+               goto handle_ahberr_halt;
+
+       hcchar = readl(hsotg->regs + HCCHAR(chnum));
+       hcsplt = readl(hsotg->regs + HCSPLT(chnum));
+       hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+       hc_dma = readl(hsotg->regs + HCDMA(chnum));
+
+       dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
+       dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
+       dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
+       dev_err(hsotg->dev, "  Device address: %d\n",
+               dwc2_hcd_get_dev_addr(&urb->pipe_info));
+       dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
+               dwc2_hcd_get_ep_num(&urb->pipe_info),
+               dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
+
+       switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               pipetype = "CONTROL";
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               pipetype = "BULK";
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               pipetype = "INTERRUPT";
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               pipetype = "ISOCHRONOUS";
+               break;
+       default:
+               pipetype = "UNKNOWN";
+               break;
+       }
+
+       dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
+
+       switch (chan->speed) {
+       case USB_SPEED_HIGH:
+               speed = "HIGH";
+               break;
+       case USB_SPEED_FULL:
+               speed = "FULL";
+               break;
+       case USB_SPEED_LOW:
+               speed = "LOW";
+               break;
+       default:
+               speed = "UNKNOWN";
+               break;
+       }
+
+       dev_err(hsotg->dev, "  Speed: %s\n", speed);
+
+       dev_err(hsotg->dev, "  Max packet size: %d\n",
+               dwc2_hcd_get_mps(&urb->pipe_info));
+       dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
+       dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %p\n",
+               urb->buf, (void *)urb->dma);
+       dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %p\n",
+               urb->setup_packet, (void *)urb->setup_dma);
+       dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
+
+       /* Core halts the channel for Descriptor DMA mode */
+       if (hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+                                           DWC2_HC_XFER_AHB_ERR);
+               goto handle_ahberr_done;
+       }
+
+       dwc2_host_complete(hsotg, urb->priv, urb, -EIO);
+
+handle_ahberr_halt:
+       /*
+        * Force a channel halt. Don't call dwc2_halt_channel because that won't
+        * write to the HCCHARn register in DMA mode to force the halt.
+        */
+       dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
+
+handle_ahberr_done:
+       dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+       disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
+}
+
+/*
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
+                                struct dwc2_host_chan *chan, int chnum,
+                                struct dwc2_qtd *qtd)
+{
+       dev_dbg(hsotg->dev,
+               "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
+
+       if (hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+                                           DWC2_HC_XFER_XACT_ERR);
+               goto handle_xacterr_done;
+       }
+
+       switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+       case USB_ENDPOINT_XFER_BULK:
+               qtd->error_count++;
+               if (!chan->qh->ping_state) {
+                       dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+                                                 qtd, DWC2_HC_XFER_XACT_ERR);
+                       dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+                       if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
+                               chan->qh->ping_state = 1;
+               }
+
+               /*
+                * Halt the channel so the transfer can be re-started from
+                * the appropriate point or the PING protocol will start
+                */
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               qtd->error_count++;
+               if (chan->do_split && chan->complete_split)
+                       qtd->complete_split = 0;
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               {
+                       enum dwc2_halt_status halt_status;
+
+                       halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
+                                       chnum, qtd, DWC2_HC_XFER_XACT_ERR);
+                       dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+               }
+               break;
+       }
+
+handle_xacterr_done:
+       dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+       disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
+}
+
+/*
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
+                                 struct dwc2_host_chan *chan, int chnum,
+                                 struct dwc2_qtd *qtd)
+{
+       enum dwc2_halt_status halt_status;
+
+       dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
+               chnum);
+
+       switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+       case USB_ENDPOINT_XFER_BULK:
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
+                                       qtd, DWC2_HC_XFER_FRAME_OVERRUN);
+               dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+               break;
+       }
+
+       dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+       disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
+}
+
+/*
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
+                                   struct dwc2_host_chan *chan, int chnum,
+                                   struct dwc2_qtd *qtd)
+{
+       dev_dbg(hsotg->dev,
+               "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
+
+       if (chan->ep_is_in)
+               qtd->error_count = 0;
+       else
+               dev_err(hsotg->dev,
+                       "Data Toggle Error on OUT transfer, channel %d\n",
+                       chnum);
+
+       dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+       disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
+}
+
+/*
+ * For debug only. It checks that a valid halt status is set and that
+ * HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ *
+ * Return: true if halt status is ok, false otherwise
+ */
+static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
+                               struct dwc2_host_chan *chan, int chnum,
+                               struct dwc2_qtd *qtd)
+{
+#ifdef DEBUG
+       u32 hcchar;
+       u32 hctsiz;
+       u32 hcintmsk;
+       u32 hcsplt;
+
+       if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
+               /*
+                * This code is here only as a check. This condition should
+                * never happen. Ignore the halt if it does occur.
+                */
+               hcchar = readl(hsotg->regs + HCCHAR(chnum));
+               hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
+               hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
+               hcsplt = readl(hsotg->regs + HCSPLT(chnum));
+               dev_dbg(hsotg->dev,
+                       "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
+                        __func__);
+               dev_dbg(hsotg->dev,
+                       "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
+                       chnum, hcchar, hctsiz);
+               dev_dbg(hsotg->dev,
+                       "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
+                       chan->hcint, hcintmsk, hcsplt);
+               dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
+                       qtd->complete_split);
+               dev_warn(hsotg->dev,
+                        "%s: no halt status, channel %d, ignoring interrupt\n",
+                        __func__, chnum);
+               return false;
+       }
+
+       /*
+        * This code is here only as a check. hcchar.chdis should never be set
+        * when the halt interrupt occurs. Halt the channel again if it does
+        * occur.
+        */
+       hcchar = readl(hsotg->regs + HCCHAR(chnum));
+       if (hcchar & HCCHAR_CHDIS) {
+               dev_warn(hsotg->dev,
+                        "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
+                        __func__, hcchar);
+               chan->halt_pending = 0;
+               dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
+               return false;
+       }
+#endif
+
+       return true;
+}
+
+/*
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
+                                   struct dwc2_host_chan *chan, int chnum,
+                                   struct dwc2_qtd *qtd)
+{
+       u32 hcintmsk;
+       int out_nak_enh = 0;
+
+       dev_vdbg(hsotg->dev,
+                "--Host Channel %d Interrupt: DMA Channel Halted--\n", chnum);
+
+       /*
+        * For core with OUT NAK enhancement, the flow for high-speed
+        * CONTROL/BULK OUT is handled a little differently
+        */
+       if (hsotg->snpsid >= DWC2_CORE_REV_2_71a) {
+               if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
+                   (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+                    chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
+                       out_nak_enh = 1;
+               }
+       }
+
+       if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
+           (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
+            hsotg->core_params->dma_desc_enable <= 0)) {
+               if (hsotg->core_params->dma_desc_enable > 0)
+                       dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+                                                   chan->halt_status);
+               else
+                       /*
+                        * Just release the channel. A dequeue can happen on a
+                        * transfer timeout. In the case of an AHB Error, the
+                        * channel was forced to halt because there's no way to
+                        * gracefully recover.
+                        */
+                       dwc2_release_channel(hsotg, chan, qtd,
+                                            chan->halt_status);
+               return;
+       }
+
+       hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
+
+       if (chan->hcint & HCINTMSK_XFERCOMPL) {
+               /*
+                * Todo: This is here because of a possible hardware bug. Spec
+                * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
+                * interrupt w/ACK bit set should occur, but I only see the
+                * XFERCOMP bit, even with it masked out. This is a workaround
+                * for that behavior. Should fix this when hardware is fixed.
+                */
+               if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
+                       dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
+               dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
+       } else if (chan->hcint & HCINTMSK_STALL) {
+               dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
+       } else if ((chan->hcint & HCINTMSK_XACTERR) &&
+                  hsotg->core_params->dma_desc_enable <= 0) {
+               if (out_nak_enh) {
+                       if (chan->hcint &
+                           (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
+                               dev_vdbg(hsotg->dev,
+                                        "XactErr with NYET/NAK/ACK\n");
+                               qtd->error_count = 0;
+                       } else {
+                               dev_vdbg(hsotg->dev,
+                                        "XactErr without NYET/NAK/ACK\n");
+                       }
+               }
+
+               /*
+                * Must handle xacterr before nak or ack. Could get a xacterr
+                * at the same time as either of these on a BULK/CONTROL OUT
+                * that started with a PING. The xacterr takes precedence.
+                */
+               dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
+       } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
+                  hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
+       } else if ((chan->hcint & HCINTMSK_AHBERR) &&
+                  hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
+       } else if (chan->hcint & HCINTMSK_BBLERR) {
+               dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
+       } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
+               dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
+       } else if (!out_nak_enh) {
+               if (chan->hcint & HCINTMSK_NYET) {
+                       /*
+                        * Must handle nyet before nak or ack. Could get a nyet
+                        * at the same time as either of those on a BULK/CONTROL
+                        * OUT that started with a PING. The nyet takes
+                        * precedence.
+                        */
+                       dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
+               } else if ((chan->hcint & HCINTMSK_NAK) &&
+                          !(hcintmsk & HCINTMSK_NAK)) {
+                       /*
+                        * If nak is not masked, it's because a non-split IN
+                        * transfer is in an error state. In that case, the nak
+                        * is handled by the nak interrupt handler, not here.
+                        * Handle nak here for BULK/CONTROL OUT transfers, which
+                        * halt on a NAK to allow rewinding the buffer pointer.
+                        */
+                       dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
+               } else if ((chan->hcint & HCINTMSK_ACK) &&
+                          !(hcintmsk & HCINTMSK_ACK)) {
+                       /*
+                        * If ack is not masked, it's because a non-split IN
+                        * transfer is in an error state. In that case, the ack
+                        * is handled by the ack interrupt handler, not here.
+                        * Handle ack here for split transfers. Start splits
+                        * halt on ACK.
+                        */
+                       dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
+               } else {
+                       if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+                           chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+                               /*
+                                * A periodic transfer halted with no other
+                                * channel interrupts set. Assume it was halted
+                                * by the core because it could not be completed
+                                * in its scheduled (micro)frame.
+                                */
+                               dev_dbg(hsotg->dev,
+                                       "%s: Halt channel %d (assume incomplete periodic transfer)\n",
+                                       __func__, chnum);
+                               dwc2_halt_channel(hsotg, chan, qtd,
+                                       DWC2_HC_XFER_PERIODIC_INCOMPLETE);
+                       } else {
+                               dev_err(hsotg->dev,
+                                       "%s: Channel %d - ChHltd set, but reason is unknown\n",
+                                       __func__, chnum);
+                               dev_err(hsotg->dev,
+                                       "hcint 0x%08x, intsts 0x%08x\n",
+                                       chan->hcint,
+                                       readl(hsotg->regs + GINTSTS));
+                       }
+               }
+       } else {
+               dev_info(hsotg->dev,
+                        "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
+                        chan->hcint);
+       }
+}
+
+/*
+ * Handles a host channel Channel Halted interrupt
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
+                               struct dwc2_host_chan *chan, int chnum,
+                               struct dwc2_qtd *qtd)
+{
+       dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
+                chnum);
+
+       if (hsotg->core_params->dma_enable > 0) {
+               dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
+       } else {
+               if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
+                       return;
+               dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
+       }
+}
+
+/* Handles interrupt for a specific Host Channel */
+static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+{
+       struct dwc2_qtd *qtd;
+       struct dwc2_host_chan *chan;
+       u32 hcint, hcintmsk;
+
+       dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n", chnum);
+
+       hcint = readl(hsotg->regs + HCINT(chnum));
+       hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
+       dev_vdbg(hsotg->dev,
+                "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+                hcint, hcintmsk, hcint & hcintmsk);
+
+       chan = hsotg->hc_ptr_array[chnum];
+       if (!chan) {
+               dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
+               writel(hcint, hsotg->regs + HCINT(chnum));
+               return;
+       }
+
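+       /*
+        * Acknowledge all raised conditions, keep the full set for the
+        * handlers in chan->hcint, and dispatch only on the unmasked bits
+        */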
+       writel(hcint, hsotg->regs + HCINT(chnum));
+       chan->hcint = hcint;
+       hcint &= hcintmsk;
+
+       if (list_empty(&chan->qh->qtd_list)) {
+               dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
+                       chnum);
+               dev_dbg(hsotg->dev,
+                       "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+                       chan->hcint, hcintmsk, hcint);
+               chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
+               disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
+               chan->hcint = 0;
+               return;
+       }
+
+       qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
+                              qtd_list_entry);
+
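+       /*
+        * In Slave mode, when other conditions accompany Channel Halted,
+        * handle those conditions instead; their handlers request any halt
+        * that is needed
+        */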
+       if (hsotg->core_params->dma_enable <= 0) {
+               if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
+                       hcint &= ~HCINTMSK_CHHLTD;
+       }
+
+       if (hcint & HCINTMSK_XFERCOMPL) {
+               dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
+               /*
+                * If NYET occurred at same time as Xfer Complete, the NYET is
+                * handled by the Xfer Complete interrupt handler. Don't want
+                * to call the NYET interrupt handler in this case.
+                */
+               hcint &= ~HCINTMSK_NYET;
+       }
+       if (hcint & HCINTMSK_CHHLTD)
+               dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_AHBERR)
+               dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_STALL)
+               dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_NAK)
+               dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_ACK)
+               dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_NYET)
+               dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_XACTERR)
+               dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_BBLERR)
+               dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_FRMOVRUN)
+               dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
+       if (hcint & HCINTMSK_DATATGLERR)
+               dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
+
+       chan->hcint = 0;
+}
+
+/*
+ * This interrupt indicates that one or more host channels have a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately.
+ */
+static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
+{
+       u32 haint;
+       int i;
+
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       haint = readl(hsotg->regs + HAINT);
+       dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
+
+       for (i = 0; i < hsotg->core_params->host_channels; i++) {
+               if (haint & (1 << i))
+                       dwc2_hc_n_intr(hsotg, i);
+       }
+}
+
+/* This function handles interrupts for the HCD */
+int dwc2_hcd_intr(struct dwc2_hsotg *hsotg)
+{
+       u32 gintsts;
+       int retval = 0;
+
+       if (dwc2_check_core_status(hsotg) < 0) {
+               dev_warn(hsotg->dev, "Controller is disconnected\n");
+               return 0;
+       }
+
+       spin_lock(&hsotg->lock);
+
+       /* Check if HOST Mode */
+       if (dwc2_is_host_mode(hsotg)) {
+               gintsts = dwc2_read_core_intr(hsotg);
+               if (!gintsts) {
+                       spin_unlock(&hsotg->lock);
+                       return 0;
+               }
+
+               retval = 1;
+
+#ifndef DEBUG_SOF
+               /* Don't print debug message in the interrupt handler on SOF */
+               if (gintsts != GINTSTS_SOF)
+#endif
+                       dev_vdbg(hsotg->dev,
+                                "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
+                                gintsts);
+
+               if (gintsts & GINTSTS_SOF)
+                       dwc2_sof_intr(hsotg);
+               if (gintsts & GINTSTS_RXFLVL)
+                       dwc2_rx_fifo_level_intr(hsotg);
+               if (gintsts & GINTSTS_NPTXFEMP)
+                       dwc2_np_tx_fifo_empty_intr(hsotg);
+               if (gintsts & GINTSTS_I2CINT)
+                       /* Todo: Implement i2cintr handler */
+                       writel(GINTSTS_I2CINT, hsotg->regs + GINTSTS);
+               if (gintsts & GINTSTS_PRTINT)
+                       dwc2_port_intr(hsotg);
+               if (gintsts & GINTSTS_HCHINT)
+                       dwc2_hc_intr(hsotg);
+               if (gintsts & GINTSTS_PTXFEMP)
+                       dwc2_perio_tx_fifo_empty_intr(hsotg);
+
+#ifndef DEBUG_SOF
+               if (gintsts != GINTSTS_SOF) {
+#endif
+                       dev_vdbg(hsotg->dev,
+                                "DWC OTG HCD Finished Servicing Interrupts\n");
+                       dev_vdbg(hsotg->dev,
+                                "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
+                                readl(hsotg->regs + GINTSTS),
+                                readl(hsotg->regs + GINTMSK));
+#ifndef DEBUG_SOF
+               }
+#endif
+       }
+
+       spin_unlock(&hsotg->lock);
+
+       return retval;
+}
diff --git a/drivers/staging/dwc2/hcd_queue.c b/drivers/staging/dwc2/hcd_queue.c
new file mode 100644 (file)
index 0000000..74b7b9b
--- /dev/null
@@ -0,0 +1,675 @@
+/*
+ * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors for Host mode
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+#define SCHEDULE_SLOP 10
+
+/**
+ * dwc2_qh_init() - Initializes a QH structure
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    The QH to init
+ * @urb:   Holds the information about the device/endpoint needed to initialize
+ *         the QH
+ */
+static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+                        struct dwc2_hcd_urb *urb)
+{
+       int dev_speed, hub_addr, hub_port;
+       char *speed, *type;
+
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       /* Initialize QH */
+       qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+       qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
+
+       qh->data_toggle = DWC2_HC_PID_DATA0;
+       qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
+       INIT_LIST_HEAD(&qh->qtd_list);
+       INIT_LIST_HEAD(&qh->qh_list_entry);
+
+       /* FS/LS Endpoint on HS Hub, NOT virtual root hub */
+       dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+
+       dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
+
+       if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
+           hub_addr != 0 && hub_addr != 1) {
+               dev_vdbg(hsotg->dev,
+                        "QH init: EP %d: TT found at hub addr %d, for port %d\n",
+                        dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
+                        hub_port);
+               qh->do_split = 1;
+       }
+
+       if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
+           qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
+               /* Compute scheduling parameters once and save them */
+               u32 hprt, prtspd;
+
+               /* Todo: Account for split transfers in the bus time */
+               int bytecount =
+                       dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
+
+               qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
+                               USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
+                               qh->ep_type == USB_ENDPOINT_XFER_ISOC,
+                               bytecount));
+               /* Start in a slightly future (micro)frame */
+               qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
+                                                    SCHEDULE_SLOP);
+               qh->interval = urb->interval;
+#if 0
+               /* Increase interrupt polling rate for debugging */
+               if (qh->ep_type == USB_ENDPOINT_XFER_INT)
+                       qh->interval = 8;
+#endif
+               hprt = readl(hsotg->regs + HPRT0);
+               prtspd = hprt & HPRT0_SPD_MASK;
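+               /*
+                * A FS/LS device behind a high-speed root port is scheduled
+                * in microframes: convert the interval from frames to
+                * microframes and align the first schedule point to the last
+                * microframe of a frame, which also becomes the start-split
+                * frame.
+                */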
+               if (prtspd == HPRT0_SPD_HIGH_SPEED &&
+                   (dev_speed == USB_SPEED_LOW ||
+                    dev_speed == USB_SPEED_FULL)) {
+                       qh->interval *= 8;
+                       qh->sched_frame |= 0x7;
+                       qh->start_split_frame = qh->sched_frame;
+               }
+               dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
+       }
+
+       dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
+       dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
+       dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
+                dwc2_hcd_get_dev_addr(&urb->pipe_info));
+       dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
+                dwc2_hcd_get_ep_num(&urb->pipe_info),
+                dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
+
+       qh->dev_speed = dev_speed;
+
+       switch (dev_speed) {
+       case USB_SPEED_LOW:
+               speed = "low";
+               break;
+       case USB_SPEED_FULL:
+               speed = "full";
+               break;
+       case USB_SPEED_HIGH:
+               speed = "high";
+               break;
+       default:
+               speed = "?";
+               break;
+       }
+       dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);
+
+       switch (qh->ep_type) {
+       case USB_ENDPOINT_XFER_ISOC:
+               type = "isochronous";
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               type = "interrupt";
+               break;
+       case USB_ENDPOINT_XFER_CONTROL:
+               type = "control";
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               type = "bulk";
+               break;
+       default:
+               type = "?";
+               break;
+       }
+
+       dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
+
+       if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+               dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
+                        qh->usecs);
+               dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
+                        qh->interval);
+       }
+}
+
+/**
+ * dwc2_hcd_qh_create() - Allocates and initializes a QH
+ *
+ * @hsotg:        The HCD state structure for the DWC OTG controller
+ * @urb:          Holds the information about the device/endpoint needed
+ *                to initialize the QH
+ * @mem_flags:    Flags for allocating memory
+ *
+ * Return: Pointer to the newly allocated QH, or NULL on error
+ */
+static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+                                         struct dwc2_hcd_urb *urb,
+                                         gfp_t mem_flags)
+{
+       struct dwc2_qh *qh;
+
+       /* Allocate memory */
+       qh = kzalloc(sizeof(*qh), mem_flags);
+       if (!qh)
+               return NULL;
+
+       dwc2_qh_init(hsotg, qh, urb);
+
+       if (hsotg->core_params->dma_desc_enable > 0 &&
+           dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
+               dwc2_hcd_qh_free(hsotg, qh);
+               return NULL;
+       }
+
+       return qh;
+}
+
+/**
+ * dwc2_hcd_qh_free() - Frees the QH
+ *
+ * @hsotg: HCD instance
+ * @qh:    The QH to free
+ *
+ * QH should already be removed from the list. QTD list should already be empty
+ * if called from URB Dequeue.
+ *
+ * Must NOT be called with interrupt disabled or spinlock held
+ */
+void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+       u32 buf_size;
+
+       if (hsotg->core_params->dma_desc_enable > 0) {
+               dwc2_hcd_qh_free_ddma(hsotg, qh);
+       } else if (qh->dw_align_buf) {
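+               /*
+                * Must mirror the size used when the DWORD-aligned buffer
+                * was allocated: 4096 bytes for isochronous endpoints,
+                * otherwise the core's maximum transfer size.
+                */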
+               if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
+                       buf_size = 4096;
+               else
+                       buf_size = hsotg->core_params->max_transfer_size;
+               dma_free_coherent(hsotg->dev, buf_size, qh->dw_align_buf,
+                                 qh->dw_align_buf_dma);
+       }
+
+       kfree(qh);
+}
+
+/**
+ * dwc2_periodic_channel_available() - Checks that a channel is available for a
+ * periodic transfer
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
+{
+       /*
+        * Currently assuming that there is a dedicated host channel for
+        * each periodic transaction plus at least one host channel for
+        * non-periodic transactions
+        */
+       int status;
+       int num_channels;
+
+       num_channels = hsotg->core_params->host_channels;
+       if (hsotg->periodic_channels + hsotg->non_periodic_channels <
+                                                       num_channels &&
+           hsotg->periodic_channels < num_channels - 1) {
+               status = 0;
+       } else {
+               dev_dbg(hsotg->dev,
+                       "%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
+                       __func__, num_channels, hsotg->periodic_channels,
+                       hsotg->non_periodic_channels);
+               status = -ENOSPC;
+       }
+
+       return status;
+}
+
+/**
+ * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
+ * for the specified QH in the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH containing periodic bandwidth required
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * For simplicity, this calculation assumes that all the transfers in the
+ * periodic schedule may occur in the same (micro)frame
+ */
+static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
+                                        struct dwc2_qh *qh)
+{
+       int status;
+       s16 max_claimed_usecs;
+
+       status = 0;
+
+       if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
+               /*
+                * High speed mode
+                * Max periodic usecs is 80% x 125 usec = 100 usec
+                */
+               max_claimed_usecs = 100 - qh->usecs;
+       } else {
+               /*
+                * Full speed mode
+                * Max periodic usecs is 90% x 1000 usec = 900 usec
+                */
+               max_claimed_usecs = 900 - qh->usecs;
+       }
+
+       if (hsotg->periodic_usecs > max_claimed_usecs) {
+               dev_err(hsotg->dev,
+                       "%s: already claimed usecs %d, required usecs %d\n",
+                       __func__, hsotg->periodic_usecs, qh->usecs);
+               status = -ENOSPC;
+       }
+
+       return status;
+}
+
+/**
+ * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
+ * host channel is large enough to handle the maximum data transfer in a single
+ * (micro)frame for a periodic transfer
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for a periodic endpoint
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
+                                   struct dwc2_qh *qh)
+{
+       u32 max_xfer_size;
+       u32 max_channel_xfer_size;
+       int status = 0;
+
+       max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
+       max_channel_xfer_size = hsotg->core_params->max_transfer_size;
+
+       if (max_xfer_size > max_channel_xfer_size) {
+               dev_err(hsotg->dev,
+                       "%s: Periodic xfer length %d > max xfer length for channel %d\n",
+                       __func__, max_xfer_size, max_channel_xfer_size);
+               status = -ENOSPC;
+       }
+
+       return status;
+}
+
+/**
+ * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
+ * the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for the periodic transfer. The QH should already contain the
+ *         scheduling information.
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+       int status;
+
+       status = dwc2_periodic_channel_available(hsotg);
+       if (status) {
+               dev_dbg(hsotg->dev,
+                       "%s: No host channel available for periodic transfer\n",
+                       __func__);
+               return status;
+       }
+
+       status = dwc2_check_periodic_bandwidth(hsotg, qh);
+       if (status) {
+               dev_dbg(hsotg->dev,
+                       "%s: Insufficient periodic bandwidth for periodic transfer\n",
+                       __func__);
+               return status;
+       }
+
+       status = dwc2_check_max_xfer_size(hsotg, qh);
+       if (status) {
+               dev_dbg(hsotg->dev,
+                       "%s: Channel max transfer size too small for periodic transfer\n",
+                       __func__);
+               return status;
+       }
+
+       if (hsotg->core_params->dma_desc_enable > 0)
+               /* Don't rely on SOF and start in ready schedule */
+               list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
+       else
+               /* Always start in inactive schedule */
+               list_add_tail(&qh->qh_list_entry,
+                             &hsotg->periodic_sched_inactive);
+
+       /* Reserve periodic channel */
+       hsotg->periodic_channels++;
+
+       /* Update claimed usecs per (micro)frame */
+       hsotg->periodic_usecs += qh->usecs;
+
+       return status;
+}
+
+/**
+ * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
+ * from the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for the periodic transfer
+ */
+static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
+                                    struct dwc2_qh *qh)
+{
+       list_del_init(&qh->qh_list_entry);
+
+       /* Release periodic channel reservation */
+       hsotg->periodic_channels--;
+
+       /* Update claimed usecs per (micro)frame */
+       hsotg->periodic_usecs -= qh->usecs;
+}
+
+/**
+ * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
+ * schedule if it is not already in the schedule. If the QH is already in
+ * the schedule, no action is taken.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    The QH to add
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+       int status = 0;
+       u32 intr_mask;
+
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       if (!list_empty(&qh->qh_list_entry))
+               /* QH already in a schedule */
+               return status;
+
+       /* Add the new QH to the appropriate schedule */
+       if (dwc2_qh_is_non_per(qh)) {
+               /* Always start in inactive schedule */
+               list_add_tail(&qh->qh_list_entry,
+                             &hsotg->non_periodic_sched_inactive);
+       } else {
+               status = dwc2_schedule_periodic(hsotg, qh);
+               if (status == 0) {
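+                       /*
+                        * Unmask the SOF interrupt when the first periodic
+                        * QH is scheduled; it is masked again in
+                        * dwc2_hcd_qh_unlink() once no periodic QHs remain.
+                        */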
+                       if (!hsotg->periodic_qh_count) {
+                               intr_mask = readl(hsotg->regs + GINTMSK);
+                               intr_mask |= GINTSTS_SOF;
+                               writel(intr_mask, hsotg->regs + GINTMSK);
+                       }
+                       hsotg->periodic_qh_count++;
+               }
+       }
+
+       return status;
+}
+
+/**
+ * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
+ * schedule. Memory is not freed.
+ *
+ * @hsotg: The HCD state structure
+ * @qh:    QH to remove from schedule
+ */
+void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+       u32 intr_mask;
+
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       if (list_empty(&qh->qh_list_entry))
+               /* QH is not in a schedule */
+               return;
+
+       if (dwc2_qh_is_non_per(qh)) {
+               if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
+                       hsotg->non_periodic_qh_ptr =
+                                       hsotg->non_periodic_qh_ptr->next;
+               list_del_init(&qh->qh_list_entry);
+       } else {
+               dwc2_deschedule_periodic(hsotg, qh);
+               hsotg->periodic_qh_count--;
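+               /* Mask the SOF interrupt again once no periodic QHs remain */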
+               if (!hsotg->periodic_qh_count) {
+                       intr_mask = readl(hsotg->regs + GINTMSK);
+                       intr_mask &= ~GINTSTS_SOF;
+                       writel(intr_mask, hsotg->regs + GINTMSK);
+               }
+       }
+}
+
+/*
+ * Schedule the next continuing periodic split transfer
+ */
+static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
+                                     struct dwc2_qh *qh, u16 frame_number,
+                                     int sched_next_periodic_split)
+{
+       u16 incr;
+
+       if (sched_next_periodic_split) {
+               qh->sched_frame = frame_number;
+               incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
+               if (dwc2_frame_num_le(frame_number, incr)) {
+                       /*
+                        * Allow one frame to elapse after start split
+                        * microframe before scheduling complete split, but
+                        * DON'T if we are doing the next start split in the
+                        * same frame for an ISOC out
+                        */
+                       if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
+                           qh->ep_is_in != 0) {
+                               qh->sched_frame =
+                                       dwc2_frame_num_inc(qh->sched_frame, 1);
+                       }
+               }
+       } else {
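+               /*
+                * Not continuing a split: schedule the next start split at
+                * the next interval boundary, no earlier than the current
+                * frame, aligned to the last microframe of the frame.
+                */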
+               qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
+                                                    qh->interval);
+               if (dwc2_frame_num_le(qh->sched_frame, frame_number))
+                       qh->sched_frame = frame_number;
+               qh->sched_frame |= 0x7;
+               qh->start_split_frame = qh->sched_frame;
+       }
+}
+
+/*
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+                           int sched_next_periodic_split)
+{
+       dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+       if (dwc2_qh_is_non_per(qh)) {
+               dwc2_hcd_qh_unlink(hsotg, qh);
+               if (!list_empty(&qh->qtd_list))
+                       /* Add back to inactive non-periodic schedule */
+                       dwc2_hcd_qh_add(hsotg, qh);
+       } else {
+               u16 frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+               if (qh->do_split) {
+                       dwc2_sched_periodic_split(hsotg, qh, frame_number,
+                                                 sched_next_periodic_split);
+               } else {
+                       qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
+                                                            qh->interval);
+                       if (dwc2_frame_num_le(qh->sched_frame, frame_number))
+                               qh->sched_frame = frame_number;
+               }
+
+               if (list_empty(&qh->qtd_list)) {
+                       dwc2_hcd_qh_unlink(hsotg, qh);
+               } else {
+                       /*
+                        * Remove from periodic_sched_queued and move to
+                        * appropriate queue
+                        */
+                       if (qh->sched_frame == frame_number)
+                               list_move(&qh->qh_list_entry,
+                                         &hsotg->periodic_sched_ready);
+                       else
+                               list_move(&qh->qh_list_entry,
+                                         &hsotg->periodic_sched_inactive);
+               }
+       }
+}
+
+/**
+ * dwc2_hcd_qtd_init() - Initializes a QTD structure
+ *
+ * @qtd: The QTD to initialize
+ * @urb: The associated URB
+ */
+void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
+{
+       qtd->urb = urb;
+       if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
+                       USB_ENDPOINT_XFER_CONTROL) {
+               /*
+                * The only time the QTD data toggle is used is on the data
+                * phase of control transfers. This phase always starts with
+                * DATA1.
+                */
+               qtd->data_toggle = DWC2_HC_PID_DATA1;
+               qtd->control_phase = DWC2_CONTROL_SETUP;
+       }
+
+       /* Start split */
+       qtd->complete_split = 0;
+       qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
+       qtd->isoc_split_offset = 0;
+       qtd->in_process = 0;
+
+       /* Store the qtd ptr in the urb to reference the QTD */
+       urb->qtd = qtd;
+}
+
+/**
+ * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
+ *
+ * @hsotg:        The DWC HCD structure
+ * @qtd:          The QTD to add
+ * @qh:           Out parameter to return queue head
+ * @mem_flags:    Flags for allocating memory
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * Finds the correct QH to place the QTD into. If it does not find a QH, it
+ * will create a new QH. If the QH to which the QTD is added is not currently
+ * scheduled, it is placed into the proper schedule based on its EP type.
+ */
+int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+                    struct dwc2_qh **qh, gfp_t mem_flags)
+{
+       struct dwc2_hcd_urb *urb = qtd->urb;
+       unsigned long flags;
+       int allocated = 0;
+       int retval = 0;
+
+       /*
+        * Get the QH that holds the QTD list the QTD will be inserted into.
+        * Create a QH if one doesn't already exist.
+        */
+       if (*qh == NULL) {
+               *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
+               if (*qh == NULL)
+                       return -ENOMEM;
+               allocated = 1;
+       }
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+       retval = dwc2_hcd_qh_add(hsotg, *qh);
+       if (retval && allocated) {
+               struct dwc2_qtd *qtd2, *qtd2_tmp;
+               struct dwc2_qh *qh_tmp = *qh;
+
+               *qh = NULL;
+               dwc2_hcd_qh_unlink(hsotg, qh_tmp);
+
+               /* Free each QTD in the QH's QTD list */
+               list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
+                                        qtd_list_entry)
+                       dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
+
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+               dwc2_hcd_qh_free(hsotg, qh_tmp);
+       } else {
+               qtd->qh = *qh;
+               list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+               spin_unlock_irqrestore(&hsotg->lock, flags);
+       }
+
+       return retval;
+}