diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index dff22ab01851aadb37fd434a334cf6cc681cf29c..8d57b1b12e411ef902d26af984e7d34a741a4cf2 100644
@@ -21,6 +21,7 @@
  * NOTE: PM support is currently not available.
  */
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -58,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN                0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY             0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL         0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)             (((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)      (((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)               ((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET              0x2C
 #define XGENE_DMA_PQ_CHANNEL           1
 #define XGENE_DMA_MAX_BYTE_CNT         0x4000  /* 16 KB */
 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT        0x14000 /* 80 KB */
-#define XGENE_DMA_XOR_ALIGNMENT                6       /* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC          5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE  0x0
 #define XGENE_DMA_INVALID_LEN_CODE     0x7800000000000000ULL
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
        return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-       u32 __iomem *cmd_base = ring->cmd_base;
-       u32 ring_state = ioread32(&cmd_base[1]);
-
-       return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
                                     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
        dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-                                  struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+                                   struct xgene_dma_desc_sw *desc_sw)
 {
+       struct xgene_dma_ring *ring = &chan->tx_ring;
        struct xgene_dma_desc_hw *desc_hw;
 
-       /* Check if can push more descriptor to hw for execution */
-       if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-               return -EBUSY;
-
        /* Get hw descriptor from DMA tx ring */
        desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
                memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
        }
 
+       /* Increment the pending transaction count */
+       chan->pending += ((desc_sw->flags &
+                         XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
        /* Notify the hw that we have descriptor ready for execution */
        iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
                  2 : 1, ring->cmd);
-
-       return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-       int ret;
 
        /*
         * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
                if (chan->pending >= chan->max_outstanding)
                        return;
 
-               ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-               if (ret)
-                       return;
+               xgene_chan_xfer_request(chan, desc_sw);
 
                /*
                 * Delete this element from ld pending queue and append it to
                 * ld running queue
                 */
                list_move_tail(&desc_sw->node, &chan->ld_running);
-
-               /* Increment the pending transaction count */
-               chan->pending++;
        }
 }
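
The two hunks above replace the hardware occupancy poll (the removed XGENE_DMA_RING_DESC_CNT read in xgene_dma_ring_desc_cnt()) with pure software accounting: a 64 B descriptor occupies two ring slots, so chan->pending now moves by the same 1 or 2 that is written to the ring command register. A minimal sketch of that invariant, with hypothetical names and simplified types (not the driver's real structures):

    #include <errno.h>
    #include <stdbool.h>

    struct ring_acct {
            unsigned int pending;         /* slots submitted, not completed */
            unsigned int max_outstanding; /* bound; see tx_ring->slots below */
    };

    static int submit(struct ring_acct *a, bool is_64b_desc)
    {
            unsigned int slots = is_64b_desc ? 2 : 1;

            /* Mirrors the chan->pending >= chan->max_outstanding check
             * in xgene_chan_xfer_ld_pending() */
            if (a->pending >= a->max_outstanding)
                    return -EBUSY;

            a->pending += slots;  /* matches iowrite32(slots, ring->cmd) */
            return 0;
    }

    static void complete_one(struct ring_acct *a, bool was_64b_desc)
    {
            a->pending -= was_64b_desc ? 2 : 1;
    }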
 
@@ -764,12 +748,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
        struct xgene_dma_ring *ring = &chan->rx_ring;
        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
        struct xgene_dma_desc_hw *desc_hw;
+       struct list_head ld_completed;
        u8 status;
 
+       INIT_LIST_HEAD(&ld_completed);
+
+       spin_lock_bh(&chan->lock);
+
        /* Clean already completed and acked descriptors */
        xgene_dma_clean_completed_descriptor(chan);
 
-       /* Run the callback for each descriptor, in order */
+       /* Move all completed descriptors to ld completed queue, in order */
        list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
                /* Get subsequent hw descriptor from DMA rx ring */
                desc_hw = &ring->desc_hw[ring->head];
@@ -812,15 +801,18 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                /* Mark this hw descriptor as processed */
                desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-               xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-               xgene_dma_clean_running_descriptor(chan, desc_sw);
-
                /*
                 * Decrement the pending transaction count
                 * as we have processed one
                 */
-               chan->pending--;
+               chan->pending -= ((desc_sw->flags &
+                                 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
+               /*
+                * Delete this node from ld running queue and append it to
+                * ld completed queue for further processing
+                */
+               list_move_tail(&desc_sw->node, &ld_completed);
        }
 
        /*
@@ -829,6 +821,14 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
         * ahead and free the descriptors below.
         */
        xgene_chan_xfer_ld_pending(chan);
+
+       spin_unlock_bh(&chan->lock);
+
+       /* Run the callback for each descriptor, in order */
+       list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+               xgene_dma_run_tx_complete_actions(chan, desc_sw);
+               xgene_dma_clean_running_descriptor(chan, desc_sw);
+       }
 }
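
Note the shape of the rewritten cleanup: completed descriptors are first moved to the on-stack ld_completed list under chan->lock, and their callbacks run only after the lock is dropped. A client callback may submit the next transaction immediately, re-entering the driver and taking chan->lock, so invoking it under the lock would self-deadlock. A kernel-style sketch of the pattern (hypothetical my_* names; assumes the usual <linux/list.h>, <linux/spinlock.h> and <linux/slab.h> APIs):

    struct my_desc {
            struct list_head node;
            void (*callback)(void *param);  /* dma_async_tx_callback shape */
            void *param;
    };

    struct my_chan {
            spinlock_t lock;
            struct list_head ld_done;       /* finished, callbacks not run */
    };

    static void my_chan_cleanup(struct my_chan *chan)
    {
            struct my_desc *desc, *_desc;
            LIST_HEAD(ld_completed);        /* on-stack, needs no locking */

            spin_lock_bh(&chan->lock);
            /* Detach everything that finished while holding the lock ... */
            list_splice_tail_init(&chan->ld_done, &ld_completed);
            spin_unlock_bh(&chan->lock);

            /* ... then run callbacks unlocked, still in completion order;
             * a callback may retake chan->lock to queue more work. */
            list_for_each_entry_safe(desc, _desc, &ld_completed, node) {
                    if (desc->callback)
                            desc->callback(desc->param);
                    list_del(&desc->node);
                    kfree(desc);
            }
    }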
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -877,11 +877,11 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
        if (!chan->desc_pool)
                return;
 
-       spin_lock_bh(&chan->lock);
-
        /* Process all running descriptor */
        xgene_dma_cleanup_descriptors(chan);
 
+       spin_lock_bh(&chan->lock);
+
        /* Clean all link descriptor queues */
        xgene_dma_free_desc_list(chan, &chan->ld_pending);
        xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1201,15 +1201,11 @@ static void xgene_dma_tasklet_cb(unsigned long data)
 {
        struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-       spin_lock_bh(&chan->lock);
-
        /* Run all cleanup for descriptors which have been completed */
        xgene_dma_cleanup_descriptors(chan);
 
        /* Re-enable DMA channel IRQ */
        enable_irq(chan->rx_irq);
-
-       spin_unlock_bh(&chan->lock);
 }
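
With the locking folded into xgene_dma_cleanup_descriptors() itself, the tasklet reduces to "drain, then re-arm". The hard IRQ handler is not part of this hunk, but the enable_irq() here implies the usual top/bottom-half split; a sketch under that assumption (hypothetical my_* names, with my_chan assumed to also carry a tasklet and an rx_irq field):

    static irqreturn_t my_chan_isr(int irq, void *id)
    {
            struct my_chan *chan = id;

            /* Keep the line masked until the bottom half has drained
             * the rx ring, then let the tasklet re-enable it. */
            disable_irq_nosync(irq);
            tasklet_schedule(&chan->tasklet);
            return IRQ_HANDLED;
    }

    static void my_chan_tasklet(unsigned long data)
    {
            struct my_chan *chan = (struct my_chan *)data;

            my_chan_cleanup(chan);     /* takes chan->lock internally */
            enable_irq(chan->rx_irq);  /* safe: cleanup no longer races */
    }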
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
@@ -1410,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
                                     struct xgene_dma_ring *ring,
                                     enum xgene_dma_ring_cfgsize cfgsize)
 {
+       int ret;
+
        /* Setup DMA ring descriptor variables */
        ring->pdma = chan->pdma;
        ring->cfgsize = cfgsize;
        ring->num = chan->pdma->ring_num++;
        ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-       ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-       if (ring->size <= 0)
-               return ring->size;
+       ret = xgene_dma_get_ring_size(chan, cfgsize);
+       if (ret <= 0)
+               return ret;
+       ring->size = ret;
 
        /* Allocate memory for DMA ring descriptor */
        ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1471,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
                 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
        /* Set the max outstanding request possible to this channel */
-       chan->max_outstanding = rx_ring->slots;
+       chan->max_outstanding = tx_ring->slots;
 
        return ret;
 }
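
Two fixes land here. First, xgene_dma_get_ring_size() returns a negative errno on failure, but storing it straight into the unsigned ring->size field defeated the `<= 0` test, so the result is now checked in a signed local before being assigned. Second, max_outstanding is bounded by the tx ring, which is what xgene_chan_xfer_request() actually fills now that the hardware occupancy check is gone. A small stand-alone demo of the signedness trap (hypothetical get_ring_size()):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    static int get_ring_size(void) { return -EINVAL; }  /* failure path */

    int main(void)
    {
            uint32_t size = get_ring_size();      /* wraps to a huge value */
            printf("buggy check fires: %d\n", size <= 0);  /* prints 0 */

            int ret = get_ring_size();            /* fixed: stays signed */
            if (ret <= 0)
                    printf("fixed check catches: %d\n", ret);
            else
                    size = (uint32_t)ret;
            return 0;
    }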
@@ -1741,13 +1740,13 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
                dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
-               dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+               dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
        }
 
        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
                dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
                dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
-               dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+               dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
        }
 }
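
The bare 6 becomes DMAENGINE_ALIGN_64_BYTES: the dmaengine core reads the xor_align/pq_align fields as the log2 of the required byte alignment, so the enum from <linux/dmaengine.h> encodes the same 64-byte constraint without the magic number:

    #include <assert.h>

    enum dmaengine_alignment {        /* abridged from <linux/dmaengine.h> */
            DMAENGINE_ALIGN_1_BYTE = 0,
            DMAENGINE_ALIGN_64_BYTES = 6,
    };

    int main(void)
    {
            /* *_align holds an exponent: 1 << 6 == 64-byte alignment */
            assert((1 << DMAENGINE_ALIGN_64_BYTES) == 64);
            return 0;
    }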
 
@@ -1944,16 +1943,18 @@ static int xgene_dma_probe(struct platform_device *pdev)
                return ret;
 
        pdma->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(pdma->clk)) {
+       if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
                dev_err(&pdev->dev, "Failed to get clk\n");
                return PTR_ERR(pdma->clk);
        }
 
        /* Enable clk before accessing registers */
-       ret = clk_prepare_enable(pdma->clk);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
-               return ret;
+       if (!IS_ERR(pdma->clk)) {
+               ret = clk_prepare_enable(pdma->clk);
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+                       return ret;
+               }
        }
 
        /* Remove DMA RAM out of shutdown */
@@ -1998,7 +1999,8 @@ err_request_irq:
 
 err_dma_mask:
 err_clk_enable:
-       clk_disable_unprepare(pdma->clk);
+       if (!IS_ERR(pdma->clk))
+               clk_disable_unprepare(pdma->clk);
 
        return ret;
 }
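
ACPI firmware describes no clock for this device, so devm_clk_get() is allowed to fail there: the error is fatal only when no ACPI companion is present, and every subsequent clk call is guarded by !IS_ERR(pdma->clk), both here and in the remove path below. On kernels that provide it, devm_clk_get_optional() expresses the same intent more compactly, since an absent clock comes back as NULL and the clk API treats a NULL clk as a no-op; a sketch assuming that API is available:

    pdma->clk = devm_clk_get_optional(&pdev->dev, NULL);
    if (IS_ERR(pdma->clk))
            return PTR_ERR(pdma->clk);   /* a real error, not "absent" */

    ret = clk_prepare_enable(pdma->clk); /* NULL clk: returns 0 */
    if (ret)
            return ret;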
@@ -2022,11 +2024,20 @@ static int xgene_dma_remove(struct platform_device *pdev)
                xgene_dma_delete_chan_rings(chan);
        }
 
-       clk_disable_unprepare(pdma->clk);
+       if (!IS_ERR(pdma->clk))
+               clk_disable_unprepare(pdma->clk);
 
        return 0;
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
+       {"APMC0D43", 0},
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
+#endif
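
The new ID table is built only under CONFIG_ACPI; the matching .acpi_match_table assignment below goes through ACPI_PTR(), which <linux/acpi.h> defines to drop the reference in !CONFIG_ACPI builds so the guarded table never becomes a dangling symbol:

    /* From <linux/acpi.h> */
    #ifdef CONFIG_ACPI
    #define ACPI_PTR(_ptr)  (_ptr)
    #else
    #define ACPI_PTR(_ptr)  (NULL)
    #endif

MODULE_DEVICE_TABLE(acpi, ...) additionally exports the "APMC0D43" ID so the module can be autoloaded when the firmware exposes that _HID.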
+
 static const struct of_device_id xgene_dma_of_match_ptr[] = {
        {.compatible = "apm,xgene-storm-dma",},
        {},
@@ -2039,6 +2050,7 @@ static struct platform_driver xgene_dma_driver = {
        .driver = {
                .name = "X-Gene-DMA",
                .of_match_table = xgene_dma_of_match_ptr,
+               .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
        },
 };