ENGR00290664 PXP: allocate DMA TX descriptors on demand instead of in PXP initialization
author     Fancy Fang <B47543@freescale.com>
           Wed, 4 Dec 2013 07:32:20 +0000 (15:32 +0800)
committer  Lothar Waßmann <LW@KARO-electronics.de>
           Wed, 20 Aug 2014 08:06:48 +0000 (10:06 +0200)
In the previous PXP driver, the number of tx descriptors allocated
for each channel was a fixed 16, and they could only be allocated
during PXP initialization. But since the driver allows users to
queue more than one PXP task per channel before issuing the pending
tasks, that fixed number of descriptors may not be enough in some
cases.

Signed-off-by: Fancy Fang <B47543@freescale.com>
drivers/dma/pxp/pxp_dma_v2.c
include/linux/pxp_dma.h
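
For orientation, the sketch below condenses the allocation scheme this patch
introduces: a slab cache is created once at probe time, each tx descriptor is
allocated on demand while a scatterlist entry is being prepared, and completed
descriptors (plus their chained children) are returned straight to the cache
from the IRQ handler. It is an illustrative summary of the hunks that follow,
not the verbatim driver code; pxpdma_desc_free() is a hypothetical helper name
(the patch itself frees the descriptors inline in pxp_irq()), while
struct pxp_tx_desc, struct pxp_channel and pxp_tx_submit() are assumed to be
the driver definitions visible in the diff.

    /*
     * Illustrative sketch only -- not the verbatim driver code.
     */
    #include <linux/slab.h>
    #include <linux/list.h>
    #include <linux/dmaengine.h>

    static struct kmem_cache *tx_desc_cache;

    /* Created once at probe time, e.g. from pxp_probe(). */
    static int pxp_tx_desc_cache_init(void)
    {
    	tx_desc_cache = kmem_cache_create("tx_desc",
    					  sizeof(struct pxp_tx_desc),
    					  0, SLAB_HWCACHE_ALIGN, NULL);
    	return tx_desc_cache ? 0 : -ENOMEM;
    }

    /*
     * Allocate one tx descriptor on demand instead of taking it from a
     * preallocated free list. kmem_cache_zalloc() returns zeroed memory,
     * so the explicit memset() of the old pxp_desc_alloc() is not needed.
     * GFP_KERNEL may sleep; an atomic caller would need GFP_ATOMIC instead.
     */
    static struct pxp_tx_desc *pxpdma_desc_alloc(struct pxp_channel *pxp_chan)
    {
    	struct pxp_tx_desc *desc;

    	desc = kmem_cache_zalloc(tx_desc_cache, GFP_KERNEL);
    	if (!desc)
    		return NULL;

    	INIT_LIST_HEAD(&desc->list);
    	INIT_LIST_HEAD(&desc->tx_list);
    	/* dmaengine helper the driver already uses for its descriptors */
    	dma_async_tx_descriptor_init(&desc->txd, &pxp_chan->dma_chan);
    	desc->txd.tx_submit = pxp_tx_submit;

    	return desc;
    }

    /*
     * On completion, the descriptor and its chained children go straight
     * back to the slab cache instead of being spliced onto a per-channel
     * free list.
     */
    static void pxpdma_desc_free(struct pxp_tx_desc *desc)
    {
    	struct pxp_tx_desc *child, *tmp;

    	list_for_each_entry_safe(child, tmp, &desc->tx_list, list) {
    		list_del_init(&child->list);
    		kmem_cache_free(tx_desc_cache, child);
    	}
    	list_del_init(&desc->list);
    	kmem_cache_free(tx_desc_cache, desc);
    }

The cache itself is torn down with kmem_cache_destroy(tx_desc_cache) at driver
removal, matching the pxp_remove() hunk below.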

index 279c033e3ea2ebb4d9d4f2d49328dafcf8350234..28fe2046858bcf9200ffbc272157814592b6536e 100644 (file)
@@ -47,6 +47,7 @@
 static LIST_HEAD(head);
 static int timeout_in_ms = 600;
 static unsigned int block_size;
+static struct kmem_cache *tx_desc_cache;
 
 struct pxp_dma {
        struct dma_device dma;
@@ -1184,36 +1185,6 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
        return cookie;
 }
 
-/* Called with pxp_chan->chan_mutex held */
-static int pxp_desc_alloc(struct pxp_channel *pxp_chan, int n)
-{
-       struct pxp_tx_desc *desc = vmalloc(n * sizeof(struct pxp_tx_desc));
-
-       if (!desc)
-               return -ENOMEM;
-
-       pxp_chan->n_tx_desc = n;
-       pxp_chan->desc = desc;
-       INIT_LIST_HEAD(&pxp_chan->active_list);
-       INIT_LIST_HEAD(&pxp_chan->queue);
-       INIT_LIST_HEAD(&pxp_chan->free_list);
-
-       while (n--) {
-               struct dma_async_tx_descriptor *txd = &desc->txd;
-
-               memset(txd, 0, sizeof(*txd));
-               INIT_LIST_HEAD(&desc->tx_list);
-               dma_async_tx_descriptor_init(txd, &pxp_chan->dma_chan);
-               txd->tx_submit = pxp_tx_submit;
-
-               list_add(&desc->list, &pxp_chan->free_list);
-
-               desc++;
-       }
-
-       return 0;
-}
-
 /**
  * pxp_init_channel() - initialize a PXP channel.
  * @pxp_dma:   PXP DMA context.
@@ -1223,9 +1194,7 @@ static int pxp_desc_alloc(struct pxp_channel *pxp_chan, int n)
 static int pxp_init_channel(struct pxp_dma *pxp_dma,
                            struct pxp_channel *pxp_chan)
 {
-       unsigned long flags;
-       struct pxps *pxp = to_pxp(pxp_dma);
-       int ret = 0, n_desc = 0;
+       int ret = 0;
 
        /*
         * We are using _virtual_ channel here.
@@ -1234,34 +1203,8 @@ static int pxp_init_channel(struct pxp_dma *pxp_dma,
         * (i.e., pxp_tx_desc) here.
         */
 
-       spin_lock_irqsave(&pxp->lock, flags);
-
-       /* max desc nr: S0+OL+OUT = 1+8+1 */
-       n_desc = 16;
-
-       spin_unlock_irqrestore(&pxp->lock, flags);
-
-       if (n_desc && !pxp_chan->desc)
-               ret = pxp_desc_alloc(pxp_chan, n_desc);
-
-       return ret;
-}
-
-/**
- * pxp_uninit_channel() - uninitialize a PXP channel.
- * @pxp_dma:   PXP DMA context.
- * @pchan:  pointer to the channel object.
- * @return      0 on success or negative error code on failure.
- */
-static int pxp_uninit_channel(struct pxp_dma *pxp_dma,
-                             struct pxp_channel *pxp_chan)
-{
-       int ret = 0;
-
-       if (pxp_chan->desc)
-               vfree(pxp_chan->desc);
-
-       pxp_chan->desc = NULL;
+       INIT_LIST_HEAD(&pxp_chan->active_list);
+       INIT_LIST_HEAD(&pxp_chan->queue);
 
        return ret;
 }
@@ -1271,6 +1214,7 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
        struct pxps *pxp = dev_id;
        struct pxp_channel *pxp_chan;
        struct pxp_tx_desc *desc;
+       struct pxp_tx_desc *child, *_child;
        dma_async_tx_callback callback;
        void *callback_param;
        unsigned long flags;
@@ -1317,8 +1261,12 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
 
        pxp_chan->status = PXP_CHANNEL_INITIALIZED;
 
-       list_splice_init(&desc->tx_list, &pxp_chan->free_list);
-       list_move(&desc->list, &pxp_chan->free_list);
+       list_for_each_entry_safe(child, _child, &desc->tx_list, list) {
+               list_del_init(&child->list);
+               kmem_cache_free(tx_desc_cache, (void *)child);
+       }
+       list_del_init(&desc->list);
+       kmem_cache_free(tx_desc_cache, (void *)desc);
 
        if (list_empty(&pxp_chan->active_list))
                list_del_init(&pxp_chan->list);
@@ -1332,35 +1280,23 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/* called with pxp_chan->lock held */
-static struct pxp_tx_desc *pxpdma_desc_get(struct pxp_channel *pxp_chan)
+/* allocate/free dma tx descriptor dynamically*/
+static struct pxp_tx_desc *pxpdma_desc_alloc(struct pxp_channel *pxp_chan)
 {
-       struct pxp_tx_desc *desc, *_desc;
-       struct pxp_tx_desc *ret = NULL;
+       struct pxp_tx_desc *desc = NULL;
+       struct dma_async_tx_descriptor *txd = NULL;
 
-       list_for_each_entry_safe(desc, _desc, &pxp_chan->free_list, list) {
-               list_del_init(&desc->list);
-               ret = desc;
-               break;
-       }
+       desc = kmem_cache_alloc(tx_desc_cache, GFP_KERNEL | __GFP_ZERO);
+       if (desc == NULL)
+               return NULL;
 
-       return ret;
-}
+       INIT_LIST_HEAD(&desc->list);
+       INIT_LIST_HEAD(&desc->tx_list);
+       txd = &desc->txd;
+       dma_async_tx_descriptor_init(txd, &pxp_chan->dma_chan);
+       txd->tx_submit = pxp_tx_submit;
 
-/* called with pxp_chan->lock held */
-static void pxpdma_desc_put(struct pxp_channel *pxp_chan,
-                           struct pxp_tx_desc *desc)
-{
-       if (desc) {
-               struct device *dev = &pxp_chan->dma_chan.dev->device;
-               struct pxp_tx_desc *child;
-
-               list_for_each_entry(child, &desc->tx_list, list)
-                   dev_info(dev, "moving child desc %p to freelist\n", child);
-               list_splice_init(&desc->tx_list, &pxp_chan->free_list);
-               dev_info(dev, "moving desc %p to freelist\n", desc);
-               list_add(&desc->list, &pxp_chan->free_list);
-       }
+       return desc;
 }
 
 /* Allocate and initialise a transfer descriptor. */
@@ -1395,11 +1331,10 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
 
        spin_lock_irqsave(&pxp_chan->lock, flags);
        for_each_sg(sgl, sg, sg_len, i) {
-               desc = pxpdma_desc_get(pxp_chan);
+               desc = pxpdma_desc_alloc(pxp_chan);
                if (!desc) {
-                       pxpdma_desc_put(pxp_chan, first);
-                       dev_err(chan->device->dev, "Can't get DMA desc.\n");
                        spin_unlock_irqrestore(&pxp_chan->lock, flags);
+                       dev_err(chan->device->dev, "no enough memory to allocate tx descriptor\n");
                        return NULL;
                }
 
@@ -1478,14 +1413,6 @@ static void pxp_issue_pending(struct dma_chan *chan)
 static void __pxp_terminate_all(struct dma_chan *chan)
 {
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
-       unsigned long flags;
-
-       /* pchan->queue is modified in ISR, have to spinlock */
-       spin_lock_irqsave(&pxp_chan->lock, flags);
-       list_splice_init(&pxp_chan->queue, &pxp_chan->free_list);
-       list_splice_init(&pxp_chan->active_list, &pxp_chan->free_list);
-
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
 
        pxp_chan->status = PXP_CHANNEL_INITIALIZED;
 }
@@ -1538,7 +1465,6 @@ err_chan:
 static void pxp_free_chan_resources(struct dma_chan *chan)
 {
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
-       struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
 
        mutex_lock(&pxp_chan->chan_mutex);
 
@@ -1546,8 +1472,6 @@ static void pxp_free_chan_resources(struct dma_chan *chan)
 
        pxp_chan->status = PXP_CHANNEL_FREE;
 
-       pxp_uninit_channel(pxp_dma, pxp_chan);
-
        mutex_unlock(&pxp_chan->chan_mutex);
 }
 
@@ -1894,6 +1818,12 @@ static int pxp_probe(struct platform_device *pdev)
                goto exit;
        }
        init_waitqueue_head(&pxp->thread_waitq);
+       tx_desc_cache = kmem_cache_create("tx_desc", sizeof(struct pxp_tx_desc),
+                                         0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!tx_desc_cache) {
+               err = -ENOMEM;
+               goto exit;
+       }
 
        register_pxp_device();
 
@@ -1908,6 +1838,7 @@ static int pxp_remove(struct platform_device *pdev)
        struct pxps *pxp = platform_get_drvdata(pdev);
 
        unregister_pxp_device();
+       kmem_cache_destroy(tx_desc_cache);
        kthread_stop(pxp->dispatch);
        cancel_work_sync(&pxp->work);
        del_timer_sync(&pxp->clk_timer);
index bed6fc2d22fc241e592089ad00fb7f2d7764db57..7cb5436a575e4821d6f0e950d837c009e2ba32c9 100644 (file)
@@ -46,7 +46,6 @@ struct pxp_channel {
        unsigned int n_tx_desc;
        struct pxp_tx_desc *desc;       /* allocated tx-descriptors */
        struct list_head active_list;   /* active tx-descriptors */
-       struct list_head free_list;     /* free tx-descriptors */
        struct list_head queue; /* queued tx-descriptors */
        struct list_head list;  /* track queued channel number */
        spinlock_t lock;        /* protects sg[0,1], queue */