dmaengine: Use sg_dma_len(sg) instead of sg->length
author		Lars-Peter Clausen <lars@metafoo.de>
		Wed, 25 Apr 2012 18:50:52 +0000 (20:50 +0200)
committer	Vinod Koul <vinod.koul@linux.intel.com>
		Fri, 11 May 2012 06:34:38 +0000 (12:04 +0530)
sg->length may or may not contain the length of the DMA region to transfer,
depending on the architecture - sg_dma_len(sg) always will, though. For the
architectures whose drivers are modified by this patch, sg->length probably
does contain the DMA transfer length, but to be consistent and future-proof,
change them to use sg_dma_len.

To quote Russell King:
sg->length is meaningless to something performing DMA.

In cases where sg_dma_len(sg) and sg->length are the same storage, then
there's no problem. But scatterlists _can_ (and on some architectures do)
split them - especially when you have an IOMMU which can allow you to
combine a scatterlist into fewer entries.

So, anything using sg->length for the size of a scatterlist's DMA transfer
_after_ a call to dma_map_sg() is almost certainly buggy.
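
For illustration, a minimal sketch of the pattern described above, as a
driver might follow it; program_hw_descriptor() is a hypothetical helper
standing in for whatever sets up one hardware descriptor:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: programs one hardware descriptor. */
static void program_hw_descriptor(dma_addr_t addr, unsigned int len);

/*
 * Map a scatterlist and walk the *mapped* entries. An IOMMU may have
 * merged entries, so iterate over the count returned by dma_map_sg()
 * and read each size with sg_dma_len(), never sg->length.
 */
static int queue_sg_for_dma(struct device *dev, struct scatterlist *sgl,
			    int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));

	/*
	 * Once the transfer completes, unmap with the *original* nents:
	 * dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	 */
	return 0;
}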

The patch was generated using the following Coccinelle semantic patch:
<smpl>
@@
struct scatterlist *sg;
expression X;
@@
-sg[X].length
+sg_dma_len(&sg[X])
@@
struct scatterlist *sg;
@@
-sg->length
+sg_dma_len(sg)
</smpl>
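
(For reference: a semantic patch like this is normally applied with
Coccinelle's spatch tool, along the lines of
"spatch --sp-file sg_dma_len.cocci --in-place --dir drivers/dma" - the
.cocci file name here is only illustrative.)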

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
drivers/dma/amba-pl08x.c
drivers/dma/coh901318.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/mxs-dma.c
drivers/dma/ste_dma40.c

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 003220a60bcb156b2ee98ecc46a9447493585edf..49ecbbb8932df2ecbc10aa601edd274acc21cf19 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1328,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        int ret, tmp;
 
        dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-                       __func__, sgl->length, plchan->name);
+                       __func__, sg_dma_len(sgl), plchan->name);
 
        txd = pl08x_get_txd(plchan, flags);
        if (!txd) {
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index dc89455f5550c90ffec150d16f3d0e06e5614ee0..c0b650c70bbdf0b7c05a22536f45fd168486d0f2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1040,7 +1040,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        if (!sgl)
                goto out;
-       if (sgl->length == 0)
+       if (sg_dma_len(sgl) == 0)
                goto out;
 
        spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e15296ed17eef8032be17f4b52647d173..fcfeb3cd8d3170aff7c30d6d151a0e7e15ecd968 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
        struct scatterlist *sg = d->sg;
        unsigned long now;
 
-       now = min(d->len, sg->length);
+       now = min(d->len, sg_dma_len(sg));
        if (d->len != IMX_DMA_LENGTH_LOOP)
                d->len -= now;
 
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
        for_each_sg(sgl, sg, sg_len, i) {
-               dma_length += sg->length;
+               dma_length += sg_dma_len(sg);
        }
 
        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               if (sgl->length & 3 || sgl->dma_address & 3)
+               if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               if (sgl->length & 1 || sgl->dma_address & 1)
+               if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                imxdmac->sg_list[i].page_link = 0;
                imxdmac->sg_list[i].offset = 0;
                imxdmac->sg_list[i].dma_address = dma_addr;
-               imxdmac->sg_list[i].length = period_len;
+               sg_dma_len(&imxdmac->sg_list[i]) = period_len;
                dma_addr += period_len;
        }
 
        /* close the loop */
        imxdmac->sg_list[periods].offset = 0;
-       imxdmac->sg_list[periods].length = 0;
+       sg_dma_len(&imxdmac->sg_list[periods]) = 0;
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 5a457777f5c016991fafa91127948bf8130539f7..cd0619a897ff8ce9ceb3ebcb3295a9c1d61c90c7 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -941,7 +941,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
                bd->buffer_addr = sg->dma_address;
 
-               count = sg->length;
+               count = sg_dma_len(sg);
 
                if (count > 0xffff) {
                        dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index d0ef5937fbf64ceb168e790d0a4780bdf063f96c..222e907bfaaa4b50d501f804d27669bd8091476a 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,7 +394,7 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
                        }
                }
                /*Populate CTL_HI values*/
-               ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+               ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
                                                        desc->width,
                                                        midc->dma->block_size);
                /*Populate SAR and DAR values*/
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
                        txd = intel_mid_dma_prep_memcpy(chan,
                                                mids->dma_slave.dst_addr,
                                                mids->dma_slave.src_addr,
-                                               sgl->length,
+                                               sg_dma_len(sgl),
                                                flags);
                        return txd;
                } else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
        pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
                        sg_len, direction, flags);
 
-       txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+       txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
        if (NULL == txd) {
                pr_err("MDMA: Prep memcpy failed\n");
                return NULL;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0d94fcae71ed687641f707e08a8ed8..3db3a48d3f01e23d58d060b326cf65ec783ff335 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -415,9 +415,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
        } else {
                for_each_sg(sgl, sg, sg_len, i) {
-                       if (sg->length > MAX_XFER_BYTES) {
+                       if (sg_dma_len(sg) > MAX_XFER_BYTES) {
                                dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-                                               sg->length, MAX_XFER_BYTES);
+                                               sg_dma_len(sg), MAX_XFER_BYTES);
                                goto err_out;
                        }
 
@@ -425,7 +425,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 
                        ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
                        ccw->bufaddr = sg->dma_address;
-                       ccw->xfer_bytes = sg->length;
+                       ccw->xfer_bytes = sg_dma_len(sg);
 
                        ccw->bits = 0;
                        ccw->bits |= CCW_CHAIN;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f3d4de118d7937f40fadc202748a93..000d309602b2d76fd825d895be5ab5b91837d19f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
        }
 
        sg[periods].offset = 0;
-       sg[periods].length = 0;
+       sg_dma_len(&sg[periods]) = 0;
        sg[periods].page_link =
                ((unsigned long)sg | 0x01) & ~0x02;
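
The sg_dma_len(...) = 0 assignments above may look odd for what resembles
a function call. They are valid because sg_dma_len() is a macro that
expands directly to a struct scatterlist field and can therefore be used
as an lvalue - a sketch of its definition as found in
include/linux/scatterlist.h around the time of this commit:

#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif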