From 4f46f8ac80416b0e8fd3aba6a0d842205fb29140 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Mon, 30 Jul 2012 21:28:27 +0200
Subject: dmaengine: shdma: restore partial transfer calculation

The recent shdma driver split has mistakenly removed support for partial
DMA transfer size calculation on forced termination. This patch restores
it.

Signed-off-by: Guennadi Liakhovetski
Acked-by: Vinod Koul
Signed-off-by: Paul Mundt
---
 drivers/dma/sh/shdma-base.c |  9 +++++++++
 drivers/dma/sh/shdma.c      | 12 ++++++++++++
 include/linux/shdma-base.h  |  2 ++
 3 files changed, 23 insertions(+)

diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 27f5c781fd73..f4cd946d259d 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
 	new->direction = direction;
+	new->partial = 0;
 	*len -= copy_size;
 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
 		*src += copy_size;
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&schan->chan_lock, flags);
 		ops->halt_channel(schan);
+
+		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+			/* Record partial transfer */
+			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+						struct shdma_desc, node);
+			desc->partial = ops->get_partial(schan, desc);
+		}
+
 		spin_unlock_irqrestore(&schan->chan_lock, flags);

 		shdma_chan_ld_cleanup(schan, true);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 027c9be97654..f41bcc5267fd 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 	return true;
 }

+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+						    shdma_chan);
+	struct sh_dmae_desc *sh_desc = container_of(sdesc,
+					struct sh_dmae_desc, shdma_desc);
+	return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+		sh_chan->xmit_shift;
+}
+
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 	.start_xfer = sh_dmae_start_xfer,
 	.embedded_desc = sh_dmae_embedded_desc,
 	.chan_irq = sh_dmae_chan_irq,
+	.get_partial = sh_dmae_get_partial,
 };

 static int __devinit sh_dmae_probe(struct platform_device *pdev)
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 93f9821554b6..a3728bf66f0e 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -50,6 +50,7 @@ struct shdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
 	enum dma_transfer_direction direction;
+	size_t partial;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
 	bool (*chan_irq)(struct shdma_chan *, int);
+	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 };

 struct shdma_dev {
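
For context, a minimal sketch of how a dmaengine client could consume the restored field after forcing termination. The helper name and the assumption that the client still holds the dma_async_tx_descriptor returned by its prep call are illustrative, not part of this patch; the container_of() access to struct shdma_desc and the DMA_TERMINATE_ALL control path are the interfaces the patch touches (the sh-sci serial driver used the same pattern for incomplete RX transfers).

/* Illustrative client-side sketch, not part of this patch. */
#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/*
 * Abort a running transfer and return the number of bytes the
 * controller had already moved. "tx" is assumed to be the descriptor
 * the client got back from dmaengine_prep_slave_sg() and to still be
 * queued on the channel when this is called.
 */
static size_t example_abort_and_get_count(struct dma_chan *chan,
					  struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *sdesc = container_of(tx, struct shdma_desc,
						async_tx);

	/*
	 * shdma_control(DMA_TERMINATE_ALL) halts the channel and, with
	 * this patch, stores ops->get_partial() into ->partial for the
	 * descriptor at the head of ld_queue before cleanup runs.
	 */
	dmaengine_terminate_all(chan);

	/* Bytes actually transferred before the channel was halted */
	return sdesc->partial;
}

On the sh_dmae side, get_partial() derives the byte count from the transfer count register: TCR counts transfer units still outstanding, so the programmed count (sh_desc->hw.tcr) minus the current readback, shifted left by xmit_shift (log2 of the transfer width in bytes), yields the bytes already transferred.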