Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (28 commits)
  ioat: cleanup ->timer_fn() and ->cleanup_fn() prototypes
  ioat3: interrupt coalescing
  ioat: close potential BUG_ON race in the descriptor cleanup path
  ioat2: kill pending flag
  ioat3: use ioat2_quiesce()
  ioat3: cleanup, don't enable DCA completion writes
  DMAENGINE: COH 901 318 lli sg offset fix
  DMAENGINE: COH 901 318 configure channel direction
  DMAENGINE: COH 901 318 remove irq counting
  DMAENGINE: COH 901 318 descriptor pool refactoring
  DMAENGINE: COH 901 318 cleanups
  dma: Add MPC512x DMA driver
  Debugging options for the DMA engine subsystem
  iop-adma: redundant/wrong tests in iop_*_count()?
  dmatest: fix handling of an even number of xor_sources
  dmatest: correct raid6 PQ test
  fsldma: Fix cookie issues
  dma: cases IPU_PIX_FMT_BGRA32, BGR32 and ABGR32 are the same in ipu_ch_param_set_size()
  dma: make Open Firmware device id constant
  ...
Linus Torvalds, 2010-03-04 08:20:14 -08:00
commit 9bb676966a
20 changed files with 1738 additions and 800 deletions


@@ -44,21 +44,29 @@ Example:
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <0>;
 		reg = <0 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@80 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <1>;
 		reg = <0x80 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@100 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <2>;
 		reg = <0x100 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@180 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <3>;
 		reg = <0x180 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 };

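For context (not part of the patch): a channel driver typically picks these properties up at probe time with the standard OF helpers. A minimal sketch; the function name and error handling are illustrative, not taken from fsldma:

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

/* Hypothetical probe fragment for one dma-channel node above. */
static int example_dma_channel_probe(struct device_node *np)
{
	void __iomem *regs;
	unsigned int irq;

	regs = of_iomap(np, 0);			/* maps reg = <0x80 0x80> etc. */
	if (!regs)
		return -ENOMEM;

	irq = irq_of_parse_and_map(np, 0);	/* interrupts = <71 8> via &ipic */
	if (!irq) {
		iounmap(regs);
		return -EINVAL;
	}

	/* ... request_irq() and register the channel with dmaengine ... */
	return 0;
}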

@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
 
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 
 	return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
 
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 
 	return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
 
-		if (len) {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = len;
-		}
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->byte_count = len;
 	}
 }

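What these hunks remove, modeled standalone: the byte-count loop leaves 0 < len <= MAX for any transfer that reaches it, so the trailing "if (len)" could never be false in practice. An illustrative model with a made-up MAX constant, not the kernel header itself:

#include <stddef.h>

#define MAX (1U << 24)	/* stands in for IOP_ADMA_XOR_MAX_BYTE_COUNT */

static int slot_count(size_t len, int slots_per_op)
{
	int slot_cnt = slots_per_op;	/* first slot group */

	if (len <= MAX)
		return slot_cnt;

	len -= MAX;
	while (len > MAX) {		/* one extra group per full chunk */
		len -= MAX;
		slot_cnt += slots_per_op;
	}
	/* here 0 < len <= MAX for any input that got this far, so the
	 * old "if (len)" guard was redundant; the patch drops it */
	slot_cnt += slots_per_op;
	return slot_cnt;
}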

@@ -53,7 +53,7 @@ struct coh901318_params {
  * struct coh_dma_channel - dma channel base
  * @name: ascii name of dma channel
  * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptortors
+ * @desc_nbr_max: number of preallocated descriptors
  * @priority_high: prio of channel, 0 low otherwise high.
  * @param: configuration parameters
  * @dev_addr: physical address of periphal connected to channel


@@ -13,6 +13,22 @@ menuconfig DMADEVICES
 	  DMA Device drivers supported by the configured arch, it may
 	  be empty in some cases.
 
+config DMADEVICES_DEBUG
+	bool "DMA Engine debugging"
+	depends on DMADEVICES != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here.  This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+	bool "DMA Engine verbose debugging"
+	depends on DMADEVICES_DEBUG != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here.  This enables deeper (more verbose) debugging of
+	  the DMA engine core and drivers.
+
 if DMADEVICES
 
 comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
 	  Elo Plus is the DMA controller on 85xx and 86xx parts.
 
+config MPC512X_DMA
+	tristate "Freescale MPC512x built-in DMA engine support"
+	depends on PPC_MPC512x
+	select DMA_ENGINE
+	---help---
+	  Enable support for the Freescale MPC512x built-in DMA engine.
+
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION


@@ -1,9 +1,17 @@
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+	EXTRA_CFLAGS	+= -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+	EXTRA_CFLAGS	+= -DVERBOSE_DEBUG
+endif
+
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o

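These switches work because, in kernels of this era, dev_dbg()/pr_debug() compile to no-ops unless DEBUG is defined for the translation unit, and dev_vdbg() additionally requires VERBOSE_DEBUG; the Makefile fragment above injects exactly those defines. A minimal sketch of what gets turned on:

#include <linux/device.h>

/* Sketch only: the debug statements the new options enable. */
static void example_debug_prints(struct device *dev, int chan_id)
{
	/* emitted only when DEBUG is defined (CONFIG_DMADEVICES_DEBUG=y) */
	dev_dbg(dev, "channel %d: issue pending\n", chan_id);

	/* emitted only when VERBOSE_DEBUG is also defined
	 * (CONFIG_DMADEVICES_VDEBUG=y) */
	dev_vdbg(dev, "channel %d: full descriptor dump\n", chan_id);
}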

@@ -39,7 +39,6 @@ struct coh901318_desc {
 	unsigned int sg_len;
 	struct coh901318_lli *data;
 	enum dma_data_direction dir;
-	int pending_irqs;
 	unsigned long flags;
 };
@@ -72,7 +71,6 @@ struct coh901318_chan {
 	unsigned long nbr_active_done;
 	unsigned long busy;
-	int pending_irqs;
 
 	struct coh901318_base *base;
 };
@@ -80,18 +78,16 @@ struct coh901318_chan {
 static void coh901318_list_print(struct coh901318_chan *cohc,
 				 struct coh901318_lli *lli)
 {
-	struct coh901318_lli *l;
-	dma_addr_t addr = virt_to_phys(lli);
+	struct coh901318_lli *l = lli;
 	int i = 0;
 
-	while (addr) {
-		l = phys_to_virt(addr);
+	while (l) {
 		dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
-			 ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+			 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
 			 i, l, l->control, l->src_addr, l->dst_addr,
-			 l->link_addr, phys_to_virt(l->link_addr));
+			 l->link_addr, l->virt_link_addr);
 		i++;
-		addr = l->link_addr;
+		l = l->virt_link_addr;
 	}
 }
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 		goto err_kmalloc;
 	tmp = dev_buf;
 
-	tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+	tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
 
 	for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
 		if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
 		 * TODO: alloc a pile of descs instead of just one,
 		 * avoid many small allocations.
 		 */
-		desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+		desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
 		if (desc == NULL)
 			goto out;
 		INIT_LIST_HEAD(&desc->node);
+		dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
 	} else {
 		/* Reuse an old desc. */
 		desc = list_first_entry(&cohc->free,
 					struct coh901318_desc,
 					node);
 		list_del(&desc->node);
+		/* Initialize it a bit so it's not insane */
+		desc->sg = NULL;
+		desc->sg_len = 0;
+		desc->desc.callback = NULL;
+		desc->desc.callback_param = NULL;
 	}
 
 out:
@@ -364,10 +366,6 @@ static void
 coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
 {
 	list_add_tail(&desc->node, &cohc->active);
-
-	BUG_ON(cohc->pending_irqs != 0);
-
-	cohc->pending_irqs = desc->pending_irqs;
 }
 
 static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
 	return cohd_que;
 }
 
+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
 static void dma_tasklet(unsigned long data)
 {
 	struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
 	dma_async_tx_callback callback;
 	void *callback_param;
 
+	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+		 " nbr_active_done %ld\n", __func__,
+		 cohc->id, cohc->nbr_active_done);
+
 	spin_lock_irqsave(&cohc->lock, flags);
 
-	/* get first active entry from list */
+	/* get first active descriptor entry from list */
 	cohd_fin = coh901318_first_active_get(cohc);
 
-	BUG_ON(cohd_fin->pending_irqs == 0);
-
 	if (cohd_fin == NULL)
 		goto err;
 
-	cohd_fin->pending_irqs--;
-	cohc->completed = cohd_fin->desc.cookie;
-
-	if (cohc->nbr_active_done == 0)
-		return;
-
-	if (!cohd_fin->pending_irqs) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
-	}
-
-	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
-		 " nbr_active_done %ld\n", __func__,
-		 cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
-
-	/* callback to client */
+	/* locate callback to client */
 	callback = cohd_fin->desc.callback;
 	callback_param = cohd_fin->desc.callback_param;
 
-	if (!cohd_fin->pending_irqs) {
-		coh901318_desc_remove(cohd_fin);
+	/* sign this job as completed on the channel */
+	cohc->completed = cohd_fin->desc.cookie;
 
-		/* return desc to free-list */
-		coh901318_desc_free(cohc, cohd_fin);
-	}
+	/* release the lli allocation and remove the descriptor */
+	coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
 
-	if (cohc->nbr_active_done)
-		cohc->nbr_active_done--;
+	/* return desc to free-list */
+	coh901318_desc_remove(cohd_fin);
+	coh901318_desc_free(cohc, cohd_fin);
 
+	spin_unlock_irqrestore(&cohc->lock, flags);
+
+	/* Call the callback when we're done */
+	if (callback)
+		callback(callback_param);
+
+	spin_lock_irqsave(&cohc->lock, flags);
+
+	/*
+	 * If another interrupt fired while the tasklet was scheduling,
+	 * we don't get called twice, so we have this number of active
+	 * counter that keep track of the number of IRQs expected to
+	 * be handled for this channel. If there happen to be more than
+	 * one IRQ to be ack:ed, we simply schedule this tasklet again.
+	 */
+	cohc->nbr_active_done--;
 	if (cohc->nbr_active_done) {
+		dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+			"came in while we were scheduling this tasklet\n");
 		if (cohc_chan_conf(cohc)->priority_high)
 			tasklet_hi_schedule(&cohc->tasklet);
 		else
 			tasklet_schedule(&cohc->tasklet);
 	}
-	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	if (callback)
-		callback(callback_param);
+	spin_unlock_irqrestore(&cohc->lock, flags);
 
 	return;
@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 	if (!cohc->allocated)
 		return;
 
-	BUG_ON(cohc->pending_irqs == 0);
+	spin_lock(&cohc->lock);
 
-	cohc->pending_irqs--;
 	cohc->nbr_active_done++;
 
-	if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+	if (coh901318_queue_start(cohc) == NULL)
 		cohc->busy = 0;
 
 	BUG_ON(list_empty(&cohc->active));
 
+	spin_unlock(&cohc->lock);
+
 	if (cohc_chan_conf(cohc)->priority_high)
 		tasklet_hi_schedule(&cohc->tasklet);
 	else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	int lli_len;
 	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+	int ret;
 
 	spin_lock_irqsave(&cohc->lock, flg);
@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	if (data == NULL)
 		goto err;
 
-	cohd = coh901318_desc_get(cohc);
-	cohd->sg = NULL;
-	cohd->sg_len = 0;
-	cohd->data = data;
-
-	cohd->pending_irqs =
-		coh901318_lli_fill_memcpy(
-			&cohc->base->pool, data, src, size, dest,
-			cohc_chan_param(cohc)->ctrl_lli_chained,
-			ctrl_last);
-	cohd->flags = flags;
+	ret = coh901318_lli_fill_memcpy(
+		&cohc->base->pool, data, src, size, dest,
+		cohc_chan_param(cohc)->ctrl_lli_chained,
+		ctrl_last);
+	if (ret)
+		goto err;
 
 	COH_DBG(coh901318_list_print(cohc, data));
 
-	dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+	/* Pick a descriptor to handle this transfer */
+	cohd = coh901318_desc_get(cohc);
+	cohd->data = data;
+	cohd->flags = flags;
 	cohd->desc.tx_submit = coh901318_tx_submit;
 
 	spin_unlock_irqrestore(&cohc->lock, flg);
@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	struct coh901318_lli *data;
 	struct coh901318_desc *cohd;
+	const struct coh901318_params *params;
 	struct scatterlist *sg;
 	int len = 0;
 	int size;
@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
 	u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
 	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+	u32 config;
 	unsigned long flg;
+	int ret;
 
 	if (!sgl)
 		goto out;
@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* Trigger interrupt after last lli */
 	ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
 
-	cohd = coh901318_desc_get(cohc);
-	cohd->sg = NULL;
-	cohd->sg_len = 0;
-	cohd->dir = direction;
+	params = cohc_chan_param(cohc);
+	config = params->config;
 
 	if (direction == DMA_TO_DEVICE) {
 		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
 			COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
 
+		config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
 		ctrl_chained |= tx_flags;
 		ctrl_last |= tx_flags;
 		ctrl |= tx_flags;
@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
 			COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 
+		config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
 		ctrl_chained |= rx_flags;
 		ctrl_last |= rx_flags;
 		ctrl |= rx_flags;
 	} else
 		goto err_direction;
 
-	dma_async_tx_descriptor_init(&cohd->desc, chan);
-
-	cohd->desc.tx_submit = coh901318_tx_submit;
+	coh901318_set_conf(cohc, config);
 
 	/* The dma only supports transmitting packages up to
 	 * MAX_DMA_PACKET_SIZE. Calculate to total number of
@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		len += factor;
 	}
 
+	pr_debug("Allocate %d lli:s for this transfer\n", len);
+
 	data = coh901318_lli_alloc(&cohc->base->pool, len);
 
 	if (data == NULL)
 		goto err_dma_alloc;
 
 	/* initiate allocated data list */
-	cohd->pending_irqs =
-		coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
-				      cohc_dev_addr(cohc),
-				      ctrl_chained,
-				      ctrl,
-				      ctrl_last,
-				      direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
-	cohd->data = data;
-
-	cohd->flags = flags;
+	ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+				    cohc_dev_addr(cohc),
+				    ctrl_chained,
+				    ctrl,
+				    ctrl_last,
+				    direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+	if (ret)
+		goto err_lli_fill;
 
 	COH_DBG(coh901318_list_print(cohc, data));
 
+	/* Pick a descriptor to handle this transfer */
+	cohd = coh901318_desc_get(cohc);
+	cohd->dir = direction;
+	cohd->flags = flags;
+	cohd->desc.tx_submit = coh901318_tx_submit;
+	cohd->data = data;
+
 	spin_unlock_irqrestore(&cohc->lock, flg);
 
 	return &cohd->desc;
+err_lli_fill:
 err_dma_alloc:
 err_direction:
-	coh901318_desc_remove(cohd);
-	coh901318_desc_free(cohc, cohd);
 	spin_unlock_irqrestore(&cohc->lock, flg);
 out:
 	return NULL;
@@ -1092,9 +1101,8 @@ coh901318_terminate_all(chan)
 		/* release the lli allocation*/
 		coh901318_lli_free(&cohc->base->pool, &cohd->data);
 
+		coh901318_desc_remove(cohd);
 		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
 		coh901318_desc_free(cohc, cohd);
 	}
@@ -1102,16 +1110,14 @@ coh901318_terminate_all(chan)
 		/* release the lli allocation*/
 		coh901318_lli_free(&cohc->base->pool, &cohd->data);
 
+		coh901318_desc_remove(cohd);
 		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
 		coh901318_desc_free(cohc, cohd);
 	}
 
 	cohc->nbr_active_done = 0;
 	cohc->busy = 0;
-	cohc->pending_irqs = 0;
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
 			spin_lock_init(&cohc->lock);
 
-			cohc->pending_irqs = 0;
 			cohc->nbr_active_done = 0;
 			cohc->busy = 0;
 			INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
 	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
 	base->dma_memcpy.dev = &pdev->dev;
+	/*
+	 * This controller can only access address at even 32bit boundaries,
+	 * i.e. 2^2
+	 */
+	base->dma_memcpy.copy_align = 2;
 	err = dma_async_device_register(&base->dma_memcpy);
 	if (err)
 		goto err_register_memcpy;
 
-	dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+	dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
 		(u32) base->virtbase);
 
 	return err;

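The rewritten dma_tasklet() drops per-descriptor IRQ counting in favour of nbr_active_done, as the new comment in the hunk explains. A generic sketch of that pattern, outside the driver (names are illustrative, not the driver's):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* The IRQ handler counts completions; the tasklet consumes one per
 * run and reschedules itself while the count stays non-zero. */
struct example_chan {
	spinlock_t lock;
	unsigned long nbr_active_done;	/* IRQs seen, not yet handled */
	struct tasklet_struct tasklet;
};

static void example_irq_part(struct example_chan *c)
{
	spin_lock(&c->lock);
	c->nbr_active_done++;		/* one finished job per IRQ */
	spin_unlock(&c->lock);
	tasklet_schedule(&c->tasklet);	/* no-op if already scheduled */
}

static void example_tasklet(unsigned long data)
{
	struct example_chan *c = (struct example_chan *) data;
	unsigned long flags;

	/* ... complete one job, run its callback without the lock ... */

	spin_lock_irqsave(&c->lock, flags);
	c->nbr_active_done--;
	if (c->nbr_active_done)		/* more IRQs arrived meanwhile */
		tasklet_schedule(&c->tasklet);
	spin_unlock_irqrestore(&c->lock, flags);
}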

@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 	lli = head;
 	lli->phy_this = phy;
+	lli->link_addr = 0x00000000;
+	lli->virt_link_addr = 0x00000000U;
 
 	for (i = 1; i < len; i++) {
 		lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
 		lli->phy_this = phy;
+		lli->link_addr = 0x00000000;
+		lli->virt_link_addr = 0x00000000U;
 
 		lli_prev->link_addr = phy;
 		lli_prev->virt_link_addr = lli;
 	}
 
-	lli->link_addr = 0x00000000U;
-
 	spin_unlock(&pool->lock);
 
 	return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
 	lli->src_addr = src;
 	lli->dst_addr = dst;
 
-	/* One irq per single transfer */
-	return 1;
+	return 0;
 }
 
 int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 	lli->src_addr = src;
 	lli->dst_addr = dst;
 
-	/* One irq per single transfer */
-	return 1;
+	return 0;
 }
 
 int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 	u32 ctrl_sg;
 	dma_addr_t src = 0;
 	dma_addr_t dst = 0;
-	int nbr_of_irq = 0;
 	u32 bytes_to_transfer;
 	u32 elem_size;
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		ctrl_sg = ctrl ? ctrl : ctrl_last;
 
-		if ((ctrl_sg & ctrl_irq_mask))
-			nbr_of_irq++;
-
 		if (dir == DMA_TO_DEVICE)
 			/* increment source address */
-			src = sg_dma_address(sg);
+			src = sg_phys(sg);
 		else
 			/* increment destination address */
-			dst = sg_dma_address(sg);
+			dst = sg_phys(sg);
 
 		bytes_to_transfer = sg_dma_len(sg);
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 	}
 	spin_unlock(&pool->lock);
 
-	/* There can be many IRQs per sg transfer */
-	return nbr_of_irq;
+	return 0;
 err:
 	spin_unlock(&pool->lock);
 	return -EINVAL;

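The list_print() rewrite in the previous file shows the point of the field initialized here: each lli keeps a CPU-usable pointer to its successor (virt_link_addr) next to the physical link_addr the hardware follows, so chains can be walked without phys_to_virt(), which is not valid for dma_pool memory. A trivial walk, assuming the coh901318_lli layout from these hunks:

/* Count the links in a chain by following the virtual pointers;
 * the hardware instead follows lli->link_addr (a physical address). */
static int count_lli_chain(struct coh901318_lli *lli)
{
	int n = 0;

	while (lli) {
		n++;
		lli = lli->virt_link_addr;
	}
	return n;
}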

@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
 	dma_cookie_t cookie;
 	enum dma_status status;
 	enum dma_ctrl_flags flags;
-	u8 pq_coefs[pq_sources];
+	u8 pq_coefs[pq_sources + 1];
 	int ret;
 	int src_cnt;
 	int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
 	} else if (thread->type == DMA_PQ) {
 		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
 		dst_cnt = 2;
-		for (i = 0; i < pq_sources; i++)
+		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
 	} else
 		goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dma_dsts[0] + dst_off,
-						      dma_srcs, xor_sources,
+						      dma_srcs, src_cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
 			dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
 			for (i = 0; i < dst_cnt; i++)
 				dma_pq[i] = dma_dsts[i] + dst_off;
 			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
-						     pq_sources, pq_coefs,
+						     src_cnt, pq_coefs,
 						     len, flags);
 		}

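Why the pq_coefs sizing fix matters, worked through: src_cnt is forced odd with "pq_sources | 1", so an even pq_sources yields src_cnt == pq_sources + 1, and the old u8 pq_coefs[pq_sources] array was one element short for the initialization loop. Illustrative:

#include <stdio.h>

/* Worked example of the off-by-one fixed above. */
int main(void)
{
	unsigned int pq_sources = 4;	/* an even value of the module param */
	int src_cnt = pq_sources | 1;	/* 4 | 1 == 5: forced odd */

	/* old: u8 pq_coefs[pq_sources] has 4 slots, but the loop now
	 * writes src_cnt == 5 coefficients, so pq_coefs[4] overflowed;
	 * declaring pq_coefs[pq_sources + 1] keeps the last write legal */
	printf("pq_sources=%u, src_cnt=%d\n", pq_sources, src_cnt);
	return 0;
}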
File diff suppressed because it is too large.


@@ -92,11 +92,9 @@ struct fsl_desc_sw {
 	struct list_head node;
 	struct list_head tx_list;
 	struct dma_async_tx_descriptor async_tx;
-	struct list_head *ld;
-	void *priv;
 } __attribute__((aligned(32)));
 
-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
 	u32 mr;		/* 0x00 - Mode Register */
 	u32 sr;		/* 0x04 - Status Register */
 	u64 cdar;	/* 0x08 - Current descriptor address register */
@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs {
 	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
 };
 
-struct fsl_dma_chan;
+struct fsldma_chan;
 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4
 
-struct fsl_dma_device {
-	void __iomem *reg_base;	/* DGSR register base */
-	struct resource reg;	/* Resource for register */
+struct fsldma_device {
+	void __iomem *regs;	/* DGSR register base */
 	struct device *dev;
 	struct dma_device common;
-	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;		/* The same as DMA channels */
 	int irq;		/* Channel IRQ */
 };
 
-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
 #define FSL_DMA_LITTLE_ENDIAN	0x00000000
 #define FSL_DMA_BIG_ENDIAN	0x00000001
@@ -130,28 +127,28 @@ struct fsl_dma_device {
 #define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
 #define FSL_DMA_CHAN_START_EXT	0x00002000
 
-struct fsl_dma_chan {
-	struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
-	struct list_head ld_queue;	/* Link descriptors queue */
+	struct list_head ld_pending;	/* Link descriptors queue */
+	struct list_head ld_running;	/* Link descriptors queue */
 	struct dma_chan common;		/* DMA common channel */
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 	struct device *dev;		/* Channel device */
-	struct resource reg;		/* Resource for register */
 	int irq;			/* Channel IRQ */
 	int id;				/* Raw id of this channel */
 	struct tasklet_struct tasklet;
 	u32 feature;
 
-	void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
 };
 
-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)

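Beyond the s/fsl_dma_/fsldma_/ renames, the structural change here is splitting the single ld_queue into ld_pending (prepared but not started) and ld_running (handed to hardware), part of the cookie fixes. A hedged sketch of how such a split is commonly driven; the list helpers are the standard kernel API, the function itself is illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative only: move everything queued by prep/submit onto the
 * running list in one shot when the hardware is kicked. */
static void example_start_pending(spinlock_t *lock,
				  struct list_head *ld_pending,
				  struct list_head *ld_running)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(ld_pending))
		list_splice_tail_init(ld_pending, ld_running);
	spin_unlock_irqrestore(lock, flags);

	/* ... then program the first descriptor's address and start DMA ... */
}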

@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void ioat1_cleanup_tasklet(unsigned long data);
-
 /* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
 {
 	struct dma_device *dma = &device->common;
+	struct dma_chan *c = &chan->common;
+	unsigned long data = (unsigned long) c;
 
 	chan->device = device;
 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
 	list_add_tail(&chan->common.device_node, &dma->channels);
 	device->idx[idx] = chan;
 	init_timer(&chan->timer);
-	chan->timer.function = timer_fn;
-	chan->timer.data = ioat;
-	tasklet_init(&chan->cleanup_task, tasklet, ioat);
+	chan->timer.function = device->timer_fn;
+	chan->timer.data = data;
+	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
 	tasklet_disable(&chan->cleanup_task);
 }
 
-static void ioat1_timer_event(unsigned long data);
-
 /**
  * ioat1_dma_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;
 
-		ioat_init_channel(device, &ioat->base, i,
-				  ioat1_timer_event,
-				  ioat1_cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap = xfercap;
 		spin_lock_init(&ioat->desc_lock);
 		INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 	return &desc->txd;
 }
 
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
 {
-	struct ioat_dma_chan *chan = (void *)data;
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
 
-	ioat1_cleanup(chan);
-	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	ioat1_cleanup(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 static void ioat1_timer_event(unsigned long data)
 {
-	struct ioat_dma_chan *ioat = (void *) data;
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		      dma_cookie_t *done, dma_cookie_t *used)
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+		     dma_cookie_t *done, dma_cookie_t *used)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = to_chan_common(c);
+	struct ioatdma_device *device = chan->device;
 
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
-	ioat1_cleanup(ioat);
+	device->cleanup_fn((unsigned long) c);
 
 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	device->intr_quirk = ioat1_intr_quirk;
 	device->enumerate_channels = ioat1_enumerate_channels;
 	device->self_test = ioat_dma_self_test;
+	device->timer_fn = ioat1_timer_event;
+	device->cleanup_fn = ioat1_cleanup_event;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
 	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
 	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_is_tx_complete = ioat1_dma_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;
 
 	err = ioat_probe(device);
 	if (err)


@@ -61,7 +61,7 @@
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
  * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @cleanup_fn: select between the v2 and v3 cleanup routines
  * @timer_fn: select between the v2 and v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
  *
@@ -80,7 +80,7 @@ struct ioatdma_device {
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
 	int (*reset_hw)(struct ioat_chan_common *chan);
-	void (*cleanup_tasklet)(unsigned long data);
+	void (*cleanup_fn)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
 	int (*self_test)(struct ioatdma_device *device);
 };
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 			     void __iomem *iobase);
 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat);
+		       struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+				     dma_cookie_t *done, dma_cookie_t *used);
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,

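The net effect of this refactor: timer and tasklet context data is now the struct dma_chan pointer for every hardware version, and each handler recovers its channel with container_of(). A minimal sketch of that round trip (type and function names here are illustrative):

#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct sample_chan_common {
	struct dma_chan common;
	/* ... */
};

struct sample_chan {
	struct sample_chan_common base;
	/* ... */
};

/* every hardware version's handler can now share one prototype */
static void sample_cleanup_event(unsigned long data)
{
	struct dma_chan *c = (struct dma_chan *) data;
	struct sample_chan_common *chan =
		container_of(c, struct sample_chan_common, common);
	struct sample_chan *sc = container_of(chan, struct sample_chan, base);

	/* ... hardware-version-specific cleanup on sc ... */
	(void) sc;
}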

@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 {
-	void * __iomem reg_base = ioat->base.reg_base;
+	struct ioat_chan_common *chan = &ioat->base;
 
-	ioat->pending = 0;
 	ioat->dmacount += ioat2_ring_pending(ioat);
 	ioat->issued = ioat->head;
 	/* make descriptor updates globally visible before notifying channel */
 	wmb();
-	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(&ioat->base),
+	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(chan),
 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
 		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }
 
-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 
-	spin_lock_bh(&ioat->ring_lock);
-	if (ioat->pending == 1)
+	if (ioat2_ring_pending(ioat)) {
+		spin_lock_bh(&ioat->ring_lock);
 		__ioat2_issue_pending(ioat);
-	spin_unlock_bh(&ioat->ring_lock);
+		spin_unlock_bh(&ioat->ring_lock);
+	}
 }
 
 /**
  * ioat2_update_pending - log pending descriptors
  * @ioat: ioat2+ channel
  *
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark.  Called with ring_lock held
  */
 static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
 {
-	if (unlikely(ioat->pending == 2))
-		return;
-	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+	if (ioat2_ring_pending(ioat) > ioat_pending_level)
 		__ioat2_issue_pending(ioat);
-	else
-		ioat->pending = 1;
 }
 
 static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		seen_current = true;
 	}
 	ioat->tail += i;
-	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
 
 	chan->last_completion = phys_complete;
 	if (ioat->head == ioat->tail) {
@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
 	ioat2_cleanup(ioat);
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 void ioat2_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	spin_lock_bh(&chan->cleanup_lock);
@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;
 
-		ioat_init_channel(device, &ioat->base, i,
-				  device->timer_fn,
-				  device->cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
 		if (device->reset_hw(&ioat->base)) {
@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	ioat->head = 0;
 	ioat->issued = 0;
 	ioat->tail = 0;
-	ioat->pending = 0;
 	ioat->alloc_order = order;
 	spin_unlock_bh(&ioat->ring_lock);
@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&chan->cleanup_lock);
 
-		device->timer_fn((unsigned long) ioat);
+		device->timer_fn((unsigned long) &chan->common);
 	} else
 		spin_unlock_bh(&chan->cleanup_lock);
 
 	return -ENOMEM;
@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
-	device->cleanup_tasklet((unsigned long) ioat);
+	device->cleanup_fn((unsigned long) c);
 	device->reset_hw(chan);
 
 	spin_lock_bh(&ioat->ring_lock);
@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	chan->last_completion = 0;
 	chan->completion_dma = 0;
-	ioat->pending = 0;
 	ioat->dmacount = 0;
 }
 
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		  dma_cookie_t *done, dma_cookie_t *used)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioatdma_device *device = ioat->base.device;
-
-	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
-		return DMA_SUCCESS;
-
-	device->cleanup_tasklet((unsigned long) ioat);
-
-	return ioat_is_complete(c, cookie, done, used);
-}
-
 static ssize_t ring_size_show(struct dma_chan *c, char *page)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	device->enumerate_channels = ioat2_enumerate_channels;
 	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+	device->cleanup_fn = ioat2_cleanup_event;
 	device->timer_fn = ioat2_timer_event;
 	device->self_test = ioat_dma_self_test;
 	dma = &device->common;
@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat2_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;
 
 	err = ioat_probe(device);
 	if (err)

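With the pending flag gone, "is there anything to issue?" is answered directly from the ring indices: descriptors sit between issued and head until they are pushed to hardware. On a ring addressed by free-running 16-bit counters that is plain modular arithmetic; a sketch of the idea (ioat2_ring_pending()'s actual definition lives in dma_v2.h, this is illustrative):

#include <stdint.h>

/* Free-running indices: unsigned 16-bit subtraction handles wraparound. */
static inline uint16_t ring_pending(uint16_t head, uint16_t issued)
{
	return (uint16_t)(head - issued);	/* allocated, not yet issued */
}

static inline uint16_t ring_active(uint16_t head, uint16_t tail)
{
	return (uint16_t)(head - tail);		/* not yet cleaned up */
}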

@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
  * @head: allocated index
  * @issued: hardware notification point
  * @tail: cleanup index
- * @pending: lock free indicator for issued != head
  * @dmacount: identical to 'head' except for occasionally resetting to zero
  * @alloc_order: log2 of the number of allocated descriptors
  * @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
 	u16 tail;
 	u16 dmacount;
 	u16 alloc_order;
-	int pending;
 	struct ioat_ring_ent **ring;
 	spinlock_t ring_lock;
 };
@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 void ioat2_issue_pending(struct dma_chan *chan);
 int ioat2_alloc_chan_resources(struct dma_chan *c);
 void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-				  dma_cookie_t *done, dma_cookie_t *used);
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
 void ioat2_timer_event(unsigned long data);
 int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
 int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);


@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		}
 	}
 	ioat->tail += i;
-	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
 	chan->last_completion = phys_complete;
-	if (ioat->head == ioat->tail) {
+
+	active = ioat2_ring_active(ioat);
+	if (active == 0) {
 		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
 			__func__);
 		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
 		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * active), IOAT_INTRDELAY_MASK),
+	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
 }
 
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+/* try to cleanup, but yield (via spin_trylock) to incoming submissions
+ * with the expectation that we will immediately poll again shortly
+ */
+static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static void ioat3_cleanup_tasklet(unsigned long data)
+/* run cleanup now because we already delayed the interrupt via INTRDELAY */
+static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
 
-	ioat3_cleanup(ioat);
-	writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
-	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	prefetch(chan->completion);
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	spin_lock_bh(&ioat->ring_lock);
+	__cleanup(ioat, phys_complete);
+	spin_unlock_bh(&ioat->ring_lock);
+
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+
+	ioat3_cleanup_sync(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
-	u32 status;
-
-	status = ioat_chansts(chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		status = ioat_chansts(chan);
-		cpu_relax();
-	}
 
+	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 static void ioat3_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	spin_lock_bh(&chan->cleanup_lock);
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
-	ioat3_cleanup(ioat);
+	ioat3_cleanup_poll(ioat);
 
 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	if (is_raid_device) {
 		dma->device_is_tx_complete = ioat3_is_complete;
-		device->cleanup_tasklet = ioat3_cleanup_tasklet;
+		device->cleanup_fn = ioat3_cleanup_event;
 		device->timer_fn = ioat3_timer_event;
 	} else {
-		dma->device_is_tx_complete = ioat2_is_complete;
-		device->cleanup_tasklet = ioat2_cleanup_tasklet;
+		dma->device_is_tx_complete = ioat_is_dma_complete;
+		device->cleanup_fn = ioat2_cleanup_event;
 		device->timer_fn = ioat2_timer_event;
 	}

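The coalescing write in __cleanup() scales interrupt delay with load: 5 µs per still-active descriptor, clamped to the 14-bit INTRDELAY field (hence the IOAT_INTRDELAY_MASK rename in the next hunk). The arithmetic, worked through as a standalone example:

#include <stdio.h>

#define INTRDELAY_MASK 0x3FFF	/* 14-bit delay field, in microseconds */

int main(void)
{
	unsigned int active;

	for (active = 0; active <= 8192; active += 2048) {
		unsigned int delay = 5 * active;

		if (delay > INTRDELAY_MASK)
			delay = INTRDELAY_MASK;	/* min(5 * active, mask) */
		printf("active=%4u -> intrdelay=%5u us\n", active, delay);
	}
	return 0;
}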

@@ -60,7 +60,7 @@
 #define IOAT_PERPORTOFFSET_OFFSET	0x0A	/* 16-bit */
 
 #define IOAT_INTRDELAY_OFFSET		0x0C	/* 16-bit */
-#define IOAT_INTRDELAY_INT_DELAY_MASK	0x3FFF	/* Interrupt Delay Time */
+#define IOAT_INTRDELAY_MASK		0x3FFF	/* Interrupt Delay Time */
 #define IOAT_INTRDELAY_COALESE_SUPPORT	0x8000	/* Interrupt Coalescing Supported */
 
 #define IOAT_DEVICE_STATUS_OFFSET	0x0E	/* 16-bit */


@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		break;
 	case IPU_PIX_FMT_BGRA32:
 	case IPU_PIX_FMT_BGR32:
+	case IPU_PIX_FMT_ABGR32:
 		params->ip.bpp = 0;
 		params->ip.pfs = 4;
 		params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		params->ip.wid2 = 7;		/* Blue bit width - 1 */
 		params->ip.wid3 = 7;		/* Alpha bit width - 1 */
 		break;
-	case IPU_PIX_FMT_ABGR32:
-		params->ip.bpp = 0;
-		params->ip.pfs = 4;
-		params->ip.npb = 7;
-		params->ip.sat = 2;		/* SAT = 32-bit access */
-		params->ip.ofs0 = 8;		/* Red bit offset */
-		params->ip.ofs1 = 16;		/* Green bit offset */
-		params->ip.ofs2 = 24;		/* Blue bit offset */
-		params->ip.ofs3 = 0;		/* Alpha bit offset */
-		params->ip.wid0 = 7;		/* Red bit width - 1 */
-		params->ip.wid1 = 7;		/* Green bit width - 1 */
-		params->ip.wid2 = 7;		/* Blue bit width - 1 */
-		params->ip.wid3 = 7;		/* Alpha bit width - 1 */
-		break;
 	case IPU_PIX_FMT_UYVY:
 		params->ip.bpp = 2;
 		params->ip.pfs = 6;

drivers/dma/mpc512x_dma.c (new file, 800 lines)

@@ -0,0 +1,800 @@
/*
* Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
* Copyright (C) Semihalf 2009
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
* written by Hongjun Chen <hong-jun.chen@freescale.com>.
*
* Approved as OSADL project by a majority of OSADL members and funded
* by OSADL membership fees in 2009; for details see www.osadl.org.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This is initial version of MPC5121 DMA driver. Only memory to memory
* transfers are supported (tested using dmatest module).
*/
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/random.h>
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS 64
/* Macro definitions */
#define MPC_DMA_CHANNELS 64
#define MPC_DMA_TCD_OFFSET 0x1000
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG (1 << 31)
#define MPC_DMA_DMACR_ERGA (1 << 3)
#define MPC_DMA_DMACR_ERCA (1 << 2)
/* Error codes */
#define MPC_DMA_DMAES_VLD (1 << 31)
#define MPC_DMA_DMAES_GPE (1 << 15)
#define MPC_DMA_DMAES_CPE (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE (1 << 7)
#define MPC_DMA_DMAES_SOE (1 << 6)
#define MPC_DMA_DMAES_DAE (1 << 5)
#define MPC_DMA_DMAES_DOE (1 << 4)
#define MPC_DMA_DMAES_NCE (1 << 3)
#define MPC_DMA_DMAES_SGE (1 << 2)
#define MPC_DMA_DMAES_SBE (1 << 1)
#define MPC_DMA_DMAES_DBE (1 << 0)
#define MPC_DMA_TSIZE_1 0x00
#define MPC_DMA_TSIZE_2 0x01
#define MPC_DMA_TSIZE_4 0x02
#define MPC_DMA_TSIZE_16 0x04
#define MPC_DMA_TSIZE_32 0x05
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
/* 0x00 */
u32 dmacr; /* DMA control register */
u32 dmaes; /* DMA error status */
/* 0x08 */
u32 dmaerqh; /* DMA enable request high(channels 63~32) */
u32 dmaerql; /* DMA enable request low(channels 31~0) */
u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */
u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */
/* 0x18 */
u8 dmaserq; /* DMA set enable request */
u8 dmacerq; /* DMA clear enable request */
u8 dmaseei; /* DMA set enable error interrupt */
u8 dmaceei; /* DMA clear enable error interrupt */
/* 0x1c */
u8 dmacint; /* DMA clear interrupt request */
u8 dmacerr; /* DMA clear error */
u8 dmassrt; /* DMA set start bit */
u8 dmacdne; /* DMA clear DONE status bit */
/* 0x20 */
u32 dmainth; /* DMA interrupt request high(ch63~32) */
u32 dmaintl; /* DMA interrupt request low(ch31~0) */
u32 dmaerrh; /* DMA error high(ch63~32) */
u32 dmaerrl; /* DMA error low(ch31~0) */
/* 0x30 */
u32 dmahrsh; /* DMA hw request status high(ch63~32) */
u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
/* 0x40 ~ 0xff */
u32 reserve0[48]; /* Reserved */
/* 0x100 */
u8 dchpri[MPC_DMA_CHANNELS];
/* DMA channels(0~63) priority */
};
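/* Transfer Control Descriptor; bitfields follow the big-endian register layout of the MPC5121 eDMA */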
struct __attribute__ ((__packed__)) mpc_dma_tcd {
/* 0x00 */
u32 saddr; /* Source address */
u32 smod:5; /* Source address modulo */
u32 ssize:3; /* Source data transfer size */
u32 dmod:5; /* Destination address modulo */
u32 dsize:3; /* Destination data transfer size */
u32 soff:16; /* Signed source address offset */
/* 0x08 */
u32 nbytes; /* Inner "minor" byte count */
u32 slast; /* Last source address adjustment */
u32 daddr; /* Destination address */
/* 0x14 */
u32 citer_elink:1; /* Enable channel-to-channel linking on
* minor loop complete
*/
u32 citer_linkch:6; /* Link channel for minor loop complete */
u32 citer:9; /* Current "major" iteration count */
u32 doff:16; /* Signed destination address offset */
/* 0x18 */
u32 dlast_sga; /* Last Destination address adjustment/scatter
* gather address
*/
/* 0x1c */
u32 biter_elink:1; /* Enable channel-to-channel linking on major
* loop complete
*/
	u32 biter_linkch:6;	/* Link channel (beginning copy of citer_linkch) */
u32 biter:9; /* Beginning "major" iteration count */
u32 bwc:2; /* Bandwidth control */
u32 major_linkch:6; /* Link channel number */
u32 done:1; /* Channel done */
u32 active:1; /* Channel active */
u32 major_elink:1; /* Enable channel-to-channel linking on major
* loop complete
*/
u32 e_sg:1; /* Enable scatter/gather processing */
u32 d_req:1; /* Disable request */
u32 int_half:1; /* Enable an interrupt when major counter is
* half complete
*/
u32 int_maj:1; /* Enable an interrupt when major iteration
* count completes
*/
u32 start:1; /* Channel start */
};
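/* Software descriptor: wraps one hardware TCD and its bus address (used for scatter/gather chaining) */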
struct mpc_dma_desc {
struct dma_async_tx_descriptor desc;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
int error;
struct list_head node;
};
struct mpc_dma_chan {
struct dma_chan chan;
struct list_head free;
struct list_head prepared;
struct list_head queued;
struct list_head active;
struct list_head completed;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
dma_cookie_t completed_cookie;
/* Lock for this structure */
spinlock_t lock;
};
struct mpc_dma {
struct dma_device dma;
struct tasklet_struct tasklet;
struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
struct mpc_dma_regs __iomem *regs;
struct mpc_dma_tcd __iomem *tcd;
int irq;
uint error_status;
/* Lock for error_status field in this structure */
spinlock_t error_status_lock;
};
#define DRV_NAME "mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
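	/* mchan is element chan_id of the channels[] array embedded in struct mpc_dma */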
return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
* Execute all queued DMA descriptors.
*
* Following requirements must be met while calling mpc_dma_execute():
* a) mchan->lock is acquired,
* b) mchan->active list is empty,
* c) mchan->queued list contains at least one entry.
*/
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
struct mpc_dma_desc *first = NULL;
struct mpc_dma_desc *prev = NULL;
struct mpc_dma_desc *mdesc;
int cid = mchan->chan.chan_id;
/* Move all queued descriptors to active list */
list_splice_tail_init(&mchan->queued, &mchan->active);
/* Chain descriptors into one transaction */
list_for_each_entry(mdesc, &mchan->active, node) {
if (!first)
first = mdesc;
if (!prev) {
prev = mdesc;
continue;
}
prev->tcd->dlast_sga = mdesc->tcd_paddr;
prev->tcd->e_sg = 1;
mdesc->tcd->start = 1;
prev = mdesc;
}
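	/* Terminate the chain: the last TCD must not auto-start a successor and must interrupt on major loop completion */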
prev->tcd->start = 0;
prev->tcd->int_maj = 1;
/* Send first descriptor in chain into hardware */
memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
out_8(&mdma->regs->dmassrt, cid);
}
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
struct mpc_dma_chan *mchan;
struct mpc_dma_desc *mdesc;
u32 status = is | es;
int ch;
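	/* Service channels with pending interrupt or error status, highest channel first */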
while ((ch = fls(status) - 1) >= 0) {
status &= ~(1 << ch);
mchan = &mdma->channels[ch + off];
spin_lock(&mchan->lock);
/* Check error status */
if (es & (1 << ch))
list_for_each_entry(mdesc, &mchan->active, node)
mdesc->error = -EIO;
/* Execute queued descriptors */
list_splice_tail_init(&mchan->active, &mchan->completed);
if (!list_empty(&mchan->queued))
mpc_dma_execute(mchan);
spin_unlock(&mchan->lock);
}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
struct mpc_dma *mdma = data;
uint es;
/* Save error status register */
es = in_be32(&mdma->regs->dmaes);
spin_lock(&mdma->error_status_lock);
if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
mdma->error_status = es;
spin_unlock(&mdma->error_status_lock);
/* Handle interrupt on each channel */
mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
in_be32(&mdma->regs->dmaerrh), 32);
mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
in_be32(&mdma->regs->dmaerrl), 0);
/* Ack interrupt on all channels */
out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
/* Schedule tasklet */
tasklet_schedule(&mdma->tasklet);
return IRQ_HANDLED;
}
/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
struct mpc_dma *mdma = (void *)data;
dma_cookie_t last_cookie = 0;
struct mpc_dma_chan *mchan;
struct mpc_dma_desc *mdesc;
struct dma_async_tx_descriptor *desc;
unsigned long flags;
LIST_HEAD(list);
uint es;
int i;
spin_lock_irqsave(&mdma->error_status_lock, flags);
es = mdma->error_status;
mdma->error_status = 0;
spin_unlock_irqrestore(&mdma->error_status_lock, flags);
/* Print nice error report */
if (es) {
dev_err(mdma->dma.dev,
"Hardware reported following error(s) on channel %u:\n",
MPC_DMA_DMAES_ERRCHN(es));
if (es & MPC_DMA_DMAES_GPE)
dev_err(mdma->dma.dev, "- Group Priority Error\n");
if (es & MPC_DMA_DMAES_CPE)
dev_err(mdma->dma.dev, "- Channel Priority Error\n");
if (es & MPC_DMA_DMAES_SAE)
dev_err(mdma->dma.dev, "- Source Address Error\n");
if (es & MPC_DMA_DMAES_SOE)
dev_err(mdma->dma.dev, "- Source Offset"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_DAE)
dev_err(mdma->dma.dev, "- Destination Address"
" Error\n");
if (es & MPC_DMA_DMAES_DOE)
dev_err(mdma->dma.dev, "- Destination Offset"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_NCE)
dev_err(mdma->dma.dev, "- NBytes/Citter"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_SGE)
dev_err(mdma->dma.dev, "- Scatter/Gather"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_SBE)
dev_err(mdma->dma.dev, "- Source Bus Error\n");
if (es & MPC_DMA_DMAES_DBE)
dev_err(mdma->dma.dev, "- Destination Bus Error\n");
}
for (i = 0; i < mdma->dma.chancnt; i++) {
mchan = &mdma->channels[i];
/* Get all completed descriptors */
spin_lock_irqsave(&mchan->lock, flags);
if (!list_empty(&mchan->completed))
list_splice_tail_init(&mchan->completed, &list);
spin_unlock_irqrestore(&mchan->lock, flags);
if (list_empty(&list))
continue;
		/* Execute callbacks and run dependencies (outside mchan->lock, so callbacks may resubmit) */
list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc;
if (desc->callback)
desc->callback(desc->callback_param);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
}
/* Free descriptors */
spin_lock_irqsave(&mchan->lock, flags);
list_splice_tail_init(&list, &mchan->free);
mchan->completed_cookie = last_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
}
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
struct mpc_dma_desc *mdesc;
unsigned long flags;
dma_cookie_t cookie;
mdesc = container_of(txd, struct mpc_dma_desc, desc);
spin_lock_irqsave(&mchan->lock, flags);
/* Move descriptor to queue */
list_move_tail(&mdesc->node, &mchan->queued);
/* If channel is idle, execute all queued descriptors */
if (list_empty(&mchan->active))
mpc_dma_execute(mchan);
	/* Assign the next cookie, wrapping back to 1 on signed overflow */
cookie = mchan->chan.cookie + 1;
if (cookie <= 0)
cookie = 1;
mchan->chan.cookie = cookie;
mdesc->desc.cookie = cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
unsigned long flags;
LIST_HEAD(descs);
int i;
/* Alloc DMA memory for Transfer Control Descriptors */
tcd = dma_alloc_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
&tcd_paddr, GFP_KERNEL);
if (!tcd)
return -ENOMEM;
/* Alloc descriptors for this channel */
for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
if (!mdesc) {
dev_notice(mdma->dma.dev, "Memory allocation error. "
"Allocated only %u descriptors\n", i);
break;
}
dma_async_tx_descriptor_init(&mdesc->desc, chan);
mdesc->desc.flags = DMA_CTRL_ACK;
mdesc->desc.tx_submit = mpc_dma_tx_submit;
mdesc->tcd = &tcd[i];
mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
list_add_tail(&mdesc->node, &descs);
}
/* Return error only if no descriptors were allocated */
if (i == 0) {
dma_free_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
tcd, tcd_paddr);
return -ENOMEM;
}
spin_lock_irqsave(&mchan->lock, flags);
mchan->tcd = tcd;
mchan->tcd_paddr = tcd_paddr;
list_splice_tail_init(&descs, &mchan->free);
spin_unlock_irqrestore(&mchan->lock, flags);
/* Enable Error Interrupt */
out_8(&mdma->regs->dmaseei, chan->chan_id);
return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc, *tmp;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
unsigned long flags;
LIST_HEAD(descs);
spin_lock_irqsave(&mchan->lock, flags);
/* Channel must be idle */
BUG_ON(!list_empty(&mchan->prepared));
BUG_ON(!list_empty(&mchan->queued));
BUG_ON(!list_empty(&mchan->active));
BUG_ON(!list_empty(&mchan->completed));
/* Move data */
list_splice_tail_init(&mchan->free, &descs);
tcd = mchan->tcd;
tcd_paddr = mchan->tcd_paddr;
spin_unlock_irqrestore(&mchan->lock, flags);
/* Free DMA memory used by descriptors */
dma_free_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
tcd, tcd_paddr);
/* Free descriptors */
list_for_each_entry_safe(mdesc, tmp, &descs, node)
kfree(mdesc);
/* Disable Error Interrupt */
out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
/*
* We are posting descriptors to the hardware as soon as
* they are ready, so this function does nothing.
*/
}
/* Check request completion status */
static enum dma_status
mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
unsigned long flags;
dma_cookie_t last_used;
dma_cookie_t last_complete;
spin_lock_irqsave(&mchan->lock, flags);
last_used = mchan->chan.cookie;
last_complete = mchan->completed_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
if (done)
*done = last_complete;
if (used)
*used = last_used;
return dma_async_is_complete(cookie, last_complete, last_used);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc = NULL;
struct mpc_dma_tcd *tcd;
unsigned long iflags;
/* Get free descriptor */
spin_lock_irqsave(&mchan->lock, iflags);
if (!list_empty(&mchan->free)) {
mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
node);
list_del(&mdesc->node);
}
spin_unlock_irqrestore(&mchan->lock, iflags);
if (!mdesc)
return NULL;
mdesc->error = 0;
tcd = mdesc->tcd;
/* Prepare Transfer Control Descriptor for this transaction */
memset(tcd, 0, sizeof(struct mpc_dma_tcd));
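	/* Use the widest transfer size to which src, dst and len are all aligned */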
if (IS_ALIGNED(src | dst | len, 32)) {
tcd->ssize = MPC_DMA_TSIZE_32;
tcd->dsize = MPC_DMA_TSIZE_32;
tcd->soff = 32;
tcd->doff = 32;
} else if (IS_ALIGNED(src | dst | len, 16)) {
tcd->ssize = MPC_DMA_TSIZE_16;
tcd->dsize = MPC_DMA_TSIZE_16;
tcd->soff = 16;
tcd->doff = 16;
} else if (IS_ALIGNED(src | dst | len, 4)) {
tcd->ssize = MPC_DMA_TSIZE_4;
tcd->dsize = MPC_DMA_TSIZE_4;
tcd->soff = 4;
tcd->doff = 4;
} else if (IS_ALIGNED(src | dst | len, 2)) {
tcd->ssize = MPC_DMA_TSIZE_2;
tcd->dsize = MPC_DMA_TSIZE_2;
tcd->soff = 2;
tcd->doff = 2;
} else {
tcd->ssize = MPC_DMA_TSIZE_1;
tcd->dsize = MPC_DMA_TSIZE_1;
tcd->soff = 1;
tcd->doff = 1;
}
tcd->saddr = src;
tcd->daddr = dst;
tcd->nbytes = len;
tcd->biter = 1;
tcd->citer = 1;
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, iflags);
list_add_tail(&mdesc->node, &mchan->prepared);
spin_unlock_irqrestore(&mchan->lock, iflags);
return &mdesc->desc;
}
static int __devinit mpc_dma_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *dn = op->node;
struct device *dev = &op->dev;
struct dma_device *dma;
struct mpc_dma *mdma;
struct mpc_dma_chan *mchan;
struct resource res;
ulong regs_start, regs_size;
int retval, i;
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
dev_err(dev, "Memory exhausted!\n");
return -ENOMEM;
}
mdma->irq = irq_of_parse_and_map(dn, 0);
if (mdma->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
return -EINVAL;
}
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
return retval;
}
regs_start = res.start;
regs_size = res.end - res.start + 1;
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
return -EBUSY;
}
mdma->regs = devm_ioremap(dev, regs_start, regs_size);
if (!mdma->regs) {
dev_err(dev, "Error mapping memory region!\n");
return -ENOMEM;
}
mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ MPC_DMA_TCD_OFFSET);
retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
mdma);
if (retval) {
dev_err(dev, "Error requesting IRQ!\n");
return -EINVAL;
}
spin_lock_init(&mdma->error_status_lock);
dma = &mdma->dma;
dma->dev = dev;
dma->chancnt = MPC_DMA_CHANNELS;
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
dma->device_is_tx_complete = mpc_dma_is_tx_complete;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
for (i = 0; i < dma->chancnt; i++) {
mchan = &mdma->channels[i];
mchan->chan.device = dma;
mchan->chan.chan_id = i;
mchan->chan.cookie = 1;
mchan->completed_cookie = mchan->chan.cookie;
INIT_LIST_HEAD(&mchan->free);
INIT_LIST_HEAD(&mchan->prepared);
INIT_LIST_HEAD(&mchan->queued);
INIT_LIST_HEAD(&mchan->active);
INIT_LIST_HEAD(&mchan->completed);
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &dma->channels);
}
tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
/*
* Configure DMA Engine:
* - Dynamic clock,
* - Round-robin group arbitration,
* - Round-robin channel arbitration.
*/
out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
/* Disable hardware DMA requests */
out_be32(&mdma->regs->dmaerqh, 0);
out_be32(&mdma->regs->dmaerql, 0);
/* Disable error interrupts */
out_be32(&mdma->regs->dmaeeih, 0);
out_be32(&mdma->regs->dmaeeil, 0);
/* Clear interrupts status */
out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
/* Route interrupts to IPIC */
out_be32(&mdma->regs->dmaihsa, 0);
out_be32(&mdma->regs->dmailsa, 0);
/* Register DMA engine */
dev_set_drvdata(dev, mdma);
retval = dma_async_device_register(dma);
if (retval) {
devm_free_irq(dev, mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
}
return retval;
}
static int __devexit mpc_dma_remove(struct of_device *op)
{
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
dma_async_device_unregister(&mdma->dma);
devm_free_irq(dev, mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
return 0;
}
static struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc5121-dma", },
{},
};
static struct of_platform_driver mpc_dma_driver = {
.match_table = mpc_dma_match,
.probe = mpc_dma_probe,
.remove = __devexit_p(mpc_dma_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init mpc_dma_init(void)
{
return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);
static void __exit mpc_dma_exit(void)
{
of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");

View file

@ -4940,7 +4940,7 @@ out_free:
	return ret;
}
-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
	{ .compatible = "ibm,dma-440spe", },
	{ .compatible = "amcc,xor-accelerator", },
	{},

View file

@ -31,6 +31,8 @@
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE 1
#define DMA_MAX_COOKIE INT_MAX
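/* Cookies are handed out from DMA_MIN_COOKIE upward; dma_submit_error() flags the negative error range */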
#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)