@@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
 
 static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
-	if (len == 0 || len > chan->max_length)
+	if (len == 0)
 		return false;
 	if ((len & chan->align_mask) != 0) /* Not aligned */
 		return false;
@@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 	return desc;
 }
 
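+/*
+ * Fill SG entries for num_periods contiguous periods of period_len bytes,
+ * splitting each period into segments no larger than chan->max_length.
+ * Returns a pointer to the entry following the last one written.
+ */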
+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+	enum dma_transfer_direction direction, dma_addr_t addr,
+	unsigned int num_periods, unsigned int period_len,
+	struct axi_dmac_sg *sg)
+{
+	unsigned int num_segments, i;
+	unsigned int segment_size;
+	unsigned int len;
+
+	/* Split into multiple equally sized segments if necessary */
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+	segment_size = DIV_ROUND_UP(period_len, num_segments);
+	/* Take care of alignment: round segment_size up to the next aligned value */
+	segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+
+	for (i = 0; i < num_periods; i++) {
+		len = period_len;
+
+		while (len > segment_size) {
+			if (direction == DMA_DEV_TO_MEM)
+				sg->dest_addr = addr;
+			else
+				sg->src_addr = addr;
+			sg->x_len = segment_size;
+			sg->y_len = 1;
+			sg++;
+			addr += segment_size;
+			len -= segment_size;
+		}
+
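+		/* The last segment covers the remainder and may be shorter */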
+		if (direction == DMA_DEV_TO_MEM)
+			sg->dest_addr = addr;
+		else
+			sg->src_addr = addr;
+		sg->x_len = len;
+		sg->y_len = 1;
+		sg++;
+		addr += len;
+	}
+
+	return sg;
+}
+
 static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 	struct dma_chan *c, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *dsg;
 	struct scatterlist *sg;
+	unsigned int num_sgs;
 	unsigned int i;
 
 	if (direction != chan->direction)
 		return NULL;
 
-	desc = axi_dmac_alloc_desc(sg_len);
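+	/* An entry may need multiple segments if it exceeds max_length */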
+	num_sgs = 0;
+	for_each_sg(sgl, sg, sg_len, i)
+		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+	desc = axi_dmac_alloc_desc(num_sgs);
 	if (!desc)
 		return NULL;
 
+	dsg = desc->sg;
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
 		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
@@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 			return NULL;
 		}
 
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = sg_dma_address(sg);
-		else
-			desc->sg[i].src_addr = sg_dma_address(sg);
-		desc->sg[i].x_len = sg_dma_len(sg);
-		desc->sg[i].y_len = 1;
+		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+			sg_dma_len(sg), dsg);
 	}
 
 	desc->cyclic = false;
@@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
-	unsigned int num_periods, i;
+	unsigned int num_periods, num_segments;
 
 	if (direction != chan->direction)
 		return NULL;
@@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 		return NULL;
 
 	num_periods = buf_len / period_len;
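+	/* Each period may have to be split into multiple segments */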
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
 
-	desc = axi_dmac_alloc_desc(num_periods);
+	desc = axi_dmac_alloc_desc(num_periods * num_segments);
 	if (!desc)
 		return NULL;
 
-	for (i = 0; i < num_periods; i++) {
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = buf_addr;
-		else
-			desc->sg[i].src_addr = buf_addr;
-		desc->sg[i].x_len = period_len;
-		desc->sg[i].y_len = 1;
-		buf_addr += period_len;
-	}
+	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+		period_len, desc->sg);
 
 	desc->cyclic = true;
@@ -647,7 +688,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	of_node_put(of_channels);
 
 	pdev->dev.dma_parms = &dmac->dma_parms;
-	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
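+	/* Segments are now split to fit max_length internally, so expose no limit */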
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	dma_dev = &dmac->dma_dev;
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);