diff mbox

[PATCH/RFC,1/4] dma: add a function to request a DMA channel for a specific client

Message ID Pine.LNX.4.64.1207171242230.17060@axis700.grange (mailing list archive)
State New, archived
Headers show

Commit Message

Guennadi Liakhovetski July 17, 2012, 10:53 a.m. UTC
Add a new channel-request function, that uses a device pointer and a
transfer direction to locate the required client DMA request line. Devices,
that require DMA channels exceeding the traditional Tx / Rx scheme, can use
the optional "name" parameter.

This new function uses a new DMA multiplexer interface. The new DMA request
line routing concept declares, that client DMA request lines are always
statically connected to a DMA provider. If a client is directly connected
to a DMAC, such a static connection results in a 1-to-1 relationship
between client DMA request lines and DMAC DMA channels. A more flexible
configuration includes a multiplexer between clients and DMAC instances. In
these cases it is the multiplexer, that controls the routing. The initial
DMA multiplexer API consists of only one call-back: .request_chan(), that
finds the next free (physical or virtual) DMA channel, that can be routed
to the requested slave DMA interface. The actual routing, if required,
should be performed as a part of the dmaengine_slave_config() processing.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/dma/dmaengine.c   |   73 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |    9 +++++
 2 files changed, 82 insertions(+), 0 deletions(-)

Comments

Vinod Koul July 26, 2012, 6:31 a.m. UTC | #1
On Tue, 2012-07-17 at 12:53 +0200, Guennadi Liakhovetski wrote:
> Add a new channel-request function, that uses a device pointer and a
> transfer direction to locate the required client DMA request line. Devices,
> that require DMA channels exceeding the traditional Tx / Rx scheme, can use
> the optional "name" parameter.
> 
> This new function uses a new DMA multiplexer interface. The new DMA request
> line routing concept declares, that client DMA request lines are always
> statically connected to a DMA provider. If a client is directly connected
> to a DMAC, such a static connection results in a 1-to-1 relationship
> between client DMA request lines and DMAC DMA channels. A more flexible
> configuration includes a multiplexer between clients and DMAC instances. In
> these cases it is the multiplexer, that controls the routing. The initial
> DMA multiplexer API consists of only one call-back: .request_chan(), that
> finds the next free (physical or virtual) DMA channel, that can be routed
> to the requested slave DMA interface. The actual routing, if required,
> should be performed as a part of the dmaengine_slave_config() processing.
> 
> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> ---
>  drivers/dma/dmaengine.c   |   73 +++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/dmaengine.h |    9 +++++
>  2 files changed, 82 insertions(+), 0 deletions(-)
> 
> diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
> index 2397f6f..4fb927c 100644
> --- a/drivers/dma/dmaengine.c
> +++ b/drivers/dma/dmaengine.c
> @@ -542,6 +542,79 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
>  }
>  EXPORT_SYMBOL_GPL(__dma_request_channel);
>  
> +/**
> + * dma_request_slave_channel() - try to allocate an exclusive channel for device
> + * @dev:	client device, for which a channel should be allocated
> + * @direction:	transfer direction
> + * @name:	optional name of the channel, used, when the direction is not
> + *		sufficient to uniquely identify the DMA request
> + */
> +struct dma_chan *dma_request_slave_channel(struct device *dev,
> +		enum dma_transfer_direction direction, const char *name)
> +{
> +	struct dma_device *device, *_d;
> +	struct dma_chan *chan = NULL;
> +	dma_cap_mask_t mask;
> +	int err;
> +
> +	dma_cap_zero(mask);
> +	dma_cap_set(DMA_SLAVE, mask);
> +
> +	/* Find a channel */
> +	mutex_lock(&dma_list_mutex);
> +	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
> +		if (!device->mux || !device->mux->request_chan)
> +			continue;
> +
> +		if (!dma_device_satisfies_mask(device, mask)) {
> +			pr_info("%s: wrong capabilities\n", __func__);
> +			continue;
> +		}
> +
> +		/* ensure that all channels are either private or public. */
> +		if (!dma_has_cap(DMA_PRIVATE, device->cap_mask)) {
> +			bool public = false;
> +			list_for_each_entry(chan, &device->channels, device_node) {
> +				/* some channels are already publicly allocated */
> +				if (chan->client_count) {
> +					public = true;
> +					break;
> +				}
> +			}
> +			if (public)
> +				continue;
> +		}
> +
> +		chan = device->mux->request_chan(device, dev, direction, name);
NAK

1. We don't want dmacs to have anything to do with filtering and
allocating. That is the role of dmaengine and we need to add stuff there
only.
2. Even if we start using the optional name argument here we still don't
know out of the 4 channels the dmac supports which one to allocate.

> +		if (chan) {
> +			dma_cap_set(DMA_PRIVATE, device->cap_mask);
> +			device->privatecnt++;
> +			err = dma_chan_get(chan);
> +			if (!err)
> +				break;
> +			if (err == -ENODEV) {
> +				pr_debug("%s: %s module removed\n", __func__,
> +					 dma_chan_name(chan));
> +				list_del_rcu(&device->global_node);
> +			} else
> +				pr_debug("%s: failed to get %s: (%d)\n",
> +					 __func__, dma_chan_name(chan), err);
> +
> +			if (--device->privatecnt == 0)
> +				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
> +
> +			chan = NULL;
> +		}
> +	}
> +	mutex_unlock(&dma_list_mutex);
> +
> +	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
> +		 chan ? dma_chan_name(chan) : NULL);
> +
> +	return chan;
> +}
> +EXPORT_SYMBOL_GPL(dma_request_slave_channel);
> +
>  void dma_release_channel(struct dma_chan *chan)
>  {
>  	mutex_lock(&dma_list_mutex);
> diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
> index ccec62f..19b96d9 100644
> --- a/include/linux/dmaengine.h
> +++ b/include/linux/dmaengine.h
> @@ -482,6 +482,11 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
>  }
>  #endif
>  
> +struct dma_multiplexer {
> +	struct dma_chan *(*request_chan)(struct dma_device *, struct device *,
> +				enum dma_transfer_direction, const char *);
> +};
> +
>  /**
>   * struct dma_tx_state - filled in to report the status of
>   * a transfer.
> @@ -553,6 +558,8 @@ struct dma_device {
>  	int dev_id;
>  	struct device *dev;
>  
> +	const struct dma_multiplexer *mux;
> +
>  	int (*device_alloc_chan_resources)(struct dma_chan *chan);
>  	void (*device_free_chan_resources)(struct dma_chan *chan);
>  
> @@ -994,6 +1001,8 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
>  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
>  struct dma_chan *net_dma_find_channel(void);
>  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
> +struct dma_chan *dma_request_slave_channel(struct device *dev,
> +		enum dma_transfer_direction direction, const char *name);
>  
>  /* --- Helper iov-locking functions --- */
>
Guennadi Liakhovetski Aug. 2, 2012, 9:11 a.m. UTC | #2
Hi Vinod

Thanks for your review and sorry for a delayed reply.

On Thu, 26 Jul 2012, Vinod Koul wrote:

> On Tue, 2012-07-17 at 12:53 +0200, Guennadi Liakhovetski wrote:
> > Add a new channel-request function, that uses a device pointer and a
> > transfer direction to locate the required client DMA request line. Devices,
> > that require DMA channels exceeding the traditional Tx / Rx scheme, can use
> > the optional "name" parameter.
> > 
> > This new function uses a new DMA multiplexer interface. The new DMA request
> > line routing concept declares, that client DMA request lines are always
> > statically connected to a DMA provider. If a client is directly connected
> > to a DMAC, such a static connection results in a 1-to-1 relationship
> > between client DMA request lines and DMAC DMA channels. A more flexible
> > configuration includes a multiplexer between clients and DMAC instances. In
> > these cases it is the multiplexer, that controls the routing. The initial
> > DMA multiplexer API consists of only one call-back: .request_chan(), that
> > finds the next free (physical or virtual) DMA channel, that can be routed
> > to the requested slave DMA interface. The actual routing, if required,
> > should be performed as a part of the dmaengine_slave_config() processing.
> > 
> > Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> > ---
> >  drivers/dma/dmaengine.c   |   73 +++++++++++++++++++++++++++++++++++++++++++++
> >  include/linux/dmaengine.h |    9 +++++
> >  2 files changed, 82 insertions(+), 0 deletions(-)
> > 
> > diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
> > index 2397f6f..4fb927c 100644
> > --- a/drivers/dma/dmaengine.c
> > +++ b/drivers/dma/dmaengine.c
> > @@ -542,6 +542,79 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
> >  }
> >  EXPORT_SYMBOL_GPL(__dma_request_channel);
> >  
> > +/**
> > + * dma_request_slave_channel() - try to allocate an exclusive channel for device
> > + * @dev:	client device, for which a channel should be allocated
> > + * @direction:	transfer direction
> > + * @name:	optional name of the channel, used, when the direction is not
> > + *		sufficient to uniquely identify the DMA request
> > + */
> > +struct dma_chan *dma_request_slave_channel(struct device *dev,
> > +		enum dma_transfer_direction direction, const char *name)
> > +{
> > +	struct dma_device *device, *_d;
> > +	struct dma_chan *chan = NULL;
> > +	dma_cap_mask_t mask;
> > +	int err;
> > +
> > +	dma_cap_zero(mask);
> > +	dma_cap_set(DMA_SLAVE, mask);
> > +
> > +	/* Find a channel */
> > +	mutex_lock(&dma_list_mutex);
> > +	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
> > +		if (!device->mux || !device->mux->request_chan)
> > +			continue;
> > +
> > +		if (!dma_device_satisfies_mask(device, mask)) {
> > +			pr_info("%s: wrong capabilities\n", __func__);
> > +			continue;
> > +		}
> > +
> > +		/* ensure that all channels are either private or public. */
> > +		if (!dma_has_cap(DMA_PRIVATE, device->cap_mask)) {
> > +			bool public = false;
> > +			list_for_each_entry(chan, &device->channels, device_node) {
> > +				/* some channels are already publicly allocated */
> > +				if (chan->client_count) {
> > +					public = true;
> > +					break;
> > +				}
> > +			}
> > +			if (public)
> > +				continue;
> > +		}
> > +
> > +		chan = device->mux->request_chan(device, dev, direction, name);
> NAK
> 
> 1. We don't want dmacs to have anything to do with filtering and
> allocating. That is the role of dmaengine and we need to add stuff there
> only.
> 2. Even if we start using the optional name argument here we still don't
> know out of the 4 channels the dmac supports which one to allocate.

I don't think we can be generic enough in the dmaengine core to identify 
which channels are suitable for all slave requests. This is why I proposed 
a concept of a DMA channel multiplexer driver, and this is exactly what 
this callback is. I tried to explain this in the patch description above. 
I thought we discussed this earlier in this thread and agreed, that a 
multiplexer API is the way to go with channel allocation. I think, I 
mentioned in one of my mails, that in the beginning I wanted to implement 
multiplexers as a proper (platform) device, but at least one problem with 
this is, that at least on my hardware the multiplexer and the DMAC share 
registers, and an MFD seems an overkill for this, but we can discuss this. 
I don't know, whether there are systems, where multiplexers are really 
separate hardware blocks, if there are, we can think of a way to handle 
those nicely too.

Thanks
Guennadi

> 
> > +		if (chan) {
> > +			dma_cap_set(DMA_PRIVATE, device->cap_mask);
> > +			device->privatecnt++;
> > +			err = dma_chan_get(chan);
> > +			if (!err)
> > +				break;
> > +			if (err == -ENODEV) {
> > +				pr_debug("%s: %s module removed\n", __func__,
> > +					 dma_chan_name(chan));
> > +				list_del_rcu(&device->global_node);
> > +			} else
> > +				pr_debug("%s: failed to get %s: (%d)\n",
> > +					 __func__, dma_chan_name(chan), err);
> > +
> > +			if (--device->privatecnt == 0)
> > +				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
> > +
> > +			chan = NULL;
> > +		}
> > +	}
> > +	mutex_unlock(&dma_list_mutex);
> > +
> > +	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
> > +		 chan ? dma_chan_name(chan) : NULL);
> > +
> > +	return chan;
> > +}
> > +EXPORT_SYMBOL_GPL(dma_request_slave_channel);
> > +
> >  void dma_release_channel(struct dma_chan *chan)
> >  {
> >  	mutex_lock(&dma_list_mutex);
> > diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
> > index ccec62f..19b96d9 100644
> > --- a/include/linux/dmaengine.h
> > +++ b/include/linux/dmaengine.h
> > @@ -482,6 +482,11 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
> >  }
> >  #endif
> >  
> > +struct dma_multiplexer {
> > +	struct dma_chan *(*request_chan)(struct dma_device *, struct device *,
> > +				enum dma_transfer_direction, const char *);
> > +};
> > +
> >  /**
> >   * struct dma_tx_state - filled in to report the status of
> >   * a transfer.
> > @@ -553,6 +558,8 @@ struct dma_device {
> >  	int dev_id;
> >  	struct device *dev;
> >  
> > +	const struct dma_multiplexer *mux;
> > +
> >  	int (*device_alloc_chan_resources)(struct dma_chan *chan);
> >  	void (*device_free_chan_resources)(struct dma_chan *chan);
> >  
> > @@ -994,6 +1001,8 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
> >  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
> >  struct dma_chan *net_dma_find_channel(void);
> >  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
> > +struct dma_chan *dma_request_slave_channel(struct device *dev,
> > +		enum dma_transfer_direction direction, const char *name);
> >  
> >  /* --- Helper iov-locking functions --- */
> >  
> 
> 
> -- 
> ~Vinod
> 

---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/
Vinod Koul Aug. 3, 2012, 10:14 a.m. UTC | #3
On Thu, 2012-08-02 at 11:11 +0200, Guennadi Liakhovetski wrote:
> > > +           chan = device->mux->request_chan(device, dev, direction, name);
> > NAK
> > 
> > 1. We don't want dmacs to have anything to do with filtering and
> > allocating. That is the role of dmaengine and we need to add stuff there
> > only.
> > 2. Even if we start using the optional name argument here we still don't
> > know out of the 4 channels the dmac supports which one to allocate.
> 
> I don't think we can be generic enough in the dmaengine core to identify 
> which channels are suitable for all slave requests. This is why I proposed 
> a concept of a DMA channel multiplexer driver, and this is exactly what 
> this callback is. I tried to explain this in the patch description above. 
> I thought we discussed this earlier in this thread and agreed, that a 
> multiplexer API is the way to go with channel allocation. I think, I 
> mentioned in one of my mails, that in the beginning I wanted to implement 
> multiplexers as a proper (platform) device, but at least one problem with 
> this is, that at least on my hardware the multiplexer and the DMAC share 
> registers, and an MFD seems an overkill for this, but we can discuss this. 
> I don't know, whether there are systems, where multiplexers are really 
> separate hardware blocks, if there are, we can think of a way to handle 
> those nicely too.
I would disagree.

As I have said in the past, we need the mapping information to be available
with dmaengine for slave channels even before any slave allocation
occurs. This way dmaengine knows what to do with a channel.
I am planning to add support in dmaengine for this...

The problem with this solution here is again how the hell will a dmac
know what to do with an alloc request. It doesn't, and any current way it
does is just a hack and nothing else. This also prevents us from
building a generic dmac driver which should only know how to deal with
the dmac only and nothing else.
Guennadi Liakhovetski Aug. 3, 2012, 10:35 a.m. UTC | #4
On Fri, 3 Aug 2012, Vinod Koul wrote:

> On Thu, 2012-08-02 at 11:11 +0200, Guennadi Liakhovetski wrote:
> > > > +           chan = device->mux->request_chan(device, dev, direction, name);
> > > NAK
> > > 
> > > 1. We don't want dmacs to have anything to do with filtering and
> > > allocating. That is the role of dmaengine and we need to add stuff there
> > > only.
> > > 2. Even if we start using the optional name argument here we still don't
> > > know out of the 4 channels the dmac supports which one to allocate.
> > 
> > I don't think we can be generic enough in the dmaengine core to identify 
> > which channels are suitable for all slave requests. This is why I proposed 
> > a concept of a DMA channel multiplexer driver, and this is exactly what 
> > this callback is. I tried to explain this in the patch description above. 
> > I thought we discussed this earlier in this thread and agreed, that a 
> > multiplexer API is the way to go with channel allocation. I think, I 
> > mentioned in one of my mails, that in the beginning I wanted to implement 
> > multiplexers as a proper (platform) device, but at least one problem with 
> > this is, that at least on my hardware the multiplexer and the DMAC share 
> > registers, and an MFD seems an overkill for this, but we can discuss this. 
> > I don't know, whether there are systems, where multiplexers are really 
> > separate hardware blocks, if there are, we can think of a way to handle 
> > those nicely too.
> I would disagree.
> 
> As I have said in the past, we need the mapping information to be available
> with dmaengine for slave channels even before any slave allocation
> occurs. This way dmaengine knows what to do with a channel.

I think, this depends on our interpretation of what struct dma_chan 
represents and who has control over it. If we think, that this is a 
purely software object, which at allocation time has absolutely no 
hardware association attached to it, then yes, only the dmaengine core 
should manage them at least at that time. If, however, we define, that 
struct dma_chan _can_ get hardware association already at allocation time 
in the context of DMAC's .alloc_chan_resources() function, then it's 
the DMAC that has to manage them.

And, I think, there is one important aspect to this, that tells us, that 
those channels cannot be absolutely hardware-agnostic: they are attached 
to DMAC instances. So, they intrinsically have some hardware association.

So, you have to take into account hardware limitations already at 
allocation time. You can do this by either asking the DMAC, whether a 
certain allocation can be performed (what we currently do with filters, 
and what this patch proposed to do with the multiplexer callback), or you 
can require some static information from DMACs, like a mapping that you 
propose.

Currently both approaches seem possible, however, a static mapping seems 
less flexible to me, I don't know whether it can describe all available 
hardware configurations. We shouldn't forget, that those mappings can be 
many-to-many, as in the sh-dma example. On sh-dma it would suffice and be 
optimal enough to have a pointer to a slave list in struct dma_chan. Would 
this also suit everyone else? Is this what you're thinking about?

Thanks
Guennadi

> I am planning to add support in dmaengine for this...
> 
> The problem with this solution here is again how the hell will a dmac
> know what to do with an alloc request. It doesn't, and any current way it
> does is just a hack and nothing else. This also prevents us from
> building a generic dmac driver which should only know how to deal with
> the dmac only and nothing else.
> 
> -- 
> ~Vinod
> 

---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/
diff mbox

Patch

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2397f6f..4fb927c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -542,6 +542,79 @@  struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+/**
+ * dma_request_slave_channel() - try to allocate an exclusive channel for device
+ * @dev:	client device, for which a channel should be allocated
+ * @direction:	transfer direction
+ * @name:	optional name of the channel, used, when the direction is not
+ *		sufficient to uniquely identify the DMA request
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev,
+		enum dma_transfer_direction direction, const char *name)
+{
+	struct dma_device *device, *_d;
+	struct dma_chan *chan = NULL;
+	dma_cap_mask_t mask;
+	int err;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Find a channel */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		if (!device->mux || !device->mux->request_chan)
+			continue;
+
+		if (!dma_device_satisfies_mask(device, mask)) {
+			pr_info("%s: wrong capabilities\n", __func__);
+			continue;
+		}
+
+		/* ensure that all channels are either private or public. */
+		if (!dma_has_cap(DMA_PRIVATE, device->cap_mask)) {
+			bool public = false;
+			list_for_each_entry(chan, &device->channels, device_node) {
+				/* some channels are already publicly allocated */
+				if (chan->client_count) {
+					public = true;
+					break;
+				}
+			}
+			if (public)
+				continue;
+		}
+
+		chan = device->mux->request_chan(device, dev, direction, name);
+		if (chan) {
+			dma_cap_set(DMA_PRIVATE, device->cap_mask);
+			device->privatecnt++;
+			err = dma_chan_get(chan);
+			if (!err)
+				break;
+			if (err == -ENODEV) {
+				pr_debug("%s: %s module removed\n", __func__,
+					 dma_chan_name(chan));
+				list_del_rcu(&device->global_node);
+			} else
+				pr_debug("%s: failed to get %s: (%d)\n",
+					 __func__, dma_chan_name(chan), err);
+
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+			chan = NULL;
+		}
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+		 chan ? dma_chan_name(chan) : NULL);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ccec62f..19b96d9 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -482,6 +482,11 @@  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
 }
 #endif
 
+struct dma_multiplexer {
+	struct dma_chan *(*request_chan)(struct dma_device *, struct device *,
+				enum dma_transfer_direction, const char *);
+};
+
 /**
  * struct dma_tx_state - filled in to report the status of
  * a transfer.
@@ -553,6 +558,8 @@  struct dma_device {
 	int dev_id;
 	struct device *dev;
 
+	const struct dma_multiplexer *mux;
+
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
@@ -994,6 +1001,8 @@  void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+struct dma_chan *dma_request_slave_channel(struct device *dev,
+		enum dma_transfer_direction direction, const char *name);
 
 /* --- Helper iov-locking functions --- */