diff mbox

[v1] dma: imx-sdma: add support for sdma memory copy

Message ID 1397728870-22086-1-git-send-email-b38343@freescale.com (mailing list archive)
State Rejected
Headers show

Commit Message

Robin Gong April 17, 2014, 10:01 a.m. UTC
add "device_prep_dma_memcpy" and "device_prep_dma_sg" for memory copy by sdma.

Signed-off-by: Robin Gong <b38343@freescale.com>
---
 drivers/dma/imx-sdma.c |  188 +++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 164 insertions(+), 24 deletions(-)

Comments

Andy Shevchenko April 17, 2014, 10:24 a.m. UTC | #1
T24gVGh1LCAyMDE0LTA0LTE3IGF0IDE4OjAxICswODAwLCBSb2JpbiBHb25nIHdyb3RlOg0KPiBh
ZGQgImRldmljZV9wcmVwX2RtYV9tZW1jcHkiIGFuZCAiZGV2aWNlX3ByZXBfZG1hX3NnIiBmb3Ig
bWVtb3J5IGNvcHkgYnkgc2RtYS4NCj4gDQo+IFNpZ25lZC1vZmYtYnk6IFJvYmluIEdvbmcgPGIz
ODM0M0BmcmVlc2NhbGUuY29tPg0KPiAtLS0NCj4gIGRyaXZlcnMvZG1hL2lteC1zZG1hLmMgfCAg
MTg4ICsrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrLS0tLS0tDQo+ICAx
IGZpbGVzIGNoYW5nZWQsIDE2NCBpbnNlcnRpb25zKCspLCAyNCBkZWxldGlvbnMoLSkNCj4gDQo+
IGRpZmYgLS1naXQgYS9kcml2ZXJzL2RtYS9pbXgtc2RtYS5jIGIvZHJpdmVycy9kbWEvaW14LXNk
bWEuYw0KPiBpbmRleCA0ZTc5MTgzLi4yYTk3ZTAzIDEwMDY0NA0KPiAtLS0gYS9kcml2ZXJzL2Rt
YS9pbXgtc2RtYS5jDQo+ICsrKyBiL2RyaXZlcnMvZG1hL2lteC1zZG1hLmMNCj4gQEAgLTIyOSw2
ICsyMjksNyBAQCBzdHJ1Y3Qgc2RtYV9jb250ZXh0X2RhdGEgew0KPiAgfSBfX2F0dHJpYnV0ZV9f
ICgocGFja2VkKSk7DQo+ICANCj4gICNkZWZpbmUgTlVNX0JEIChpbnQpKFBBR0VfU0laRSAvIHNp
emVvZihzdHJ1Y3Qgc2RtYV9idWZmZXJfZGVzY3JpcHRvcikpDQo+ICsjZGVmaW5lIFNETUFfQkRf
TUFYX0NOVAkoMHhmZmZjKSAvKiBhbGlnbiB3aXRoIDQgYnl0ZXMgKi8NCj4gIA0KPiAgc3RydWN0
IHNkbWFfZW5naW5lOw0KPiAgDQo+IEBAIC0yNjAsNiArMjYxLDcgQEAgc3RydWN0IHNkbWFfY2hh
bm5lbCB7DQo+ICAJdW5zaWduZWQgaW50CQkJcGNfZnJvbV9kZXZpY2UsIHBjX3RvX2RldmljZTsN
Cj4gIAl1bnNpZ25lZCBsb25nCQkJZmxhZ3M7DQo+ICAJZG1hX2FkZHJfdAkJCXBlcl9hZGRyZXNz
Ow0KPiArCXVuc2lnbmVkIGludCAgICAgICAgICAgICAgICAgICAgcGNfdG9fcGM7DQo+ICAJdW5z
aWduZWQgbG9uZwkJCWV2ZW50X21hc2tbMl07DQo+ICAJdW5zaWduZWQgbG9uZwkJCXdhdGVybWFy
a19sZXZlbDsNCj4gIAl1MzIJCQkJc2hwX2FkZHIsIHBlcl9hZGRyOw0KPiBAQCAtNjk0LDYgKzY5
Niw3IEBAIHN0YXRpYyB2b2lkIHNkbWFfZ2V0X3BjKHN0cnVjdCBzZG1hX2NoYW5uZWwgKnNkbWFj
LA0KPiAgDQo+ICAJc2RtYWMtPnBjX2Zyb21fZGV2aWNlID0gMDsNCj4gIAlzZG1hYy0+cGNfdG9f
ZGV2aWNlID0gMDsNCj4gKwlzZG1hYy0+cGNfdG9fcGMgPSAwOw0KPiAgDQo+ICAJc3dpdGNoIChw
ZXJpcGhlcmFsX3R5cGUpIHsNCj4gIAljYXNlIElNWF9ETUFUWVBFX01FTU9SWToNCj4gQEAgLTc2
Myw2ICs3NjYsNyBAQCBzdGF0aWMgdm9pZCBzZG1hX2dldF9wYyhzdHJ1Y3Qgc2RtYV9jaGFubmVs
ICpzZG1hYywNCj4gIA0KPiAgCXNkbWFjLT5wY19mcm9tX2RldmljZSA9IHBlcl8yX2VtaTsNCj4g
IAlzZG1hYy0+cGNfdG9fZGV2aWNlID0gZW1pXzJfcGVyOw0KPiArCXNkbWFjLT5wY190b19wYyA9
IGVtaV8yX2VtaTsNCj4gIH0NCj4gIA0KPiAgc3RhdGljIGludCBzZG1hX2xvYWRfY29udGV4dChz
dHJ1Y3Qgc2RtYV9jaGFubmVsICpzZG1hYykNCj4gQEAgLTc3NSwxMSArNzc5LDEyIEBAIHN0YXRp
YyBpbnQgc2RtYV9sb2FkX2NvbnRleHQoc3RydWN0IHNkbWFfY2hhbm5lbCAqc2RtYWMpDQo+ICAJ
aW50IHJldDsNCj4gIAl1bnNpZ25lZCBsb25nIGZsYWdzOw0KPiAgDQo+IC0JaWYgKHNkbWFjLT5k
aXJlY3Rpb24gPT0gRE1BX0RFVl9UT19NRU0pIHsNCj4gKwlpZiAoc2RtYWMtPmRpcmVjdGlvbiA9
PSBETUFfREVWX1RPX01FTSkNCj4gIAkJbG9hZF9hZGRyZXNzID0gc2RtYWMtPnBjX2Zyb21fZGV2
aWNlOw0KPiAtCX0gZWxzZSB7DQo+ICsJZWxzZSBpZiAoc2RtYWMtPmRpcmVjdGlvbiA9PSBETUFf
TUVNX1RPX01FTSkNCj4gKwkJbG9hZF9hZGRyZXNzID0gc2RtYWMtPnBjX3RvX3BjOw0KPiArCWVs
c2UNCj4gIAkJbG9hZF9hZGRyZXNzID0gc2RtYWMtPnBjX3RvX2RldmljZTsNCj4gLQl9DQo+ICAN
Cj4gIAlpZiAobG9hZF9hZGRyZXNzIDwgMCkNCj4gIAkJcmV0dXJuIGxvYWRfYWRkcmVzczsNCj4g
QEAgLTEwMTAsMTYgKzEwMTUsMTE4IEBAIHN0YXRpYyB2b2lkIHNkbWFfZnJlZV9jaGFuX3Jlc291
cmNlcyhzdHJ1Y3QgZG1hX2NoYW4gKmNoYW4pDQo+ICAJY2xrX2Rpc2FibGUoc2RtYS0+Y2xrX2Fo
Yik7DQo+ICB9DQo+ICANCj4gLXN0YXRpYyBzdHJ1Y3QgZG1hX2FzeW5jX3R4X2Rlc2NyaXB0b3Ig
KnNkbWFfcHJlcF9zbGF2ZV9zZygNCj4gLQkJc3RydWN0IGRtYV9jaGFuICpjaGFuLCBzdHJ1Y3Qg
c2NhdHRlcmxpc3QgKnNnbCwNCj4gLQkJdW5zaWduZWQgaW50IHNnX2xlbiwgZW51bSBkbWFfdHJh
bnNmZXJfZGlyZWN0aW9uIGRpcmVjdGlvbiwNCj4gLQkJdW5zaWduZWQgbG9uZyBmbGFncywgdm9p
ZCAqY29udGV4dCkNCj4gK3N0YXRpYyBzdHJ1Y3QgZG1hX2FzeW5jX3R4X2Rlc2NyaXB0b3IgKnNk
bWFfcHJlcF9tZW1jcHkoDQo+ICsJCXN0cnVjdCBkbWFfY2hhbiAqY2hhbiwgZG1hX2FkZHJfdCBk
bWFfZHN0LA0KPiArCQlkbWFfYWRkcl90IGRtYV9zcmMsIHNpemVfdCBsZW4sIHVuc2lnbmVkIGxv
bmcgZmxhZ3MpDQo+ICt7DQo+ICsJc3RydWN0IHNkbWFfY2hhbm5lbCAqc2RtYWMgPSB0b19zZG1h
X2NoYW4oY2hhbik7DQo+ICsJc3RydWN0IHNkbWFfZW5naW5lICpzZG1hID0gc2RtYWMtPnNkbWE7
DQo+ICsJaW50IGNoYW5uZWwgPSBzZG1hYy0+Y2hhbm5lbDsNCj4gKwlzaXplX3QgY291bnQ7DQo+
ICsJaW50IGkgPSAwLCBwYXJhbSwgcmV0Ow0KPiArCXN0cnVjdCBzZG1hX2J1ZmZlcl9kZXNjcmlw
dG9yICpiZDsNCj4gKw0KPiArCWlmICghY2hhbiB8fCAhbGVuIHx8IHNkbWFjLT5zdGF0dXMgPT0g
RE1BX0lOX1BST0dSRVNTKQ0KPiArCQlyZXR1cm4gTlVMTDsNCj4gKw0KPiArCWlmIChsZW4gPj0g
TlVNX0JEICogU0RNQV9CRF9NQVhfQ05UKSB7DQo+ICsJCWRldl9lcnIoc2RtYS0+ZGV2LCAiY2hh
bm5lbCVkOiBtYXhpbXVtIGJ5dGVzIGV4Y2VlZGVkOiVkID4gJWRcbiINCj4gKwkJCSwgY2hhbm5l
bCwgbGVuLCBOVU1fQkQgKiBTRE1BX0JEX01BWF9DTlQpOw0KPiArCQlnb3RvIGVycl9vdXQ7DQo+
ICsJfQ0KPiArDQo+ICsJc2RtYWMtPnN0YXR1cyA9IERNQV9JTl9QUk9HUkVTUzsNCj4gKw0KPiAr
CXNkbWFjLT5idWZfdGFpbCA9IDA7DQo+ICsNCj4gKwlkZXZfZGJnKHNkbWEtPmRldiwgIm1lbWNw
eTogJXgtPiV4LCBsZW49JWQsIGNoYW5uZWw9JWQuXG4iLA0KDQolcGFkIGZvciBkbWFfYWRkcl90
IHZhcmlhYmxlcy4NCg0KPiArCQlkbWFfc3JjLCBkbWFfZHN0LCBsZW4sIGNoYW5uZWwpOw0KPiAr
DQo+ICsJc2RtYWMtPmRpcmVjdGlvbiA9IERNQV9NRU1fVE9fTUVNOw0KPiArDQo+ICsJcmV0ID0g
c2RtYV9sb2FkX2NvbnRleHQoc2RtYWMpOw0KPiArCWlmIChyZXQpDQo+ICsJCWdvdG8gZXJyX291
dDsNCj4gKw0KPiArCXNkbWFjLT5jaG5fY291bnQgPSAwOw0KPiArDQo+ICsJZG8gew0KPiArCQlj
b3VudCA9IG1pbl90KHNpemVfdCwgbGVuLCBTRE1BX0JEX01BWF9DTlQpOw0KPiArCQliZCA9ICZz
ZG1hYy0+YmRbaV07DQo+ICsJCWJkLT5idWZmZXJfYWRkciA9IGRtYV9zcmM7DQo+ICsJCWJkLT5l
eHRfYnVmZmVyX2FkZHIgPSBkbWFfZHN0Ow0KPiArCQliZC0+bW9kZS5jb3VudCA9IGNvdW50Ow0K
PiArDQo+ICsJCWlmIChzZG1hYy0+d29yZF9zaXplID4gRE1BX1NMQVZFX0JVU1dJRFRIXzRfQllU
RVMpIHsNCj4gKwkJCXJldCA9ICAtRUlOVkFMOw0KPiArCQkJZ290byBlcnJfb3V0Ow0KPiArCQl9
DQo+ICsNCj4gKwkJc3dpdGNoIChzZG1hYy0+d29yZF9zaXplKSB7DQo+ICsJCWNhc2UgRE1BX1NM
QVZFX0JVU1dJRFRIXzRfQllURVM6DQo+ICsJCQliZC0+bW9kZS5jb21tYW5kID0gMDsNCj4gKwkJ
CWlmIChjb3VudCAmIDMgfHwgZG1hX2RzdCAmIDMgfHwgZG1hX3NyYyAmIDMpDQoNCkNvdWxkIGl0
IGJlIGxpa2UNCmlmICgoY291bnQgfCBkbWFfZHN0IHwgZG1hX3NyYykgJiAzKQ0KPw0KDQo+ICsJ
CQkJcmV0dXJuIE5VTEw7DQo+ICsJCQlicmVhazsNCj4gKwkJY2FzZSBETUFfU0xBVkVfQlVTV0lE
VEhfMl9CWVRFUzoNCj4gKwkJCWJkLT5tb2RlLmNvbW1hbmQgPSAyOw0KPiArCQkJaWYgKGNvdW50
ICYgMSB8fCBkbWFfZHN0ICYgMSB8fCBkbWFfc3JjICYgMSkNCg0KU2ltaWxhciBoZXJlLg0KDQo+
ICsJCQkJcmV0dXJuIE5VTEw7DQo+ICsJCQlicmVhazsNCj4gKwkJY2FzZSBETUFfU0xBVkVfQlVT
V0lEVEhfMV9CWVRFOg0KPiArCQkJYmQtPm1vZGUuY29tbWFuZCA9IDE7DQo+ICsJCQlicmVhazsN
Cj4gKwkJZGVmYXVsdDoNCj4gKwkJCXJldHVybiBOVUxMOw0KPiArCQl9DQoNCk1vcmVvdmVyLCBj
b3VsZCB5b3UgY29uc2lkZXIgdG8gbWFrZSBhYm92ZSBwaWVjZSBvZiBjb2RlIChzd2l0Y2gpIGEN
CnNlcGFyYXRlIGZ1bmN0aW9uIGFuZCByZS11c2UgaXQgaW4gc2RtYV9wcmVwX3NsYXZlX3NnKCk/
DQoNCj4gKw0KPiArCQlkbWFfc3JjICs9IGNvdW50Ow0KPiArCQlkbWFfZHN0ICs9IGNvdW50Ow0K
PiArCQlsZW4gLT0gY291bnQ7DQo+ICsJCWkrKzsNCj4gKw0KPiArCQlwYXJhbSA9IEJEX0RPTkUg
fCBCRF9FWFREIHwgQkRfQ09OVDsNCj4gKwkJLyogbGFzdCBiZCAqLw0KPiArCQlpZiAoIWxlbikg
ew0KPiArCQkJcGFyYW0gfD0gQkRfSU5UUjsNCj4gKwkJCXBhcmFtIHw9IEJEX0xBU1Q7DQo+ICsJ
CQlwYXJhbSAmPSB+QkRfQ09OVDsNCj4gKwkJfQ0KPiArDQo+ICsJCWRldl9kYmcoc2RtYS0+ZGV2
LCAiZW50cnkgJWQ6IGNvdW50OiAlZCBkbWE6IDB4JTA4eCAlcyVzXG4iLA0KPiArCQkJCWksIGNv
dW50LCBiZC0+YnVmZmVyX2FkZHIsDQoNCg0KPiArCQkJCXBhcmFtICYgQkRfV1JBUCA/ICJ3cmFw
IiA6ICIiLA0KPiArCQkJCXBhcmFtICYgQkRfSU5UUiA/ICIgaW50ciIgOiAiIik7DQo+ICsNCj4g
KwkJYmQtPm1vZGUuc3RhdHVzID0gcGFyYW07DQo+ICsJCXNkbWFjLT5jaG5fY291bnQgKz0gY291
bnQ7DQo+ICsJfSB3aGlsZSAobGVuKTsNCj4gKw0KPiArCXNkbWFjLT5udW1fYmQgPSBpOw0KPiAr
CXNkbWEtPmNoYW5uZWxfY29udHJvbFtjaGFubmVsXS5jdXJyZW50X2JkX3B0ciA9IHNkbWFjLT5i
ZF9waHlzOw0KPiArDQo+ICsJcmV0dXJuICZzZG1hYy0+ZGVzYzsNCj4gK2Vycl9vdXQ6DQo+ICsJ
c2RtYWMtPnN0YXR1cyA9IERNQV9FUlJPUjsNCj4gKwlyZXR1cm4gTlVMTDsNCj4gK30NCj4gKw0K
PiArLyoNCj4gKyAqIFBsZWFzZSBlbnN1cmUgZHN0X25lbnRzIG5vIHNtYWxsZXIgdGhhbiBzcmNf
bmVudHMgLCBhbHNvIGV2ZXJ5IHNnX2xlbiBvZg0KPiArICogZHN0X3NnIG5vZGUgbm8gc21hbGxl
ciB0aGFuIHNyY19zZy4gVG8gc2ltcGx5IHRoaW5ncywgcGxlYXNlIHVzZSB0aGUgc2FtZQ0KPiAr
ICogc2l6ZSBvZiBkc3Rfc2cgYXMgc3JjX3NnLg0KPiArICovDQo+ICtzdGF0aWMgc3RydWN0IGRt
YV9hc3luY190eF9kZXNjcmlwdG9yICpzZG1hX3ByZXBfc2coDQo+ICsJCXN0cnVjdCBkbWFfY2hh
biAqY2hhbiwNCj4gKwkJc3RydWN0IHNjYXR0ZXJsaXN0ICpkc3Rfc2csIHVuc2lnbmVkIGludCBk
c3RfbmVudHMsDQo+ICsJCXN0cnVjdCBzY2F0dGVybGlzdCAqc3JjX3NnLCB1bnNpZ25lZCBpbnQg
c3JjX25lbnRzLA0KPiArCQllbnVtIGRtYV90cmFuc2Zlcl9kaXJlY3Rpb24gZGlyZWN0aW9uKQ0K
PiAgew0KPiAgCXN0cnVjdCBzZG1hX2NoYW5uZWwgKnNkbWFjID0gdG9fc2RtYV9jaGFuKGNoYW4p
Ow0KPiAgCXN0cnVjdCBzZG1hX2VuZ2luZSAqc2RtYSA9IHNkbWFjLT5zZG1hOw0KPiAgCWludCBy
ZXQsIGksIGNvdW50Ow0KPiAgCWludCBjaGFubmVsID0gc2RtYWMtPmNoYW5uZWw7DQo+IC0Jc3Ry
dWN0IHNjYXR0ZXJsaXN0ICpzZzsNCj4gKwlzdHJ1Y3Qgc2NhdHRlcmxpc3QgKnNnX3NyYyA9IHNy
Y19zZywgKnNnX2RzdCA9IGRzdF9zZzsNCj4gIA0KPiAgCWlmIChzZG1hYy0+c3RhdHVzID09IERN
QV9JTl9QUk9HUkVTUykNCj4gIAkJcmV0dXJuIE5VTEw7DQo+IEBAIC0xMDMwLDMyICsxMTM3LDM4
IEBAIHN0YXRpYyBzdHJ1Y3QgZG1hX2FzeW5jX3R4X2Rlc2NyaXB0b3IgKnNkbWFfcHJlcF9zbGF2
ZV9zZygNCj4gIAlzZG1hYy0+YnVmX3RhaWwgPSAwOw0KPiAgDQo+ICAJZGV2X2RiZyhzZG1hLT5k
ZXYsICJzZXR0aW5nIHVwICVkIGVudHJpZXMgZm9yIGNoYW5uZWwgJWQuXG4iLA0KPiAtCQkJc2df
bGVuLCBjaGFubmVsKTsNCj4gKwkJCXNyY19uZW50cywgY2hhbm5lbCk7DQo+ICANCj4gIAlzZG1h
Yy0+ZGlyZWN0aW9uID0gZGlyZWN0aW9uOw0KPiArDQo+ICAJcmV0ID0gc2RtYV9sb2FkX2NvbnRl
eHQoc2RtYWMpOw0KPiAgCWlmIChyZXQpDQo+ICAJCWdvdG8gZXJyX291dDsNCj4gIA0KPiAtCWlm
IChzZ19sZW4gPiBOVU1fQkQpIHsNCj4gKwlpZiAoc3JjX25lbnRzID4gTlVNX0JEKSB7DQo+ICAJ
CWRldl9lcnIoc2RtYS0+ZGV2LCAiU0RNQSBjaGFubmVsICVkOiBtYXhpbXVtIG51bWJlciBvZiBz
ZyBleGNlZWRlZDogJWQgPiAlZFxuIiwNCj4gLQkJCQljaGFubmVsLCBzZ19sZW4sIE5VTV9CRCk7
DQoNCiV1IGZvciBzZ19sZW4uDQpJIGd1ZXNzIHRoZSBzYW1lIGZvciBOVU1fQkQuDQoNCj4gKwkJ
CQljaGFubmVsLCBzcmNfbmVudHMsIE5VTV9CRCk7DQo+ICAJCXJldCA9IC1FSU5WQUw7DQo+ICAJ
CWdvdG8gZXJyX291dDsNCj4gIAl9DQo+ICANCj4gIAlzZG1hYy0+Y2huX2NvdW50ID0gMDsNCj4g
LQlmb3JfZWFjaF9zZyhzZ2wsIHNnLCBzZ19sZW4sIGkpIHsNCj4gKwlmb3JfZWFjaF9zZyhzcmNf
c2csIHNnX3NyYywgc3JjX25lbnRzLCBpKSB7DQo+ICAJCXN0cnVjdCBzZG1hX2J1ZmZlcl9kZXNj
cmlwdG9yICpiZCA9ICZzZG1hYy0+YmRbaV07DQo+ICAJCWludCBwYXJhbTsNCj4gIA0KPiAtCQli
ZC0+YnVmZmVyX2FkZHIgPSBzZy0+ZG1hX2FkZHJlc3M7DQo+ICsJCWJkLT5idWZmZXJfYWRkciA9
IHNnX3NyYy0+ZG1hX2FkZHJlc3M7DQo+ICANCj4gLQkJY291bnQgPSBzZ19kbWFfbGVuKHNnKTsN
Cj4gKwkJaWYgKGRpcmVjdGlvbiA9PSBETUFfTUVNX1RPX01FTSkgew0KPiArCQkJQlVHX09OKCFz
Z19kc3QpOw0KPiArCQkJYmQtPmV4dF9idWZmZXJfYWRkciA9IHNnX2RzdC0+ZG1hX2FkZHJlc3M7
DQo+ICsJCX0NCj4gKw0KPiArCQljb3VudCA9IHNnX2RtYV9sZW4oc2dfc3JjKTsNCj4gIA0KPiAt
CQlpZiAoY291bnQgPiAweGZmZmYpIHsNCj4gKwkJaWYgKGNvdW50ID4gU0RNQV9CRF9NQVhfQ05U
KSB7DQo+ICAJCQlkZXZfZXJyKHNkbWEtPmRldiwgIlNETUEgY2hhbm5lbCAlZDogbWF4aW11bSBi
eXRlcyBmb3Igc2cgZW50cnkgZXhjZWVkZWQ6ICVkID4gJWRcbiIsDQo+IC0JCQkJCWNoYW5uZWws
IGNvdW50LCAweGZmZmYpOw0KPiArCQkJCQljaGFubmVsLCBjb3VudCwgU0RNQV9CRF9NQVhfQ05U
KTsNCj4gIAkJCXJldCA9IC1FSU5WQUw7DQo+ICAJCQlnb3RvIGVycl9vdXQ7DQo+ICAJCX0NCj4g
QEAgLTEwNzEsMTIgKzExODQsMTQgQEAgc3RhdGljIHN0cnVjdCBkbWFfYXN5bmNfdHhfZGVzY3Jp
cHRvciAqc2RtYV9wcmVwX3NsYXZlX3NnKA0KPiAgCQlzd2l0Y2ggKHNkbWFjLT53b3JkX3NpemUp
IHsNCj4gIAkJY2FzZSBETUFfU0xBVkVfQlVTV0lEVEhfNF9CWVRFUzoNCj4gIAkJCWJkLT5tb2Rl
LmNvbW1hbmQgPSAwOw0KPiAtCQkJaWYgKGNvdW50ICYgMyB8fCBzZy0+ZG1hX2FkZHJlc3MgJiAz
KQ0KPiArCQkJaWYgKGNvdW50ICYgMyB8fCBzZ19zcmMtPmRtYV9hZGRyZXNzICYgMyB8fA0KPiAr
CQkJCShzZ19kc3QgJiYgKHNnX2RzdC0+ZG1hX2FkZHJlc3MgJiAzKSkpDQo+ICAJCQkJcmV0dXJu
IE5VTEw7DQo+ICAJCQlicmVhazsNCj4gIAkJY2FzZSBETUFfU0xBVkVfQlVTV0lEVEhfMl9CWVRF
UzoNCj4gIAkJCWJkLT5tb2RlLmNvbW1hbmQgPSAyOw0KPiAtCQkJaWYgKGNvdW50ICYgMSB8fCBz
Zy0+ZG1hX2FkZHJlc3MgJiAxKQ0KPiArCQkJaWYgKGNvdW50ICYgMSB8fCBzZ19zcmMtPmRtYV9h
ZGRyZXNzICYgMSB8fA0KPiArCQkJCShzZ19kc3QgJiYgKHNnX2RzdC0+ZG1hX2FkZHJlc3MgJiAx
KSkpDQo+ICAJCQkJcmV0dXJuIE5VTEw7DQo+ICAJCQlicmVhazsNCj4gIAkJY2FzZSBETUFfU0xB
VkVfQlVTV0lEVEhfMV9CWVRFOg0KPiBAQCAtMTA4OCwyMSArMTIwMywyMyBAQCBzdGF0aWMgc3Ry
dWN0IGRtYV9hc3luY190eF9kZXNjcmlwdG9yICpzZG1hX3ByZXBfc2xhdmVfc2coDQo+ICANCj4g
IAkJcGFyYW0gPSBCRF9ET05FIHwgQkRfRVhURCB8IEJEX0NPTlQ7DQo+ICANCj4gLQkJaWYgKGkg
KyAxID09IHNnX2xlbikgew0KPiArCQlpZiAoaSArIDEgPT0gc3JjX25lbnRzKSB7DQo+ICAJCQlw
YXJhbSB8PSBCRF9JTlRSOw0KPiAgCQkJcGFyYW0gfD0gQkRfTEFTVDsNCj4gIAkJCXBhcmFtICY9
IH5CRF9DT05UOw0KPiAgCQl9DQo+ICANCj4gLQkJZGV2X2RiZyhzZG1hLT5kZXYsICJlbnRyeSAl
ZDogY291bnQ6ICVkIGRtYTogJSNsbHggJXMlc1xuIiwNCj4gLQkJCQlpLCBjb3VudCwgKHU2NClz
Zy0+ZG1hX2FkZHJlc3MsDQo+ICsJCWRldl9kYmcoc2RtYS0+ZGV2LCAiZW50cnkgJWQ6IGNvdW50
OiAlZCBkbWE6IDB4JTA4eCAlcyVzXG4iLA0KPiArCQkJCWksIGNvdW50LCBzZ19zcmMtPmRtYV9h
ZGRyZXNzLA0KDQolcGFkIGZvciBkbWFfYWRkcl90Lg0KDQo+ICAJCQkJcGFyYW0gJiBCRF9XUkFQ
ID8gIndyYXAiIDogIiIsDQo+ICAJCQkJcGFyYW0gJiBCRF9JTlRSID8gIiBpbnRyIiA6ICIiKTsN
Cj4gIA0KPiAgCQliZC0+bW9kZS5zdGF0dXMgPSBwYXJhbTsNCj4gKwkJaWYgKGRpcmVjdGlvbiA9
PSBETUFfTUVNX1RPX01FTSkNCj4gKwkJCXNnX2RzdCA9IHNnX25leHQoc2dfZHN0KTsNCj4gIAl9
DQo+ICANCj4gLQlzZG1hYy0+bnVtX2JkID0gc2dfbGVuOw0KPiArCXNkbWFjLT5udW1fYmQgPSBz
cmNfbmVudHM7DQo+ICAJc2RtYS0+Y2hhbm5lbF9jb250cm9sW2NoYW5uZWxdLmN1cnJlbnRfYmRf
cHRyID0gc2RtYWMtPmJkX3BoeXM7DQo+ICANCj4gIAlyZXR1cm4gJnNkbWFjLT5kZXNjOw0KPiBA
QCAtMTExMSw2ICsxMjI4LDI0IEBAIGVycl9vdXQ6DQo+ICAJcmV0dXJuIE5VTEw7DQo+ICB9DQo+
ICANCj4gK3N0YXRpYyBzdHJ1Y3QgZG1hX2FzeW5jX3R4X2Rlc2NyaXB0b3IgKnNkbWFfcHJlcF9t
ZW1jcHlfc2coDQo+ICsJCXN0cnVjdCBkbWFfY2hhbiAqY2hhbiwNCj4gKwkJc3RydWN0IHNjYXR0
ZXJsaXN0ICpkc3Rfc2csIHVuc2lnbmVkIGludCBkc3RfbmVudHMsDQo+ICsJCXN0cnVjdCBzY2F0
dGVybGlzdCAqc3JjX3NnLCB1bnNpZ25lZCBpbnQgc3JjX25lbnRzLA0KPiArCQl1bnNpZ25lZCBs
b25nIGZsYWdzKQ0KPiArew0KPiArCXJldHVybiBzZG1hX3ByZXBfc2coY2hhbiwgZHN0X3NnLCBk
c3RfbmVudHMsIHNyY19zZywgc3JjX25lbnRzLA0KPiArCQkJICAgRE1BX01FTV9UT19NRU0pOw0K
PiArfQ0KPiArDQo+ICtzdGF0aWMgc3RydWN0IGRtYV9hc3luY190eF9kZXNjcmlwdG9yICpzZG1h
X3ByZXBfc2xhdmVfc2coDQo+ICsJCXN0cnVjdCBkbWFfY2hhbiAqY2hhbiwgc3RydWN0IHNjYXR0
ZXJsaXN0ICpzZ2wsDQo+ICsJCXVuc2lnbmVkIGludCBzZ19sZW4sIGVudW0gZG1hX3RyYW5zZmVy
X2RpcmVjdGlvbiBkaXJlY3Rpb24sDQo+ICsJCXVuc2lnbmVkIGxvbmcgZmxhZ3MsIHZvaWQgKmNv
bnRleHQpDQo+ICt7DQo+ICsJcmV0dXJuIHNkbWFfcHJlcF9zZyhjaGFuLCBOVUxMLCAwLCBzZ2ws
IHNnX2xlbiwgZGlyZWN0aW9uKTsNCj4gK30NCj4gKw0KPiAgc3RhdGljIHN0cnVjdCBkbWFfYXN5
bmNfdHhfZGVzY3JpcHRvciAqc2RtYV9wcmVwX2RtYV9jeWNsaWMoDQo+ICAJCXN0cnVjdCBkbWFf
Y2hhbiAqY2hhbiwgZG1hX2FkZHJfdCBkbWFfYWRkciwgc2l6ZV90IGJ1Zl9sZW4sDQo+ICAJCXNp
emVfdCBwZXJpb2RfbGVuLCBlbnVtIGRtYV90cmFuc2Zlcl9kaXJlY3Rpb24gZGlyZWN0aW9uLA0K
PiBAQCAtMTE0Myw5ICsxMjc4LDkgQEAgc3RhdGljIHN0cnVjdCBkbWFfYXN5bmNfdHhfZGVzY3Jp
cHRvciAqc2RtYV9wcmVwX2RtYV9jeWNsaWMoDQo+ICAJCWdvdG8gZXJyX291dDsNCj4gIAl9DQo+
ICANCj4gLQlpZiAocGVyaW9kX2xlbiA+IDB4ZmZmZikgew0KPiArCWlmIChwZXJpb2RfbGVuID4g
U0RNQV9CRF9NQVhfQ05UKSB7DQo+ICAJCWRldl9lcnIoc2RtYS0+ZGV2LCAiU0RNQSBjaGFubmVs
ICVkOiBtYXhpbXVtIHBlcmlvZCBzaXplIGV4Y2VlZGVkOiAlZCA+ICVkXG4iLA0KPiAtCQkJCWNo
YW5uZWwsIHBlcmlvZF9sZW4sIDB4ZmZmZik7DQo+ICsJCQkJY2hhbm5lbCwgcGVyaW9kX2xlbiwg
U0RNQV9CRF9NQVhfQ05UKTsNCg0KJXp1IGZvciBwZXJpb2RfbGVuLiBDaGVjayBjYXJlZnVsbHkg
cHJpbnQgc3BlY2lmaWVycyBvdmVyIHlvdXIgY29kZSwNCnBsZWFzZS4NCg0KPiAgCQlnb3RvIGVy
cl9vdXQ7DQo+ICAJfQ0KPiAgDQo+IEBAIC0xMjA2LDYgKzEzNDEsOCBAQCBzdGF0aWMgaW50IHNk
bWFfY29udHJvbChzdHJ1Y3QgZG1hX2NoYW4gKmNoYW4sIGVudW0gZG1hX2N0cmxfY21kIGNtZCwN
Cj4gIAkJCXNkbWFjLT53YXRlcm1hcmtfbGV2ZWwgPSBkbWFlbmdpbmVfY2ZnLT5zcmNfbWF4YnVy
c3QgKg0KPiAgCQkJCQkJZG1hZW5naW5lX2NmZy0+c3JjX2FkZHJfd2lkdGg7DQo+ICAJCQlzZG1h
Yy0+d29yZF9zaXplID0gZG1hZW5naW5lX2NmZy0+c3JjX2FkZHJfd2lkdGg7DQo+ICsJCX0gZWxz
ZSBpZiAoZG1hZW5naW5lX2NmZy0+ZGlyZWN0aW9uID09IERNQV9NRU1fVE9fTUVNKSB7DQo+ICsJ
CQlzZG1hYy0+d29yZF9zaXplID0gZG1hZW5naW5lX2NmZy0+ZHN0X2FkZHJfd2lkdGg7DQo+ICAJ
CX0gZWxzZSB7DQo+ICAJCQlzZG1hYy0+cGVyX2FkZHJlc3MgPSBkbWFlbmdpbmVfY2ZnLT5kc3Rf
YWRkcjsNCj4gIAkJCXNkbWFjLT53YXRlcm1hcmtfbGV2ZWwgPSBkbWFlbmdpbmVfY2ZnLT5kc3Rf
bWF4YnVyc3QgKg0KPiBAQCAtMTUxNiw2ICsxNjUzLDcgQEAgc3RhdGljIGludCBfX2luaXQgc2Rt
YV9wcm9iZShzdHJ1Y3QgcGxhdGZvcm1fZGV2aWNlICpwZGV2KQ0KPiAgDQo+ICAJZG1hX2NhcF9z
ZXQoRE1BX1NMQVZFLCBzZG1hLT5kbWFfZGV2aWNlLmNhcF9tYXNrKTsNCj4gIAlkbWFfY2FwX3Nl
dChETUFfQ1lDTElDLCBzZG1hLT5kbWFfZGV2aWNlLmNhcF9tYXNrKTsNCj4gKwlkbWFfY2FwX3Nl
dChETUFfTUVNQ1BZLCBzZG1hLT5kbWFfZGV2aWNlLmNhcF9tYXNrKTsNCj4gIA0KPiAgCUlOSVRf
TElTVF9IRUFEKCZzZG1hLT5kbWFfZGV2aWNlLmNoYW5uZWxzKTsNCj4gIAkvKiBJbml0aWFsaXpl
IGNoYW5uZWwgcGFyYW1ldGVycyAqLw0KPiBAQCAtMTU3OCw2ICsxNzE2LDggQEAgc3RhdGljIGlu
dCBfX2luaXQgc2RtYV9wcm9iZShzdHJ1Y3QgcGxhdGZvcm1fZGV2aWNlICpwZGV2KQ0KPiAgCXNk
bWEtPmRtYV9kZXZpY2UuZGV2aWNlX3R4X3N0YXR1cyA9IHNkbWFfdHhfc3RhdHVzOw0KPiAgCXNk
bWEtPmRtYV9kZXZpY2UuZGV2aWNlX3ByZXBfc2xhdmVfc2cgPSBzZG1hX3ByZXBfc2xhdmVfc2c7
DQo+ICAJc2RtYS0+ZG1hX2RldmljZS5kZXZpY2VfcHJlcF9kbWFfY3ljbGljID0gc2RtYV9wcmVw
X2RtYV9jeWNsaWM7DQo+ICsJc2RtYS0+ZG1hX2RldmljZS5kZXZpY2VfcHJlcF9kbWFfbWVtY3B5
ID0gc2RtYV9wcmVwX21lbWNweTsNCj4gKwlzZG1hLT5kbWFfZGV2aWNlLmRldmljZV9wcmVwX2Rt
YV9zZyA9IHNkbWFfcHJlcF9tZW1jcHlfc2c7DQo+ICAJc2RtYS0+ZG1hX2RldmljZS5kZXZpY2Vf
Y29udHJvbCA9IHNkbWFfY29udHJvbDsNCj4gIAlzZG1hLT5kbWFfZGV2aWNlLmRldmljZV9pc3N1
ZV9wZW5kaW5nID0gc2RtYV9pc3N1ZV9wZW5kaW5nOw0KPiAgCXNkbWEtPmRtYV9kZXZpY2UuZGV2
LT5kbWFfcGFybXMgPSAmc2RtYS0+ZG1hX3Bhcm1zOw0KDQoNCi0tIA0KQW5keSBTaGV2Y2hlbmtv
IDxhbmRyaXkuc2hldmNoZW5rb0BpbnRlbC5jb20+DQpJbnRlbCBGaW5sYW5kIE95DQotLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0KSW50ZWwgRmlubGFuZCBPeQpSZWdpc3RlcmVkIEFkZHJlc3M6IFBMIDI4MSwgMDAxODEg
SGVsc2lua2kgCkJ1c2luZXNzIElkZW50aXR5IENvZGU6IDAzNTc2MDYgLSA0IApEb21pY2lsZWQg
aW4gSGVsc2lua2kgCgpUaGlzIGUtbWFpbCBhbmQgYW55IGF0dGFjaG1lbnRzIG1heSBjb250YWlu
IGNvbmZpZGVudGlhbCBtYXRlcmlhbCBmb3IKdGhlIHNvbGUgdXNlIG9mIHRoZSBpbnRlbmRlZCBy
ZWNpcGllbnQocykuIEFueSByZXZpZXcgb3IgZGlzdHJpYnV0aW9uCmJ5IG90aGVycyBpcyBzdHJp
Y3RseSBwcm9oaWJpdGVkLiBJZiB5b3UgYXJlIG5vdCB0aGUgaW50ZW5kZWQKcmVjaXBpZW50LCBw
bGVhc2UgY29udGFjdCB0aGUgc2VuZGVyIGFuZCBkZWxldGUgYWxsIGNvcGllcy4K

--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Robin Gong April 18, 2014, 9:41 a.m. UTC | #2
On Thu, Apr 17, 2014 at 10:24:50AM +0000, Shevchenko, Andriy wrote:
> On Thu, 2014-04-17 at 18:01 +0800, Robin Gong wrote:
> > add "device_prep_dma_memcpy" and "device_prep_dma_sg" for memory copy by sdma.
> > 
> > Signed-off-by: Robin Gong <b38343@freescale.com>
> > ---
> >  drivers/dma/imx-sdma.c |  188 +++++++++++++++++++++++++++++++++++++++++------
> >  1 files changed, 164 insertions(+), 24 deletions(-)
> > 
> > diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
> > index 4e79183..2a97e03 100644
> > --- a/drivers/dma/imx-sdma.c
> > +++ b/drivers/dma/imx-sdma.c
> > @@ -229,6 +229,7 @@ struct sdma_context_data {
> >  } __attribute__ ((packed));
> >  
> >  #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
> > +#define SDMA_BD_MAX_CNT	(0xfffc) /* align with 4 bytes */
> >  
> >  struct sdma_engine;
> >  
> > @@ -260,6 +261,7 @@ struct sdma_channel {
> >  	unsigned int			pc_from_device, pc_to_device;
> >  	unsigned long			flags;
> >  	dma_addr_t			per_address;
> > +	unsigned int                    pc_to_pc;
> >  	unsigned long			event_mask[2];
> >  	unsigned long			watermark_level;
> >  	u32				shp_addr, per_addr;
> > @@ -694,6 +696,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
> >  
> >  	sdmac->pc_from_device = 0;
> >  	sdmac->pc_to_device = 0;
> > +	sdmac->pc_to_pc = 0;
> >  
> >  	switch (peripheral_type) {
> >  	case IMX_DMATYPE_MEMORY:
> > @@ -763,6 +766,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
> >  
> >  	sdmac->pc_from_device = per_2_emi;
> >  	sdmac->pc_to_device = emi_2_per;
> > +	sdmac->pc_to_pc = emi_2_emi;
> >  }
> >  
> >  static int sdma_load_context(struct sdma_channel *sdmac)
> > @@ -775,11 +779,12 @@ static int sdma_load_context(struct sdma_channel *sdmac)
> >  	int ret;
> >  	unsigned long flags;
> >  
> > -	if (sdmac->direction == DMA_DEV_TO_MEM) {
> > +	if (sdmac->direction == DMA_DEV_TO_MEM)
> >  		load_address = sdmac->pc_from_device;
> > -	} else {
> > +	else if (sdmac->direction == DMA_MEM_TO_MEM)
> > +		load_address = sdmac->pc_to_pc;
> > +	else
> >  		load_address = sdmac->pc_to_device;
> > -	}
> >  
> >  	if (load_address < 0)
> >  		return load_address;
> > @@ -1010,16 +1015,118 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
> >  	clk_disable(sdma->clk_ahb);
> >  }
> >  
> > -static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> > -		struct dma_chan *chan, struct scatterlist *sgl,
> > -		unsigned int sg_len, enum dma_transfer_direction direction,
> > -		unsigned long flags, void *context)
> > +static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> > +		struct dma_chan *chan, dma_addr_t dma_dst,
> > +		dma_addr_t dma_src, size_t len, unsigned long flags)
> > +{
> > +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > +	struct sdma_engine *sdma = sdmac->sdma;
> > +	int channel = sdmac->channel;
> > +	size_t count;
> > +	int i = 0, param, ret;
> > +	struct sdma_buffer_descriptor *bd;
> > +
> > +	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
> > +		return NULL;
> > +
> > +	if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
> > +		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n"
> > +			, channel, len, NUM_BD * SDMA_BD_MAX_CNT);
> > +		goto err_out;
> > +	}
> > +
> > +	sdmac->status = DMA_IN_PROGRESS;
> > +
> > +	sdmac->buf_tail = 0;
> > +
> > +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
> 
> %pad for dma_addr_t variables.
>
Yes, %x here is not proper; it will be %#llx here to align with other similar
code in this file.
> > +		dma_src, dma_dst, len, channel);
> > +
> > +	sdmac->direction = DMA_MEM_TO_MEM;
> > +
> > +	ret = sdma_load_context(sdmac);
> > +	if (ret)
> > +		goto err_out;
> > +
> > +	sdmac->chn_count = 0;
> > +
> > +	do {
> > +		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
> > +		bd = &sdmac->bd[i];
> > +		bd->buffer_addr = dma_src;
> > +		bd->ext_buffer_addr = dma_dst;
> > +		bd->mode.count = count;
> > +
> > +		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
> > +			ret =  -EINVAL;
> > +			goto err_out;
> > +		}
> > +
> > +		switch (sdmac->word_size) {
> > +		case DMA_SLAVE_BUSWIDTH_4_BYTES:
> > +			bd->mode.command = 0;
> > +			if (count & 3 || dma_dst & 3 || dma_src & 3)
> 
> Could it be like
> if ((count | dma_dst | dma_src) & 3)
> ?
> 
Accept.
> > +				return NULL;
> > +			break;
> > +		case DMA_SLAVE_BUSWIDTH_2_BYTES:
> > +			bd->mode.command = 2;
> > +			if (count & 1 || dma_dst & 1 || dma_src & 1)
> 
> Similar here.
> 
Accept.
> > +				return NULL;
> > +			break;
> > +		case DMA_SLAVE_BUSWIDTH_1_BYTE:
> > +			bd->mode.command = 1;
> > +			break;
> > +		default:
> > +			return NULL;
> > +		}
> 
> Moreover, could you consider to make above piece of code (switch) a
> separate function and re-use it in sdma_prep_slave_sg()?
> 
Initially, I didn't want to make so many changes in the driver. But maybe we
need to make the code clean first. Will make it better in V2.
> > +
> > +		dma_src += count;
> > +		dma_dst += count;
> > +		len -= count;
> > +		i++;
> > +
> > +		param = BD_DONE | BD_EXTD | BD_CONT;
> > +		/* last bd */
> > +		if (!len) {
> > +			param |= BD_INTR;
> > +			param |= BD_LAST;
> > +			param &= ~BD_CONT;
> > +		}
> > +
> > +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
> > +				i, count, bd->buffer_addr,
> 
> 
> > +				param & BD_WRAP ? "wrap" : "",
> > +				param & BD_INTR ? " intr" : "");
> > +
> > +		bd->mode.status = param;
> > +		sdmac->chn_count += count;
> > +	} while (len);
> > +
> > +	sdmac->num_bd = i;
> > +	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
> > +
> > +	return &sdmac->desc;
> > +err_out:
> > +	sdmac->status = DMA_ERROR;
> > +	return NULL;
> > +}
> > +
> > +/*
> > + * Please ensure dst_nents is no smaller than src_nents, and also that every
> > + * sg_len of a dst_sg node is no smaller than that of src_sg. To simplify
> > + * things, please use the same size of dst_sg as src_sg.
> > + */
> > +static struct dma_async_tx_descriptor *sdma_prep_sg(
> > +		struct dma_chan *chan,
> > +		struct scatterlist *dst_sg, unsigned int dst_nents,
> > +		struct scatterlist *src_sg, unsigned int src_nents,
> > +		enum dma_transfer_direction direction)
> >  {
> >  	struct sdma_channel *sdmac = to_sdma_chan(chan);
> >  	struct sdma_engine *sdma = sdmac->sdma;
> >  	int ret, i, count;
> >  	int channel = sdmac->channel;
> > -	struct scatterlist *sg;
> > +	struct scatterlist *sg_src = src_sg, *sg_dst = dst_sg;
> >  
> >  	if (sdmac->status == DMA_IN_PROGRESS)
> >  		return NULL;
> > @@ -1030,32 +1137,38 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> >  	sdmac->buf_tail = 0;
> >  
> >  	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
> > -			sg_len, channel);
> > +			src_nents, channel);
> >  
> >  	sdmac->direction = direction;
> > +
> >  	ret = sdma_load_context(sdmac);
> >  	if (ret)
> >  		goto err_out;
> >  
> > -	if (sg_len > NUM_BD) {
> > +	if (src_nents > NUM_BD) {
> >  		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
> > -				channel, sg_len, NUM_BD);
> 
> %u for sg_len.
> I guess the same for NUM_BD.
> 
This line has been removed...
> > +				channel, src_nents, NUM_BD);
> >  		ret = -EINVAL;
> >  		goto err_out;
> >  	}
> >  
> >  	sdmac->chn_count = 0;
> > -	for_each_sg(sgl, sg, sg_len, i) {
> > +	for_each_sg(src_sg, sg_src, src_nents, i) {
> >  		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
> >  		int param;
> >  
> > -		bd->buffer_addr = sg->dma_address;
> > +		bd->buffer_addr = sg_src->dma_address;
> >  
> > -		count = sg_dma_len(sg);
> > +		if (direction == DMA_MEM_TO_MEM) {
> > +			BUG_ON(!sg_dst);
> > +			bd->ext_buffer_addr = sg_dst->dma_address;
> > +		}
> > +
> > +		count = sg_dma_len(sg_src);
> >  
> > -		if (count > 0xffff) {
> > +		if (count > SDMA_BD_MAX_CNT) {
> >  			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
> > -					channel, count, 0xffff);
> > +					channel, count, SDMA_BD_MAX_CNT);
> >  			ret = -EINVAL;
> >  			goto err_out;
> >  		}
> > @@ -1071,12 +1184,14 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> >  		switch (sdmac->word_size) {
> >  		case DMA_SLAVE_BUSWIDTH_4_BYTES:
> >  			bd->mode.command = 0;
> > -			if (count & 3 || sg->dma_address & 3)
> > +			if (count & 3 || sg_src->dma_address & 3 ||
> > +				(sg_dst && (sg_dst->dma_address & 3)))
> >  				return NULL;
> >  			break;
> >  		case DMA_SLAVE_BUSWIDTH_2_BYTES:
> >  			bd->mode.command = 2;
> > -			if (count & 1 || sg->dma_address & 1)
> > +			if (count & 1 || sg_src->dma_address & 1 ||
> > +				(sg_dst && (sg_dst->dma_address & 1)))
> >  				return NULL;
> >  			break;
> >  		case DMA_SLAVE_BUSWIDTH_1_BYTE:
> > @@ -1088,21 +1203,23 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> >  
> >  		param = BD_DONE | BD_EXTD | BD_CONT;
> >  
> > -		if (i + 1 == sg_len) {
> > +		if (i + 1 == src_nents) {
> >  			param |= BD_INTR;
> >  			param |= BD_LAST;
> >  			param &= ~BD_CONT;
> >  		}
> >  
> > -		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
> > -				i, count, (u64)sg->dma_address,
> > +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
> > +				i, count, sg_src->dma_address,
> 
> %pad for dma_addr_t.
> 
Accept the idea, same as the above.
> >  				param & BD_WRAP ? "wrap" : "",
> >  				param & BD_INTR ? " intr" : "");
> >  
> >  		bd->mode.status = param;
> > +		if (direction == DMA_MEM_TO_MEM)
> > +			sg_dst = sg_next(sg_dst);
> >  	}
> >  
> > -	sdmac->num_bd = sg_len;
> > +	sdmac->num_bd = src_nents;
> >  	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
> >  
> >  	return &sdmac->desc;
> > @@ -1111,6 +1228,24 @@ err_out:
> >  	return NULL;
> >  }
> >  
> > +static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
> > +		struct dma_chan *chan,
> > +		struct scatterlist *dst_sg, unsigned int dst_nents,
> > +		struct scatterlist *src_sg, unsigned int src_nents,
> > +		unsigned long flags)
> > +{
> > +	return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
> > +			   DMA_MEM_TO_MEM);
> > +}
> > +
> > +static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> > +		struct dma_chan *chan, struct scatterlist *sgl,
> > +		unsigned int sg_len, enum dma_transfer_direction direction,
> > +		unsigned long flags, void *context)
> > +{
> > +	return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
> > +}
> > +
> >  static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
> >  		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
> >  		size_t period_len, enum dma_transfer_direction direction,
> > @@ -1143,9 +1278,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
> >  		goto err_out;
> >  	}
> >  
> > -	if (period_len > 0xffff) {
> > +	if (period_len > SDMA_BD_MAX_CNT) {
> >  		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
> > -				channel, period_len, 0xffff);
> > +				channel, period_len, SDMA_BD_MAX_CNT);
> 
> %zu for period_len. Check carefully print specifiers over your code,
> please.
> 
Accept, although it has been here for a long time.
> >  		goto err_out;
> >  	}
> >  
> > @@ -1206,6 +1341,8 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> >  			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
> >  						dmaengine_cfg->src_addr_width;
> >  			sdmac->word_size = dmaengine_cfg->src_addr_width;
> > +		} else if (dmaengine_cfg->direction == DMA_MEM_TO_MEM) {
> > +			sdmac->word_size = dmaengine_cfg->dst_addr_width;
> >  		} else {
> >  			sdmac->per_address = dmaengine_cfg->dst_addr;
> >  			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
> > @@ -1516,6 +1653,7 @@ static int __init sdma_probe(struct platform_device *pdev)
> >  
> >  	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
> >  	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
> > +	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
> >  
> >  	INIT_LIST_HEAD(&sdma->dma_device.channels);
> >  	/* Initialize channel parameters */
> > @@ -1578,6 +1716,8 @@ static int __init sdma_probe(struct platform_device *pdev)
> >  	sdma->dma_device.device_tx_status = sdma_tx_status;
> >  	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
> >  	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
> > +	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
> > +	sdma->dma_device.device_prep_dma_sg = sdma_prep_memcpy_sg;
> >  	sdma->dma_device.device_control = sdma_control;
> >  	sdma->dma_device.device_issue_pending = sdma_issue_pending;
> >  	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
> 
> 
> -- 
> Andy Shevchenko <andriy.shevchenko@intel.com>
> Intel Finland Oy
> ---------------------------------------------------------------------
> Intel Finland Oy
> Registered Address: PL 281, 00181 Helsinki 
> Business Identity Code: 0357606 - 4 
> Domiciled in Helsinki 
> 
> This e-mail and any attachments may contain confidential material for
> the sole use of the intended recipient(s). Any review or distribution
> by others is strictly prohibited. If you are not the intended
> recipient, please contact the sender and delete all copies.

--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Andy Shevchenko April 22, 2014, 10:28 a.m. UTC | #3
On Fri, 2014-04-18 at 17:41 +0800, Robin Gong wrote:
> On Thu, Apr 17, 2014 at 10:24:50AM +0000, Shevchenko, Andriy wrote:

> > On Thu, 2014-04-17 at 18:01 +0800, Robin Gong wrote:


[]

> > > +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",

> > 

> > %pad for dma_addr_t variables.

> >

> Yes, %x here is not proper, will be %#llx here to align with others similar

> code in this file.


Why %#llx? You don't need the specific casting since kernel has special
specifiers for phys_addr_t and dma_addr_t and their derivatives (see
Documentation/printk-formats.txt)

> > > +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",

> > > +				i, count, sg_src->dma_address,

> > 

> > %pad for dma_addr_t.

> > 

> Accept the idea, same as the above.


Same as above.

-- 
Andy Shevchenko <andriy.shevchenko@intel.com>
Intel Finland Oy
---------------------------------------------------------------------
Intel Finland Oy
Registered Address: PL 281, 00181 Helsinki 
Business Identity Code: 0357606 - 4 
Domiciled in Helsinki 

This e-mail and any attachments may contain confidential material for
the sole use of the intended recipient(s). Any review or distribution
by others is strictly prohibited. If you are not the intended
recipient, please contact the sender and delete all copies.
Robin Gong April 22, 2014, 10:51 a.m. UTC | #4
On Tue, Apr 22, 2014 at 10:28:05AM +0000, Shevchenko, Andriy wrote:
> On Fri, 2014-04-18 at 17:41 +0800, Robin Gong wrote:
> > On Thu, Apr 17, 2014 at 10:24:50AM +0000, Shevchenko, Andriy wrote:
> > > On Thu, 2014-04-17 at 18:01 +0800, Robin Gong wrote:
> 
> []
> 
> > > > +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
> > > 
> > > %pad for dma_addr_t variables.
> > >
> > Yes, %x here is not proper, will be %#llx here to align with others similar
> > code in this file.
> 
> Why %#llx? You don't need the specific casting since kernel has special
> specifiers for phys_addr_t and dma_addr_t and their derivatives (see
> Documentation/printk-formats.txt)
>
I think both are OK; I chose %llx only to align with the style of similar
code in this file — you can find the same pattern in the
sdma_prep_dma_cyclic function. The description below is also copied from
Documentation/printk-formats.txt:


If <type> is dependent on a config option for its size (e.g., sector_t,
blkcnt_t, phys_addr_t, resource_size_t) or is architecture-dependent
for its size (e.g., tcflag_t), use a format specifier of its largest
possible type and explicitly cast to it.  Example:

	printk("test: sector number/total blocks: %llu/%llu\n",
		(unsigned long long)sector, (unsigned long long)blockcount);
> > > > +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
> > > > +				i, count, sg_src->dma_address,
> > > 
> > > %pad for dma_addr_t.
> > > 
> > Accept the idea, same as the above.
> 
> Same as above.
> 
> -- 
> Andy Shevchenko <andriy.shevchenko@intel.com>
> Intel Finland Oy
> ---------------------------------------------------------------------
> Intel Finland Oy
> Registered Address: PL 281, 00181 Helsinki 
> Business Identity Code: 0357606 - 4 
> Domiciled in Helsinki 
> 
> This e-mail and any attachments may contain confidential material for
> the sole use of the intended recipient(s). Any review or distribution
> by others is strictly prohibited. If you are not the intended
> recipient, please contact the sender and delete all copies.
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Andy Shevchenko April 22, 2014, 11:24 a.m. UTC | #5
On Tue, 2014-04-22 at 18:51 +0800, Robin Gong wrote:
> On Tue, Apr 22, 2014 at 10:28:05AM +0000, Shevchenko, Andriy wrote:
> > On Fri, 2014-04-18 at 17:41 +0800, Robin Gong wrote:
> > > On Thu, Apr 17, 2014 at 10:24:50AM +0000, Shevchenko, Andriy wrote:
> > > > On Thu, 2014-04-17 at 18:01 +0800, Robin Gong wrote:
> > 
> > []
> > 
> > > > > +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
> > > > 
> > > > %pad for dma_addr_t variables.
> > > >
> > > Yes, %x here is not proper, will be %#llx here to align with others similar
> > > code in this file.
> > 
> > Why %#llx? You don't need the specific casting since kernel has special
> > specifiers for phys_addr_t and dma_addr_t and their derivatives (see
> > Documentation/printk-formats.txt)
> >
> I think both are ok, why I choose %llx is only for align the code style,

Yes, but that code was written before the kernel gained this
feature.

>  you
> can find the same code in sdma_prep_dma_cyclic function. below description also
> copy from Documentation/printk-formats.txt:
> 
> 
> If <type> is dependent on a config option for its size (e.g., sector_t,
> blkcnt_t, phys_addr_t, resource_size_t) or is architecture-dependent
> for its size (e.g., tcflag_t), use a format specifier of its largest
> possible type and explicitly cast to it.  Example:
> 
> 	printk("test: sector number/total blocks: %llu/%llu\n",
> 		(unsigned long long)sector, (unsigned long long)blockcount);

Yeah, I think it needs to be updated according to the recent changes
that added the new specifiers.

+Cc Randy. Randy, do you think we should update that paragraph and
recommend preferring the special specifiers over explicit casting to the
longest possible type?

> > > > > +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
> > > > > +				i, count, sg_src->dma_address,
> > > > 
> > > > %pad for dma_addr_t.
> > > > 
> > > Accept the idea, same as the above.
> > 
> > Same as above.
Randy Dunlap April 23, 2014, 5:31 p.m. UTC | #6
On 04/22/14 04:24, Andy Shevchenko wrote:
> On Tue, 2014-04-22 at 18:51 +0800, Robin Gong wrote:
>> On Tue, Apr 22, 2014 at 10:28:05AM +0000, Shevchenko, Andriy wrote:
>>> On Fri, 2014-04-18 at 17:41 +0800, Robin Gong wrote:
>>>> On Thu, Apr 17, 2014 at 10:24:50AM +0000, Shevchenko, Andriy wrote:
>>>>> On Thu, 2014-04-17 at 18:01 +0800, Robin Gong wrote:
>>>
>>> []
>>>
>>>>>> +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
>>>>>
>>>>> %pad for dma_addr_t variables.
>>>>>
>>>> Yes, %x here is not proper, will be %#llx here to align with others similar
>>>> code in this file.
>>>
>>> Why %#llx? You don't need the specific casting since kernel has special
>>> specifiers for phys_addr_t and dma_addr_t and their derivatives (see
>>> Documentation/printk-formats.txt)
>>>
>> I think both are ok, why I choose %llx is only for align the code style,
> 
> Yes, but it was started being developed earlier than kernel gains the
> feature.
> 
>>  you
>> can find the same code in sdma_prep_dma_cyclic function. below description also
>> copy from Documentation/printk-formats.txt:
>>
>>
>> If <type> is dependent on a config option for its size (e.g., sector_t,
>> blkcnt_t, phys_addr_t, resource_size_t) or is architecture-dependent
>> for its size (e.g., tcflag_t), use a format specifier of its largest
>> possible type and explicitly cast to it.  Example:
>>
>> 	printk("test: sector number/total blocks: %llu/%llu\n",
>> 		(unsigned long long)sector, (unsigned long long)blockcount);
> 
> Yeah, I think it requires to be updated accordingly to last changes
> regarding to new specifiers.
> 
> +Cc Randy. Randy, do you think is a matter of fact that we have to
> update that paragraph somehow and recommend to prefer special specifiers
> over explicit casting to longest possible type?

Yes, I agree: if there is a printk format specifier that supports printing a
certain type, we should prefer it over the generic casting method.

IMO, casting to (long long), (unsigned long long), (u64), or (s64) should be a
last-resort fallback.


>>>>>> +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
>>>>>> +				i, count, sg_src->dma_address,
>>>>>
>>>>> %pad for dma_addr_t.
>>>>>
>>>> Accept the idea, same as the above.
>>>
>>> Same as above.
> 
>
diff mbox

Patch

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4e79183..2a97e03 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -229,6 +229,7 @@  struct sdma_context_data {
 } __attribute__ ((packed));
 
 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+#define SDMA_BD_MAX_CNT	(0xfffc) /* align with 4 bytes */
 
 struct sdma_engine;
 
@@ -260,6 +261,7 @@  struct sdma_channel {
 	unsigned int			pc_from_device, pc_to_device;
 	unsigned long			flags;
 	dma_addr_t			per_address;
+	unsigned int                    pc_to_pc;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
@@ -694,6 +696,7 @@  static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
+	sdmac->pc_to_pc = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -763,6 +766,7 @@  static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
+	sdmac->pc_to_pc = emi_2_emi;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -775,11 +779,12 @@  static int sdma_load_context(struct sdma_channel *sdmac)
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->direction == DMA_DEV_TO_MEM) {
+	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
-	} else {
+	else if (sdmac->direction == DMA_MEM_TO_MEM)
+		load_address = sdmac->pc_to_pc;
+	else
 		load_address = sdmac->pc_to_device;
-	}
 
 	if (load_address < 0)
 		return load_address;
@@ -1010,16 +1015,118 @@  static void sdma_free_chan_resources(struct dma_chan *chan)
 	clk_disable(sdma->clk_ahb);
 }
 
-static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst,
+		dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	size_t count;
+	int i = 0, param, ret;
+	struct sdma_buffer_descriptor *bd;
+
+	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
+		return NULL;
+
+	if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
+		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n"
+			, channel, len, NUM_BD * SDMA_BD_MAX_CNT);
+		goto err_out;
+	}
+
+	sdmac->status = DMA_IN_PROGRESS;
+
+	sdmac->buf_tail = 0;
+
+	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
+		dma_src, dma_dst, len, channel);
+
+	sdmac->direction = DMA_MEM_TO_MEM;
+
+	ret = sdma_load_context(sdmac);
+	if (ret)
+		goto err_out;
+
+	sdmac->chn_count = 0;
+
+	do {
+		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+		bd = &sdmac->bd[i];
+		bd->buffer_addr = dma_src;
+		bd->ext_buffer_addr = dma_dst;
+		bd->mode.count = count;
+
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
+			ret =  -EINVAL;
+			goto err_out;
+		}
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
+			bd->mode.command = 0;
+			if (count & 3 || dma_dst & 3 || dma_src & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || dma_dst & 1 || dma_src & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
+
+		dma_src += count;
+		dma_dst += count;
+		len -= count;
+		i++;
+
+		param = BD_DONE | BD_EXTD | BD_CONT;
+		/* last bd */
+		if (!len) {
+			param |= BD_INTR;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
+		}
+
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+				i, count, bd->buffer_addr,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+		sdmac->chn_count += count;
+	} while (len);
+
+	sdmac->num_bd = i;
+	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+	return &sdmac->desc;
+err_out:
+	sdmac->status = DMA_ERROR;
+	return NULL;
+}
+
+/*
+ * Please ensure dst_nents no smaller than src_nents , also every sg_len of
+ * dst_sg node no smaller than src_sg. To simply things, please use the same
+ * size of dst_sg as src_sg.
+ */
+static struct dma_async_tx_descriptor *sdma_prep_sg(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		enum dma_transfer_direction direction)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
 	int channel = sdmac->channel;
-	struct scatterlist *sg;
+	struct scatterlist *sg_src = src_sg, *sg_dst = dst_sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
 		return NULL;
@@ -1030,32 +1137,38 @@  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	sdmac->buf_tail = 0;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
-			sg_len, channel);
+			src_nents, channel);
 
 	sdmac->direction = direction;
+
 	ret = sdma_load_context(sdmac);
 	if (ret)
 		goto err_out;
 
-	if (sg_len > NUM_BD) {
+	if (src_nents > NUM_BD) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, sg_len, NUM_BD);
+				channel, src_nents, NUM_BD);
 		ret = -EINVAL;
 		goto err_out;
 	}
 
 	sdmac->chn_count = 0;
-	for_each_sg(sgl, sg, sg_len, i) {
+	for_each_sg(src_sg, sg_src, src_nents, i) {
 		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
 		int param;
 
-		bd->buffer_addr = sg->dma_address;
+		bd->buffer_addr = sg_src->dma_address;
 
-		count = sg_dma_len(sg);
+		if (direction == DMA_MEM_TO_MEM) {
+			BUG_ON(!sg_dst);
+			bd->ext_buffer_addr = sg_dst->dma_address;
+		}
+
+		count = sg_dma_len(sg_src);
 
-		if (count > 0xffff) {
+		if (count > SDMA_BD_MAX_CNT) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
-					channel, count, 0xffff);
+					channel, count, SDMA_BD_MAX_CNT);
 			ret = -EINVAL;
 			goto err_out;
 		}
@@ -1071,12 +1184,14 @@  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		switch (sdmac->word_size) {
 		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-			if (count & 3 || sg->dma_address & 3)
+			if (count & 3 || sg_src->dma_address & 3 ||
+				(sg_dst && (sg_dst->dma_address & 3)))
 				return NULL;
 			break;
 		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 			bd->mode.command = 2;
-			if (count & 1 || sg->dma_address & 1)
+			if (count & 1 || sg_src->dma_address & 1 ||
+				(sg_dst && (sg_dst->dma_address & 1)))
 				return NULL;
 			break;
 		case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -1088,21 +1203,23 @@  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (i + 1 == sg_len) {
+		if (i + 1 == src_nents) {
 			param |= BD_INTR;
 			param |= BD_LAST;
 			param &= ~BD_CONT;
 		}
 
-		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
-				i, count, (u64)sg->dma_address,
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+				i, count, sg_src->dma_address,
 				param & BD_WRAP ? "wrap" : "",
 				param & BD_INTR ? " intr" : "");
 
 		bd->mode.status = param;
+		if (direction == DMA_MEM_TO_MEM)
+			sg_dst = sg_next(sg_dst);
 	}
 
-	sdmac->num_bd = sg_len;
+	sdmac->num_bd = src_nents;
 	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 
 	return &sdmac->desc;
@@ -1111,6 +1228,24 @@  err_out:
 	return NULL;
 }
 
+static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags)
+{
+	return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
+			   DMA_MEM_TO_MEM);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
+}
+
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
@@ -1143,9 +1278,9 @@  static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		goto err_out;
 	}
 
-	if (period_len > 0xffff) {
+	if (period_len > SDMA_BD_MAX_CNT) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
-				channel, period_len, 0xffff);
+				channel, period_len, SDMA_BD_MAX_CNT);
 		goto err_out;
 	}
 
@@ -1206,6 +1341,8 @@  static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 						dmaengine_cfg->src_addr_width;
 			sdmac->word_size = dmaengine_cfg->src_addr_width;
+		} else if (dmaengine_cfg->direction == DMA_MEM_TO_MEM) {
+			sdmac->word_size = dmaengine_cfg->dst_addr_width;
 		} else {
 			sdmac->per_address = dmaengine_cfg->dst_addr;
 			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -1516,6 +1653,7 @@  static int __init sdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
@@ -1578,6 +1716,8 @@  static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_tx_status = sdma_tx_status;
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
+	sdma->dma_device.device_prep_dma_sg = sdma_prep_memcpy_sg;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;