
[v1] dma: imx-sdma: add virt-dma support

Message ID 1521735499-29138-1-git-send-email-yibin.gong@nxp.com (mailing list archive)
State Not Applicable

Commit Message

Robin Gong March 22, 2018, 4:18 p.m. UTC
The legacy sdma driver has the following limitations and drawbacks:
  1. It hardcodes the max number of BDs as "PAGE_SIZE / sizeof(*)" and
     allocates a full page per channel, even though only a few BDs are
     needed most of the time; conversely, in a few cases one PAGE_SIZE
     may not be enough.
  2. An SDMA channel can't stop immediately once it is disabled, which
     means an SDMA interrupt may still arrive after the channel has been
     terminated. There are some patches for this corner case, such as
     commit "2746e2c389f9", but they do not cover the non-cyclic case.

The common virt-dma framework overcomes the above limitations. It
allocates BDs dynamically and frees them once the tx transfer is done.
No memory is wasted and there is no hard maximum; the only limit is how
much memory can be requested from the kernel. For No.2, the issue can be
worked around by checking whether a descriptor ("sdmac->desc") is still
available when the unwanted interrupt arrives. Finally, the common
virt-dma framework makes the sdma driver easier to maintain.
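
As an illustration of the workaround for No.2, here is a minimal sketch of
the guard as it appears in the sdma_int_handler() hunk of this patch
(bookkeeping elided):

	spin_lock(&sdmac->vc.lock);
	desc = sdmac->desc;
	if (desc) {
		/* a descriptor is still in flight: the interrupt is
		 * genuine, so handle the completed buffer descriptors */
	}
	/* a late interrupt after terminate finds desc == NULL and is ignored */
	spin_unlock(&sdmac->vc.lock);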

Signed-off-by: Robin Gong <yibin.gong@nxp.com>
---
 drivers/dma/Kconfig    |   1 +
 drivers/dma/imx-sdma.c | 395 +++++++++++++++++++++++++++++++------------------
 2 files changed, 253 insertions(+), 143 deletions(-)

Comments

Robin Gong May 22, 2018, 6:16 a.m. UTC | #1
Ping.

-----Original Message-----
From: Robin Gong
Sent: 2018-03-23 0:18
To: dan.j.williams@intel.com; vinod.koul@intel.com
Cc: dmaengine@vger.kernel.org; linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org; dl-linux-imx <linux-imx@nxp.com>
Subject: [PATCH v1] dma: imx-sdma: add virt-dma support

[The original patch was quoted here in full; see the commit message and diff above.]
Vinod Koul May 22, 2018, 6:55 a.m. UTC | #2
On 22-05-18, 06:16, Robin Gong wrote:
> Ping.

Looks like I missed this one, can you please rebase and resend.
While at it, modify the subject to dmaengine, as that's the subsystem name.
Robin Gong May 22, 2018, 7:06 a.m. UTC | #3
Ok, I'll resend it after rebase and test.

On Tue, 2018-05-22 at 12:25 +0530, Vinod wrote:
> On 22-05-18, 06:16, Robin Gong wrote:
> > 
> > Ping.
> 
> Looks like I missed this one, can you please rebase and resend.
> While at it, modify the subject to dmaengine, as that's the subsystem
> name
Sascha Hauer May 22, 2018, 10:09 a.m. UTC | #4
Hi Robin,

Several comments inside.

Sascha

On Fri, Mar 23, 2018 at 12:18:19AM +0800, Robin Gong wrote:
> The legacy sdma driver has the following limitations and drawbacks:
>   1. It hardcodes the max number of BDs as "PAGE_SIZE / sizeof(*)" and
>      allocates a full page per channel, even though only a few BDs are
>      needed most of the time; conversely, in a few cases one PAGE_SIZE
>      may not be enough.
>   2. An SDMA channel can't stop immediately once it is disabled, which
>      means an SDMA interrupt may still arrive after the channel has been
>      terminated. There are some patches for this corner case, such as
>      commit "2746e2c389f9", but they do not cover the non-cyclic case.
> 
> The common virt-dma framework overcomes the above limitations. It
> allocates BDs dynamically and frees them once the tx transfer is done.
> No memory is wasted and there is no hard maximum; the only limit is how
> much memory can be requested from the kernel. For No.2, the issue can be
> worked around by checking whether a descriptor ("sdmac->desc") is still
> available when the unwanted interrupt arrives. Finally, the common
> virt-dma framework makes the sdma driver easier to maintain.
> 
> Signed-off-by: Robin Gong <yibin.gong@nxp.com>
> ---
>  drivers/dma/Kconfig    |   1 +
>  drivers/dma/imx-sdma.c | 395 +++++++++++++++++++++++++++++++------------------
>  2 files changed, 253 insertions(+), 143 deletions(-)
> 
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 27df3e2..c4ce43c 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -247,6 +247,7 @@ config IMX_SDMA
>  	tristate "i.MX SDMA support"
>  	depends on ARCH_MXC
>  	select DMA_ENGINE
> +	select DMA_VIRTUAL_CHANNELS
>  	help
>  	  Support the i.MX SDMA engine. This engine is integrated into
>  	  Freescale i.MX25/31/35/51/53/6 chips.
> diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
> index ccd03c3..df79e73 100644
> --- a/drivers/dma/imx-sdma.c
> +++ b/drivers/dma/imx-sdma.c
> @@ -48,6 +48,7 @@
>  #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
>  
>  #include "dmaengine.h"
> +#include "virt-dma.h"
>  
>  /* SDMA registers */
>  #define SDMA_H_C0PTR		0x000
> @@ -291,10 +292,19 @@ struct sdma_context_data {
>  	u32  scratch7;
>  } __attribute__ ((packed));
>  
> -#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
> -
>  struct sdma_engine;
>  
> +struct sdma_desc {
> +	struct virt_dma_desc	vd;
> +	struct list_head	node;
> +	unsigned int		num_bd;
> +	dma_addr_t		bd_phys;
> +	unsigned int		buf_tail;
> +	unsigned int		buf_ptail;
> +	struct sdma_channel	*sdmac;
> +	struct sdma_buffer_descriptor *bd;
> +};
> +
>  /**
>   * struct sdma_channel - housekeeping for a SDMA channel
>   *
> @@ -310,19 +320,17 @@ struct sdma_engine;
>   * @num_bd		max NUM_BD. number of descriptors currently handling
>   */
>  struct sdma_channel {
> +	struct virt_dma_chan		vc;
> +	struct list_head		pending;
>  	struct sdma_engine		*sdma;
> +	struct sdma_desc		*desc;
>  	unsigned int			channel;
>  	enum dma_transfer_direction		direction;
>  	enum sdma_peripheral_type	peripheral_type;
>  	unsigned int			event_id0;
>  	unsigned int			event_id1;
>  	enum dma_slave_buswidth		word_size;
> -	unsigned int			buf_tail;
> -	unsigned int			buf_ptail;
> -	unsigned int			num_bd;
>  	unsigned int			period_len;
> -	struct sdma_buffer_descriptor	*bd;
> -	dma_addr_t			bd_phys;
>  	unsigned int			pc_from_device, pc_to_device;
>  	unsigned int			device_to_device;
>  	unsigned long			flags;
> @@ -330,15 +338,12 @@ struct sdma_channel {
>  	unsigned long			event_mask[2];
>  	unsigned long			watermark_level;
>  	u32				shp_addr, per_addr;
> -	struct dma_chan			chan;
> -	spinlock_t			lock;
> -	struct dma_async_tx_descriptor	desc;
>  	enum dma_status			status;
>  	unsigned int			chn_count;
>  	unsigned int			chn_real_count;
> -	struct tasklet_struct		tasklet;
>  	struct imx_dma_data		data;
>  	bool				enabled;

Usage of this variable is removed in this patch, but not the variable
itself.

> +	u32				bd_size_sum;

This variable is never used for anything.

>  };
>  
>  #define IMX_DMA_SG_LOOP		BIT(0)
> @@ -398,6 +403,9 @@ struct sdma_engine {
>  	u32				spba_start_addr;
>  	u32				spba_end_addr;
>  	unsigned int			irq;
> +	/* channel0 bd */
> +	dma_addr_t			bd0_phys;
> +	struct sdma_buffer_descriptor	*bd0;
>  };
>  
>  static struct sdma_driver_data sdma_imx31 = {
> @@ -553,6 +561,8 @@ MODULE_DEVICE_TABLE(of, sdma_dt_ids);
>  #define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
>  #define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected*/
>  
> +static void sdma_start_desc(struct sdma_channel *sdmac);
> +
>  static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
>  {
>  	u32 chnenbl0 = sdma->drvdata->chnenbl0;
> @@ -597,14 +607,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
>  
>  static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
>  {
> -	unsigned long flags;
> -	struct sdma_channel *sdmac = &sdma->channel[channel];
> -
>  	writel(BIT(channel), sdma->regs + SDMA_H_START);
> -
> -	spin_lock_irqsave(&sdmac->lock, flags);
> -	sdmac->enabled = true;
> -	spin_unlock_irqrestore(&sdmac->lock, flags);
>  }
>  
>  /*
> @@ -632,7 +635,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
>  static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
>  		u32 address)
>  {
> -	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
> +	struct sdma_buffer_descriptor *bd0 = sdma->bd0;

This seems to be an orthogonal change. Please make this a
separate patch.

>  	void *buf_virt;
>  	dma_addr_t buf_phys;
>  	int ret;
> @@ -691,23 +694,16 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
>  static void sdma_update_channel_loop(struct sdma_channel *sdmac)
>  {
>  	struct sdma_buffer_descriptor *bd;
> +	struct sdma_desc *desc = sdmac->desc;
>  	int error = 0;
>  	enum dma_status	old_status = sdmac->status;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&sdmac->lock, flags);
> -	if (!sdmac->enabled) {
> -		spin_unlock_irqrestore(&sdmac->lock, flags);
> -		return;
> -	}
> -	spin_unlock_irqrestore(&sdmac->lock, flags);
>  
>  	/*
>  	 * loop mode. Iterate over descriptors, re-setup them and
>  	 * call callback function.
>  	 */
> -	while (1) {
> -		bd = &sdmac->bd[sdmac->buf_tail];
> +	while (desc) {

'desc' seems to be used as a loop counter here, but this variable is
never assigned another value, so I assume it's just another way to say
"skip the loop if desc is NULL". When 'desc' NULL you won't get into
this function at all though, so this check for desc seems rather pointless.
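
(In other words, since sdma_int_handler() only calls into this function
when sdmac->desc is non-NULL, the loop could arguably be written as below
— a sketch, not code from the patch:)

	struct sdma_desc *desc = sdmac->desc;	/* guaranteed non-NULL by caller */

	while (1) {
		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;
		/* ... re-setup the BD and invoke the client callback ... */
	}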

> +		bd = &desc->bd[desc->buf_tail];
>  
>  		if (bd->mode.status & BD_DONE)
>  			break;
> @@ -726,8 +722,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
>  		sdmac->chn_real_count = bd->mode.count;
>  		bd->mode.status |= BD_DONE;
>  		bd->mode.count = sdmac->period_len;
> -		sdmac->buf_ptail = sdmac->buf_tail;
> -		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
> +		desc->buf_ptail = desc->buf_tail;
> +		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
>  
>  		/*
>  		 * The callback is called from the interrupt context in order
> @@ -735,15 +731,16 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
>  		 * SDMA transaction status by the time the client tasklet is
>  		 * executed.
>  		 */
> -
> -		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
> +		spin_unlock(&sdmac->vc.lock);
> +		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
> +		spin_lock(&sdmac->vc.lock);
>  
>  		if (error)
>  			sdmac->status = old_status;
>  	}
>  }
>  
> -static void mxc_sdma_handle_channel_normal(unsigned long data)
> +static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
>  {
>  	struct sdma_channel *sdmac = (struct sdma_channel *) data;
>  	struct sdma_buffer_descriptor *bd;
> @@ -754,8 +751,8 @@ static void mxc_sdma_handle_channel_normal(unsigned long data)
>  	 * non loop mode. Iterate over all descriptors, collect
>  	 * errors and call callback function
>  	 */
> -	for (i = 0; i < sdmac->num_bd; i++) {
> -		bd = &sdmac->bd[i];
> +	for (i = 0; i < sdmac->desc->num_bd; i++) {
> +		bd = &sdmac->desc->bd[i];
>  
>  		 if (bd->mode.status & (BD_DONE | BD_RROR))
>  			error = -EIO;
> @@ -766,10 +763,6 @@ static void mxc_sdma_handle_channel_normal(unsigned long data)
>  		sdmac->status = DMA_ERROR;
>  	else
>  		sdmac->status = DMA_COMPLETE;
> -
> -	dma_cookie_complete(&sdmac->desc);
> -
> -	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
>  }
>  
>  static irqreturn_t sdma_int_handler(int irq, void *dev_id)
> @@ -785,13 +778,24 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
>  	while (stat) {
>  		int channel = fls(stat) - 1;
>  		struct sdma_channel *sdmac = &sdma->channel[channel];
> -
> -		if (sdmac->flags & IMX_DMA_SG_LOOP)
> -			sdma_update_channel_loop(sdmac);
> -		else
> -			tasklet_schedule(&sdmac->tasklet);
> +		struct sdma_desc *desc;
> +
> +		spin_lock(&sdmac->vc.lock);
> +		desc = sdmac->desc;
> +		if (desc) {
> +			if (sdmac->flags & IMX_DMA_SG_LOOP) {
> +				sdma_update_channel_loop(sdmac);
> +			} else {
> +				mxc_sdma_handle_channel_normal(sdmac);
> +				vchan_cookie_complete(&desc->vd);
> +				if (!list_empty(&sdmac->pending))
> +					list_del(&desc->node);

What does this list_empty check protect you from? If the list really is
empty here, that's a bug in your internal driver logic.
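
(If that reading is right, the body could drop the check — a sketch
resting on the assumption that sdma_start_desc() puts every non-cyclic
descriptor on the pending list:)

	mxc_sdma_handle_channel_normal(sdmac);
	vchan_cookie_complete(&desc->vd);
	list_del(&desc->node);	/* assumed always on sdmac->pending here */
	sdma_start_desc(sdmac);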

> +				 sdma_start_desc(sdmac);

Whitespace damage here.

> +			}
> +		}
>  
>  		__clear_bit(channel, &stat);
> +		spin_unlock(&sdmac->vc.lock);
>  	}
>  
>  	return IRQ_HANDLED;
> @@ -897,7 +901,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
>  	int channel = sdmac->channel;
>  	int load_address;
>  	struct sdma_context_data *context = sdma->context;
> -	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
> +	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
>  	int ret;
>  	unsigned long flags;
>  
> @@ -946,7 +950,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
>  
>  static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
>  {
> -	return container_of(chan, struct sdma_channel, chan);
> +	return container_of(chan, struct sdma_channel, vc.chan);
>  }
>  
>  static int sdma_disable_channel(struct dma_chan *chan)
> @@ -954,15 +958,10 @@ static int sdma_disable_channel(struct dma_chan *chan)
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
>  	struct sdma_engine *sdma = sdmac->sdma;
>  	int channel = sdmac->channel;
> -	unsigned long flags;
>  
>  	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
>  	sdmac->status = DMA_ERROR;
>  
> -	spin_lock_irqsave(&sdmac->lock, flags);
> -	sdmac->enabled = false;
> -	spin_unlock_irqrestore(&sdmac->lock, flags);
> -
>  	return 0;
>  }
>  
> @@ -1097,42 +1096,101 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
>  	return 0;
>  }
>  
> -static int sdma_request_channel(struct sdma_channel *sdmac)
> +static int sdma_alloc_bd(struct sdma_desc *desc)
>  {
> -	struct sdma_engine *sdma = sdmac->sdma;
> -	int channel = sdmac->channel;
> -	int ret = -EBUSY;
> +	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
> +	int ret = 0;
> +	unsigned long flags;
>  
> -	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
> +	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
>  					GFP_KERNEL);
> -	if (!sdmac->bd) {
> +	if (!desc->bd) {
>  		ret = -ENOMEM;
>  		goto out;
>  	}
>  
> -	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
> -	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
> +	spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
> +	desc->sdmac->bd_size_sum += bd_size;
> +	spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
>  
> -	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
> -	return 0;
>  out:
> -
>  	return ret;
>  }
>  
> -static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
> +static void sdma_free_bd(struct sdma_desc *desc)
>  {
> +	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
>  	unsigned long flags;
> -	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
> -	dma_cookie_t cookie;
>  
> -	spin_lock_irqsave(&sdmac->lock, flags);
> +	if (desc->bd) {
> +		dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
> +
> +		spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
> +		desc->sdmac->bd_size_sum -= bd_size;
> +		spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
> +	}
> +}
> +
> +static int sdma_request_channel0(struct sdma_engine *sdma)
> +{
> +	int ret = 0;
> +
> +	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
> +					GFP_KERNEL);
> +	if (!sdma->bd0) {
> +		ret = -ENOMEM;
> +		goto out;
> +	}
>  
> -	cookie = dma_cookie_assign(tx);
> +	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
> +	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
>  
> -	spin_unlock_irqrestore(&sdmac->lock, flags);
> +	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
> +out:
>  
> -	return cookie;
> +	return ret;
> +}
> +
> +static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
> +{
> +	return container_of(t, struct sdma_desc, vd.tx);
> +}
> +
> +static void sdma_desc_free(struct virt_dma_desc *vd)
> +{
> +	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
> +
> +	if (desc) {

Depending on the position of 'vd' in struct sdma_desc, 'desc' will always
be non-NULL, even if 'vd' is NULL.

I think this test is unnecessary since this function should never be
called with an invalid pointer. If it is, then the caller really
deserved the resulting crash.
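
(A sketch of the simplified callback this comment implies, assuming 'vd'
is always valid when the framework invokes it:)

	static void sdma_desc_free(struct virt_dma_desc *vd)
	{
		struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);

		/* vd is embedded in sdma_desc, so desc is valid whenever vd is */
		sdma_free_bd(desc);
		kfree(desc);
	}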

> +		sdma_free_bd(desc);
> +		kfree(desc);
> +	}
> +}
> +
> +static int sdma_terminate_all(struct dma_chan *chan)
> +{
> +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +
> +	spin_lock_irqsave(&sdmac->vc.lock, flags);
> +	vchan_get_all_descriptors(&sdmac->vc, &head);
> +	while (!list_empty(&sdmac->pending)) {
> +		struct sdma_desc *desc = list_first_entry(&sdmac->pending,
> +			struct sdma_desc, node);
> +
> +		 list_del(&desc->node);
> +		 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> +		 sdmac->vc.desc_free(&desc->vd);
> +		 spin_lock_irqsave(&sdmac->vc.lock, flags);
> +	}

list_for_each_entry_safe?
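
(One hedged sketch of that rework, reusing the names from
sdma_terminate_all() above and assuming desc_free() must still run with
the lock dropped: detach the whole list under the lock, then walk the
private copy safely:)

	struct sdma_desc *desc, *tmp;
	LIST_HEAD(pending);

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	list_splice_tail_init(&sdmac->pending, &pending);
	sdmac->desc = NULL;
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	list_for_each_entry_safe(desc, tmp, &pending, node) {
		list_del(&desc->node);
		sdmac->vc.desc_free(&desc->vd);
	}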

> +
> +	if (sdmac->desc)
> +		sdmac->desc = NULL;

The test is unnecessary.

> +	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> +	vchan_dma_desc_free_list(&sdmac->vc, &head);
> +	sdma_disable_channel_with_delay(chan);
> +
> +	return 0;
>  }
>  
>  static int sdma_alloc_chan_resources(struct dma_chan *chan)
> @@ -1168,18 +1226,11 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
>  	if (ret)
>  		goto disable_clk_ipg;
>  
> -	ret = sdma_request_channel(sdmac);
> -	if (ret)
> -		goto disable_clk_ahb;
> -
>  	ret = sdma_set_channel_priority(sdmac, prio);
>  	if (ret)
>  		goto disable_clk_ahb;
>  
> -	dma_async_tx_descriptor_init(&sdmac->desc, chan);
> -	sdmac->desc.tx_submit = sdma_tx_submit;
> -	/* txd.flags will be overwritten in prep funcs */
> -	sdmac->desc.flags = DMA_CTRL_ACK;
> +	sdmac->bd_size_sum = 0;
>  
>  	return 0;
>  
> @@ -1195,7 +1246,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
>  	struct sdma_engine *sdma = sdmac->sdma;
>  
> -	sdma_disable_channel(chan);
> +	sdma_terminate_all(chan);
>  
>  	if (sdmac->event_id0)
>  		sdma_event_disable(sdmac, sdmac->event_id0);
> @@ -1207,12 +1258,43 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
>  
>  	sdma_set_channel_priority(sdmac, 0);
>  
> -	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
> -
>  	clk_disable(sdma->clk_ipg);
>  	clk_disable(sdma->clk_ahb);
>  }
>  
> +static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
> +				enum dma_transfer_direction direction, u32 bds)
> +{
> +	struct sdma_desc *desc;
> +
> +	desc = kzalloc((sizeof(*desc)), GFP_KERNEL);
> +	if (!desc)
> +		goto err_out;
> +
> +	sdmac->status = DMA_IN_PROGRESS;
> +	sdmac->direction = direction;
> +	sdmac->flags = 0;
> +	sdmac->chn_count = 0;
> +	sdmac->chn_real_count = 0;
> +
> +	desc->sdmac = sdmac;
> +	desc->num_bd = bds;
> +	INIT_LIST_HEAD(&desc->node);
> +
> +	if (sdma_alloc_bd(desc))
> +		goto err_desc_out;
> +
> +	if (sdma_load_context(sdmac))
> +		goto err_desc_out;
> +
> +	return desc;
> +
> +err_desc_out:
> +	kfree(desc);
> +err_out:
> +	return NULL;
> +}
> +
>  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
>  		struct dma_chan *chan, struct scatterlist *sgl,
>  		unsigned int sg_len, enum dma_transfer_direction direction,
> @@ -1223,35 +1305,24 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
>  	int ret, i, count;
>  	int channel = sdmac->channel;
>  	struct scatterlist *sg;
> +	struct sdma_desc *desc;
>  
> -	if (sdmac->status == DMA_IN_PROGRESS)
> +	if (!chan)
>  		return NULL;
> -	sdmac->status = DMA_IN_PROGRESS;
> -
> -	sdmac->flags = 0;
>  
> -	sdmac->buf_tail = 0;
> -	sdmac->buf_ptail = 0;
> -	sdmac->chn_real_count = 0;
> +	desc = sdma_transfer_init(sdmac, direction, sg_len);
> +	if (!desc)
> +		goto err_out;
>  
>  	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
>  			sg_len, channel);
>  
> -	sdmac->direction = direction;
>  	ret = sdma_load_context(sdmac);
>  	if (ret)
>  		goto err_out;
>  
> -	if (sg_len > NUM_BD) {
> -		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
> -				channel, sg_len, NUM_BD);
> -		ret = -EINVAL;
> -		goto err_out;
> -	}
> -
> -	sdmac->chn_count = 0;
>  	for_each_sg(sgl, sg, sg_len, i) {
> -		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
> +		struct sdma_buffer_descriptor *bd = &desc->bd[i];
>  		int param;
>  
>  		bd->buffer_addr = sg->dma_address;
> @@ -1262,7 +1333,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
>  			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
>  					channel, count, 0xffff);
>  			ret = -EINVAL;
> -			goto err_out;
> +			goto err_bd_out;
>  		}
>  
>  		bd->mode.count = count;
> @@ -1307,10 +1378,11 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
>  		bd->mode.status = param;
>  	}
>  
> -	sdmac->num_bd = sg_len;
> -	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
> +	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
>  
> -	return &sdmac->desc;
> +err_bd_out:
> +	sdma_free_bd(desc);
> +	kfree(desc);
>  err_out:
>  	sdmac->status = DMA_ERROR;
>  	return NULL;
> @@ -1326,39 +1398,32 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
>  	int num_periods = buf_len / period_len;
>  	int channel = sdmac->channel;
>  	int ret, i = 0, buf = 0;
> +	struct sdma_desc *desc;
>  
>  	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
>  
> -	if (sdmac->status == DMA_IN_PROGRESS)
> -		return NULL;
> -
> -	sdmac->status = DMA_IN_PROGRESS;
> +	/* Now allocate and setup the descriptor. */
> +	desc = sdma_transfer_init(sdmac, direction, num_periods);
> +	if (!desc)
> +		goto err_out;
>  
> -	sdmac->buf_tail = 0;
> -	sdmac->buf_ptail = 0;
> -	sdmac->chn_real_count = 0;
> +	desc->buf_tail = 0;
> +	desc->buf_ptail = 0;
>  	sdmac->period_len = period_len;
> -
>  	sdmac->flags |= IMX_DMA_SG_LOOP;
> -	sdmac->direction = direction;
> +
>  	ret = sdma_load_context(sdmac);
>  	if (ret)
>  		goto err_out;
>  
> -	if (num_periods > NUM_BD) {
> -		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
> -				channel, num_periods, NUM_BD);
> -		goto err_out;
> -	}
> -
>  	if (period_len > 0xffff) {
>  		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
>  				channel, period_len, 0xffff);
> -		goto err_out;
> +		goto err_bd_out;
>  	}
>  
>  	while (buf < buf_len) {
> -		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
> +		struct sdma_buffer_descriptor *bd = &desc->bd[i];
>  		int param;
>  
>  		bd->buffer_addr = dma_addr;
> @@ -1366,7 +1431,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
>  		bd->mode.count = period_len;
>  
>  		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
> -			goto err_out;
> +			goto err_bd_out;
>  		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
>  			bd->mode.command = 0;
>  		else
> @@ -1389,10 +1454,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
>  		i++;
>  	}
>  
> -	sdmac->num_bd = num_periods;
> -	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
> -
> -	return &sdmac->desc;
> +	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
> +err_bd_out:
> +	sdma_free_bd(desc);
> +	kfree(desc);
>  err_out:
>  	sdmac->status = DMA_ERROR;
>  	return NULL;
> @@ -1432,26 +1497,74 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
>  {
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
>  	u32 residue;
> +	struct virt_dma_desc *vd;
> +	struct sdma_desc *desc;
> +	enum dma_status ret;
> +	unsigned long flags;
>  
> -	if (sdmac->flags & IMX_DMA_SG_LOOP)
> -		residue = (sdmac->num_bd - sdmac->buf_ptail) *
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	if (ret == DMA_COMPLETE && txstate) {
> +		residue = sdmac->chn_count - sdmac->chn_real_count;
> +		return ret;
> +	}
> +
> +	spin_lock_irqsave(&sdmac->vc.lock, flags);
> +	vd = vchan_find_desc(&sdmac->vc, cookie);
> +	desc = to_sdma_desc(&vd->tx);

You should use 'vd' only after you have made sure it is valid (I see it
causes no harm in this case, but let's be nice to the readers of this
code).
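
(A sketch of the reordering this asks for — 'vd' is dereferenced only
inside the NULL check:)

	vd = vchan_find_desc(&sdmac->vc, cookie);
	if (vd) {
		desc = to_sdma_desc(&vd->tx);
		if (sdmac->flags & IMX_DMA_SG_LOOP)
			residue = (desc->num_bd - desc->buf_ptail) *
				  sdmac->period_len - sdmac->chn_real_count;
		else
			residue = sdmac->chn_count - sdmac->chn_real_count;
	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
		residue = sdmac->chn_count - sdmac->chn_real_count;
	} else {
		residue = 0;
	}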

> +	if (vd) {
> +		if (sdmac->flags & IMX_DMA_SG_LOOP)
> +			residue = (desc->num_bd - desc->buf_ptail) *
>  			   sdmac->period_len - sdmac->chn_real_count;
> -	else
> +		else
> +			residue = sdmac->chn_count - sdmac->chn_real_count;
> +	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
>  		residue = sdmac->chn_count - sdmac->chn_real_count;
> +	} else {
> +		residue = 0;
> +	}
> +	ret = sdmac->status;
> +	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
>  
>  	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
>  			 residue);
>  
> -	return sdmac->status;
> +	return ret;
> +}
> +
> +static void sdma_start_desc(struct sdma_channel *sdmac)
> +{
> +	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
> +	struct sdma_desc *desc;
> +	struct sdma_engine *sdma = sdmac->sdma;
> +	int channel = sdmac->channel;
> +
> +	if (!vd) {
> +		sdmac->desc = NULL;
> +		return;
> +	}
> +	sdmac->desc = desc = to_sdma_desc(&vd->tx);
> +	/*
> +	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
> +	 * the desc alloced will never be freed in vchan_dma_desc_free_list
> +	 */
> +	if (!(sdmac->flags & IMX_DMA_SG_LOOP)) {
> +		list_add_tail(&sdmac->desc->node, &sdmac->pending);
> +		list_del(&vd->node);
> +	}
> +	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
> +	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
> +	sdma_enable_channel(sdma, sdmac->channel);
>  }
>  
>  static void sdma_issue_pending(struct dma_chan *chan)
>  {
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
> -	struct sdma_engine *sdma = sdmac->sdma;
> +	unsigned long flags;
>  
> -	if (sdmac->status == DMA_IN_PROGRESS)
> -		sdma_enable_channel(sdma, sdmac->channel);
> +	spin_lock_irqsave(&sdmac->vc.lock, flags);
> +	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
> +		sdma_start_desc(sdmac);
> +	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
>  }
>  
>  #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
> @@ -1657,7 +1770,7 @@ static int sdma_init(struct sdma_engine *sdma)
>  	for (i = 0; i < MAX_DMA_CHANNELS; i++)
>  		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
>  
> -	ret = sdma_request_channel(&sdma->channel[0]);
> +	ret = sdma_request_channel0(sdma);
>  	if (ret)
>  		goto err_dma_alloc;
>  
> @@ -1819,22 +1932,17 @@ static int sdma_probe(struct platform_device *pdev)
>  		struct sdma_channel *sdmac = &sdma->channel[i];
>  
>  		sdmac->sdma = sdma;
> -		spin_lock_init(&sdmac->lock);
> -
> -		sdmac->chan.device = &sdma->dma_device;
> -		dma_cookie_init(&sdmac->chan);
>  		sdmac->channel = i;
> -
> -		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
> -			     (unsigned long) sdmac);
> +		sdmac->status = DMA_IN_PROGRESS;
> +		sdmac->vc.desc_free = sdma_desc_free;
> +		INIT_LIST_HEAD(&sdmac->pending);
>  		/*
>  		 * Add the channel to the DMAC list. Do not add channel 0 though
>  		 * because we need it internally in the SDMA driver. This also means
>  		 * that channel 0 in dmaengine counting matches sdma channel 1.
>  		 */
>  		if (i)
> -			list_add_tail(&sdmac->chan.device_node,
> -					&sdma->dma_device.channels);
> +			vchan_init(&sdmac->vc, &sdma->dma_device);
>  	}
>  
>  	ret = sdma_init(sdma);
> @@ -1879,7 +1987,7 @@ static int sdma_probe(struct platform_device *pdev)
>  	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
>  	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
>  	sdma->dma_device.device_config = sdma_config;
> -	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
> +	sdma->dma_device.device_terminate_all = sdma_terminate_all;
>  	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
>  	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
>  	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
> @@ -1939,7 +2047,8 @@ static int sdma_remove(struct platform_device *pdev)
>  	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
>  		struct sdma_channel *sdmac = &sdma->channel[i];
>  
> -		tasklet_kill(&sdmac->tasklet);
> +		tasklet_kill(&sdmac->vc.task);
> +		sdma_free_chan_resources(&sdmac->vc.chan);
>  	}
>  
>  	platform_set_drvdata(pdev, NULL);
> -- 
> 2.7.4
> 
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
>
Robin Gong May 23, 2018, 10:26 a.m. UTC | #5
On Tue, 2018-05-22 at 12:09 +0200, Sascha Hauer wrote:
> Hi Robin,
> 
> Several comments inside.
> 
> Sascha
> 
> On Fri, Mar 23, 2018 at 12:18:19AM +0800, Robin Gong wrote:
> > [commit message and earlier hunks snipped]
> > 
> > @@ -330,15 +338,12 @@ struct sdma_channel {
> >  	unsigned long			event_mask[2];
> >  	unsigned long			watermark_level;
> >  	u32				shp_addr, per_addr;
> > -	struct dma_chan			chan;
> > -	spinlock_t			lock;
> > -	struct dma_async_tx_descriptor	desc;
> >  	enum dma_status			status;
> >  	unsigned int			chn_count;
> >  	unsigned int			chn_real_count;
> > -	struct tasklet_struct		tasklet;
> >  	struct imx_dma_data		data;
> >  	bool				enabled;
> 
> Usage of this variable is removed in this patch, but not the variable
> itself.

Yes, will remove the useless 'enabled' in v2.
> > +	u32				bd_size_sum;
> 
> This variable is never used for anything.

Yes, it's not really for functional use, just for debugging how many
BDs are currently in use.
> >  };
> >  
> > @@ -398,6 +403,9 @@ struct sdma_engine {
> >  	u32				spba_start_addr;
> >  	u32				spba_end_addr;
> >  	unsigned int			irq;
> > +	/* channel0 bd */
> > +	dma_addr_t			bd0_phys;
> > +	struct sdma_buffer_descriptor	*bd0;
> >  };
> > 
> > [...]
> > 
> > @@ -632,7 +635,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
> >  static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
> >  		u32 address)
> >  {
> > -	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
> > +	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
> 
> This change seems to be an orthogonal change. Please make this a
> separate patch.

It is related to the virt-dma support: in the virt-dma framework all
BDs are allocated dynamically when they are used, but bd0 is a special
case since it is required from the start to load the SDMA firmware and
the context of the other channels. So 'bd0' is allocated once for use
on behalf of the other channels.
> >  	void *buf_virt;
> >  	dma_addr_t buf_phys;
> >  	int ret;
> > @@ -691,23 +694,16 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
> >  static void sdma_update_channel_loop(struct sdma_channel *sdmac)
> >  {
> >  	struct sdma_buffer_descriptor *bd;
> > +	struct sdma_desc *desc = sdmac->desc;
> >  	int error = 0;
> >  	enum dma_status	old_status = sdmac->status;
> > -	unsigned long flags;
> > -
> > -	spin_lock_irqsave(&sdmac->lock, flags);
> > -	if (!sdmac->enabled) {
> > -		spin_unlock_irqrestore(&sdmac->lock, flags);
> > -		return;
> > -	}
> > -	spin_unlock_irqrestore(&sdmac->lock, flags);
> >  
> >  	/*
> >  	 * loop mode. Iterate over descriptors, re-setup them and
> >  	 * call callback function.
> >  	 */
> > -	while (1) {
> > -		bd = &sdmac->bd[sdmac->buf_tail];
> > +	while (desc) {
> 
> 'desc' seems to be used as a loop counter here, but this variable is
> never assigned another value, so I assume it's just another way to say
> "skip the loop if desc is NULL". When 'desc' is NULL you won't get into
> this function at all though, so this check for desc seems rather
> pointless.

Good catch: 'sdmac->desc' should be checked here instead of 'desc',
since 'sdmac->desc' may be set to NULL by sdma_terminate_all() in the
narrow window between the following spin_unlock and spin_lock. Will
improve it in V2.
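
A minimal sketch of what the reworked loop condition might look like in
v2 (an assumption about the follow-up, not code from this patch): the
descriptor pointer is re-read from the channel on every iteration so
that a concurrent sdma_terminate_all() is noticed.

	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];
		if (bd->mode.status & BD_DONE)
			break;
		/*
		 * ... re-setup the BD and invoke the callback with
		 * sdmac->vc.lock dropped, which is exactly the window
		 * in which sdmac->desc may become NULL ...
		 */
	}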
> > +		bd = &desc->bd[desc->buf_tail];
> > 
> > [...]
> > 
> >  static irqreturn_t sdma_int_handler(int irq, void *dev_id)
> > @@ -785,13 +778,24 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
> >  	while (stat) {
> >  		int channel = fls(stat) - 1;
> >  		struct sdma_channel *sdmac = &sdma->channel[channel];
> > -
> > -		if (sdmac->flags & IMX_DMA_SG_LOOP)
> > -			sdma_update_channel_loop(sdmac);
> > -		else
> > -			tasklet_schedule(&sdmac->tasklet);
> > +		struct sdma_desc *desc;
> > +
> > +		spin_lock(&sdmac->vc.lock);
> > +		desc = sdmac->desc;
> > +		if (desc) {
> > +			if (sdmac->flags & IMX_DMA_SG_LOOP) {
> > +				sdma_update_channel_loop(sdmac);
> > +			} else {
> > +				mxc_sdma_handle_channel_normal(sdmac);
> > +				vchan_cookie_complete(&desc->vd);
> > +				if (!list_empty(&sdmac->pending))
> > +					list_del(&desc->node);
> 
> What does this list_empty check protect you from? It looks like when
> the list really is empty then it's a bug in your internal driver logic.

Yes, there is no need to check the local sdmac->pending list here: the
next descriptor is set up directly in the ISR instead of in a local
tasklet, and the virt-dma framework handles all the lists such as
desc_issued/desc_completed. Will remove sdmac->pending in V2.
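
If the 'pending' list goes away as described, the non-cyclic completion
path in the ISR might reduce to something like this (a sketch of the
planned v2, not this patch):

			} else {
				mxc_sdma_handle_channel_normal(sdmac);
				vchan_cookie_complete(&desc->vd);
				sdma_start_desc(sdmac);
			}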
> > +				 sdma_start_desc(sdmac);
> 
> Whitespace damage here.

Will fix in V2.
> > +			}
> > +		}
> >  
> >  		__clear_bit(channel, &stat);
> > +		spin_unlock(&sdmac->vc.lock);
> >  	}
> >  
> >  	return IRQ_HANDLED;
> > 
> > [...]
> > 
> > +static void sdma_desc_free(struct virt_dma_desc *vd)
> > +{
> > +	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
> > +
> > +	if (desc) {
> 
> Depending on the position of 'vd' in struct sdma_desc 'desc' will
> always be non-NULL, even if 'vd' is NULL.
> 
> I think this test is unnecessary since this function should never be
> called with an invalid pointer. If it is, then the caller really
> deserved the resulting crash.

Yes, will remove it.
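
With the test dropped as agreed, the function would reduce to roughly
this (a sketch of the expected v2 shape):

static void sdma_desc_free(struct virt_dma_desc *vd)
{
	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);

	/* vd is embedded in sdma_desc, so desc is valid whenever vd is */
	sdma_free_bd(desc);
	kfree(desc);
}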
> > +		sdma_free_bd(desc);
> > +		kfree(desc);
> > +	}
> > +}
> > +
> > +static int sdma_terminate_all(struct dma_chan *chan)
> > +{
> > +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > +	unsigned long flags;
> > +	LIST_HEAD(head);
> > +
> > +	spin_lock_irqsave(&sdmac->vc.lock, flags);
> > +	vchan_get_all_descriptors(&sdmac->vc, &head);
> > +	while (!list_empty(&sdmac->pending)) {
> > +		struct sdma_desc *desc = list_first_entry(&sdmac->pending,
> > +			struct sdma_desc, node);
> > +
> > +		 list_del(&desc->node);
> > +		 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> > +		 sdmac->vc.desc_free(&desc->vd);
> > +		 spin_lock_irqsave(&sdmac->vc.lock, flags);
> > +	}
> 
> list_for_each_entry_safe?

Will remove all the while (sdmac->pending) checking here; it is not
needed.
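
For reference, the suggested idiom would look roughly like this (a
sketch only, with the lock dance elided; it becomes moot anyway once
the 'pending' list is removed in v2):

	struct sdma_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &sdmac->pending, node) {
		list_del(&desc->node);
		sdmac->vc.desc_free(&desc->vd);
	}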
> > +
> > +	if (sdmac->desc)
> > +		sdmac->desc = NULL;
> 
> The test is unnecessary.

Setting it to NULL is meaningful in case the DMA-done interrupt comes
in after terminate: as you know, the SDMA actually stops only after the
current transfer is done.
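
That is also why the interrupt handler re-reads the pointer under the
lock before touching anything (per the sdma_int_handler() hunk quoted
earlier):

	spin_lock(&sdmac->vc.lock);
	desc = sdmac->desc;	/* NULL once sdma_terminate_all() ran */
	if (desc) {
		/* handle the completion */
	}
	spin_unlock(&sdmac->vc.lock);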
> > +	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> > +	vchan_dma_desc_free_list(&sdmac->vc, &head);
> > +	sdma_disable_channel_with_delay(chan);
> > +
> > +	return 0;
> >  }
> > 
> > [...]
> > 
> > @@ -1432,26 +1497,74 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
> >  {
> >  	struct sdma_channel *sdmac = to_sdma_chan(chan);
> >  	u32 residue;
> > +	struct virt_dma_desc *vd;
> > +	struct sdma_desc *desc;
> > +	enum dma_status ret;
> > +	unsigned long flags;
> >  
> > -	if (sdmac->flags & IMX_DMA_SG_LOOP)
> > -		residue = (sdmac->num_bd - sdmac->buf_ptail) *
> > +	ret = dma_cookie_status(chan, cookie, txstate);
> > +	if (ret == DMA_COMPLETE && txstate) {
> > +		residue = sdmac->chn_count - sdmac->chn_real_count;
> > +		return ret;
> > +	}
> > +
> > +	spin_lock_irqsave(&sdmac->vc.lock, flags);
> > +	vd = vchan_find_desc(&sdmac->vc, cookie);
> > +	desc = to_sdma_desc(&vd->tx);
> 
> You should use 'vd' only after you have made sure it is valid (though
> I see it causes no harm in this case, but let's be nice to the readers
> of this code)

Ok, will move desc = to_sdma_desc(&vd->tx) into the if (vd) branch
below.
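
I.e. something along these lines for v2 (a sketch; the residue logic is
unchanged from this patch):

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vd = vchan_find_desc(&sdmac->vc, cookie);
	if (vd) {
		struct sdma_desc *desc = to_sdma_desc(&vd->tx);

		if (sdmac->flags & IMX_DMA_SG_LOOP)
			residue = (desc->num_bd - desc->buf_ptail) *
				   sdmac->period_len - sdmac->chn_real_count;
		else
			residue = sdmac->chn_count - sdmac->chn_real_count;
	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
		residue = sdmac->chn_count - sdmac->chn_real_count;
	} else {
		residue = 0;
	}
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);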
> > +	if (vd) {
> > +		if (sdmac->flags & IMX_DMA_SG_LOOP)
> > +			residue = (desc->num_bd - desc->buf_ptail) *
> >  			   sdmac->period_len - sdmac->chn_real_count;
> > -	else
> > +		else
> > +			residue = sdmac->chn_count - sdmac->chn_real_count;
> > +	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
> >  		residue = sdmac->chn_count - sdmac->chn_real_count;
> > +	} else {
> > +		residue = 0;
> > +	}
> > 
> > [...]
> > 
> > -- 
> > 2.7.4
> > 
> > _______________________________________________
> > linux-arm-kernel mailing list
> > linux-arm-kernel@lists.infradead.org
> > http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Sascha Hauer May 23, 2018, 10:56 a.m. UTC | #6
On Wed, May 23, 2018 at 10:26:23AM +0000, Robin Gong wrote:
> > 
> > > 
> > > +	u32				bd_size_sum;
> > This variable is never used for anything.
> Yes, it's not really for functional use, just for debugging how many
> BDs are currently in use.

I am not convinced this is useful. The variable could easily be added back
by someone who debugs this driver. The code has to be changed anyway to
make use of this variable.


> > > @@ -632,7 +635,7 @@ static int sdma_run_channel0(struct sdma_engine
> > > *sdma)
> > >  static int sdma_load_script(struct sdma_engine *sdma, void *buf,
> > > int size,
> > >  		u32 address)
> > >  {
> > > -	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
> > > +	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
> > This change seems to be an orthogonal change. Please make this a
> > separate patch.
> It is related to the virt-dma support: in the virt-dma framework all
> BDs are allocated dynamically when they are used, but bd0 is a special
> case since it is required from the start to load the SDMA firmware and
> the context of the other channels. So 'bd0' is allocated once for use
> on behalf of the other channels.

Well, it's somewhat related to virtual dma support, but that's not my
point. My point is that this patch is quite big and thus hard to review.
If we find ways to make it smaller and to split it up in multiple
patches then we should do so, because it makes it easier to review and
in case you break something here we raise the chance that a "git bisect"
lands on a smaller patch which is easier to understand.

Please try and make that a separate change. I haven't really looked into
it and it may not be possible due to reasons I haven't seen, but please
at least give it a try.

> > 
> > > 
> > > +
> > > +	if (sdmac->desc)
> > > +		sdmac->desc = NULL;
> > The test is unnecessary.
> Setting it to NULL is meaningful in case the DMA-done interrupt comes
> in after terminate: as you know, the SDMA actually stops only after
> the current transfer is done.

The setting of the variable to NULL is ok, but the test is useless.

	if (sdmac->desc)
		sdmac->desc = NULL;

is equivalent to:

	sdmac->desc = NULL;


Sascha
Vinod Koul May 23, 2018, 1:34 p.m. UTC | #7
On 23-05-18, 12:56, s.hauer@pengutronix.de wrote:

> Well, it's somewhat related to virtual dma support, but that's not my
> point. My point is that this patch is quite big and thus hard to review.
> If we find ways to make it smaller and to split it up in multiple
> patches then we should do so, because it makes it easier to review and
> in case you break something here we raise the chance that a "git bisect"
> lands on a smaller patch which is easier to understand.
> 
> Please try and make that a separate change. I haven't really looked into
> it and it may not be possible due to reasons I haven't seen, but please
> at least give it a try.

That is something that would help me as well. I have reviewed the patch and am
not sure I fully understand the changes, so breaking up stuff would definitely
help in the review.
Robin Gong May 24, 2018, 1:42 a.m. UTC | #8
Okay, I'll try to split it.
On Wed, 2018-05-23 at 19:04 +0530, Vinod wrote:
> On 23-05-18, 12:56, s.hauer@pengutronix.de wrote:
> > 
> > Well, it's somewhat related to virtual dma support, but that's not my
> > point. My point is that this patch is quite big and thus hard to
> > review. If we find ways to make it smaller and to split it up in
> > multiple patches then we should do so, because it makes it easier to
> > review and in case you break something here we raise the chance that
> > a "git bisect" lands on a smaller patch which is easier to understand.
> > 
> > Please try and make that a separate change. I haven't really looked
> > into it and it may not be possible due to reasons I haven't seen, but
> > please at least give it a try.
> 
> That is something that would help me as well. I have reviewed the patch
> and am not sure I fully understand the changes, so breaking up stuff
> would definitely help in the review.
diff mbox

Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 27df3e2..c4ce43c 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -247,6 +247,7 @@  config IMX_SDMA
 	tristate "i.MX SDMA support"
 	depends on ARCH_MXC
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
 	  Freescale i.MX25/31/35/51/53/6 chips.
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index ccd03c3..df79e73 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -48,6 +48,7 @@ 
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 /* SDMA registers */
 #define SDMA_H_C0PTR		0x000
@@ -291,10 +292,19 @@  struct sdma_context_data {
 	u32  scratch7;
 } __attribute__ ((packed));
 
-#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
-
 struct sdma_engine;
 
+struct sdma_desc {
+	struct virt_dma_desc	vd;
+	struct list_head	node;
+	unsigned int		num_bd;
+	dma_addr_t		bd_phys;
+	unsigned int		buf_tail;
+	unsigned int		buf_ptail;
+	struct sdma_channel	*sdmac;
+	struct sdma_buffer_descriptor *bd;
+};
+
 /**
  * struct sdma_channel - housekeeping for a SDMA channel
  *
@@ -310,19 +320,17 @@  struct sdma_engine;
  * @num_bd		max NUM_BD. number of descriptors currently handling
  */
 struct sdma_channel {
+	struct virt_dma_chan		vc;
+	struct list_head		pending;
 	struct sdma_engine		*sdma;
+	struct sdma_desc		*desc;
 	unsigned int			channel;
 	enum dma_transfer_direction		direction;
 	enum sdma_peripheral_type	peripheral_type;
 	unsigned int			event_id0;
 	unsigned int			event_id1;
 	enum dma_slave_buswidth		word_size;
-	unsigned int			buf_tail;
-	unsigned int			buf_ptail;
-	unsigned int			num_bd;
 	unsigned int			period_len;
-	struct sdma_buffer_descriptor	*bd;
-	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
 	unsigned int			device_to_device;
 	unsigned long			flags;
@@ -330,15 +338,12 @@  struct sdma_channel {
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
-	struct dma_chan			chan;
-	spinlock_t			lock;
-	struct dma_async_tx_descriptor	desc;
 	enum dma_status			status;
 	unsigned int			chn_count;
 	unsigned int			chn_real_count;
-	struct tasklet_struct		tasklet;
 	struct imx_dma_data		data;
 	bool				enabled;
+	u32				bd_size_sum;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -398,6 +403,9 @@  struct sdma_engine {
 	u32				spba_start_addr;
 	u32				spba_end_addr;
 	unsigned int			irq;
+	/* channel0 bd */
+	dma_addr_t			bd0_phys;
+	struct sdma_buffer_descriptor	*bd0;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -553,6 +561,8 @@  MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 #define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
 #define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected*/
 
+static void sdma_start_desc(struct sdma_channel *sdmac);
+
 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 {
 	u32 chnenbl0 = sdma->drvdata->chnenbl0;
@@ -597,14 +607,7 @@  static int sdma_config_ownership(struct sdma_channel *sdmac,
 
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = &sdma->channel[channel];
-
 	writel(BIT(channel), sdma->regs + SDMA_H_START);
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = true;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 }
 
 /*
@@ -632,7 +635,7 @@  static int sdma_run_channel0(struct sdma_engine *sdma)
 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 		u32 address)
 {
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
@@ -691,23 +694,16 @@  static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
 	struct sdma_buffer_descriptor *bd;
+	struct sdma_desc *desc = sdmac->desc;
 	int error = 0;
 	enum dma_status	old_status = sdmac->status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	if (!sdmac->enabled) {
-		spin_unlock_irqrestore(&sdmac->lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 
 	/*
 	 * loop mode. Iterate over descriptors, re-setup them and
 	 * call callback function.
 	 */
-	while (1) {
-		bd = &sdmac->bd[sdmac->buf_tail];
+	while (desc) {
+		bd = &desc->bd[desc->buf_tail];
 
 		if (bd->mode.status & BD_DONE)
 			break;
@@ -726,8 +722,8 @@  static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		sdmac->chn_real_count = bd->mode.count;
 		bd->mode.status |= BD_DONE;
 		bd->mode.count = sdmac->period_len;
-		sdmac->buf_ptail = sdmac->buf_tail;
-		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
+		desc->buf_ptail = desc->buf_tail;
+		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 
 		/*
 		 * The callback is called from the interrupt context in order
@@ -735,15 +731,16 @@  static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * SDMA transaction status by the time the client tasklet is
 		 * executed.
 		 */
-
-		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+		spin_unlock(&sdmac->vc.lock);
+		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+		spin_lock(&sdmac->vc.lock);
 
 		if (error)
 			sdmac->status = old_status;
 	}
 }
 
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 	struct sdma_buffer_descriptor *bd;
@@ -754,8 +751,8 @@  static void mxc_sdma_handle_channel_normal(unsigned long data)
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
 	 */
-	for (i = 0; i < sdmac->num_bd; i++) {
-		bd = &sdmac->bd[i];
+	for (i = 0; i < sdmac->desc->num_bd; i++) {
+		bd = &sdmac->desc->bd[i];
 
 		 if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
@@ -766,10 +763,6 @@  static void mxc_sdma_handle_channel_normal(unsigned long data)
 		sdmac->status = DMA_ERROR;
 	else
 		sdmac->status = DMA_COMPLETE;
-
-	dma_cookie_complete(&sdmac->desc);
-
-	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 }
 
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -785,13 +778,24 @@  static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
-
-		if (sdmac->flags & IMX_DMA_SG_LOOP)
-			sdma_update_channel_loop(sdmac);
-		else
-			tasklet_schedule(&sdmac->tasklet);
+		struct sdma_desc *desc;
+
+		spin_lock(&sdmac->vc.lock);
+		desc = sdmac->desc;
+		if (desc) {
+			if (sdmac->flags & IMX_DMA_SG_LOOP) {
+				sdma_update_channel_loop(sdmac);
+			} else {
+				mxc_sdma_handle_channel_normal(sdmac);
+				vchan_cookie_complete(&desc->vd);
+				if (!list_empty(&sdmac->pending))
+					list_del(&desc->node);
+				 sdma_start_desc(sdmac);
+			}
+		}
 
 		__clear_bit(channel, &stat);
+		spin_unlock(&sdmac->vc.lock);
 	}
 
 	return IRQ_HANDLED;
@@ -897,7 +901,7 @@  static int sdma_load_context(struct sdma_channel *sdmac)
 	int channel = sdmac->channel;
 	int load_address;
 	struct sdma_context_data *context = sdma->context;
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	int ret;
 	unsigned long flags;
 
@@ -946,7 +950,7 @@  static int sdma_load_context(struct sdma_channel *sdmac)
 
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sdma_channel, chan);
+	return container_of(chan, struct sdma_channel, vc.chan);
 }
 
 static int sdma_disable_channel(struct dma_chan *chan)
@@ -954,15 +958,10 @@  static int sdma_disable_channel(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
-	unsigned long flags;
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
 
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = false;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
-
 	return 0;
 }
 
@@ -1097,42 +1096,101 @@  static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 	return 0;
 }
 
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_alloc_bd(struct sdma_desc *desc)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
-	int ret = -EBUSY;
+	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+	int ret = 0;
+	unsigned long flags;
 
-	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
+	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
 					GFP_KERNEL);
-	if (!sdmac->bd) {
+	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+	spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
+	desc->sdmac->bd_size_sum += bd_size;
+	spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
 
-	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-	return 0;
 out:
-
 	return ret;
 }
 
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static void sdma_free_bd(struct sdma_desc *desc)
 {
+	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	unsigned long flags;
-	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-	dma_cookie_t cookie;
 
-	spin_lock_irqsave(&sdmac->lock, flags);
+	if (desc->bd) {
+		dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+
+		spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
+		desc->sdmac->bd_size_sum -= bd_size;
+		spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
+	}
+}
+
+static int sdma_request_channel0(struct sdma_engine *sdma)
+{
+	int ret = 0;
+
+	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+					GFP_KERNEL);
+	if (!sdma->bd0) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
-	cookie = dma_cookie_assign(tx);
+	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
 
-	spin_unlock_irqrestore(&sdmac->lock, flags);
+	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
+out:
 
-	return cookie;
+	return ret;
+}
+
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
+
+	if (desc) {
+		sdma_free_bd(desc);
+		kfree(desc);
+	}
+}
+
+static int sdma_terminate_all(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_get_all_descriptors(&sdmac->vc, &head);
+	while (!list_empty(&sdmac->pending)) {
+		struct sdma_desc *desc = list_first_entry(&sdmac->pending,
+			struct sdma_desc, node);
+
+		 list_del(&desc->node);
+		 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+		 sdmac->vc.desc_free(&desc->vd);
+		 spin_lock_irqsave(&sdmac->vc.lock, flags);
+	}
+
+	if (sdmac->desc)
+		sdmac->desc = NULL;
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_dma_desc_free_list(&sdmac->vc, &head);
+	sdma_disable_channel_with_delay(chan);
+
+	return 0;
 }
 
 static int sdma_alloc_chan_resources(struct dma_chan *chan)
@@ -1168,18 +1226,11 @@  static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto disable_clk_ipg;
 
-	ret = sdma_request_channel(sdmac);
-	if (ret)
-		goto disable_clk_ahb;
-
 	ret = sdma_set_channel_priority(sdmac, prio);
 	if (ret)
 		goto disable_clk_ahb;
 
-	dma_async_tx_descriptor_init(&sdmac->desc, chan);
-	sdmac->desc.tx_submit = sdma_tx_submit;
-	/* txd.flags will be overwritten in prep funcs */
-	sdmac->desc.flags = DMA_CTRL_ACK;
+	sdmac->bd_size_sum = 0;
 
 	return 0;
 
@@ -1195,7 +1246,7 @@  static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel(chan);
+	sdma_terminate_all(chan);
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1207,12 +1258,43 @@  static void sdma_free_chan_resources(struct dma_chan *chan)
 
 	sdma_set_channel_priority(sdmac, 0);
 
-	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
 }
 
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+				enum dma_transfer_direction direction, u32 bds)
+{
+	struct sdma_desc *desc;
+
+	desc = kzalloc((sizeof(*desc)), GFP_KERNEL);
+	if (!desc)
+		goto err_out;
+
+	sdmac->status = DMA_IN_PROGRESS;
+	sdmac->direction = direction;
+	sdmac->flags = 0;
+	sdmac->chn_count = 0;
+	sdmac->chn_real_count = 0;
+
+	desc->sdmac = sdmac;
+	desc->num_bd = bds;
+	INIT_LIST_HEAD(&desc->node);
+
+	if (sdma_alloc_bd(desc))
+		goto err_desc_out;
+
+	if (sdma_load_context(sdmac))
+		goto err_desc_out;
+
+	return desc;
+
+err_desc_out:
+	kfree(desc);
+err_out:
+	return NULL;
+}
+
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
@@ -1223,35 +1305,24 @@  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	int ret, i, count;
 	int channel = sdmac->channel;
 	struct scatterlist *sg;
+	struct sdma_desc *desc;
 
-	if (sdmac->status == DMA_IN_PROGRESS)
+	if (!chan)
 		return NULL;
-	sdmac->status = DMA_IN_PROGRESS;
-
-	sdmac->flags = 0;
 
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
+	desc = sdma_transfer_init(sdmac, direction, sg_len);
+	if (!desc)
+		goto err_out;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			sg_len, channel);
 
-	sdmac->direction = direction;
 	ret = sdma_load_context(sdmac);
 	if (ret)
 		goto err_out;
 
-	if (sg_len > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, sg_len, NUM_BD);
-		ret = -EINVAL;
-		goto err_out;
-	}
-
-	sdmac->chn_count = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;
 
 		bd->buffer_addr = sg->dma_address;
@@ -1262,7 +1333,7 @@  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
 					channel, count, 0xffff);
 			ret = -EINVAL;
-			goto err_out;
+			goto err_bd_out;
 		}
 
 		bd->mode.count = count;
@@ -1307,10 +1378,11 @@  static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		bd->mode.status = param;
 	}
 
-	sdmac->num_bd = sg_len;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
 
-	return &sdmac->desc;
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
@@ -1326,39 +1398,32 @@  static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	int num_periods = buf_len / period_len;
 	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
+	struct sdma_desc *desc;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-
-	sdmac->status = DMA_IN_PROGRESS;
+	/* Now allocate and setup the descriptor. */
+	desc = sdma_transfer_init(sdmac, direction, num_periods);
+	if (!desc)
+		goto err_out;
 
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
+	desc->buf_tail = 0;
+	desc->buf_ptail = 0;
 	sdmac->period_len = period_len;
-
 	sdmac->flags |= IMX_DMA_SG_LOOP;
-	sdmac->direction = direction;
+
 	ret = sdma_load_context(sdmac);
 	if (ret)
 		goto err_out;
 
-	if (num_periods > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, num_periods, NUM_BD);
-		goto err_out;
-	}
-
 	if (period_len > 0xffff) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
 				channel, period_len, 0xffff);
-		goto err_out;
+		goto err_bd_out;
 	}
 
 	while (buf < buf_len) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;
 
 		bd->buffer_addr = dma_addr;
@@ -1366,7 +1431,7 @@  static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		bd->mode.count = period_len;
 
 		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
-			goto err_out;
+			goto err_bd_out;
 		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
 			bd->mode.command = 0;
 		else
@@ -1389,10 +1454,10 @@  static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		i++;
 	}
 
-	sdmac->num_bd = num_periods;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
@@ -1432,26 +1497,74 @@  static enum dma_status sdma_tx_status(struct dma_chan *chan,
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	u32 residue;
+	struct virt_dma_desc *vd;
+	struct sdma_desc *desc;
+	enum dma_status ret;
+	unsigned long flags;
 
-	if (sdmac->flags & IMX_DMA_SG_LOOP)
-		residue = (sdmac->num_bd - sdmac->buf_ptail) *
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE && txstate) {
+		residue = sdmac->chn_count - sdmac->chn_real_count;
+		return ret;
+	}
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vd = vchan_find_desc(&sdmac->vc, cookie);
+	desc = to_sdma_desc(&vd->tx);
+	if (vd) {
+		if (sdmac->flags & IMX_DMA_SG_LOOP)
+			residue = (desc->num_bd - desc->buf_ptail) *
 			   sdmac->period_len - sdmac->chn_real_count;
-	else
+		else
+			residue = sdmac->chn_count - sdmac->chn_real_count;
+	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
 		residue = sdmac->chn_count - sdmac->chn_real_count;
+	} else {
+		residue = 0;
+	}
+	ret = sdmac->status;
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			 residue);
 
-	return sdmac->status;
+	return ret;
+}
+
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+	struct sdma_desc *desc;
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+
+	if (!vd) {
+		sdmac->desc = NULL;
+		return;
+	}
+	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+	/*
+	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
+	 * the desc alloced will never be freed in vchan_dma_desc_free_list
+	 */
+	if (!(sdmac->flags & IMX_DMA_SG_LOOP)) {
+		list_add_tail(&sdmac->desc->node, &sdmac->pending);
+		list_del(&vd->node);
+	}
+	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+	sdma_enable_channel(sdma, sdmac->channel);
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_engine *sdma = sdmac->sdma;
+	unsigned long flags;
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		sdma_enable_channel(sdma, sdmac->channel);
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+		sdma_start_desc(sdmac);
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
@@ -1657,7 +1770,7 @@  static int sdma_init(struct sdma_engine *sdma)
 	for (i = 0; i < MAX_DMA_CHANNELS; i++)
 		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
 
-	ret = sdma_request_channel(&sdma->channel[0]);
+	ret = sdma_request_channel0(sdma);
 	if (ret)
 		goto err_dma_alloc;
 
@@ -1819,22 +1932,17 @@  static int sdma_probe(struct platform_device *pdev)
 		struct sdma_channel *sdmac = &sdma->channel[i];
 
 		sdmac->sdma = sdma;
-		spin_lock_init(&sdmac->lock);
-
-		sdmac->chan.device = &sdma->dma_device;
-		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
-
-		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
-			     (unsigned long) sdmac);
+		sdmac->status = DMA_IN_PROGRESS;
+		sdmac->vc.desc_free = sdma_desc_free;
+		INIT_LIST_HEAD(&sdmac->pending);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
 		 * that channel 0 in dmaengine counting matches sdma channel 1.
 		 */
 		if (i)
-			list_add_tail(&sdmac->chan.device_node,
-					&sdma->dma_device.channels);
+			vchan_init(&sdmac->vc, &sdma->dma_device);
 	}
 
 	ret = sdma_init(sdma);
@@ -1879,7 +1987,7 @@  static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_config = sdma_config;
-	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
+	sdma->dma_device.device_terminate_all = sdma_terminate_all;
 	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
@@ -1939,7 +2047,8 @@  static int sdma_remove(struct platform_device *pdev)
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct sdma_channel *sdmac = &sdma->channel[i];
 
-		tasklet_kill(&sdmac->tasklet);
+		tasklet_kill(&sdmac->vc.task);
+		sdma_free_chan_resources(&sdmac->vc.chan);
 	}
 
 	platform_set_drvdata(pdev, NULL);