@@ -303,7 +303,6 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
unsigned int off;
unsigned int t_size, p_cnt;
unsigned char *buf;
- struct page *pg;
unsigned long flags = 0;
if (host->req->long_data) {
@@ -318,14 +317,26 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
unsigned int uninitialized_var(p_off);
if (host->req->long_data) {
- pg = nth_page(sg_page(&host->req->sg),
- off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
local_irq_save(flags);
- buf = kmap_atomic(pg) + p_off;
+ buf = sg_map_offset(&host->req->sg,
+ off - host->req->sg.offset,
+ SG_KMAP_ATOMIC);
+ if (IS_ERR(buf)) {
+ /*
+ * This should really never happen unless
+ * the code is changed to use memory that is
+ * not mappable in the sg. Seeing there doesn't
+ * seem to be any error path out of here,
+ * we can only WARN.
+ */
+ WARN(1, "Non-mappable memory used in sg!");
+ break;
+ }
+
} else {
buf = host->req->data + host->block_pos;
p_cnt = host->req->data_len - host->block_pos;
@@ -341,7 +352,9 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
: jmb38x_ms_read_reg_data(host, buf, p_cnt);
if (host->req->long_data) {
- kunmap_atomic(buf - p_off);
+ sg_unmap_offset(&host->req->sg, buf,
+ off - host->req->sg.offset,
+ SG_KMAP_ATOMIC);
local_irq_restore(flags);
}
@@ -186,7 +186,6 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
unsigned int off;
unsigned int t_size, p_cnt;
unsigned char *buf;
- struct page *pg;
unsigned long flags = 0;
if (host->req->long_data) {
@@ -203,14 +202,25 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
unsigned int uninitialized_var(p_off);
if (host->req->long_data) {
- pg = nth_page(sg_page(&host->req->sg),
- off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
local_irq_save(flags);
- buf = kmap_atomic(pg) + p_off;
+ buf = sg_map_offset(&host->req->sg,
+ off - host->req->sg.offset,
+ SG_KMAP_ATOMIC);
+ if (IS_ERR(buf)) {
+ /*
+ * This should really never happen unless
+ * the code is changed to use memory that is
+ * not mappable in the sg. Seeing there doesn't
+ * seem to be any error path out of here,
+ * we can only WARN.
+ */
+ WARN(1, "Non-mappable memory used in sg!");
+ break;
+ }
} else {
buf = host->req->data + host->block_pos;
p_cnt = host->req->data_len - host->block_pos;
@@ -221,7 +231,9 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
: tifm_ms_read_data(host, buf, p_cnt);
if (host->req->long_data) {
- kunmap_atomic(buf - p_off);
+ sg_unmap_offset(&host->req->sg, buf,
+ off - host->req->sg.offset,
+ SG_KMAP_ATOMIC);
local_irq_restore(flags);
}
Straightforward conversion, but we have to WARN if unmappable memory
finds its way into the sgl.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
---
 drivers/memstick/host/jmb38x_ms.c | 23 ++++++++++++++++++-----
 drivers/memstick/host/tifm_ms.c   | 22 +++++++++++++++++-----
 2 files changed, 35 insertions(+), 10 deletions(-)