Use the new zsmalloc handle mapping API so that zram read() becomes
preemptible.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 drivers/block/zram/zcomp.c    |   4 +-
 drivers/block/zram/zcomp.h    |   2 +
 drivers/block/zram/zram_drv.c | 103 ++++++++++++++++++----------------
 3 files changed, 61 insertions(+), 48 deletions(-)

diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -45,6 +45,7 @@ static const struct zcomp_ops *backends[] = {
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *strm)
{
comp->ops->destroy_ctx(&strm->ctx);
+ vfree(strm->handle_mem_copy);
vfree(strm->buffer);
kfree(strm);
}
@@ -66,12 +67,13 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
return NULL;
}
+ strm->handle_mem_copy = vzalloc(PAGE_SIZE);
	/*
	 * allocate 2 pages. 1 for compressed data, plus 1 extra in case
	 * the compressed data is larger than the original one.
	 */
strm->buffer = vzalloc(2 * PAGE_SIZE);
- if (!strm->buffer) {
+ if (!strm->buffer || !strm->handle_mem_copy) {
zcomp_strm_free(comp, strm);
return NULL;
}
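
Two brief notes on the allocation above. The PAGE_SIZE bounce buffer is a
natural worst case: zram never stores an object larger than one page, since
data that compresses to PAGE_SIZE or more is written uncompressed as a
ZRAM_HUGE object of exactly PAGE_SIZE. And the shared error path works
because kfree() and vfree() are no-ops on a NULL pointer, so
zcomp_strm_free() doubles as the cleanup for a partially constructed stream.
A minimal sketch of that pattern, with a hypothetical demo_strm standing in
for zcomp_strm:

#include <linux/slab.h>
#include <linux/vmalloc.h>

struct demo_strm {
	void *buffer;		/* NULL if vzalloc() failed */
	void *handle_mem_copy;	/* NULL if vzalloc() failed */
};

static void demo_strm_free(struct demo_strm *strm)
{
	/* vfree(NULL) and kfree(NULL) are defined no-ops, which is what
	 * makes a single free routine safe on the error path as well.
	 */
	vfree(strm->handle_mem_copy);
	vfree(strm->buffer);
	kfree(strm);
}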

diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -34,6 +34,8 @@ struct zcomp_strm {
struct list_head entry;
/* compression buffer */
void *buffer;
+ /* handle object memory copy */
+ void *handle_mem_copy;
struct zcomp_ctx ctx;
};
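
For readers without the zsmalloc half of this series applied, here is the
rough shape of the new API as reconstructed from the call sites in this
patch. This is a sketch, not the authoritative definition: the field and
function names come from the usage in this patch, while the comments and
exact field types are guesses (both functions are declared void here simply
because no call site in this patch checks a return value):

struct zs_handle_mapping {
	unsigned long handle;	/* object handle from zs_malloc() */
	void *handle_mem;	/* output: address to read/write through */
	void *local_copy;	/* caller-supplied PAGE_SIZE bounce buffer */
	int mode;		/* ZS_MM_RO or ZS_MM_WO */
};

void zs_map_handle(struct zs_pool *pool, struct zs_handle_mapping *hm);
void zs_unmap_handle(struct zs_pool *pool, struct zs_handle_mapping *hm);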

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1558,37 +1558,43 @@ static int read_same_filled_page(struct zram *zram, struct page *page,
static int read_incompressible_page(struct zram *zram, struct page *page,
u32 index)
{
- unsigned long handle;
- void *src, *dst;
+ struct zs_handle_mapping hm;
+ void *dst;
- handle = zram_get_handle(zram, index);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ hm.handle = zram_get_handle(zram, index);
+ hm.mode = ZS_MM_RO;
+
+ zs_map_handle(zram->mem_pool, &hm);
dst = kmap_local_page(page);
- copy_page(dst, src);
+ copy_page(dst, hm.handle_mem);
kunmap_local(dst);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_unmap_handle(zram->mem_pool, &hm);
return 0;
}
static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
{
+ struct zs_handle_mapping hm;
struct zcomp_strm *zstrm;
- unsigned long handle;
unsigned int size;
- void *src, *dst;
+ void *dst;
int ret, prio;
- handle = zram_get_handle(zram, index);
size = zram_get_obj_size(zram, index);
prio = zram_get_priority(zram, index);
zstrm = zcomp_stream_get(zram->comps[prio]);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ hm.handle = zram_get_handle(zram, index);
+ hm.mode = ZS_MM_RO;
+ hm.local_copy = zstrm->handle_mem_copy;
+
+ zs_map_handle(zram->mem_pool, &hm);
dst = kmap_local_page(page);
- ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ ret = zcomp_decompress(zram->comps[prio], zstrm,
+ hm.handle_mem, size, dst);
kunmap_local(dst);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_unmap_handle(zram->mem_pool, &hm);
zcomp_stream_put(zram->comps[prio], zstrm);
	return ret;
}
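
These two readers are where the commit message's claim materializes. The old
zs_map_object() could hand back a pointer into a per-CPU mapping area when an
object spans two pages, which forced preemption off between map and unmap.
Under the new API the caller donates a bounce buffer via local_copy, so
zsmalloc can presumably either map the single backing page directly or
assemble a linear copy in the caller's buffer, and the mapped section may
schedule. That would also explain why read_incompressible_page() never sets
hm.local_copy: a ZRAM_HUGE object occupies a whole page and can never
straddle a page boundary, so the bounce path should be unreachable there. A
hypothetical sketch of the implied read-side policy (obj_spans_pages(),
copy_obj_to_buffer() and map_first_page() are made-up helper names):

static void sketch_map_ro(struct zs_handle_mapping *hm)
{
	if (obj_spans_pages(hm->handle)) {
		/* Object crosses a page boundary: linearize it into the
		 * caller's buffer. No per-CPU mapping area is involved,
		 * so the caller stays preemptible.
		 */
		copy_obj_to_buffer(hm->handle, hm->local_copy);
		hm->handle_mem = hm->local_copy;
	} else {
		/* Fits within one page: map it directly. */
		hm->handle_mem = map_first_page(hm->handle);
	}
}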
@@ -1683,33 +1689,34 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
static int write_incompressible_page(struct zram *zram, struct page *page,
u32 index)
{
- unsigned long handle;
- void *src, *dst;
+ struct zs_handle_mapping hm;
+ void *src;
	/*
	 * This function is called from preemptible context, so we don't
	 * need to do optimistic allocation with a fallback to pessimistic
	 * handle allocation, like we do for compressible pages.
	 */
- handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
- GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle))
- return PTR_ERR((void *)handle);
+ hm.handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ if (IS_ERR_VALUE(hm.handle))
+ return PTR_ERR((void *)hm.handle);
if (!zram_can_store_page(zram)) {
- zs_free(zram->mem_pool, handle);
+ zs_free(zram->mem_pool, hm.handle);
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ hm.mode = ZS_MM_WO;
+ zs_map_handle(zram->mem_pool, &hm);
src = kmap_local_page(page);
- memcpy(dst, src, PAGE_SIZE);
+ memcpy(hm.handle_mem, src, PAGE_SIZE);
kunmap_local(src);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_unmap_handle(zram->mem_pool, &hm);
zram_slot_write_lock(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
- zram_set_handle(zram, index, handle);
+ zram_set_handle(zram, index, hm.handle);
zram_set_obj_size(zram, index, PAGE_SIZE);
zram_slot_write_unlock(zram, index);
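
One detail worth isolating from the writers above: zs_malloc() signals
failure by encoding a negative errno in the returned unsigned long handle
itself, hence the IS_ERR_VALUE()/PTR_ERR() pairing at every allocation site.
The pattern on its own, as a small helper-style sketch:

static int demo_alloc(struct zram *zram, size_t len,
		      struct zs_handle_mapping *hm)
{
	hm->handle = zs_malloc(zram->mem_pool, len,
			       GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
	/* On failure the handle doubles as an ERR_PTR-style carrier. */
	if (IS_ERR_VALUE(hm->handle))
		return PTR_ERR((void *)hm->handle);
	return 0;
}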
@@ -1724,9 +1731,9 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long handle;
+ struct zs_handle_mapping hm;
unsigned int comp_len;
- void *dst, *mem;
+ void *mem;
struct zcomp_strm *zstrm;
unsigned long element;
bool same_filled;
@@ -1758,25 +1765,26 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
return write_incompressible_page(zram, page, index);
}
- handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle))
- return PTR_ERR((void *)handle);
+ hm.handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ if (IS_ERR_VALUE(hm.handle))
+ return PTR_ERR((void *)hm.handle);
if (!zram_can_store_page(zram)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP], zstrm);
- zs_free(zram->mem_pool, handle);
+ zs_free(zram->mem_pool, hm.handle);
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
-
- memcpy(dst, zstrm->buffer, comp_len);
+ hm.mode = ZS_MM_WO;
+ hm.local_copy = zstrm->handle_mem_copy;
+ zs_map_handle(zram->mem_pool, &hm);
+ memcpy(hm.handle_mem, zstrm->buffer, comp_len);
+ zs_unmap_handle(zram->mem_pool, &hm);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP], zstrm);
- zs_unmap_object(zram->mem_pool, handle);
zram_slot_write_lock(zram, index);
- zram_set_handle(zram, index, handle);
+ zram_set_handle(zram, index, hm.handle);
zram_set_obj_size(zram, index, comp_len);
zram_slot_write_unlock(zram, index);
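
Note the ordering change buried in this hunk: the old code released the
compression stream before zs_unmap_object(), while the new code unmaps first
and releases the stream second. The inversion looks deliberate: hm.local_copy
points into the stream (zstrm->handle_mem_copy), and zs_unmap_handle() may
still need that buffer, since for a ZS_MM_WO mapping of a spanning object the
unmap is presumably where the data is copied back into the pool. The
resulting lifetime rule, sketched:

static void demo_store(struct zram *zram, struct zcomp_strm *zstrm,
		       struct zs_handle_mapping *hm, unsigned int len)
{
	zs_map_handle(zram->mem_pool, hm);
	memcpy(hm->handle_mem, zstrm->buffer, len);
	zs_unmap_handle(zram->mem_pool, hm);	/* may consume local_copy */

	/* Only now is it safe to recycle the stream and its buffers. */
	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP], zstrm);
}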
@@ -1875,14 +1883,14 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
u32 prio_max)
{
struct zcomp_strm *zstrm = NULL;
+ struct zs_handle_mapping hm;
unsigned long handle_old;
- unsigned long handle_new;
unsigned int comp_len_old;
unsigned int comp_len_new;
unsigned int class_index_old;
unsigned int class_index_new;
u32 num_recomps = 0;
- void *src, *dst;
+ void *src;
int ret;
handle_old = zram_get_handle(zram, index);
@@ -2000,34 +2008,35 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
/* zsmalloc handle allocation can schedule, unlock slot's bucket */
zram_slot_write_unlock(zram, index);
- handle_new = zs_malloc(zram->mem_pool, comp_len_new,
- GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ hm.handle = zs_malloc(zram->mem_pool, comp_len_new,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
zram_slot_write_lock(zram, index);
/*
* If we couldn't allocate memory for recompressed object then bail
* out and simply keep the old (existing) object in mempool.
*/
- if (IS_ERR_VALUE(handle_new)) {
+ if (IS_ERR_VALUE(hm.handle)) {
zcomp_stream_put(zram->comps[prio], zstrm);
- return PTR_ERR((void *)handle_new);
+ return PTR_ERR((void *)hm.handle);
}
/* Slot has been modified concurrently */
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
zcomp_stream_put(zram->comps[prio], zstrm);
- zs_free(zram->mem_pool, handle_new);
+ zs_free(zram->mem_pool, hm.handle);
return 0;
}
- dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
- memcpy(dst, zstrm->buffer, comp_len_new);
+ hm.mode = ZS_MM_WO;
+ hm.local_copy = zstrm->handle_mem_copy;
+ zs_map_handle(zram->mem_pool, &hm);
+ memcpy(hm.handle_mem, zstrm->buffer, comp_len_new);
+ zs_unmap_handle(zram->mem_pool, &hm);
zcomp_stream_put(zram->comps[prio], zstrm);
- zs_unmap_object(zram->mem_pool, handle_new);
-
zram_free_page(zram, index);
- zram_set_handle(zram, index, handle_new);
+ zram_set_handle(zram, index, hm.handle);
zram_set_obj_size(zram, index, comp_len_new);
zram_set_priority(zram, index, prio);
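
Finally, the recompression path adds a concurrency wrinkle on top of the same
store sequence: the slot's bucket lock is dropped around zs_malloc() (which
can schedule) and the ZRAM_PP_SLOT flag is revalidated after relocking,
discarding the new allocation if the slot changed underneath. The hunk above,
linearized as a reference (error handling abbreviated; pool and gfp stand for
zram->mem_pool and the GFP_NOIO flags used above):

zram_slot_write_unlock(zram, index);	/* zs_malloc() can schedule */
hm.handle = zs_malloc(pool, comp_len_new, gfp);
zram_slot_write_lock(zram, index);

if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
	zs_free(pool, hm.handle);	/* raced: keep the old object */
	return 0;
}

hm.mode = ZS_MM_WO;
hm.local_copy = zstrm->handle_mem_copy;
zs_map_handle(pool, &hm);
memcpy(hm.handle_mem, zstrm->buffer, comp_len_new);
zs_unmap_handle(pool, &hm);		/* before the stream is put */
zcomp_stream_put(zram->comps[prio], zstrm);

zram_free_page(zram, index);		/* drop the old object */
zram_set_handle(zram, index, hm.handle);
zram_set_obj_size(zram, index, comp_len_new);
zram_set_priority(zram, index, prio);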