
[2/3] z3fold: compact objects more accurately

Message ID: 20191127152216.6ad33745a21ba71c53606acb@gmail.com (mailing list archive)
State: New, archived
Series: z3fold fixes for intra-page compaction

Commit Message

Vitaly Wool Nov. 27, 2019, 2:22 p.m. UTC
There are several small things to be fixed in the new
inter-page compaction mechanism. First, we should set the
relevant size in chunks to 0 in the old z3fold header for an
object that has been moved to another z3fold page. Second, we
should not do inter-page compaction while an object is mapped.
Lastly, free_handle() should happen before release_z3fold_page(),
except when the page is under reclaim, in which case the handle
will be freed by reclaim itself.

This patch addresses all three issues.

Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
---
 mm/z3fold.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
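
Before the patch itself, here is a toy userspace model of the third
point above (the free_handle() ordering). This is a sketch only, not
z3fold code: struct toy_page is a stand-in, and put_ref() and
release_page() are illustrative names, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for a z3fold page: just a refcount and a flag. */
struct toy_page {
	int refcount;
	bool released;
};

static void release_page(struct toy_page *p)
{
	p->released = true;	/* the slots stored in the page die here */
}

static bool put_ref(struct toy_page *p)
{
	if (--p->refcount == 0) {
		release_page(p);
		return true;
	}
	return false;
}

static void free_handle(struct toy_page *p)
{
	if (p->released)
		printf("BUG: handle freed after page release\n");
	else
		printf("handle freed while page is still alive\n");
}

int main(void)
{
	struct toy_page pg = { .refcount = 1, .released = false };

	/* Corrected order from the patch: free the handle first,
	 * then drop what may be the last page reference. */
	free_handle(&pg);
	if (put_ref(&pg))
		printf("page released\n");
	return 0;
}

With the reversed order (dropping the last reference first),
free_handle() would touch slot memory inside a page that has already
been released; the page_claimed case is exempt because reclaim frees
the handle itself.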

Patch

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 36bd2612f609..f2a75418e248 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -670,6 +670,7 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
 	int first_idx = __idx(zhdr, FIRST);
 	int middle_idx = __idx(zhdr, MIDDLE);
 	int last_idx = __idx(zhdr, LAST);
+	unsigned short *moved_chunks = NULL;
 
 	/*
 	 * No need to protect slots here -- all the slots are "local" and
@@ -679,14 +680,17 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
 		p += ZHDR_SIZE_ALIGNED;
 		sz = zhdr->first_chunks << CHUNK_SHIFT;
 		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
+		moved_chunks = &zhdr->first_chunks;
 	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
 		p += zhdr->start_middle << CHUNK_SHIFT;
 		sz = zhdr->middle_chunks << CHUNK_SHIFT;
 		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
+		moved_chunks = &zhdr->middle_chunks;
 	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
 		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
 		sz = zhdr->last_chunks << CHUNK_SHIFT;
 		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
+		moved_chunks = &zhdr->last_chunks;
 	}
 
 	if (sz > 0) {
@@ -743,6 +747,8 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
 		write_unlock(&zhdr->slots->lock);
 		add_to_unbuddied(pool, new_zhdr);
 		z3fold_page_unlock(new_zhdr);
+
+		*moved_chunks = 0;
 	}
 
 	return new_zhdr;
@@ -840,7 +846,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 	}
 
 	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
-			compact_single_buddy(zhdr)) {
+	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
 		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
 			atomic64_dec(&pool->pages_nr);
 		else
@@ -1254,6 +1260,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		return;
 	}
 
+	if (!page_claimed)
+		free_handle(handle);
 	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
 		atomic64_dec(&pool->pages_nr);
 		return;
@@ -1263,7 +1271,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		z3fold_page_unlock(zhdr);
 		return;
 	}
-	free_handle(handle);
 	if (unlikely(PageIsolated(page)) ||
 	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
 		put_z3fold_header(zhdr);
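
Similarly, as a reading aid for the compact_single_buddy() hunks above,
a minimal standalone sketch of the moved_chunks pattern. Only the field
names first_chunks/middle_chunks/last_chunks are borrowed from the diff;
struct toy_header and the surrounding logic are simplified stand-ins.

#include <stdio.h>

/* Simplified stand-in for the z3fold header: only the size fields. */
struct toy_header {
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
};

int main(void)
{
	struct toy_header old = { .first_chunks = 4 };
	unsigned short *moved_chunks = NULL;

	/* While picking the buddy to move, remember where its size
	 * in chunks lives in the old header. */
	if (old.first_chunks)
		moved_chunks = &old.first_chunks;
	else if (old.middle_chunks)
		moved_chunks = &old.middle_chunks;
	else if (old.last_chunks)
		moved_chunks = &old.last_chunks;

	/* Once the object has been copied to the new page, zero the
	 * size in the old header so it no longer claims the object. */
	if (moved_chunks)
		*moved_chunks = 0;

	printf("old.first_chunks = %u\n", old.first_chunks);	/* prints 0 */
	return 0;
}

Without that final store, the old header would still report a nonzero
size for an object that now lives in another page, which is exactly the
inaccuracy the first fix removes.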