@@ -108,4 +108,15 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
 {
 	__SetPageMovable(zpdesc_page(zpdesc), mops);
 }
+
+static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
+{
+	return PageIsolated(zpdesc_page(zpdesc));
+}
+
+static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
+{
+	return page_zone(zpdesc_page(zpdesc));
+}
+
 #endif
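
Both new helpers are thin, type-safe wrappers over the corresponding struct page APIs, so zsmalloc call sites can stay in zpdesc terms instead of open-coding zpdesc_page() conversions. A minimal usage sketch (zpdesc_same_zone() is illustrative, not part of the patch):

	/* Illustrative only: compare the zones of two zpdescs, as the
	 * migration path below does before fixing up NR_ZSPAGES. */
	static bool zpdesc_same_zone(struct zpdesc *a, struct zpdesc *b)
	{
		/* zpdesc_zone() just forwards to page_zone() on the backing page */
		return zpdesc_zone(a) == zpdesc_zone(b);
	}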
@@ -1830,19 +1830,21 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	struct size_class *class;
 	struct zspage *zspage;
 	struct zpdesc *dummy;
+	struct zpdesc *newzpdesc = page_zpdesc(newpage);
+	struct zpdesc *zpdesc = page_zpdesc(page);
 	void *s_addr, *d_addr, *addr;
 	unsigned int offset;
 	unsigned long handle;
 	unsigned long old_obj, new_obj;
 	unsigned int obj_idx;
 
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+	VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
 
 	/* We're committed, tell the world that this is a Zsmalloc page. */
-	__SetPageZsmalloc(newpage);
+	__SetPageZsmalloc(zpdesc_page(newzpdesc));
 
 	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(page);
+	zspage = get_zspage(zpdesc_page(zpdesc));
 	pool = zspage->pool;
 
 	/*
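
The two new locals are obtained once at the top of zs_page_migrate() via page_zpdesc(). Assuming the cast pair follows the page_folio()/folio_page() pattern used elsewhere in the series, it reduces to a _Generic type switch over the two overlapping layouts (a sketch; the authoritative definitions live in mm/zpdesc.h):

	/* Sketch, modeled on the folio conversion macros: struct zpdesc
	 * overlays struct page, so the conversion is a const-preserving cast. */
	#define page_zpdesc(p)		(_Generic((p),			\
		const struct page *:	(const struct zpdesc *)(p),	\
		struct page *:		(struct zpdesc *)(p)))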
@@ -1859,30 +1861,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	/* the migrate_write_lock protects zpage access via zs_map_object */
 	migrate_write_lock(zspage);
 
-	offset = get_first_obj_offset(page);
-	s_addr = kmap_atomic(page);
+	offset = get_first_obj_offset(zpdesc_page(zpdesc));
+	s_addr = zpdesc_kmap_atomic(zpdesc);
 
 	/*
 	 * Here, any user cannot access all objects in the zspage so let's move.
 	 */
-	d_addr = kmap_atomic(newpage);
+	d_addr = zpdesc_kmap_atomic(newzpdesc);
 	copy_page(d_addr, s_addr);
 	kunmap_atomic(d_addr);
 
 	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
 					addr += class->size) {
-		if (obj_allocated(page_zpdesc(page), addr, &handle)) {
+		if (obj_allocated(zpdesc, addr, &handle)) {
 
 			old_obj = handle_to_obj(handle);
 			obj_to_location(old_obj, &dummy, &obj_idx);
-			new_obj = (unsigned long)location_to_obj(newpage,
+			new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc),
 								obj_idx);
 			record_obj(handle, new_obj);
 		}
 	}
 	kunmap_atomic(s_addr);
 
-	replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
+	replace_sub_page(class, zspage, newzpdesc, zpdesc);
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release migration_lock.
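
Note that the loop above only re-records handles; the object payload was already moved wholesale by copy_page(). This works because zsmalloc encodes an object's location as a (page frame, object index) pair, and the index within the page is preserved by the copy. A hedged sketch of the re-encoding, with tag handling simplified and zpdesc_pfn() standing in for page_to_pfn() on the backing page:

	/* Illustrative only: re-encode a live object against the new page.
	 * The real code goes through obj_to_location()/location_to_obj(). */
	static unsigned long relocate_obj(unsigned long old_obj,
					  struct zpdesc *newzpdesc)
	{
		unsigned int obj_idx = old_obj & OBJ_INDEX_MASK;	/* index survives */

		return (zpdesc_pfn(newzpdesc) << OBJ_INDEX_BITS) | obj_idx;
	}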
@@ -1891,14 +1893,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	spin_unlock(&class->lock);
 	migrate_write_unlock(zspage);
 
-	get_page(newpage);
-	if (page_zone(newpage) != page_zone(page)) {
-		dec_zone_page_state(page, NR_ZSPAGES);
-		inc_zone_page_state(newpage, NR_ZSPAGES);
+	zpdesc_get(newzpdesc);
+	if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
+		zpdesc_dec_zone_page_state(zpdesc);
+		zpdesc_inc_zone_page_state(newzpdesc);
 	}
 
 	reset_page(page);
-	put_page(page);
+	zpdesc_put(zpdesc);
 
 	return MIGRATEPAGE_SUCCESS;
 }
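
The zone-statistics fixup only runs when the destination page landed in a different zone than the source, since NR_ZSPAGES is accounted per zone. Assuming the zpdesc stat helpers follow the same thin-wrapper pattern as the rest of the series, they bake the NR_ZSPAGES item into the call (a sketch, not the authoritative definitions):

	/* Sketch of the wrappers used above; NR_ZSPAGES is implied, so
	 * call sites no longer pass the stat item explicitly. */
	static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
	{
		inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
	}

	static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
	{
		dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
	}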