@@ -380,7 +380,8 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
* Device private pages have an extra refcount as they are
* ZONE_DEVICE pages.
*/
- expected_count += is_device_private_page(page);
+ expected_count +=
+ (is_device_private_page(page) || is_device_generic_page(page));
if (mapping)
expected_count += thp_nr_pages(page) + page_has_private(page);
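The new check assumes is_device_generic_page() reports ZONE_DEVICE pages whose dev_pagemap is of type MEMORY_DEVICE_GENERIC, so they get the same extra refcount treatment as device private pages. A minimal sketch of such a helper, modelled on the existing is_device_private_page() in include/linux/memremap.h; the body below is an assumption for illustration, not part of this patch:

/* Hypothetical helper, mirroring is_device_private_page(). */
static inline bool is_device_generic_page(const struct page *page)
{
	/* Only ZONE_DEVICE pages have a valid page->pgmap. */
	return is_zone_device_page(page) &&
	       page->pgmap->type == MEMORY_DEVICE_GENERIC;
}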
@@ -2607,7 +2608,7 @@ static bool migrate_vma_check_page(struct page *page)
* FIXME proper solution is to rework migration_entry_wait() so
* it does not need to take a reference on page.
*/
- return is_device_private_page(page);
+ return is_device_private_page(page) || is_device_generic_page(page);
}
/* For file back page */
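For context, pages that satisfy this check come from a driver mapping coherent device memory with a dev_pagemap of type MEMORY_DEVICE_GENERIC. A rough sketch of that registration using devm_memremap_pages(); the device, start and size arguments are placeholders and the dev_pagemap field layout follows recent kernels:

#include <linux/device.h>
#include <linux/memremap.h>

/* Sketch: expose coherent device memory as MEMORY_DEVICE_GENERIC pages. */
static void *expose_device_generic_memory(struct device *dev,
					  resource_size_t start,
					  resource_size_t size)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range.start = start;
	pgmap->range.end = start + size - 1;
	pgmap->nr_range = 1;

	/* Creates ZONE_DEVICE struct pages for the whole range. */
	return devm_memremap_pages(dev, pgmap);
}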
@@ -3069,10 +3070,12 @@ void migrate_vma_pages(struct migrate_vma *migrate)
mapping = page_mapping(page);
if (is_zone_device_page(newpage)) {
- if (is_device_private_page(newpage)) {
+ if (is_device_private_page(newpage) ||
+ is_device_generic_page(newpage)) {
/*
- * For now only support private anonymous when
- * migrating to un-addressable device memory.
+ * For now only support anonymous memory when
+ * migrating to device private (un-addressable)
+ * or device generic (devdax) memory.
*/
if (mapping) {
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
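Tying the hunks together, a driver reaches migrate_vma_pages() through the migrate_vma_*() API: migrate_vma_setup() collects and isolates the source pages, the driver fills in device destination pages, and migrate_vma_pages()/migrate_vma_finalize() complete the move. A condensed sketch of that sequence; alloc_one_device_page() and copy_to_device() are hypothetical stand-ins for driver logic, and page locking, cpages checks and error handling are omitted:

#include <linux/migrate.h>

/* Hypothetical driver helpers, not real kernel functions. */
struct page *alloc_one_device_page(void);
void copy_to_device(struct page *dpage, unsigned long src_mpfn);

/* Sketch: migrate one anonymous VMA range to device memory. */
static int migrate_range_to_device(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   void *pgmap_owner)
{
	unsigned long src[16] = {}, dst[16] = {};
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i, npages = (end - start) >> PAGE_SHIFT;
	int ret;

	if (npages > ARRAY_SIZE(src))
		return -EINVAL;

	ret = migrate_vma_setup(&args);		/* isolate source pages */
	if (ret)
		return ret;

	for (i = 0; i < npages; i++) {
		struct page *dpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;
		dpage = alloc_one_device_page();	/* hypothetical */
		copy_to_device(dpage, args.src[i]);	/* hypothetical */
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);	/* runs the checks patched above */
	migrate_vma_finalize(&args);	/* release or restore source pages */
	return 0;
}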