@@ -268,7 +268,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
for (; start < end; start += page_size) {
unsigned long nr_pages, addr;
- struct vmem_altmap *altmap;
struct page *section_base;
struct page *page;
@@ -288,9 +287,8 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
section_base = pfn_to_page(vmemmap_section_start(start));
nr_pages = 1 << page_order;
- altmap = to_vmem_altmap((unsigned long) section_base);
- if (altmap) {
- vmem_altmap_free(altmap, nr_pages);
+ if (dev_pagemap_free_pages(section_base, nr_pages)) {
+ ;
} else if (PageReserved(page)) {
/* allocated from bootmem */
if (page_size < PAGE_SIZE) {
@@ -804,12 +804,9 @@ static void __meminit free_pagetable(struct page *page, int order)
{
unsigned long magic;
unsigned int nr_pages = 1 << order;
- struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
- if (altmap) {
- vmem_altmap_free(altmap, nr_pages);
+ if (dev_pagemap_free_pages(page, nr_pages))
return;
- }
/* bootmem page has reserved flag */
if (PageReserved(page)) {
@@ -27,7 +27,6 @@ struct vmem_altmap {
};
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
#ifdef CONFIG_ZONE_DEVICE
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
@@ -139,6 +138,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
static inline bool is_zone_device_page(const struct page *page);
+bool dev_pagemap_free_pages(struct page *page, unsigned nr_pages);
#else
static inline void *devm_memremap_pages(struct device *dev,
struct resource *res, struct percpu_ref *ref,
@@ -158,6 +158,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
{
return NULL;
}
+
+static inline bool dev_pagemap_free_pages(struct page *page, unsigned nr_pages)
+{
+ return false;
+}
#endif /* CONFIG_ZONE_DEVICE */
#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
@@ -470,9 +470,14 @@ unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
return altmap->reserve + altmap->free;
}
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
+bool dev_pagemap_free_pages(struct page *page, unsigned nr_pages)
{
- altmap->alloc -= nr_pfns;
+ struct vmem_altmap *pgmap = to_vmem_altmap((uintptr_t)page);
+
+ if (!pgmap)
+ return false;
+ pgmap->alloc -= nr_pages;
+ return true;
}
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
Add a new helper that both looks up the pagemap and updates the alloc counter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/powerpc/mm/init_64.c | 6 ++----
 arch/x86/mm/init_64.c     | 5 +----
 include/linux/memremap.h  | 7 ++++++-
 kernel/memremap.c         | 9 +++++++--
 4 files changed, 16 insertions(+), 11 deletions(-)