
[v3,1/2] mm/memblock: Add reserved memory release function

Message ID 173928522350.906035.5626965043208329135.stgit@devnote2 (mailing list archive)
State Superseded
Series tracing: Make persistent ring buffer freeable

Commit Message

Masami Hiramatsu (Google) Feb. 11, 2025, 2:47 p.m. UTC
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Add reserve_mem_release_by_name() to release a reserved memory region
with a given name. This allows us to release, after boot, reserved memory
that was defined on the kernel command line.

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: linux-mm@kvack.org
---
 Changes in v2:
  - Rename reserved_mem_* to reserve_mem_*.
---
 include/linux/mm.h |    1 +
 mm/memblock.c      |   72 +++++++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 61 insertions(+), 12 deletions(-)
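
For context, a consumer of this interface might use it roughly as in the sketch
below. The "trace" region name, the opt-in flag and the init function are
hypothetical and only illustrate the intended call sequence; they are not part
of this patch.

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/types.h>

	/* Hypothetical consumer: keep the reserved region only if the user
	 * opted in to tracing, otherwise hand the pages back to the page
	 * allocator via the new reserve_mem_release_by_name(). */
	static int example_trace_buffer_init(bool user_opted_in)
	{
		phys_addr_t start, size;

		if (!reserve_mem_find_by_name("trace", &start, &size))
			return -ENODEV;		/* no such named region */

		if (!user_opted_in)
			return reserve_mem_release_by_name("trace") ? 0 : -ENODEV;

		/* ... map and use [start, start + size) for the ring buffer ... */
		return 0;
	}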

Comments

Steven Rostedt Feb. 11, 2025, 5:52 p.m. UTC | #1
On Tue, 11 Feb 2025 23:47:03 +0900
"Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:

> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> Add reserve_mem_release_by_name() to release a reserved memory region
> with a given name. This allows us to release reserved memory which is
> defined by kernel cmdline, after boot.
> 
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Mike Rapoport <rppt@kernel.org>
> Cc: linux-mm@kvack.org

Hi, can we get one of the Memory Management maintainers to ack this patch?

We will be shipping devices that use the reserve_mem option to perform
tracing in the field, but tracing only happens if the user grants permission
to do so. The kernel command line does not change between users who grant
permission and those who do not, so we would like to free up the memory on
devices where the user did not grant permission to trace; otherwise that
memory is simply wasted.

Thanks!

-- Steve


> ---
>  Changes in v2:
>   - Rename reserved_mem_* to reserve_mem_*.
> ---
>  include/linux/mm.h |    1 +
>  mm/memblock.c      |   72 +++++++++++++++++++++++++++++++++++++++++++---------
>  2 files changed, 61 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index f02925447e59..fe5f7711df04 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -4197,6 +4197,7 @@ void vma_pgtable_walk_begin(struct vm_area_struct *vma);
>  void vma_pgtable_walk_end(struct vm_area_struct *vma);
>  
>  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
> +int reserve_mem_release_by_name(const char *name);
>  
>  #ifdef CONFIG_64BIT
>  int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
> diff --git a/mm/memblock.c b/mm/memblock.c
> index 095c18b5c430..c8d207ebb93c 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -16,6 +16,7 @@
>  #include <linux/kmemleak.h>
>  #include <linux/seq_file.h>
>  #include <linux/memblock.h>
> +#include <linux/mutex.h>
>  
>  #include <asm/sections.h>
>  #include <linux/io.h>
> @@ -2263,6 +2264,7 @@ struct reserve_mem_table {
>  };
>  static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
>  static int reserved_mem_count;
> +static DEFINE_MUTEX(reserve_mem_lock);
>  
>  /* Add wildcard region with a lookup name */
>  static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> @@ -2276,6 +2278,21 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
>  	strscpy(map->name, name);
>  }
>  
> +static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
> +{
> +	struct reserve_mem_table *map;
> +	int i;
> +
> +	for (i = 0; i < reserved_mem_count; i++) {
> +		map = &reserved_mem_table[i];
> +		if (!map->size)
> +			continue;
> +		if (strcmp(name, map->name) == 0)
> +			return map;
> +	}
> +	return NULL;
> +}
> +
>  /**
>   * reserve_mem_find_by_name - Find reserved memory region with a given name
>   * @name: The name that is attached to a reserved memory region
> @@ -2289,22 +2306,53 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
>  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
>  {
>  	struct reserve_mem_table *map;
> -	int i;
>  
> -	for (i = 0; i < reserved_mem_count; i++) {
> -		map = &reserved_mem_table[i];
> -		if (!map->size)
> -			continue;
> -		if (strcmp(name, map->name) == 0) {
> -			*start = map->start;
> -			*size = map->size;
> -			return 1;
> -		}
> -	}
> -	return 0;
> +	guard(mutex)(&reserve_mem_lock);
> +	map = reserve_mem_find_by_name_nolock(name);
> +	if (!map)
> +		return 0;
> +
> +	*start = map->start;
> +	*size = map->size;
> +	return 1;
>  }
>  EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
>  
> +/**
> + * reserve_mem_release_by_name - Release reserved memory region with a given name
> + * @name: The name that is attached to a reserved memory region
> + *
> + * Forcibly release the pages in the reserved memory region so that the memory
> + * can be used as free memory. After release, the reserved region size becomes 0.
> + *
> + * Returns: 1 if released or 0 if not found.
> + */
> +int reserve_mem_release_by_name(const char *name)
> +{
> +	struct reserve_mem_table *map;
> +	unsigned int page_count;
> +	phys_addr_t start;
> +
> +	guard(mutex)(&reserve_mem_lock);
> +	map = reserve_mem_find_by_name_nolock(name);
> +	if (!map)
> +		return 0;
> +
> +	start = map->start;
> +	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> +
> +	for (int i = 0; i < page_count; i++) {
> +		phys_addr_t addr = start + i * PAGE_SIZE;
> +		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> +
> +		page->flags &= ~BIT(PG_reserved);
> +		__free_page(page);
> +	}
> +	map->size = 0;
> +
> +	return 1;
> +}
> +
>  /*
>   * Parse reserve_mem=nn:align:name
>   */
Masami Hiramatsu (Google) Feb. 11, 2025, 11:39 p.m. UTC | #2
On Tue, 11 Feb 2025 12:52:11 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:

> On Tue, 11 Feb 2025 23:47:03 +0900
> "Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:
> 
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > 
> > Add reserve_mem_release_by_name() to release a reserved memory region
> > with a given name. This allows us to release reserved memory which is
> > defined by kernel cmdline, after boot.
> > 
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > Cc: Andrew Morton <akpm@linux-foundation.org>
> > Cc: Mike Rapoport <rppt@kernel.org>
> > Cc: linux-mm@kvack.org
> 
> Hi, can we get one of the Memory Management maintainers to ack this patch?
> 
> We will be shipping devices that use the reserve_mem option to perform
> tracing in the field, but tracing only happens if the user grants permission
> to do so. The kernel command line does not change between users who grant
> permission and those who do not, so we would like to free up the memory on
> devices where the user did not grant permission to trace; otherwise that
> memory is simply wasted.

Thanks Steve for explaining; I missed Cc'ing the background information.
Here is the cover letter of this series.

https://lore.kernel.org/all/173928521419.906035.17750338150436695675.stgit@devnote2/

Thank you,

> 
> Thanks!
> 
> -- Steve
> 
> 
> > ---
> >  Changes in v2:
> >   - Rename reserved_mem_* to reserve_mem_*.
> > ---
> >  include/linux/mm.h |    1 +
> >  mm/memblock.c      |   72 +++++++++++++++++++++++++++++++++++++++++++---------
> >  2 files changed, 61 insertions(+), 12 deletions(-)
> > 
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index f02925447e59..fe5f7711df04 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -4197,6 +4197,7 @@ void vma_pgtable_walk_begin(struct vm_area_struct *vma);
> >  void vma_pgtable_walk_end(struct vm_area_struct *vma);
> >  
> >  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
> > +int reserve_mem_release_by_name(const char *name);
> >  
> >  #ifdef CONFIG_64BIT
> >  int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
> > diff --git a/mm/memblock.c b/mm/memblock.c
> > index 095c18b5c430..c8d207ebb93c 100644
> > --- a/mm/memblock.c
> > +++ b/mm/memblock.c
> > @@ -16,6 +16,7 @@
> >  #include <linux/kmemleak.h>
> >  #include <linux/seq_file.h>
> >  #include <linux/memblock.h>
> > +#include <linux/mutex.h>
> >  
> >  #include <asm/sections.h>
> >  #include <linux/io.h>
> > @@ -2263,6 +2264,7 @@ struct reserve_mem_table {
> >  };
> >  static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
> >  static int reserved_mem_count;
> > +static DEFINE_MUTEX(reserve_mem_lock);
> >  
> >  /* Add wildcard region with a lookup name */
> >  static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> > @@ -2276,6 +2278,21 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> >  	strscpy(map->name, name);
> >  }
> >  
> > +static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
> > +{
> > +	struct reserve_mem_table *map;
> > +	int i;
> > +
> > +	for (i = 0; i < reserved_mem_count; i++) {
> > +		map = &reserved_mem_table[i];
> > +		if (!map->size)
> > +			continue;
> > +		if (strcmp(name, map->name) == 0)
> > +			return map;
> > +	}
> > +	return NULL;
> > +}
> > +
> >  /**
> >   * reserve_mem_find_by_name - Find reserved memory region with a given name
> >   * @name: The name that is attached to a reserved memory region
> > @@ -2289,22 +2306,53 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> >  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
> >  {
> >  	struct reserve_mem_table *map;
> > -	int i;
> >  
> > -	for (i = 0; i < reserved_mem_count; i++) {
> > -		map = &reserved_mem_table[i];
> > -		if (!map->size)
> > -			continue;
> > -		if (strcmp(name, map->name) == 0) {
> > -			*start = map->start;
> > -			*size = map->size;
> > -			return 1;
> > -		}
> > -	}
> > -	return 0;
> > +	guard(mutex)(&reserve_mem_lock);
> > +	map = reserve_mem_find_by_name_nolock(name);
> > +	if (!map)
> > +		return 0;
> > +
> > +	*start = map->start;
> > +	*size = map->size;
> > +	return 1;
> >  }
> >  EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
> >  
> > +/**
> > + * reserve_mem_release_by_name - Release reserved memory region with a given name
> > + * @name: The name that is attached to a reserved memory region
> > + *
> > + * Forcibly release the pages in the reserved memory region so that the memory
> > + * can be used as free memory. After release, the reserved region size becomes 0.
> > + *
> > + * Returns: 1 if released or 0 if not found.
> > + */
> > +int reserve_mem_release_by_name(const char *name)
> > +{
> > +	struct reserve_mem_table *map;
> > +	unsigned int page_count;
> > +	phys_addr_t start;
> > +
> > +	guard(mutex)(&reserve_mem_lock);
> > +	map = reserve_mem_find_by_name_nolock(name);
> > +	if (!map)
> > +		return 0;
> > +
> > +	start = map->start;
> > +	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> > +
> > +	for (int i = 0; i < page_count; i++) {
> > +		phys_addr_t addr = start + i * PAGE_SIZE;
> > +		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> > +
> > +		page->flags &= ~BIT(PG_reserved);
> > +		__free_page(page);
> > +	}
> > +	map->size = 0;
> > +
> > +	return 1;
> > +}
> > +
> >  /*
> >   * Parse reserve_mem=nn:align:name
> >   */
>
Mike Rapoport Feb. 18, 2025, 7:24 a.m. UTC | #3
Hi Masami,

On Tue, Feb 11, 2025 at 11:47:03PM +0900, Masami Hiramatsu (Google) wrote:
> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> Add reserve_mem_release_by_name() to release a reserved memory region
> with a given name. This allows us to release reserved memory which is
> defined by kernel cmdline, after boot.
> 
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Mike Rapoport <rppt@kernel.org>
> Cc: linux-mm@kvack.org

...

> +/**
> + * reserve_mem_release_by_name - Release reserved memory region with a given name
> + * @name: The name that is attached to a reserved memory region
> + *
> + * Forcibly release the pages in the reserved memory region so that the memory
> + * can be used as free memory. After release, the reserved region size becomes 0.
> + *
> + * Returns: 1 if released or 0 if not found.
> + */
> +int reserve_mem_release_by_name(const char *name)
> +{
> +	struct reserve_mem_table *map;
> +	unsigned int page_count;
> +	phys_addr_t start;
> +
> +	guard(mutex)(&reserve_mem_lock);
> +	map = reserve_mem_find_by_name_nolock(name);
> +	if (!map)
> +		return 0;
> +
> +	start = map->start;
> +	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> +
> +	for (int i = 0; i < page_count; i++) {
> +		phys_addr_t addr = start + i * PAGE_SIZE;
> +		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> +
> +		page->flags &= ~BIT(PG_reserved);
> +		__free_page(page);
> +	}

We have free_reserved_area(), please use it here.
Otherwise

Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

> +	map->size = 0;
> +
> +	return 1;
> +}
> +
>  /*
>   * Parse reserve_mem=nn:align:name
>   */
>
Masami Hiramatsu (Google) Feb. 18, 2025, 8:42 a.m. UTC | #4
Hi Mike,

On Tue, 18 Feb 2025 09:24:53 +0200
Mike Rapoport <rppt@kernel.org> wrote:

> Hi Masami,
> 
> On Tue, Feb 11, 2025 at 11:47:03PM +0900, Masami Hiramatsu (Google) wrote:
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > 
> > Add reserve_mem_release_by_name() to release a reserved memory region
> > with a given name. This allows us to release reserved memory which is
> > defined by kernel cmdline, after boot.
> > 
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > Cc: Andrew Morton <akpm@linux-foundation.org>
> > Cc: Mike Rapoport <rppt@kernel.org>
> > Cc: linux-mm@kvack.org
> 
> ...
> 
> > +/**
> > + * reserve_mem_release_by_name - Release reserved memory region with a given name
> > + * @name: The name that is attached to a reserved memory region
> > + *
> > + * Forcibly release the pages in the reserved memory region so that the memory
> > + * can be used as free memory. After release, the reserved region size becomes 0.
> > + *
> > + * Returns: 1 if released or 0 if not found.
> > + */
> > +int reserve_mem_release_by_name(const char *name)
> > +{
> > +	struct reserve_mem_table *map;
> > +	unsigned int page_count;
> > +	phys_addr_t start;
> > +
> > +	guard(mutex)(&reserve_mem_lock);
> > +	map = reserve_mem_find_by_name_nolock(name);
> > +	if (!map)
> > +		return 0;
> > +
> > +	start = map->start;
> > +	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> > +
> > +	for (int i = 0; i < page_count; i++) {
> > +		phys_addr_t addr = start + i * PAGE_SIZE;
> > +		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> > +
> > +		page->flags &= ~BIT(PG_reserved);
> > +		__free_page(page);
> > +	}
> 
> We have free_reserved_area(), please use it here.
> Otherwise
> 
> Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

Thanks! but I can not use free_reserved_area() here because it uses
virt_to_page(). Here we only know the physical address in the map.
I think we can use free_reserved_page() instead. Is that OK?

Thank you,


> 
> > +	map->size = 0;
> > +
> > +	return 1;
> > +}
> > +
> >  /*
> >   * Parse reserve_mem=nn:align:name
> >   */
> > 
> 
> -- 
> Sincerely yours,
> Mike.
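
A loop built around free_reserved_page(), as floated above, might look roughly
like the following. This is only a sketch of the idea, not code from any posted
revision:

	for (phys_addr_t addr = start; addr < start + map->size; addr += PAGE_SIZE) {
		/* free_reserved_page() clears PG_reserved, reinitializes the
		 * page count and returns the page to the buddy allocator; it
		 * only needs the struct page, which we can get from the PFN. */
		free_reserved_page(pfn_to_page(PHYS_PFN(addr)));
	}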
Mike Rapoport Feb. 18, 2025, 9:47 a.m. UTC | #5
On Tue, Feb 18, 2025 at 05:42:57PM +0900, Masami Hiramatsu wrote:
> Hi Mike,
> 
> On Tue, 18 Feb 2025 09:24:53 +0200
> Mike Rapoport <rppt@kernel.org> wrote:
> 
> > Hi Masami,
> > 
> > On Tue, Feb 11, 2025 at 11:47:03PM +0900, Masami Hiramatsu (Google) wrote:
> > > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > > 
> > > Add reserve_mem_release_by_name() to release a reserved memory region
> > > with a given name. This allows us to release reserved memory which is
> > > defined by kernel cmdline, after boot.
> > > 
> > > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > > Cc: Andrew Morton <akpm@linux-foundation.org>
> > > Cc: Mike Rapoport <rppt@kernel.org>
> > > Cc: linux-mm@kvack.org
> > 
> > ...
> > 
> > > +/**
> > > + * reserve_mem_release_by_name - Release reserved memory region with a given name
> > > + * @name: The name that is attached to a reserved memory region
> > > + *
> > > + * Forcibly release the pages in the reserved memory region so that the memory
> > > + * can be used as free memory. After release, the reserved region size becomes 0.
> > > + *
> > > + * Returns: 1 if released or 0 if not found.
> > > + */
> > > +int reserve_mem_release_by_name(const char *name)
> > > +{
> > > +	struct reserve_mem_table *map;
> > > +	unsigned int page_count;
> > > +	phys_addr_t start;
> > > +
> > > +	guard(mutex)(&reserve_mem_lock);
> > > +	map = reserve_mem_find_by_name_nolock(name);
> > > +	if (!map)
> > > +		return 0;
> > > +
> > > +	start = map->start;
> > > +	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> > > +
> > > +	for (int i = 0; i < page_count; i++) {
> > > +		phys_addr_t addr = start + i * PAGE_SIZE;
> > > +		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> > > +
> > > +		page->flags &= ~BIT(PG_reserved);
> > > +		__free_page(page);
> > > +	}
> > 
> > We have free_reserved_area(), please use it here.
> > Otherwise
> > 
> > Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> 
> Thanks! but I can not use free_reserved_area() here because it uses
> virt_to_page(). Here we only know the physical address in the map.
> I think we can use free_reserved_page() instead. Is that OK?

For reserve_mem ranges virt_to_phys() will work, they are allocated from the
memory that is covered by the direct map.
 
> Thank you,
> 
> -- 
> Masami Hiramatsu (Google) <mhiramat@kernel.org>
Mike Rapoport Feb. 18, 2025, 11:34 a.m. UTC | #6
On Tue, Feb 18, 2025 at 11:47:11AM +0200, Mike Rapoport wrote:
> On Tue, Feb 18, 2025 at 05:42:57PM +0900, Masami Hiramatsu wrote:
> > Hi Mike,
> > 
> > 
> > Thanks! but I can not use free_reserved_area() here because it uses
> > virt_to_page(). Here we only know the physical address in the map.
> > I think we can use free_reserved_page() instead. Is that OK?
> 
> For reserve_mem ranges virt_to_phys() will work, they are allocated from the

I meant phys_to_virt() of course :)

> memory that is covered by the direct map.
Masami Hiramatsu (Google) Feb. 18, 2025, 1:06 p.m. UTC | #7
On Tue, 18 Feb 2025 13:34:51 +0200
Mike Rapoport <rppt@kernel.org> wrote:

> On Tue, Feb 18, 2025 at 11:47:11AM +0200, Mike Rapoport wrote:
> > On Tue, Feb 18, 2025 at 05:42:57PM +0900, Masami Hiramatsu wrote:
> > > Hi Mike,
> > > 
> > > 
> > > Thanks! but I can not use free_reserved_area() here because it uses
> > > virt_to_page(). Here we only know the physical address in the map.
> > > I think we can use free_reserved_page() instead. Is that OK?
> > 
> > For reserve_mem ranges virt_to_phys() will work, they are allocated from the
> 
> I meant phys_to_virt() of course :)

Ah, I got it :). Let me update it.

Thanks!

> 
> > memory that is covered by the direct map.
> 
> -- 
> Sincerely yours,
> Mike.
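
Following the exchange above, the open-coded loop would presumably be replaced
by a single free_reserved_area() call, converting the stored physical range
through the direct map. A rough sketch, assuming the region lies in directly
mapped memory as Mike notes (the actual follow-up revision is not reproduced
here):

	/* The reserve_mem range lives in directly mapped memory, so the
	 * physical bounds can be converted to kernel virtual addresses and
	 * handed to free_reserved_area() in one call (poison = -1 skips
	 * poisoning the freed pages). */
	free_reserved_area(phys_to_virt(start),
			   phys_to_virt(start + map->size),
			   -1, name);
	map->size = 0;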

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f02925447e59..fe5f7711df04 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4197,6 +4197,7 @@  void vma_pgtable_walk_begin(struct vm_area_struct *vma);
 void vma_pgtable_walk_end(struct vm_area_struct *vma);
 
 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+int reserve_mem_release_by_name(const char *name);
 
 #ifdef CONFIG_64BIT
 int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
diff --git a/mm/memblock.c b/mm/memblock.c
index 095c18b5c430..c8d207ebb93c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -16,6 +16,7 @@ 
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/mutex.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
@@ -2263,6 +2264,7 @@  struct reserve_mem_table {
 };
 static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
 static int reserved_mem_count;
+static DEFINE_MUTEX(reserve_mem_lock);
 
 /* Add wildcard region with a lookup name */
 static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
@@ -2276,6 +2278,21 @@  static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
 	strscpy(map->name, name);
 }
 
+static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
+{
+	struct reserve_mem_table *map;
+	int i;
+
+	for (i = 0; i < reserved_mem_count; i++) {
+		map = &reserved_mem_table[i];
+		if (!map->size)
+			continue;
+		if (strcmp(name, map->name) == 0)
+			return map;
+	}
+	return NULL;
+}
+
 /**
  * reserve_mem_find_by_name - Find reserved memory region with a given name
  * @name: The name that is attached to a reserved memory region
@@ -2289,22 +2306,53 @@  static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
 {
 	struct reserve_mem_table *map;
-	int i;
 
-	for (i = 0; i < reserved_mem_count; i++) {
-		map = &reserved_mem_table[i];
-		if (!map->size)
-			continue;
-		if (strcmp(name, map->name) == 0) {
-			*start = map->start;
-			*size = map->size;
-			return 1;
-		}
-	}
-	return 0;
+	guard(mutex)(&reserve_mem_lock);
+	map = reserve_mem_find_by_name_nolock(name);
+	if (!map)
+		return 0;
+
+	*start = map->start;
+	*size = map->size;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
 
+/**
+ * reserve_mem_release_by_name - Release reserved memory region with a given name
+ * @name: The name that is attached to a reserved memory region
+ *
+ * Forcibly release the pages in the reserved memory region so that the memory
+ * can be used as free memory. After release, the reserved region size becomes 0.
+ *
+ * Returns: 1 if released or 0 if not found.
+ */
+int reserve_mem_release_by_name(const char *name)
+{
+	struct reserve_mem_table *map;
+	unsigned int page_count;
+	phys_addr_t start;
+
+	guard(mutex)(&reserve_mem_lock);
+	map = reserve_mem_find_by_name_nolock(name);
+	if (!map)
+		return 0;
+
+	start = map->start;
+	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
+
+	for (int i = 0; i < page_count; i++) {
+		phys_addr_t addr = start + i * PAGE_SIZE;
+		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
+
+		page->flags &= ~BIT(PG_reserved);
+		__free_page(page);
+	}
+	map->size = 0;
+
+	return 1;
+}
+
 /*
  * Parse reserve_mem=nn:align:name
  */