
[2/3] mm/kmemleak: fix partially freeing unknown object warning

Message ID 20230927035923.1425340-3-liushixin2@huawei.com (mailing list archive)
State New
Series Some bugfix about kmemleak

Commit Message

Liu Shixin Sept. 27, 2023, 3:59 a.m. UTC
delete_object_part() can be called by multiple callers at the same time.
If one caller finds and removes the object, a second caller racing with
it fails to find the object and returns without doing anything. The part
freed by the second caller then stays recorded by kmemleak (it is covered
by the leftover objects re-created by the first caller) even though it
has already been freed to the buddy allocator. With DEBUG on, kmemleak
reports the following warning:

 kmemleak: Partially freeing unknown object at 0xa1af86000 (size 4096)
 CPU: 0 PID: 742 Comm: test_huge Not tainted 6.6.0-rc3kmemleak+ #54
 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
 Call Trace:
  <TASK>
  dump_stack_lvl+0x37/0x50
  kmemleak_free_part_phys+0x50/0x60
  hugetlb_vmemmap_optimize+0x172/0x290
  ? __pfx_vmemmap_remap_pte+0x10/0x10
  __prep_new_hugetlb_folio+0xe/0x30
  prep_new_hugetlb_folio.isra.0+0xe/0x40
  alloc_fresh_hugetlb_folio+0xc3/0xd0
  alloc_surplus_hugetlb_folio.constprop.0+0x6e/0xd0
  hugetlb_acct_memory.part.0+0xe6/0x2a0
  hugetlb_reserve_pages+0x110/0x2c0
  hugetlbfs_file_mmap+0x11d/0x1b0
  mmap_region+0x248/0x9a0
  ? hugetlb_get_unmapped_area+0x15c/0x2d0
  do_mmap+0x38b/0x580
  vm_mmap_pgoff+0xe6/0x190
  ksys_mmap_pgoff+0x18a/0x1f0
  do_syscall_64+0x3f/0x90
  entry_SYSCALL_64_after_hwframe+0x6e/0xd8

Fixes: 53238a60dd4a ("kmemleak: Allow partial freeing of memory blocks")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/kmemleak.c | 6 ++++++
 1 file changed, 6 insertions(+)
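
For context, the window in which a concurrent caller can miss the object sits
between find_and_remove_object() dropping kmemleak_lock and the leftover parts
being re-created. Roughly (paraphrasing the current delete_object_part(), not
quoting it verbatim):

static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	/* Takes and drops kmemleak_lock internally. */
	object = find_and_remove_object(ptr, 1, is_phys);
	if (!object) {
		/*
		 * A racing caller lands here while the object is temporarily
		 * out of the tree and returns without freeing its part
		 * ("Partially freeing unknown object" with DEBUG on).
		 */
		return;
	}

	/*
	 * Window: the object is out of the tree but the leftover parts have
	 * not been re-created yet. Once re-created below, they still cover
	 * the range the racing caller tried to free.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		__create_object(start, ptr - start, object->min_count,
				GFP_KERNEL, is_phys);
	if (ptr + size < end)
		__create_object(ptr + size, end - ptr - size,
				object->min_count, GFP_KERNEL, is_phys);

	__delete_object(object);
}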

Comments

Catalin Marinas Sept. 27, 2023, 5:06 p.m. UTC | #1
On Wed, Sep 27, 2023 at 11:59:22AM +0800, Liu Shixin wrote:
> diff --git a/mm/kmemleak.c b/mm/kmemleak.c
> index 54c2c90d3abc..5a2bbd85df57 100644
> --- a/mm/kmemleak.c
> +++ b/mm/kmemleak.c
> @@ -208,6 +208,8 @@ static struct rb_root object_tree_root = RB_ROOT;
>  static struct rb_root object_phys_tree_root = RB_ROOT;
>  /* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
>  static DEFINE_RAW_SPINLOCK(kmemleak_lock);
> +/* Serial delete_object_part() to ensure all objects is deleted correctly */
> +static DEFINE_RAW_SPINLOCK(delete_object_part_mutex);

Don't call this mutex, it implies sleeping.

>  
>  /* allocation caches for kmemleak internal data */
>  static struct kmem_cache *object_cache;
> @@ -784,13 +786,16 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
>  {
>  	struct kmemleak_object *object;
>  	unsigned long start, end;
> +	unsigned long flags;
>  
> +	raw_spin_lock_irqsave(&delete_object_part_mutex, flags);
>  	object = find_and_remove_object(ptr, 1, is_phys);
>  	if (!object) {
>  #ifdef DEBUG
>  		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
>  			      ptr, size);
>  #endif
> +		raw_spin_unlock_irqrestore(&delete_object_part_mutex, flags);

I prefer a goto out and a single place for unlocking.

However, we already take the kmemleak_lock in find_and_remove_object().
So better to open-code that function here and avoid introducing a new
lock. __create_object() may need a new bool argument, no_lock or
something. Or just split it into separate functions for allocating the
kmemleak structure and adding it to the corresponding trees/lists under
a lock.
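
For illustration, the suggested direction could look roughly like the sketch
below. __alloc_object() and __link_object() are assumed names for the split of
__create_object(): the former only allocates the kmemleak_object, the latter
initialises it and inserts it into the trees/lists with kmemleak_lock already
held by the caller, returning 0 on success. The existing __lookup_object() and
__remove_object() helpers, which also expect kmemleak_lock to be held, are
reused. This is a sketch of the idea, not the final patch:

static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	/* Pre-allocate outside the raw spinlock; GFP_KERNEL may sleep. */
	object_l = __alloc_object(GFP_KERNEL);
	object_r = __alloc_object(GFP_KERNEL);
	if (!object_l || !object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, 1, is_phys);
	if (!object)
		goto unlock;

	/*
	 * Remove the old object and link the leftover part(s) in the same
	 * critical section, so a concurrent delete_object_part() never
	 * observes the block as missing.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, is_phys))
		object_l = NULL;
	if (ptr + size < end &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, is_phys))
		object_r = NULL;
	__remove_object(object);
unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object)
		__delete_object(object);
out:
	if (object_l)
		kmem_cache_free(object_cache, object_l);
	if (object_r)
		kmem_cache_free(object_cache, object_r);
}
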
Liu Shixin Sept. 28, 2023, 1:22 a.m. UTC | #2
On 2023/9/28 1:06, Catalin Marinas wrote:
> On Wed, Sep 27, 2023 at 11:59:22AM +0800, Liu Shixin wrote:
>> diff --git a/mm/kmemleak.c b/mm/kmemleak.c
>> index 54c2c90d3abc..5a2bbd85df57 100644
>> --- a/mm/kmemleak.c
>> +++ b/mm/kmemleak.c
>> @@ -208,6 +208,8 @@ static struct rb_root object_tree_root = RB_ROOT;
>>  static struct rb_root object_phys_tree_root = RB_ROOT;
>>  /* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
>>  static DEFINE_RAW_SPINLOCK(kmemleak_lock);
>> +/* Serial delete_object_part() to ensure all objects is deleted correctly */
>> +static DEFINE_RAW_SPINLOCK(delete_object_part_mutex);
> Don't call this mutex, it implies sleeping.
Sorry, it was originally defined as a mutex and I forgot to rename it when I changed it to a spinlock.
>
>>  
>>  /* allocation caches for kmemleak internal data */
>>  static struct kmem_cache *object_cache;
>> @@ -784,13 +786,16 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
>>  {
>>  	struct kmemleak_object *object;
>>  	unsigned long start, end;
>> +	unsigned long flags;
>>  
>> +	raw_spin_lock_irqsave(&delete_object_part_mutex, flags);
>>  	object = find_and_remove_object(ptr, 1, is_phys);
>>  	if (!object) {
>>  #ifdef DEBUG
>>  		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
>>  			      ptr, size);
>>  #endif
>> +		raw_spin_unlock_irqrestore(&delete_object_part_mutex, flags);
> I prefer a goto out and a single place for unlocking.
>
> However, we already take the kmemleak_lock in find_and_remove_object().
> So better to open-code that function here and avoid introducing a new
> lock. __create_object() may need a new bool argument, no_lock or
> something. Or just split it into separate functions for allocating the
> kmemleak structure and adding it to the corresponding trees/lists under
> a lock.
>

Patch

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 54c2c90d3abc..5a2bbd85df57 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -208,6 +208,8 @@  static struct rb_root object_tree_root = RB_ROOT;
 static struct rb_root object_phys_tree_root = RB_ROOT;
 /* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
+/* Serial delete_object_part() to ensure all objects is deleted correctly */
+static DEFINE_RAW_SPINLOCK(delete_object_part_mutex);
 
 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
@@ -784,13 +786,16 @@  static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
 {
 	struct kmemleak_object *object;
 	unsigned long start, end;
+	unsigned long flags;
 
+	raw_spin_lock_irqsave(&delete_object_part_mutex, flags);
 	object = find_and_remove_object(ptr, 1, is_phys);
 	if (!object) {
 #ifdef DEBUG
 		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
 			      ptr, size);
 #endif
+		raw_spin_unlock_irqrestore(&delete_object_part_mutex, flags);
 		return;
 	}
 
@@ -809,6 +814,7 @@  static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
 			      GFP_KERNEL, is_phys);
 
 	__delete_object(object);
+	raw_spin_unlock_irqrestore(&delete_object_part_mutex, flags);
 }
 
 static void __paint_it(struct kmemleak_object *object, int color)