[v2,1/4] mm: kmemleak: add OBJECT_PHYS flag for objects allocated with physical address

Message ID 20220603035415.1243913-2-patrick.wang.shcn@gmail.com (mailing list archive)
State New
Series mm: kmemleak: store objects allocated with physical address separately and check when scan

Commit Message

patrick wang June 3, 2022, 3:54 a.m. UTC
Add an OBJECT_PHYS flag for objects. This flag is
used to identify objects allocated with a physical
address. The create_object() function gains an
extra argument to set that flag.

Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Patrick Wang <patrick.wang.shcn@gmail.com>
---
 mm/kmemleak.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
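
For quick reference, the change boils down to one new flag bit and one extra create_object() parameter; the lines below condense the hunks from the patch at the bottom of this page:

/* flag set for objects allocated with a physical address */
#define OBJECT_PHYS		(1 << 4)

/* create_object() now takes a flag telling whether ptr is a physical address */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp,
					     bool is_phys);

/* inside create_object(): record the flag on the new object */
object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);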

Comments

Catalin Marinas June 6, 2022, 11:55 a.m. UTC | #1
On Fri, Jun 03, 2022 at 11:54:12AM +0800, Patrick Wang wrote:
> diff --git a/mm/kmemleak.c b/mm/kmemleak.c
> index a182f5ddaf68..1e9e0aa93ae5 100644
> --- a/mm/kmemleak.c
> +++ b/mm/kmemleak.c
> @@ -172,6 +172,8 @@ struct kmemleak_object {
>  #define OBJECT_NO_SCAN		(1 << 2)
>  /* flag set to fully scan the object when scan_area allocation failed */
>  #define OBJECT_FULL_SCAN	(1 << 3)
> +/* flag set for object allocated with physical address */
> +#define OBJECT_PHYS		(1 << 4)
>  
>  #define HEX_PREFIX		"    "
>  /* number of bytes to print per line; must be 16 or 32 */
> @@ -575,7 +577,8 @@ static int __save_stack_trace(unsigned long *trace)
>   * memory block and add it to the object_list and object_tree_root.
>   */
>  static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
> -					     int min_count, gfp_t gfp)
> +					     int min_count, gfp_t gfp,
> +					     bool is_phys)

The patch looks fine but I wonder whether we should have different
functions for is_phys true/false; we may end up with fewer changes
overall since most places simply pass is_phys == false:

static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
					       int min_count, gfp_t gfp,
					       bool is_phys);

static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	return __create_object(ptr, size, min_count, gfp, false);
}

static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
						  int min_count, gfp_t gfp)
{
	return __create_object(ptr, size, min_count, gfp, true);
}

Same for the other patches that change a few more functions.
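
As an example of how the suggested wrappers might be consumed (illustration only, not a hunk from this series), a physical-address registration path such as kmemleak_alloc_phys() could then avoid passing the boolean at every call site:

/*
 * Example sketch: register an object by its physical address via the
 * proposed create_object_phys() wrapper; regular (virtual) callers
 * keep using create_object() unchanged.
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (kmemleak_enabled)
		create_object_phys((unsigned long)phys, size, min_count, gfp);
}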
patrick wang June 7, 2022, 2:32 p.m. UTC | #2
On 2022/6/6 19:55, Catalin Marinas wrote:
> On Fri, Jun 03, 2022 at 11:54:12AM +0800, Patrick Wang wrote:
>> diff --git a/mm/kmemleak.c b/mm/kmemleak.c
>> index a182f5ddaf68..1e9e0aa93ae5 100644
>> --- a/mm/kmemleak.c
>> +++ b/mm/kmemleak.c
>> @@ -172,6 +172,8 @@ struct kmemleak_object {
>>   #define OBJECT_NO_SCAN		(1 << 2)
>>   /* flag set to fully scan the object when scan_area allocation failed */
>>   #define OBJECT_FULL_SCAN	(1 << 3)
>> +/* flag set for object allocated with physical address */
>> +#define OBJECT_PHYS		(1 << 4)
>>   
>>   #define HEX_PREFIX		"    "
>>   /* number of bytes to print per line; must be 16 or 32 */
>> @@ -575,7 +577,8 @@ static int __save_stack_trace(unsigned long *trace)
>>    * memory block and add it to the object_list and object_tree_root.
>>    */
>>   static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
>> -					     int min_count, gfp_t gfp)
>> +					     int min_count, gfp_t gfp,
>> +					     bool is_phys)
> 
> The patch looks fine but I wonder whether we should have different
> functions for is_phys true/false; we may end up with fewer changes
> overall since most places simply pass is_phys == false:

This should be better. Will do.

> 
> static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
> 					       int min_count, gfp_t gfp,
> 					       bool is_phys);
> 
> static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
> 					     int min_count, gfp_t gfp)
> {
> 	return __create_object(ptr, size, min_count, gfp, false);
> }
> 
> static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
> 						  int min_count, gfp_t gfp)
> {
> 	return __create_object(ptr, size, min_count, gfp, true);
> }
> 
> Same for the other patches that change a few more functions.

Since the physical objects are only used as gray objects, the
changes in irrelevant places will be removed.

The leak check could still be performed on physical objects:
converting the block address from virtual to physical before the
lookup should make this work (though this is of no use currently).
I think we'd better be aware of this.

Thanks,
Patrick
Catalin Marinas June 9, 2022, 9:54 a.m. UTC | #3
On Tue, Jun 07, 2022 at 10:32:26PM +0800, Patrick Wang wrote:
> The leak check could be taken on physical objects. Conversion
> of block address from virtual to physical before lookup should
> make this work (this is useless currently). I think we'd better
> know about this.

Yes, we could add this, but since all the phys objects are currently
'gray', it won't make any difference, other than an extra lookup in the
phys tree.
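
(For reference, the lookup discussed above would roughly take the following shape during pointer scanning; lookup_object_phys() is a hypothetical helper standing in for a search of the phys object tree added later in the series, and, as noted, the extra search makes no practical difference while all phys objects are gray:)

/*
 * Rough sketch only, locking omitted: treat a scanned value as a
 * lowmem virtual address, convert it to a physical address and look
 * it up among the OBJECT_PHYS objects. lookup_object_phys() is
 * hypothetical and used purely for illustration.
 */
static void scan_pointer_as_phys(unsigned long pointer)
{
	struct kmemleak_object *object;

	if (!virt_addr_valid(pointer))
		return;

	object = lookup_object_phys(__pa((void *)pointer));
	if (object)
		update_refs(object);	/* same accounting as the virtual path */
}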

Patch

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a182f5ddaf68..1e9e0aa93ae5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -172,6 +172,8 @@  struct kmemleak_object {
 #define OBJECT_NO_SCAN		(1 << 2)
 /* flag set to fully scan the object when scan_area allocation failed */
 #define OBJECT_FULL_SCAN	(1 << 3)
+/* flag set for object allocated with physical address */
+#define OBJECT_PHYS		(1 << 4)
 
 #define HEX_PREFIX		"    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -575,7 +577,8 @@  static int __save_stack_trace(unsigned long *trace)
  * memory block and add it to the object_list and object_tree_root.
  */
 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
-					     int min_count, gfp_t gfp)
+					     int min_count, gfp_t gfp,
+					     bool is_phys)
 {
 	unsigned long flags;
 	struct kmemleak_object *object, *parent;
@@ -595,7 +598,7 @@  static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_HLIST_HEAD(&object->area_list);
 	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED;
+	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
 	object->pointer = ptr;
 	object->size = kfence_ksize((void *)ptr) ?: size;
 	object->excess_ref = 0;
@@ -729,10 +732,10 @@  static void delete_object_part(unsigned long ptr, size_t size)
 	end = object->pointer + object->size;
 	if (ptr > start)
 		create_object(start, ptr - start, object->min_count,
-			      GFP_KERNEL);
+			      GFP_KERNEL, object->flags & OBJECT_PHYS);
 	if (ptr + size < end)
 		create_object(ptr + size, end - ptr - size, object->min_count,
-			      GFP_KERNEL);
+			      GFP_KERNEL, object->flags & OBJECT_PHYS);
 
 	__delete_object(object);
 }
@@ -904,7 +907,7 @@  void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-		create_object((unsigned long)ptr, size, min_count, gfp);
+		create_object((unsigned long)ptr, size, min_count, gfp, false);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -931,7 +934,7 @@  void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		for_each_possible_cpu(cpu)
 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
-				      size, 0, gfp);
+				      size, 0, gfp, false);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
@@ -953,7 +956,7 @@  void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp
 	 * the virtual address of the vmalloc'ed block.
 	 */
 	if (kmemleak_enabled) {
-		create_object((unsigned long)area->addr, size, 2, gfp);
+		create_object((unsigned long)area->addr, size, 2, gfp, false);
 		object_set_excess_ref((unsigned long)area,
 				      (unsigned long)area->addr);
 	}
@@ -1966,14 +1969,14 @@  void __init kmemleak_init(void)
 
 	/* register the data/bss sections */
 	create_object((unsigned long)_sdata, _edata - _sdata,
-		      KMEMLEAK_GREY, GFP_ATOMIC);
+		      KMEMLEAK_GREY, GFP_ATOMIC, false);
 	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
-		      KMEMLEAK_GREY, GFP_ATOMIC);
+		      KMEMLEAK_GREY, GFP_ATOMIC, false);
 	/* only register .data..ro_after_init if not within .data */
 	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
 		create_object((unsigned long)__start_ro_after_init,
 			      __end_ro_after_init - __start_ro_after_init,
-			      KMEMLEAK_GREY, GFP_ATOMIC);
+			      KMEMLEAK_GREY, GFP_ATOMIC, false);
 }
 
 /*