ftrace: Fix use-after-free issue in ftrace_location()

Message ID: 20240401125543.1282845-1-zhengyejian1@huawei.com (mailing list archive)
State: Changes Requested
Series: ftrace: Fix use-after-free issue in ftrace_location()

Commit Message

Zheng Yejian April 1, 2024, 12:55 p.m. UTC
KASAN reports a bug:

  BUG: KASAN: use-after-free in ftrace_location+0x90/0x120
  Read of size 8 at addr ffff888141d40010 by task insmod/424
  CPU: 8 PID: 424 Comm: insmod Tainted: G        W          6.9.0-rc2+ #213
  [...]
  Call Trace:
   <TASK>
   dump_stack_lvl+0x68/0xa0
   print_report+0xcf/0x610
   kasan_report+0xb5/0xe0
   ftrace_location+0x90/0x120
   register_kprobe+0x14b/0xa40
   kprobe_init+0x2d/0xff0 [kprobe_example]
   do_one_initcall+0x8f/0x2d0
   do_init_module+0x13a/0x3c0
   load_module+0x3082/0x33d0
   init_module_from_file+0xd2/0x130
   __x64_sys_finit_module+0x306/0x440
   do_syscall_64+0x68/0x140
   entry_SYSCALL_64_after_hwframe+0x71/0x79

The root cause is that, while lookup_rec() is looking up the ftrace record
of an address in some module, ftrace_release_mod() can concurrently free
the memory holding those ftrace records because that module is being
deleted.

  register_kprobes() {
    check_kprobe_address_safe() {
      arch_check_ftrace_location() {
        ftrace_location() {
          lookup_rec()  // access memory that has been freed by
                        // ftrace_release_mod() !!!

It seems that ftrace_lock is required when looking up records in
ftrace_location(), and the same applies to ftrace_location_range().
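
For illustration, the race can be sketched roughly as follows (simplified;
not the exact kernel code):

  CPU 0 (register_kprobe)                CPU 1 (delete_module)
  -----------------------                ---------------------
  ftrace_location(ip)
    lookup_rec(ip, ip)
      // walk ftrace_pages_start and
      // bsearch pg->records, without
      // holding ftrace_lock
                                         ftrace_release_mod(mod)
                                           // unlink the module's pages
                                           // under ftrace_lock, then:
                                           free_pages(pg->records, ...)
                                           kfree(pg)
      pg->records[...].ip  // use-after-free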

Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization")
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 kernel/trace/ftrace.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

Comments

Steven Rostedt April 10, 2024, 3:28 p.m. UTC | #1
On Mon, 1 Apr 2024 20:55:43 +0800
Zheng Yejian <zhengyejian1@huawei.com> wrote:

> KASAN reports a bug:
> 
>   BUG: KASAN: use-after-free in ftrace_location+0x90/0x120
>   Read of size 8 at addr ffff888141d40010 by task insmod/424
>   CPU: 8 PID: 424 Comm: insmod Tainted: G        W          6.9.0-rc2+ #213
>   [...]
>   Call Trace:
>    <TASK>
>    dump_stack_lvl+0x68/0xa0
>    print_report+0xcf/0x610
>    kasan_report+0xb5/0xe0
>    ftrace_location+0x90/0x120
>    register_kprobe+0x14b/0xa40
>    kprobe_init+0x2d/0xff0 [kprobe_example]
>    do_one_initcall+0x8f/0x2d0
>    do_init_module+0x13a/0x3c0
>    load_module+0x3082/0x33d0
>    init_module_from_file+0xd2/0x130
>    __x64_sys_finit_module+0x306/0x440
>    do_syscall_64+0x68/0x140
>    entry_SYSCALL_64_after_hwframe+0x71/0x79
> 
> The root cause is that, while lookup_rec() is looking up the ftrace record
> of an address in some module, ftrace_release_mod() can concurrently free
> the memory holding those ftrace records because that module is being
> deleted.
> 
>   register_kprobes() {
>     check_kprobe_address_safe() {
>       arch_check_ftrace_location() {
>         ftrace_location() {
>           lookup_rec()  // access memory that has been freed by
>                         // ftrace_release_mod() !!!
> 
> It seems that ftrace_lock is required when looking up records in
> ftrace_location(), and the same applies to ftrace_location_range().
> 
> Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization")
> Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
> ---
>  kernel/trace/ftrace.c | 28 ++++++++++++++++++----------
>  1 file changed, 18 insertions(+), 10 deletions(-)
> 
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index da1710499698..838d175709c1 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -1581,7 +1581,7 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
>  }
>  
>  /**
> - * ftrace_location_range - return the first address of a traced location
> + * ftrace_location_range_locked - return the first address of a traced location
>   *	if it touches the given ip range
>   * @start: start of range to search.
>   * @end: end of range to search (inclusive). @end points to the last byte
> @@ -1592,7 +1592,7 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
>   * that is either a NOP or call to the function tracer. It checks the ftrace
>   * internal tables to determine if the address belongs or not.
>   */
> -unsigned long ftrace_location_range(unsigned long start, unsigned long end)
> +static unsigned long ftrace_location_range_locked(unsigned long start, unsigned long end)
>  {
>  	struct dyn_ftrace *rec;
>  
> @@ -1603,6 +1603,17 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
>  	return 0;
>  }
>  
> +unsigned long ftrace_location_range(unsigned long start, unsigned long end)
> +{
> +	unsigned long loc;
> +
> +	mutex_lock(&ftrace_lock);
> +	loc = ftrace_location_range_locked(start, end);
> +	mutex_unlock(&ftrace_lock);

I'm not so sure we can take a mutex in all places that call this function.

What about using RCU?

	rcu_read_lock();
	loc = ftrace_location_range_rcu(start, end);
	rcu_read_unlock();

Then in ftrace_release_mod() we can have:

 out_unlock:
	mutex_unlock(&ftrace_lock);

	/* Need to synchronize with ftrace_location() */
	if (tmp_pages)
		synchronize_rcu();

-- Steve


> +
> +	return loc;
> +}
> +
>  /**
>   * ftrace_location - return the ftrace location
>   * @ip: the instruction pointer to check
> @@ -1614,25 +1625,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
>   */
>  unsigned long ftrace_location(unsigned long ip)
>  {
> -	struct dyn_ftrace *rec;
> +	unsigned long loc;
>  	unsigned long offset;
>  	unsigned long size;
>  
> -	rec = lookup_rec(ip, ip);
> -	if (!rec) {
> +	loc = ftrace_location_range(ip, ip);
> +	if (!loc) {
>  		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
>  			goto out;
>  
>  		/* map sym+0 to __fentry__ */
>  		if (!offset)
> -			rec = lookup_rec(ip, ip + size - 1);
> +			loc = ftrace_location_range(ip, ip + size - 1);
>  	}
>  
> -	if (rec)
> -		return rec->ip;
> -
>  out:
> -	return 0;
> +	return loc;
>  }
>  
>  /**
Zheng Yejian April 11, 2024, 1:48 a.m. UTC | #2
On 2024/4/10 23:28, Steven Rostedt wrote:
> On Mon, 1 Apr 2024 20:55:43 +0800
> Zheng Yejian <zhengyejian1@huawei.com> wrote:
> 
>> KASAN reports a bug:
>>
>>    BUG: KASAN: use-after-free in ftrace_location+0x90/0x120
>>    Read of size 8 at addr ffff888141d40010 by task insmod/424
>>    CPU: 8 PID: 424 Comm: insmod Tainted: G        W          6.9.0-rc2+ #213
>>    [...]
>>    Call Trace:
>>     <TASK>
>>     dump_stack_lvl+0x68/0xa0
>>     print_report+0xcf/0x610
>>     kasan_report+0xb5/0xe0
>>     ftrace_location+0x90/0x120
>>     register_kprobe+0x14b/0xa40
>>     kprobe_init+0x2d/0xff0 [kprobe_example]
>>     do_one_initcall+0x8f/0x2d0
>>     do_init_module+0x13a/0x3c0
>>     load_module+0x3082/0x33d0
>>     init_module_from_file+0xd2/0x130
>>     __x64_sys_finit_module+0x306/0x440
>>     do_syscall_64+0x68/0x140
>>     entry_SYSCALL_64_after_hwframe+0x71/0x79
>>
>> The root cause is that, while lookup_rec() is looking up the ftrace record
>> of an address in some module, ftrace_release_mod() can concurrently free
>> the memory holding those ftrace records because that module is being
>> deleted.
>>
>>    register_kprobes() {
>>      check_kprobe_address_safe() {
>>        arch_check_ftrace_location() {
>>          ftrace_location() {
>>            lookup_rec()  // access memory that has been freed by
>>                          // ftrace_release_mod() !!!
>>
>> It seems that ftrace_lock is required when looking up records in
>> ftrace_location(), and the same applies to ftrace_location_range().
>>
>> Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization")
>> Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
>> ---
>>   kernel/trace/ftrace.c | 28 ++++++++++++++++++----------
>>   1 file changed, 18 insertions(+), 10 deletions(-)
>>
>> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
>> index da1710499698..838d175709c1 100644
>> --- a/kernel/trace/ftrace.c
>> +++ b/kernel/trace/ftrace.c
>> @@ -1581,7 +1581,7 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
>>   }
>>   
>>   /**
>> - * ftrace_location_range - return the first address of a traced location
>> + * ftrace_location_range_locked - return the first address of a traced location
>>    *	if it touches the given ip range
>>    * @start: start of range to search.
>>    * @end: end of range to search (inclusive). @end points to the last byte
>> @@ -1592,7 +1592,7 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
>>    * that is either a NOP or call to the function tracer. It checks the ftrace
>>    * internal tables to determine if the address belongs or not.
>>    */
>> -unsigned long ftrace_location_range(unsigned long start, unsigned long end)
>> +static unsigned long ftrace_location_range_locked(unsigned long start, unsigned long end)
>>   {
>>   	struct dyn_ftrace *rec;
>>   
>> @@ -1603,6 +1603,17 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
>>   	return 0;
>>   }
>>   
>> +unsigned long ftrace_location_range(unsigned long start, unsigned long end)
>> +{
>> +	unsigned long loc;
>> +
>> +	mutex_lock(&ftrace_lock);
>> +	loc = ftrace_location_range_locked(start, end);
>> +	mutex_unlock(&ftrace_lock);
> 
> I'm not so sure we can take a mutex in all places that call this function.
> 
> What about using RCU?
> 
> 	rcu_read_lock();
> 	loc = ftrace_location_range_rcu(start, end);
> 	rcu_read_unlock();
> 
> Then in ftrace_release_mod() we can have:
> 
>   out_unlock:
> 	mutex_unlock(&ftrace_lock);
> 
> 	/* Need to synchronize with ftrace_location() */
> 	if (tmp_pages)
> 		synchronize_rcu();
> 
> -- Steve

Yes, it is better to use RCU; I'll do it in v2.

--
Thanks
Zheng Yejian

> 
> 
>> +
>> +	return loc;
>> +}
>> +
>>   /**
>>    * ftrace_location - return the ftrace location
>>    * @ip: the instruction pointer to check
>> @@ -1614,25 +1625,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
>>    */
>>   unsigned long ftrace_location(unsigned long ip)
>>   {
>> -	struct dyn_ftrace *rec;
>> +	unsigned long loc;
>>   	unsigned long offset;
>>   	unsigned long size;
>>   
>> -	rec = lookup_rec(ip, ip);
>> -	if (!rec) {
>> +	loc = ftrace_location_range(ip, ip);
>> +	if (!loc) {
>>   		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
>>   			goto out;
>>   
>>   		/* map sym+0 to __fentry__ */
>>   		if (!offset)
>> -			rec = lookup_rec(ip, ip + size - 1);
>> +			loc = ftrace_location_range(ip, ip + size - 1);
>>   	}
>>   
>> -	if (rec)
>> -		return rec->ip;
>> -
>>   out:
>> -	return 0;
>> +	return loc;
>>   }
>>   
>>   /**
>
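
For reference, the RCU-based scheme discussed above could look roughly like
the following sketch. This is only an illustration of the idea: the helper
name ftrace_location_range_rcu() and the exact placement of
synchronize_rcu() are assumptions based on the suggestion, not the code
actually posted as v2.

  /* Lookup side: protect the record walk with RCU instead of ftrace_lock. */
  unsigned long ftrace_location_range(unsigned long start, unsigned long end)
  {
          unsigned long loc;

          rcu_read_lock();
          /* assumed helper that does the lookup_rec() walk */
          loc = ftrace_location_range_rcu(start, end);
          rcu_read_unlock();

          return loc;
  }

  /* Free side: ftrace_release_mod() waits for readers before freeing the
   * pages it unlinked under ftrace_lock (per the snippet above).
   */
   out_unlock:
          mutex_unlock(&ftrace_lock);

          /* Need to synchronize with ftrace_location() */
          if (tmp_pages)
                  synchronize_rcu();

          /* ... now it is safe to free the unlinked ftrace pages ... */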

Patch

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index da1710499698..838d175709c1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1581,7 +1581,7 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 }
 
 /**
- * ftrace_location_range - return the first address of a traced location
+ * ftrace_location_range_locked - return the first address of a traced location
  *	if it touches the given ip range
  * @start: start of range to search.
  * @end: end of range to search (inclusive). @end points to the last byte
@@ -1592,7 +1592,7 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
  * that is either a NOP or call to the function tracer. It checks the ftrace
  * internal tables to determine if the address belongs or not.
  */
-unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+static unsigned long ftrace_location_range_locked(unsigned long start, unsigned long end)
 {
 	struct dyn_ftrace *rec;
 
@@ -1603,6 +1603,17 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 	return 0;
 }
 
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+{
+	unsigned long loc;
+
+	mutex_lock(&ftrace_lock);
+	loc = ftrace_location_range_locked(start, end);
+	mutex_unlock(&ftrace_lock);
+
+	return loc;
+}
+
 /**
  * ftrace_location - return the ftrace location
  * @ip: the instruction pointer to check
@@ -1614,25 +1625,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
  */
 unsigned long ftrace_location(unsigned long ip)
 {
-	struct dyn_ftrace *rec;
+	unsigned long loc;
 	unsigned long offset;
 	unsigned long size;
 
-	rec = lookup_rec(ip, ip);
-	if (!rec) {
+	loc = ftrace_location_range(ip, ip);
+	if (!loc) {
 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
 			goto out;
 
 		/* map sym+0 to __fentry__ */
 		if (!offset)
-			rec = lookup_rec(ip, ip + size - 1);
+			loc = ftrace_location_range(ip, ip + size - 1);
 	}
 
-	if (rec)
-		return rec->ip;
-
 out:
-	return 0;
+	return loc;
 }
 
 /**